From 53d03bbb1f5001695d69f83047ead96d2079a36a Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 10 Jan 2025 10:25:07 -0500 Subject: [PATCH 001/328] gate sd_notify to linux target_os only Signed-off-by: strawberry --- src/router/Cargo.toml | 2 +- src/router/run.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/router/Cargo.toml b/src/router/Cargo.toml index 1623590b..51e15aed 100644 --- a/src/router/Cargo.toml +++ b/src/router/Cargo.toml @@ -80,7 +80,7 @@ tower.workspace = true tower-http.workspace = true tracing.workspace = true -[target.'cfg(unix)'.dependencies] +[target.'cfg(all(unix, target_os = "linux"))'.dependencies] sd-notify.workspace = true sd-notify.optional = true diff --git a/src/router/run.rs b/src/router/run.rs index 1b4d7437..95d12559 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -63,7 +63,7 @@ pub(crate) async fn start(server: Arc) -> Result> { let services = Services::build(server).await?.start().await?; - #[cfg(feature = "systemd")] + #[cfg(all(feature = "systemd", target_os = "linux"))] sd_notify::notify(true, &[sd_notify::NotifyState::Ready]) .expect("failed to notify systemd of ready state"); @@ -99,7 +99,7 @@ pub(crate) async fn stop(services: Arc) -> Result<()> { ); } - #[cfg(feature = "systemd")] + #[cfg(all(feature = "systemd", target_os = "linux"))] sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]) .expect("failed to notify systemd of stopping state"); From 4c2999ccd15506a4acbc948d62c7ec0c03d46167 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 10 Jan 2025 10:46:32 -0500 Subject: [PATCH 002/328] gate libloading to conduwuit_mods feature and cfg only Signed-off-by: strawberry --- flake.nix | 22 ++++++++++++++++++++++ src/core/Cargo.toml | 4 ++++ src/core/mod.rs | 2 +- src/core/mods/mod.rs | 2 +- src/core/server.rs | 2 +- src/main/Cargo.toml | 3 +++ src/main/main.rs | 4 ++-- src/main/mods.rs | 2 +- src/main/server.rs | 4 ++-- src/main/signal.rs | 2 +- 10 files changed, 38 insertions(+), 9 deletions(-) diff --git a/flake.nix b/flake.nix index d8ad47a8..e3497d85 100644 --- a/flake.nix +++ b/flake.nix @@ -212,6 +212,8 @@ # be expected on non-debug builds. "jemalloc_prof" "jemalloc_stats" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; all-features-debug = scopeHost.main.override { @@ -224,6 +226,8 @@ "hardened_malloc" # dont include experimental features "experimental" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; hmalloc = scopeHost.main.override { features = ["hardened_malloc"]; }; @@ -241,6 +245,8 @@ # be expected on non-debug builds. "jemalloc_prof" "jemalloc_stats" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; }; @@ -255,6 +261,8 @@ "hardened_malloc" # dont include experimental features "experimental" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; }; @@ -330,6 +338,8 @@ # be expected on non-debug builds. "jemalloc_prof" "jemalloc_stats" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; } @@ -349,6 +359,8 @@ # be expected on non-debug builds. 
"jemalloc_prof" "jemalloc_stats" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false); }; @@ -367,6 +379,8 @@ "hardened_malloc" # dont include experimental features "experimental" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; } @@ -423,6 +437,8 @@ # be expected on non-debug builds. "jemalloc_prof" "jemalloc_stats" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; }; @@ -444,6 +460,8 @@ # be expected on non-debug builds. "jemalloc_prof" "jemalloc_stats" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false); }; @@ -464,6 +482,8 @@ "hardened_malloc" # dont include experimental features "experimental" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; }; @@ -510,6 +530,8 @@ # be expected on non-debug builds. "jemalloc_prof" "jemalloc_stats" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; })); diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index 4a9cc462..c716e9c2 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -50,6 +50,9 @@ zstd_compression = [ ] perf_measurements = [] sentry_telemetry = [] +conduwuit_mods = [ + "dep:libloading" +] [dependencies] argon2.workspace = true @@ -75,6 +78,7 @@ ipaddress.workspace = true itertools.workspace = true libc.workspace = true libloading.workspace = true +libloading.optional = true log.workspace = true num-traits.workspace = true rand.workspace = true diff --git a/src/core/mod.rs b/src/core/mod.rs index 87cb58ae..1416ed9e 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -25,7 +25,7 @@ pub use crate as conduwuit_core; rustc_flags_capture! {} -#[cfg(not(conduwuit_mods))] +#[cfg(any(not(conduwuit_mods), not(feature = "conduwuit_mods")))] pub mod mods { #[macro_export] macro_rules! mod_ctor { diff --git a/src/core/mods/mod.rs b/src/core/mods/mod.rs index ac0c333b..b8f06f29 100644 --- a/src/core/mods/mod.rs +++ b/src/core/mods/mod.rs @@ -1,4 +1,4 @@ -#![cfg(conduwuit_mods)] +#![cfg(all(conduwuit_mods, feature = "conduwuit_mods"))] pub(crate) use libloading::os::unix::{Library, Symbol}; diff --git a/src/core/server.rs b/src/core/server.rs index 8a4d9f66..948eea36 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -59,7 +59,7 @@ impl Server { } pub fn reload(&self) -> Result<()> { - if cfg!(not(conduwuit_mods)) { + if cfg!(any(not(conduwuit_mods), not(feature = "conduwuit_mods"))) { return Err!("Reloading not enabled"); } diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index 38eb7188..baf5336f 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -135,6 +135,9 @@ zstd_compression = [ "conduwuit-database/zstd_compression", "conduwuit-router/zstd_compression", ] +conduwuit_mods = [ + "conduwuit-core/conduwuit_mods", +] [dependencies] conduwuit-admin.workspace = true diff --git a/src/main/main.rs b/src/main/main.rs index e7aaf3fc..dacc2a2e 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -37,7 +37,7 @@ fn main() -> Result<(), Error> { /// Operate the server normally in release-mode static builds. This will start, /// run and stop the server within the asynchronous runtime. 
-#[cfg(not(conduwuit_mods))] +#[cfg(any(not(conduwuit_mods), not(feature = "conduwuit_mods")))] #[tracing::instrument( name = "main", parent = None, @@ -89,7 +89,7 @@ async fn async_main(server: &Arc) -> Result<(), Error> { /// Operate the server in developer-mode dynamic builds. This will start, run, /// and hot-reload portions of the server as-needed before returning for an /// actual shutdown. This is not available in release-mode or static builds. -#[cfg(conduwuit_mods)] +#[cfg(all(conduwuit_mods, feature = "conduwuit_mods"))] async fn async_main(server: &Arc) -> Result<(), Error> { let mut starts = true; let mut reloads = true; diff --git a/src/main/mods.rs b/src/main/mods.rs index ca799b90..9ab36e6c 100644 --- a/src/main/mods.rs +++ b/src/main/mods.rs @@ -1,4 +1,4 @@ -#![cfg(conduwuit_mods)] +#![cfg(all(conduwuit_mods, feature = "conduwuit_mods"))] #[unsafe(no_link)] extern crate conduwuit_service; diff --git a/src/main/server.rs b/src/main/server.rs index a81b708d..359a029c 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -23,7 +23,7 @@ pub(crate) struct Server { #[cfg(feature = "sentry_telemetry")] _sentry_guard: Option<::sentry::ClientInitGuard>, - #[cfg(conduwuit_mods)] + #[cfg(all(conduwuit_mods, feature = "conduwuit_mods"))] // Module instances; TODO: move to mods::loaded mgmt vector pub(crate) mods: tokio::sync::RwLock>, } @@ -75,7 +75,7 @@ impl Server { #[cfg(feature = "sentry_telemetry")] _sentry_guard: sentry_guard, - #[cfg(conduwuit_mods)] + #[cfg(all(conduwuit_mods, feature = "conduwuit_mods"))] mods: tokio::sync::RwLock::new(Vec::new()), })) } diff --git a/src/main/signal.rs b/src/main/signal.rs index 0f541099..cecb718b 100644 --- a/src/main/signal.rs +++ b/src/main/signal.rs @@ -12,7 +12,7 @@ pub(super) async fn signal(server: Arc) { use unix::SignalKind; const CONSOLE: bool = cfg!(feature = "console"); - const RELOADING: bool = cfg!(all(conduwuit_mods, not(CONSOLE))); + const RELOADING: bool = cfg!(all(conduwuit_mods, feature = "conduwuit_mods", not(CONSOLE))); let mut quit = unix::signal(SignalKind::quit()).expect("SIGQUIT handler"); let mut term = unix::signal(SignalKind::terminate()).expect("SIGTERM handler"); From 5b6279b1c514ea65e00399c26bb7cdd007061e6d Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 10 Jan 2025 10:51:39 -0500 Subject: [PATCH 003/328] ci: require docker publishing to pass tests Signed-off-by: strawberry --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4a4df488..3ccbf5d9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -733,7 +733,7 @@ jobs: docker: name: Docker publish runs-on: ubuntu-24.04 - needs: [build, variables] + needs: [build, variables, tests] permissions: packages: write contents: read From 1852eeebf22d2bedfeff63b9c205640f5832ca49 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 10 Jan 2025 22:45:48 -0500 Subject: [PATCH 004/328] disable more unnecessary features in various build outputs Signed-off-by: strawberry --- flake.nix | 146 ++++++++++++++++++++------------ nix/pkgs/complement/default.nix | 10 +++ nix/pkgs/main/default.nix | 14 ++- 3 files changed, 115 insertions(+), 55 deletions(-) diff --git a/flake.nix b/flake.nix index e3497d85..fb40cae7 100644 --- a/flake.nix +++ b/flake.nix @@ -191,27 +191,57 @@ in { packages = { - default = scopeHost.main; - default-debug = scopeHost.main.override { - profile = "dev"; - # debug build users expect full logs - disable_release_max_log_level = true; 
- }; - default-test = scopeHost.main.override { - profile = "test"; - disable_release_max_log_level = true; - }; - all-features = scopeHost.main.override { - all_features = true; + default = scopeHost.main.override { disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" # dont include experimental features "experimental" # jemalloc profiling/stats features are expensive and shouldn't # be expected on non-debug builds. "jemalloc_prof" "jemalloc_stats" + # this is non-functional on nix for some reason + "hardened_malloc" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" + ]; + }; + default-debug = scopeHost.main.override { + profile = "dev"; + # debug build users expect full logs + disable_release_max_log_level = true; + disable_features = [ + # dont include experimental features + "experimental" + # this is non-functional on nix for some reason + "hardened_malloc" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" + ]; + }; + # just a test profile used for things like CI and complement + default-test = scopeHost.main.override { + profile = "test"; + disable_release_max_log_level = true; + disable_features = [ + # dont include experimental features + "experimental" + # this is non-functional on nix for some reason + "hardened_malloc" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" + ]; + }; + all-features = scopeHost.main.override { + all_features = true; + disable_features = [ + # dont include experimental features + "experimental" + # jemalloc profiling/stats features are expensive and shouldn't + # be expected on non-debug builds. + "jemalloc_prof" + "jemalloc_stats" + # this is non-functional on nix for some reason + "hardened_malloc" # conduwuit_mods is a development-only hot reload feature "conduwuit_mods" ]; @@ -222,10 +252,10 @@ # debug build users expect full logs disable_release_max_log_level = true; disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" # dont include experimental features "experimental" + # this is non-functional on nix for some reason + "hardened_malloc" # conduwuit_mods is a development-only hot reload feature "conduwuit_mods" ]; @@ -237,14 +267,14 @@ main = scopeHost.main.override { all_features = true; disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" # dont include experimental features "experimental" # jemalloc profiling/stats features are expensive and shouldn't # be expected on non-debug builds. 
"jemalloc_prof" "jemalloc_stats" + # this is non-functional on nix for some reason + "hardened_malloc" # conduwuit_mods is a development-only hot reload feature "conduwuit_mods" ]; @@ -257,10 +287,10 @@ # debug build users expect full logs disable_release_max_log_level = true; disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" # dont include experimental features "experimental" + # this is non-functional on nix for some reason + "hardened_malloc" # conduwuit_mods is a development-only hot reload feature "conduwuit_mods" ]; @@ -321,6 +351,14 @@ value = scopeCrossStatic.main.override { profile = "test"; disable_release_max_log_level = true; + disable_features = [ + # dont include experimental features + "experimental" + # this is non-functional on nix for some reason + "hardened_malloc" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" + ]; }; } @@ -330,14 +368,14 @@ value = scopeCrossStatic.main.override { all_features = true; disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" # dont include experimental features "experimental" # jemalloc profiling/stats features are expensive and shouldn't # be expected on non-debug builds. "jemalloc_prof" "jemalloc_stats" + # this is non-functional on nix for some reason + "hardened_malloc" # conduwuit_mods is a development-only hot reload feature "conduwuit_mods" ]; @@ -351,14 +389,14 @@ value = scopeCrossStatic.main.override { all_features = true; disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" # dont include experimental features "experimental" # jemalloc profiling/stats features are expensive and shouldn't # be expected on non-debug builds. "jemalloc_prof" "jemalloc_stats" + # this is non-functional on nix for some reason + "hardened_malloc" # conduwuit_mods is a development-only hot reload feature "conduwuit_mods" ]; @@ -375,10 +413,10 @@ # debug build users expect full logs disable_release_max_log_level = true; disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" # dont include experimental features "experimental" + # this is non-functional on nix for some reason + "hardened_malloc" # conduwuit_mods is a development-only hot reload feature "conduwuit_mods" ]; @@ -429,16 +467,16 @@ main = scopeCrossStatic.main.override { all_features = true; disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" - # dont include experimental features - "experimental" - # jemalloc profiling/stats features are expensive and shouldn't - # be expected on non-debug builds. - "jemalloc_prof" - "jemalloc_stats" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" + # dont include experimental features + "experimental" + # jemalloc profiling/stats features are expensive and shouldn't + # be expected on non-debug builds. + "jemalloc_prof" + "jemalloc_stats" + # this is non-functional on nix for some reason + "hardened_malloc" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; }; @@ -452,16 +490,16 @@ main = scopeCrossStatic.main.override { all_features = true; disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" - # dont include experimental features - "experimental" - # jemalloc profiling/stats features are expensive and shouldn't - # be expected on non-debug builds. 
- "jemalloc_prof" - "jemalloc_stats" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" + # dont include experimental features + "experimental" + # jemalloc profiling/stats features are expensive and shouldn't + # be expected on non-debug builds. + "jemalloc_prof" + "jemalloc_stats" + # this is non-functional on nix for some reason + "hardened_malloc" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false); }; @@ -478,12 +516,12 @@ # debug build users expect full logs disable_release_max_log_level = true; disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" - # dont include experimental features - "experimental" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" + # dont include experimental features + "experimental" + # this is non-functional on nix for some reason + "hardened_malloc" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; }; @@ -522,14 +560,14 @@ main = prev.main.override { all_features = true; disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" # dont include experimental features "experimental" # jemalloc profiling/stats features are expensive and shouldn't # be expected on non-debug builds. "jemalloc_prof" "jemalloc_stats" + # this is non-functional on nix for some reason + "hardened_malloc" # conduwuit_mods is a development-only hot reload feature "conduwuit_mods" ]; diff --git a/nix/pkgs/complement/default.nix b/nix/pkgs/complement/default.nix index 36f12400..e35cbf04 100644 --- a/nix/pkgs/complement/default.nix +++ b/nix/pkgs/complement/default.nix @@ -20,6 +20,8 @@ let disable_features = [ # no reason to use jemalloc for complement, just has compatibility/build issues "jemalloc" + "jemalloc_stats" + "jemalloc_prof" # console/CLI stuff isn't used or relevant for complement "console" "tokio_console" @@ -32,6 +34,14 @@ let "hardened_malloc" # dont include experimental features "experimental" + # compression isn't needed for complement + "brotli_compression" + "gzip_compression" + "zstd_compression" + # complement doesn't need hot reloading + "conduwuit_mods" + # complement doesn't have URL preview media tests + "url_preview" ]; }; diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix index a785e7f2..d7424d11 100644 --- a/nix/pkgs/main/default.nix +++ b/nix/pkgs/main/default.nix @@ -15,7 +15,19 @@ # Options (keep sorted) , all_features ? false , default_features ? true -, disable_features ? [] +# default list of disabled features +, disable_features ? [ + # dont include experimental features + "experimental" + # jemalloc profiling/stats features are expensive and shouldn't + # be expected on non-debug builds. + "jemalloc_prof" + "jemalloc_stats" + # this is non-functional on nix for some reason + "hardened_malloc" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" +] , disable_release_max_log_level ? false , features ? [] , profile ? 
"release" From 0074f903d8a0574b63b588438efba996ef467c26 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 11 Jan 2025 21:43:46 -0500 Subject: [PATCH 005/328] delete lix binary cache because it has terrible reliability Signed-off-by: strawberry --- .gitea/workflows/ci.yml | 8 ++++---- .github/workflows/ci.yml | 8 ++++---- .github/workflows/documentation.yml | 4 ++-- .gitlab-ci.yml | 8 ++------ 4 files changed, 12 insertions(+), 16 deletions(-) diff --git a/.gitea/workflows/ci.yml b/.gitea/workflows/ci.yml index 9ce7c993..ef436734 100644 --- a/.gitea/workflows/ci.yml +++ b/.gitea/workflows/ci.yml @@ -87,8 +87,8 @@ jobs: - name: Apply Nix binary cache configuration run: | sudo tee -a /etc/nix/nix.conf > /dev/null < /dev/null < /dev/null < /dev/null < /dev/null < /dev/null && [ -n "$ATTIC_ENDPOINT" ]; then echo "extra-substituters = $ATTIC_ENDPOINT" >> /etc/nix/nix.conf; fi - if command -v nix > /dev/null && [ -n "$ATTIC_PUBLIC_KEY" ]; then echo "extra-trusted-public-keys = $ATTIC_PUBLIC_KEY" >> /etc/nix/nix.conf; fi - # Add Lix binary cache - - if command -v nix > /dev/null; then echo "extra-substituters = https://cache.lix.systems" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o=" >> /etc/nix/nix.conf; fi - # Add crane binary cache - if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi From 6f15c9b3f47143715280e485db2a5bcab35d77fd Mon Sep 17 00:00:00 2001 From: Holger Huo <50446405+HolgerHuo@users.noreply.github.com> Date: Wed, 8 Jan 2025 17:57:12 +0800 Subject: [PATCH 006/328] fix: presence timer not working --- src/api/client/presence.rs | 13 +++++++++---- src/api/client/sync/v3.rs | 17 ++++++++++++++--- src/service/presence/presence.rs | 6 +----- 3 files changed, 24 insertions(+), 12 deletions(-) diff --git a/src/api/client/presence.rs b/src/api/client/presence.rs index 1a3ad26e..d19e6ae1 100644 --- a/src/api/client/presence.rs +++ b/src/api/client/presence.rs @@ -82,14 +82,19 @@ pub(crate) async fn get_presence_route( presence.content.status_msg }; + let last_active_ago = match presence.content.currently_active { + | Some(true) => None, + | _ => presence + .content + .last_active_ago + .map(|millis| Duration::from_millis(millis.into())), + }; + Ok(get_presence::v3::Response { // TODO: Should ruma just use the presenceeventcontent type here? 
status_msg, currently_active: presence.content.currently_active, - last_active_ago: presence - .content - .last_active_ago - .map(|millis| Duration::from_millis(millis.into())), + last_active_ago, presence: presence.content.presence, }) } else { diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index b7ecd6b9..910a15d4 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -382,7 +382,16 @@ async fn process_presence_updates( .ready_fold(PresenceUpdates::new(), |mut updates, (user_id, event)| { match updates.entry(user_id.into()) { | Entry::Vacant(slot) => { - slot.insert(event); + let mut new_event = event; + new_event.content.last_active_ago = match new_event.content.currently_active { + | Some(true) => None, + | _ => new_event + .content + .last_active_ago + .or(new_event.content.last_active_ago), + }; + + slot.insert(new_event); }, | Entry::Occupied(mut slot) => { let curr_event = slot.get_mut(); @@ -394,8 +403,6 @@ async fn process_presence_updates( curr_content.status_msg = new_content .status_msg .or_else(|| curr_content.status_msg.take()); - curr_content.last_active_ago = - new_content.last_active_ago.or(curr_content.last_active_ago); curr_content.displayname = new_content .displayname .or_else(|| curr_content.displayname.take()); @@ -405,6 +412,10 @@ async fn process_presence_updates( curr_content.currently_active = new_content .currently_active .or(curr_content.currently_active); + curr_content.last_active_ago = match curr_content.currently_active { + | Some(true) => None, + | _ => new_content.last_active_ago.or(curr_content.last_active_ago), + }; }, }; diff --git a/src/service/presence/presence.rs b/src/service/presence/presence.rs index b88a004b..b322dfb4 100644 --- a/src/service/presence/presence.rs +++ b/src/service/presence/presence.rs @@ -46,11 +46,7 @@ impl Presence { users: &users::Service, ) -> PresenceEvent { let now = utils::millis_since_unix_epoch(); - let last_active_ago = if self.currently_active { - None - } else { - Some(UInt::new_saturating(now.saturating_sub(self.last_active_ts))) - }; + let last_active_ago = Some(UInt::new_saturating(now.saturating_sub(self.last_active_ts))); PresenceEvent { sender: user_id.to_owned(), From 8451ea3bc32748fc8ff64d817685cc7f344e2e64 Mon Sep 17 00:00:00 2001 From: Holger Huo <50446405+HolgerHuo@users.noreply.github.com> Date: Wed, 8 Jan 2025 18:24:27 +0800 Subject: [PATCH 007/328] update: refresh timeout greater than idle timeout --- src/service/presence/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/presence/mod.rs b/src/service/presence/mod.rs index bf5258e1..3b0bdd15 100644 --- a/src/service/presence/mod.rs +++ b/src/service/presence/mod.rs @@ -95,7 +95,7 @@ impl Service { /// Pings the presence of the given user in the given room, setting the /// specified state. 
pub async fn ping_presence(&self, user_id: &UserId, new_state: &PresenceState) -> Result<()> { - const REFRESH_TIMEOUT: u64 = 60 * 25 * 1000; + const REFRESH_TIMEOUT: u64 = 60 * 1000; let last_presence = self.db.get_presence(user_id).await; let state_changed = match last_presence { From b71201cf19cddd72c689ada532dbe1400f6a997d Mon Sep 17 00:00:00 2001 From: Holger Huo <50446405+HolgerHuo@users.noreply.github.com> Date: Wed, 8 Jan 2025 18:42:46 +0800 Subject: [PATCH 008/328] add: clear online status on server boot --- src/service/presence/mod.rs | 38 +++++++++++++++++++++++++++++++++++++ src/service/services.rs | 5 +++++ 2 files changed, 43 insertions(+) diff --git a/src/service/presence/mod.rs b/src/service/presence/mod.rs index 3b0bdd15..a6eb1bcd 100644 --- a/src/service/presence/mod.rs +++ b/src/service/presence/mod.rs @@ -170,6 +170,44 @@ impl Service { self.db.remove_presence(user_id).await; } + // Unset online/unavailable presence to offline on startup + pub async fn unset_all_presence(&self) -> Result<()> { + for user_id in &self + .services + .users + .list_local_users() + .map(UserId::to_owned) + .collect::>() + .await + { + let presence = self.db.get_presence(user_id).await; + + let presence = match presence { + | Ok((_, ref presence)) => &presence.content, + | _ => return Ok(()), + }; + + let need_reset = match presence.presence { + | PresenceState::Unavailable | PresenceState::Online => true, + | _ => false, + }; + + if !need_reset { + return Ok(()); + } + + self.set_presence( + user_id, + &PresenceState::Offline, + Some(false), + presence.last_active_ago, + presence.status_msg.clone(), + ) + .await?; + } + Ok(()) + } + /// Returns the most recent presence updates that happened after the event /// with id `since`. pub fn presence_since( diff --git a/src/service/services.rs b/src/service/services.rs index c955834e..9e099759 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -123,6 +123,11 @@ impl Services { .start() .await?; + // clear online statuses + if self.server.config.allow_local_presence { + _ = self.presence.unset_all_presence().await; + } + // set the server user as online if self.server.config.allow_local_presence && !self.db.is_read_only() { _ = self From fde1b94e26f22f21c3c6e012331f23164c27d776 Mon Sep 17 00:00:00 2001 From: Holger Huo <50446405+HolgerHuo@users.noreply.github.com> Date: Fri, 10 Jan 2025 23:51:51 +0800 Subject: [PATCH 009/328] fix: logic mistake --- src/api/client/sync/v3.rs | 5 +---- src/service/presence/mod.rs | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 910a15d4..95c8c2d4 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -385,10 +385,7 @@ async fn process_presence_updates( let mut new_event = event; new_event.content.last_active_ago = match new_event.content.currently_active { | Some(true) => None, - | _ => new_event - .content - .last_active_ago - .or(new_event.content.last_active_ago), + | _ => new_event.content.last_active_ago, }; slot.insert(new_event); diff --git a/src/service/presence/mod.rs b/src/service/presence/mod.rs index a6eb1bcd..ab7c76a1 100644 --- a/src/service/presence/mod.rs +++ b/src/service/presence/mod.rs @@ -193,7 +193,7 @@ impl Service { }; if !need_reset { - return Ok(()); + continue; } self.set_presence( From 8c18481d1dab411bd9270ed56e2b6b5c1f465d3f Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 10 Jan 2025 23:51:08 -0500 Subject: [PATCH 010/328] optimise resetting all user presences 
Signed-off-by: strawberry --- src/admin/query/presence.rs | 2 +- src/service/presence/data.rs | 4 +-- src/service/presence/mod.rs | 53 +++++++++++++++++++++++------------- src/service/services.rs | 9 ++---- 4 files changed, 40 insertions(+), 28 deletions(-) diff --git a/src/admin/query/presence.rs b/src/admin/query/presence.rs index 45bb6bd9..0de6b696 100644 --- a/src/admin/query/presence.rs +++ b/src/admin/query/presence.rs @@ -32,7 +32,7 @@ pub(super) async fn process( match subcommand { | PresenceCommand::GetPresence { user_id } => { let timer = tokio::time::Instant::now(); - let results = services.presence.db.get_presence(&user_id).await; + let results = services.presence.get_presence(&user_id).await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( diff --git a/src/service/presence/data.rs b/src/service/presence/data.rs index 3d614333..4ec0a7ee 100644 --- a/src/service/presence/data.rs +++ b/src/service/presence/data.rs @@ -12,7 +12,7 @@ use ruma::{events::presence::PresenceEvent, presence::PresenceState, UInt, UserI use super::Presence; use crate::{globals, users, Dep}; -pub struct Data { +pub(crate) struct Data { presenceid_presence: Arc, userid_presenceid: Arc, services: Services, @@ -36,7 +36,7 @@ impl Data { } } - pub async fn get_presence(&self, user_id: &UserId) -> Result<(u64, PresenceEvent)> { + pub(super) async fn get_presence(&self, user_id: &UserId) -> Result<(u64, PresenceEvent)> { let count = self .userid_presenceid .get(user_id) diff --git a/src/service/presence/mod.rs b/src/service/presence/mod.rs index ab7c76a1..eb4105e5 100644 --- a/src/service/presence/mod.rs +++ b/src/service/presence/mod.rs @@ -4,7 +4,10 @@ mod presence; use std::{sync::Arc, time::Duration}; use async_trait::async_trait; -use conduwuit::{checked, debug, error, result::LogErr, Error, Result, Server}; +use conduwuit::{ + checked, debug, debug_warn, error, result::LogErr, trace, Error, Result, Server, +}; +use database::Database; use futures::{stream::FuturesUnordered, Stream, StreamExt, TryFutureExt}; use loole::{Receiver, Sender}; use ruma::{events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, UInt, UserId}; @@ -18,12 +21,13 @@ pub struct Service { timeout_remote_users: bool, idle_timeout: u64, offline_timeout: u64, - pub db: Data, + db: Data, services: Services, } struct Services { server: Arc, + db: Arc, globals: Dep, users: Dep, } @@ -44,6 +48,7 @@ impl crate::Service for Service { db: Data::new(&args), services: Services { server: args.server.clone(), + db: args.db.clone(), globals: args.depend::("globals"), users: args.depend::("users"), }, @@ -171,7 +176,9 @@ impl Service { } // Unset online/unavailable presence to offline on startup - pub async fn unset_all_presence(&self) -> Result<()> { + pub async fn unset_all_presence(&self) { + let _cork = self.services.db.cork(); + for user_id in &self .services .users @@ -184,28 +191,36 @@ impl Service { let presence = match presence { | Ok((_, ref presence)) => &presence.content, - | _ => return Ok(()), + | _ => continue, }; - let need_reset = match presence.presence { - | PresenceState::Unavailable | PresenceState::Online => true, - | _ => false, - }; - - if !need_reset { + if !matches!( + presence.presence, + PresenceState::Unavailable | PresenceState::Online | PresenceState::Busy + ) { + trace!(?user_id, ?presence, "Skipping user"); continue; } - self.set_presence( - user_id, - &PresenceState::Offline, - Some(false), - presence.last_active_ago, - presence.status_msg.clone(), - ) - .await?; + 
trace!(?user_id, ?presence, "Resetting presence to offline"); + + _ = self + .set_presence( + user_id, + &PresenceState::Offline, + Some(false), + presence.last_active_ago, + presence.status_msg.clone(), + ) + .await + .inspect_err(|e| { + debug_warn!( + ?presence, + "{user_id} has invalid presence in database and failed to reset it to \ + offline: {e}" + ); + }); } - Ok(()) } /// Returns the most recent presence updates that happened after the event diff --git a/src/service/services.rs b/src/service/services.rs index 9e099759..1aa87f58 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -123,13 +123,10 @@ impl Services { .start() .await?; - // clear online statuses - if self.server.config.allow_local_presence { - _ = self.presence.unset_all_presence().await; - } - - // set the server user as online + // reset dormant online/away statuses to offline, and set the server user as + // online if self.server.config.allow_local_presence && !self.db.is_read_only() { + self.presence.unset_all_presence().await; _ = self .presence .ping_presence(&self.globals.server_user, &ruma::presence::PresenceState::Online) From 9bda5a43e5ac742fb6b30ae14c1d1e89c0a68c36 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 11 Jan 2025 00:24:51 -0500 Subject: [PATCH 011/328] fix /kick endpoint unbanning banned users Signed-off-by: strawberry --- src/api/client/membership.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 4046b493..0c493a37 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -439,6 +439,16 @@ pub(crate) async fn kick_user_route( return Ok(kick_user::v3::Response::new()); }; + if !matches!( + event.membership, + MembershipState::Invite | MembershipState::Knock | MembershipState::Join, + ) { + return Err!(Request(Forbidden( + "Cannot kick a user who is not apart of the room (current membership: {})", + event.membership + ))); + } + services .rooms .timeline @@ -527,7 +537,7 @@ pub(crate) async fn unban_user_route( if current_member_content.membership != MembershipState::Ban { return Err!(Request(Forbidden( - "Cannot ban a user who is not banned (current membership: {})", + "Cannot unban a user who is not banned (current membership: {})", current_member_content.membership ))); } From 5e21b43f2505fd8369f4ce09ef36950983d12182 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 11 Jan 2025 00:25:10 -0500 Subject: [PATCH 012/328] run direnv exec in engage default steps Signed-off-by: strawberry --- .github/workflows/ci.yml | 2 -- engage.toml | 18 +++++++++++------- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d73af24f..d06de5e3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -50,8 +50,6 @@ env: experimental-features = nix-command flakes extra-experimental-features = nix-command flakes accept-flake-config = true - # complement uses libolm - NIXPKGS_ALLOW_INSECURE: 1 WEB_UPLOAD_SSH_USERNAME: ${{ secrets.WEB_UPLOAD_SSH_USERNAME }} GH_SHA: ${{ github.sha }} GH_REF_NAME: ${{ github.ref_name }} diff --git a/engage.toml b/engage.toml index 9a6ef8ca..1d6a5475 100644 --- a/engage.toml +++ b/engage.toml @@ -97,6 +97,7 @@ env DIRENV_DEVSHELL=all-features \ name = "clippy/default" group = "lints" script = """ +direnv exec . 
\ cargo clippy \ --workspace \ --profile test \ @@ -126,6 +127,7 @@ env DIRENV_DEVSHELL=all-features \ name = "clippy/jemalloc" group = "lints" script = """ +direnv exec . \ cargo clippy \ --workspace \ --profile test \ @@ -179,13 +181,15 @@ env DIRENV_DEVSHELL=all-features \ name = "cargo/default" group = "tests" script = """ -cargo test \ - --workspace \ - --profile test \ - --all-targets \ - --color=always \ - -- \ - --color=always +env DIRENV_DEVSHELL=default \ + direnv exec . \ + cargo test \ + --workspace \ + --profile test \ + --all-targets \ + --color=always \ + -- \ + --color=always """ # Checks if the generated example config differs from the checked in repo's From fabd3cf567c9c676d8d546ced79e81f69bb70ae4 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 12 Jan 2025 19:18:07 -0500 Subject: [PATCH 013/328] ci: set binary as executable before uploading to webserver Signed-off-by: strawberry --- .github/workflows/ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d06de5e3..007adace 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -490,6 +490,7 @@ jobs: if: ${{ matrix.target == 'x86_64-linux-musl' }} run: | if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + chmod +x static-x86_64-linux-musl-x86_64-haswell-optimised scp static-x86_64-linux-musl-x86_64-haswell-optimised website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/static-x86_64-linux-musl-x86_64-haswell-optimised fi @@ -497,6 +498,7 @@ jobs: if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + chmod +x static-${{ matrix.target }} scp static-${{ matrix.target }} website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/static-${{ matrix.target }} fi @@ -692,6 +694,7 @@ jobs: if: ${{ matrix.os == 'macos-13' }} run: | if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + chmod +x conduwuit-macos-x86_64 scp conduwuit-macos-x86_64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/conduwuit-macos-x86_64 fi @@ -699,6 +702,7 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} run: | if [ ! 
-z $WEB_UPLOAD_SSH_USERNAME ]; then + chmod +x conduwuit-macos-arm64 scp conduwuit-macos-arm64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/conduwuit-macos-arm64 fi From 5a1c41e66b4fec8ab76fd268fc9c9e282fd19428 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 11 Jan 2025 18:43:54 -0500 Subject: [PATCH 014/328] knocking implementation Signed-off-by: strawberry add sync bit of knocking Signed-off-by: strawberry --- src/api/client/membership.rs | 716 +++++++++++++++++++++++++-- src/api/client/sync/v3.rs | 39 +- src/api/client/sync/v4.rs | 9 + src/api/router.rs | 3 + src/api/server/invite.rs | 14 +- src/api/server/make_knock.rs | 38 +- src/api/server/make_leave.rs | 6 +- src/api/server/mod.rs | 4 + src/api/server/send_join.rs | 13 +- src/api/server/send_knock.rs | 75 ++- src/api/server/utils.rs | 17 +- src/database/maps.rs | 8 + src/service/rooms/state_cache/mod.rs | 142 +++++- src/service/rooms/timeline/mod.rs | 11 +- 14 files changed, 978 insertions(+), 117 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 0c493a37..d94fc3c7 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -1,4 +1,5 @@ use std::{ + borrow::Borrow, collections::{BTreeMap, HashMap, HashSet}, net::IpAddr, sync::Arc, @@ -8,7 +9,7 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ debug, debug_info, debug_warn, err, info, - pdu::{self, gen_event_id_canonical_json, PduBuilder}, + pdu::{gen_event_id_canonical_json, PduBuilder}, result::FlatOk, trace, utils::{self, shuffle, IterStream, ReadyExt}, @@ -19,6 +20,7 @@ use ruma::{ api::{ client::{ error::ErrorKind, + knock::knock_room, membership::{ ban_user, forget_room, get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, @@ -37,11 +39,12 @@ use ruma::{ }, StateEventType, }, - state_res, CanonicalJsonObject, CanonicalJsonValue, OwnedRoomId, OwnedServerName, - OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, + state_res, CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, + OwnedServerName, OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, }; use service::{ appservice::RegistrationInfo, + pdu::gen_event_id, rooms::{state::RoomMutexGuard, state_compressor::HashSetCompressStateEvent}, Services, }; @@ -348,6 +351,116 @@ pub(crate) async fn join_room_by_id_or_alias_route( Ok(join_room_by_id_or_alias::v3::Response { room_id: join_room_response.room_id }) } +/// # `POST /_matrix/client/*/knock/{roomIdOrAlias}` +/// +/// Tries to knock the room to ask permission to join for the sender user. 
+#[tracing::instrument(skip_all, fields(%client), name = "knock")] +pub(crate) async fn knock_room_route( + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let body = body.body; + + let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) { + | Ok(room_id) => { + banned_room_check( + &services, + sender_user, + Some(&room_id), + room_id.server_name(), + client, + ) + .await?; + + let mut servers = body.via.clone(); + servers.extend( + services + .rooms + .state_cache + .servers_invite_via(&room_id) + .map(ToOwned::to_owned) + .collect::>() + .await, + ); + + servers.extend( + services + .rooms + .state_cache + .invite_state(sender_user, &room_id) + .await + .unwrap_or_default() + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); + + if let Some(server) = room_id.server_name() { + servers.push(server.to_owned()); + } + + servers.sort_unstable(); + servers.dedup(); + shuffle(&mut servers); + + (servers, room_id) + }, + | Err(room_alias) => { + let (room_id, mut servers) = services + .rooms + .alias + .resolve_alias(&room_alias, Some(body.via.clone())) + .await?; + + banned_room_check( + &services, + sender_user, + Some(&room_id), + Some(room_alias.server_name()), + client, + ) + .await?; + + let addl_via_servers = services + .rooms + .state_cache + .servers_invite_via(&room_id) + .map(ToOwned::to_owned); + + let addl_state_servers = services + .rooms + .state_cache + .invite_state(sender_user, &room_id) + .await + .unwrap_or_default(); + + let mut addl_servers: Vec<_> = addl_state_servers + .iter() + .map(|event| event.get_field("sender")) + .filter_map(FlatOk::flat_ok) + .map(|user: &UserId| user.server_name().to_owned()) + .stream() + .chain(addl_via_servers) + .collect() + .await; + + addl_servers.sort_unstable(); + addl_servers.dedup(); + shuffle(&mut addl_servers); + servers.append(&mut addl_servers); + + (servers, room_id) + }, + }; + + knock_room_by_id_helper(&services, sender_user, &room_id, body.reason.clone(), &servers) + .boxed() + .await +} + /// # `POST /_matrix/client/v3/rooms/{roomId}/leave` /// /// Tries to leave the sender user from a room. 
@@ -403,6 +516,17 @@ pub(crate) async fn invite_user_route( ))); } + if let Ok(target_user_membership) = services + .rooms + .state_accessor + .get_member(&body.room_id, user_id) + .await + { + if target_user_membership.membership == MembershipState::Ban { + return Err!(Request(Forbidden("User is banned from this room."))); + } + } + if recipient_ignored_by_sender { // silently drop the invite to the recipient if they've been ignored by the // sender, pretend it worked @@ -862,7 +986,7 @@ async fn join_room_by_id_helper_remote( .hash_and_sign_event(&mut join_event_stub, &room_version_id)?; // Generate event id - let event_id = pdu::gen_event_id(&join_event_stub, &room_version_id)?; + let event_id = gen_event_id(&join_event_stub, &room_version_id)?; // Add event_id back join_event_stub @@ -1030,7 +1154,7 @@ async fn join_room_by_id_helper_remote( }; let auth_check = state_res::event_auth::auth_check( - &state_res::RoomVersion::new(&room_version_id).expect("room version is supported"), + &state_res::RoomVersion::new(&room_version_id)?, &parsed_join_pdu, None, // TODO: third party invite |k, s| state_fetch(k, s.to_owned()), @@ -1043,10 +1167,10 @@ async fn join_room_by_id_helper_remote( } info!("Compressing state from send_join"); - let compressed = state - .iter() - .stream() - .then(|(&k, id)| services.rooms.state_compressor.compress_state_event(k, id)) + let compressed: HashSet<_> = services + .rooms + .state_compressor + .compress_state_events(state.iter().map(|(ssk, eid)| (ssk, eid.borrow()))) .collect() .await; @@ -1282,7 +1406,7 @@ async fn join_room_by_id_helper_local( .hash_and_sign_event(&mut join_event_stub, &room_version_id)?; // Generate event id - let event_id = pdu::gen_event_id(&join_event_stub, &room_version_id)?; + let event_id = gen_event_id(&join_event_stub, &room_version_id)?; // Add event_id back join_event_stub @@ -1392,6 +1516,7 @@ async fn make_join_request( ); make_join_response_and_server = Err!(BadServerResponse("No server available to assist in joining.")); + return make_join_response_and_server; } } @@ -1569,7 +1694,7 @@ pub async fn leave_all_rooms(services: &Services, user_id: &UserId) { for room_id in all_rooms { // ignore errors if let Err(e) = leave_room(services, user_id, &room_id, None).await { - warn!(%room_id, %user_id, %e, "Failed to leave room"); + warn!(%user_id, "Failed to leave {room_id} remotely: {e}"); } services.rooms.state_cache.forget(&room_id, user_id); @@ -1585,11 +1710,15 @@ pub async fn leave_room( //use conduwuit::utils::stream::OptionStream; use futures::TryFutureExt; - // Ask a remote server if we don't have this room + // Ask a remote server if we don't have this room and are not knocking on it if !services .rooms .state_cache .server_in_room(services.globals.server_name(), room_id) + .await && !services + .rooms + .state_cache + .is_knocked(user_id, room_id) .await { if let Err(e) = remote_leave_room(services, user_id, room_id).await { @@ -1601,7 +1730,8 @@ pub async fn leave_room( .rooms .state_cache .invite_state(user_id, room_id) - .map_err(|_| services.rooms.state_cache.left_state(user_id, room_id)) + .or_else(|_| services.rooms.state_cache.knock_state(user_id, room_id)) + .or_else(|_| services.rooms.state_cache.left_state(user_id, room_id)) .await .ok(); @@ -1683,13 +1813,6 @@ async fn remote_leave_room( let mut make_leave_response_and_server = Err!(BadServerResponse("No server available to assist in leaving.")); - let invite_state = services - .rooms - .state_cache - .invite_state(user_id, room_id) - .await - .map_err(|_| 
err!(Request(BadState("User is not invited."))))?; - let mut servers: HashSet = services .rooms .state_cache @@ -1698,13 +1821,39 @@ async fn remote_leave_room( .collect() .await; - servers.extend( - invite_state - .iter() - .filter_map(|event| event.get_field("sender").ok().flatten()) - .filter_map(|sender: &str| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()), - ); + if let Ok(invite_state) = services + .rooms + .state_cache + .invite_state(user_id, room_id) + .await + { + servers.extend( + invite_state + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); + } else if let Ok(knock_state) = services + .rooms + .state_cache + .knock_state(user_id, room_id) + .await + { + servers.extend( + knock_state + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .filter_map(|sender| { + if !services.globals.user_is_local(sender) { + Some(sender.server_name().to_owned()) + } else { + None + } + }), + ); + } if let Some(room_id_server_name) = room_id.server_name() { servers.insert(room_id_server_name.to_owned()); @@ -1779,7 +1928,7 @@ async fn remote_leave_room( .hash_and_sign_event(&mut leave_event_stub, &room_version_id)?; // Generate event id - let event_id = pdu::gen_event_id(&leave_event_stub, &room_version_id)?; + let event_id = gen_event_id(&leave_event_stub, &room_version_id)?; // Add event_id back leave_event_stub @@ -1805,3 +1954,514 @@ async fn remote_leave_room( Ok(()) } + +async fn knock_room_by_id_helper( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + reason: Option, + servers: &[OwnedServerName], +) -> Result { + let state_lock = services.rooms.state.mutex.lock(room_id).await; + + if services + .rooms + .state_cache + .is_invited(sender_user, room_id) + .await + { + debug_warn!("{sender_user} is already invited in {room_id} but attempted to knock"); + return Err!(Request(Forbidden( + "You cannot knock on a room you are already invited/accepted to." 
+ ))); + } + + if services + .rooms + .state_cache + .is_joined(sender_user, room_id) + .await + { + debug_warn!("{sender_user} is already joined in {room_id} but attempted to knock"); + return Err!(Request(Forbidden("You cannot knock on a room you are already joined in."))); + } + + if services + .rooms + .state_cache + .is_knocked(sender_user, room_id) + .await + { + debug_warn!("{sender_user} is already knocked in {room_id}"); + return Ok(knock_room::v3::Response { room_id: room_id.into() }); + } + + if let Ok(membership) = services + .rooms + .state_accessor + .get_member(room_id, sender_user) + .await + { + if membership.membership == MembershipState::Ban { + debug_warn!("{sender_user} is banned from {room_id} but attempted to knock"); + return Err!(Request(Forbidden("You cannot knock on a room you are banned from."))); + } + } + + let server_in_room = services + .rooms + .state_cache + .server_in_room(services.globals.server_name(), room_id) + .await; + + let local_knock = server_in_room + || servers.is_empty() + || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])); + + if local_knock { + knock_room_helper_local(services, sender_user, room_id, reason, servers, state_lock) + .boxed() + .await?; + } else { + knock_room_helper_remote(services, sender_user, room_id, reason, servers, state_lock) + .boxed() + .await?; + } + + Ok(knock_room::v3::Response::new(room_id.to_owned())) +} + +async fn knock_room_helper_local( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + reason: Option, + servers: &[OwnedServerName], + state_lock: RoomMutexGuard, +) -> Result { + debug_info!("We can knock locally"); + + let room_version_id = services.rooms.state.get_room_version(room_id).await?; + + if matches!( + room_version_id, + RoomVersionId::V1 + | RoomVersionId::V2 + | RoomVersionId::V3 + | RoomVersionId::V4 + | RoomVersionId::V5 + | RoomVersionId::V6 + ) { + return Err!(Request(Forbidden("This room does not support knocking."))); + } + + let content = RoomMemberEventContent { + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), + blurhash: services.users.blurhash(sender_user).await.ok(), + reason: reason.clone(), + ..RoomMemberEventContent::new(MembershipState::Knock) + }; + + // Try normal knock first + let Err(error) = services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder::state(sender_user.to_string(), &content), + sender_user, + room_id, + &state_lock, + ) + .await + else { + return Ok(()); + }; + + if servers.is_empty() || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])) + { + return Err(error); + } + + warn!("We couldn't do the knock locally, maybe federation can help to satisfy the knock"); + + let (make_knock_response, remote_server) = + make_knock_request(services, sender_user, room_id, servers).await?; + + info!("make_knock finished"); + + let room_version_id = make_knock_response.room_version; + + if !services.server.supported_room_version(&room_version_id) { + return Err!(BadServerResponse( + "Remote room version {room_version_id} is not supported by conduwuit" + )); + } + + let mut knock_event_stub = serde_json::from_str::( + make_knock_response.event.get(), + ) + .map_err(|e| { + err!(BadServerResponse("Invalid make_knock event json received from server: {e:?}")) + })?; + + knock_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), + ); + knock_event_stub.insert( + 
"origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + knock_event_stub.insert( + "content".to_owned(), + to_canonical_value(RoomMemberEventContent { + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), + blurhash: services.users.blurhash(sender_user).await.ok(), + reason, + ..RoomMemberEventContent::new(MembershipState::Knock) + }) + .expect("event is valid, we just created it"), + ); + + // In order to create a compatible ref hash (EventID) the `hashes` field needs + // to be present + services + .server_keys + .hash_and_sign_event(&mut knock_event_stub, &room_version_id)?; + + // Generate event id + let event_id = gen_event_id(&knock_event_stub, &room_version_id)?; + + // Add event_id + knock_event_stub + .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); + + // It has enough fields to be called a proper event now + let knock_event = knock_event_stub; + + info!("Asking {remote_server} for send_knock in room {room_id}"); + let send_knock_request = federation::knock::send_knock::v1::Request { + room_id: room_id.to_owned(), + event_id: event_id.clone(), + pdu: services + .sending + .convert_to_outgoing_federation_event(knock_event.clone()) + .await, + }; + + let send_knock_response = services + .sending + .send_federation_request(&remote_server, send_knock_request) + .await?; + + info!("send_knock finished"); + + services + .rooms + .short + .get_or_create_shortroomid(room_id) + .await; + + info!("Parsing knock event"); + + let parsed_knock_pdu = PduEvent::from_id_val(&event_id, knock_event.clone()) + .map_err(|e| err!(BadServerResponse("Invalid knock event PDU: {e:?}")))?; + + info!("Updating membership locally to knock state with provided stripped state events"); + services + .rooms + .state_cache + .update_membership( + room_id, + sender_user, + parsed_knock_pdu + .get_content::() + .expect("we just created this"), + sender_user, + Some(send_knock_response.knock_room_state), + None, + false, + ) + .await?; + + info!("Appending room knock event locally"); + services + .rooms + .timeline + .append_pdu( + &parsed_knock_pdu, + knock_event, + vec![(*parsed_knock_pdu.event_id).to_owned()], + &state_lock, + ) + .await?; + + Ok(()) +} + +async fn knock_room_helper_remote( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + reason: Option, + servers: &[OwnedServerName], + state_lock: RoomMutexGuard, +) -> Result { + info!("Knocking {room_id} over federation."); + + let (make_knock_response, remote_server) = + make_knock_request(services, sender_user, room_id, servers).await?; + + info!("make_knock finished"); + + let room_version_id = make_knock_response.room_version; + + if !services.server.supported_room_version(&room_version_id) { + return Err!(BadServerResponse( + "Remote room version {room_version_id} is not supported by conduwuit" + )); + } + + let mut knock_event_stub: CanonicalJsonObject = + serde_json::from_str(make_knock_response.event.get()).map_err(|e| { + err!(BadServerResponse("Invalid make_knock event json received from server: {e:?}")) + })?; + + knock_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), + ); + knock_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is 
valid js_int value"), + ), + ); + knock_event_stub.insert( + "content".to_owned(), + to_canonical_value(RoomMemberEventContent { + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), + blurhash: services.users.blurhash(sender_user).await.ok(), + reason, + ..RoomMemberEventContent::new(MembershipState::Knock) + }) + .expect("event is valid, we just created it"), + ); + + // In order to create a compatible ref hash (EventID) the `hashes` field needs + // to be present + services + .server_keys + .hash_and_sign_event(&mut knock_event_stub, &room_version_id)?; + + // Generate event id + let event_id = gen_event_id(&knock_event_stub, &room_version_id)?; + + // Add event_id + knock_event_stub + .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); + + // It has enough fields to be called a proper event now + let knock_event = knock_event_stub; + + info!("Asking {remote_server} for send_knock in room {room_id}"); + let send_knock_request = federation::knock::send_knock::v1::Request { + room_id: room_id.to_owned(), + event_id: event_id.clone(), + pdu: services + .sending + .convert_to_outgoing_federation_event(knock_event.clone()) + .await, + }; + + let send_knock_response = services + .sending + .send_federation_request(&remote_server, send_knock_request) + .await?; + + info!("send_knock finished"); + + services + .rooms + .short + .get_or_create_shortroomid(room_id) + .await; + + info!("Parsing knock event"); + let parsed_knock_pdu = PduEvent::from_id_val(&event_id, knock_event.clone()) + .map_err(|e| err!(BadServerResponse("Invalid knock event PDU: {e:?}")))?; + + info!("Going through send_knock response knock state events"); + let state = send_knock_response + .knock_room_state + .iter() + .map(|event| serde_json::from_str::(event.clone().into_json().get())) + .filter_map(Result::ok); + + let mut state_map: HashMap = HashMap::new(); + + for event in state { + let Some(state_key) = event.get("state_key") else { + debug_warn!("send_knock stripped state event missing state_key: {event:?}"); + continue; + }; + let Some(event_type) = event.get("type") else { + debug_warn!("send_knock stripped state event missing event type: {event:?}"); + continue; + }; + + let Ok(state_key) = serde_json::from_value::(state_key.clone().into()) else { + debug_warn!("send_knock stripped state event has invalid state_key: {event:?}"); + continue; + }; + let Ok(event_type) = serde_json::from_value::(event_type.clone().into()) + else { + debug_warn!("send_knock stripped state event has invalid event type: {event:?}"); + continue; + }; + + let event_id = gen_event_id(&event, &room_version_id)?; + let shortstatekey = services + .rooms + .short + .get_or_create_shortstatekey(&event_type, &state_key) + .await; + + services.rooms.outlier.add_pdu_outlier(&event_id, &event); + state_map.insert(shortstatekey, event_id.clone()); + } + + info!("Compressing state from send_knock"); + let compressed: HashSet<_> = services + .rooms + .state_compressor + .compress_state_events(state_map.iter().map(|(ssk, eid)| (ssk, eid.borrow()))) + .collect() + .await; + + debug!("Saving compressed state"); + let HashSetCompressStateEvent { + shortstatehash: statehash_before_knock, + added, + removed, + } = services + .rooms + .state_compressor + .save_state(room_id, Arc::new(compressed)) + .await?; + + debug!("Forcing state for new room"); + services + .rooms + .state + .force_state(room_id, statehash_before_knock, added, removed, 
&state_lock) + .await?; + + let statehash_after_knock = services + .rooms + .state + .append_to_state(&parsed_knock_pdu) + .await?; + + info!("Updating membership locally to knock state with provided stripped state events"); + services + .rooms + .state_cache + .update_membership( + room_id, + sender_user, + parsed_knock_pdu + .get_content::() + .expect("we just created this"), + sender_user, + Some(send_knock_response.knock_room_state), + None, + false, + ) + .await?; + + info!("Appending room knock event locally"); + services + .rooms + .timeline + .append_pdu( + &parsed_knock_pdu, + knock_event, + vec![(*parsed_knock_pdu.event_id).to_owned()], + &state_lock, + ) + .await?; + + info!("Setting final room state for new room"); + // We set the room state after inserting the pdu, so that we never have a moment + // in time where events in the current room state do not exist + services + .rooms + .state + .set_room_state(room_id, statehash_after_knock, &state_lock); + + Ok(()) +} + +async fn make_knock_request( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + servers: &[OwnedServerName], +) -> Result<(federation::knock::create_knock_event_template::v1::Response, OwnedServerName)> { + let mut make_knock_response_and_server = + Err!(BadServerResponse("No server available to assist in knocking.")); + + let mut make_knock_counter: usize = 0; + + for remote_server in servers { + if services.globals.server_is_ours(remote_server) { + continue; + } + + info!("Asking {remote_server} for make_knock ({make_knock_counter})"); + + let make_knock_response = services + .sending + .send_federation_request( + remote_server, + federation::knock::create_knock_event_template::v1::Request { + room_id: room_id.to_owned(), + user_id: sender_user.to_owned(), + ver: services.server.supported_room_versions().collect(), + }, + ) + .await; + + trace!("make_knock response: {make_knock_response:?}"); + make_knock_counter = make_knock_counter.saturating_add(1); + + make_knock_response_and_server = make_knock_response.map(|r| (r, remote_server.clone())); + + if make_knock_response_and_server.is_ok() { + break; + } + + if make_knock_counter > 40 { + warn!( + "50 servers failed to provide valid make_knock response, assuming no server can \ + assist in knocking." 
+ ); + make_knock_response_and_server = + Err!(BadServerResponse("No server available to assist in knocking.")); + + return make_knock_response_and_server; + } + } + + make_knock_response_and_server +} diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 95c8c2d4..a4dc0205 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -33,8 +33,8 @@ use ruma::{ self, v3::{ Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, - LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State as RoomState, - Timeline, ToDevice, + KnockState, KnockedRoom, LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, + State as RoomState, Timeline, ToDevice, }, DeviceLists, UnreadNotificationsCount, }, @@ -266,6 +266,35 @@ pub(crate) async fn build_sync_events( invited_rooms }); + let knocked_rooms = services + .rooms + .state_cache + .rooms_knocked(sender_user) + .fold_default(|mut knocked_rooms: BTreeMap<_, _>, (room_id, knock_state)| async move { + // Get and drop the lock to wait for remaining operations to finish + let insert_lock = services.rooms.timeline.mutex_insert.lock(&room_id).await; + drop(insert_lock); + + let knock_count = services + .rooms + .state_cache + .get_knock_count(&room_id, sender_user) + .await + .ok(); + + // Knocked before last sync + if Some(since) >= knock_count { + return knocked_rooms; + } + + let knocked_room = KnockedRoom { + knock_state: KnockState { events: knock_state }, + }; + + knocked_rooms.insert(room_id, knocked_room); + knocked_rooms + }); + let presence_updates: OptionFuture<_> = services .globals .allow_local_presence() @@ -300,7 +329,7 @@ pub(crate) async fn build_sync_events( .users .remove_to_device_events(sender_user, sender_device, since); - let rooms = join3(joined_rooms, left_rooms, invited_rooms); + let rooms = join4(joined_rooms, left_rooms, invited_rooms, knocked_rooms); let ephemeral = join3(remove_to_device_events, to_device_events, presence_updates); let top = join5(account_data, ephemeral, device_one_time_keys_count, keys_changed, rooms) .boxed() @@ -308,7 +337,7 @@ pub(crate) async fn build_sync_events( let (account_data, ephemeral, device_one_time_keys_count, keys_changed, rooms) = top; let ((), to_device_events, presence_updates) = ephemeral; - let (joined_rooms, left_rooms, invited_rooms) = rooms; + let (joined_rooms, left_rooms, invited_rooms, knocked_rooms) = rooms; let (joined_rooms, mut device_list_updates, left_encrypted_users) = joined_rooms; device_list_updates.extend(keys_changed); @@ -349,7 +378,7 @@ pub(crate) async fn build_sync_events( leave: left_rooms, join: joined_rooms, invite: invited_rooms, - knock: BTreeMap::new(), // TODO + knock: knocked_rooms, }, to_device: ToDevice { events: to_device_events }, }; diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 9915752e..24c7e286 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -113,9 +113,18 @@ pub(crate) async fn sync_events_v4_route( .collect() .await; + let all_knocked_rooms: Vec<_> = services + .rooms + .state_cache + .rooms_knocked(sender_user) + .map(|r| r.0) + .collect() + .await; + let all_rooms = all_joined_rooms .iter() .chain(all_invited_rooms.iter()) + .chain(all_knocked_rooms.iter()) .map(Clone::clone) .collect(); diff --git a/src/api/router.rs b/src/api/router.rs index 1b38670d..1d42fc5e 100644 --- a/src/api/router.rs +++ b/src/api/router.rs @@ -99,6 +99,7 @@ pub fn build(router: Router, server: &Server) -> Router { .ruma_route(&client::join_room_by_id_route) 
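For reference, the knock routes registered in this hunk map onto the Matrix knocking endpoints (MSC2403): `client::knock_room_route` handles the client-server knock request (`POST /_matrix/client/v3/knock/{roomIdOrAlias}`), while `server::create_knock_event_template_route` and `server::create_knock_event_v1_route` below serve the federation `GET /_matrix/federation/v1/make_knock/{roomId}/{userId}` and `PUT /_matrix/federation/v1/send_knock/{roomId}/{eventId}` calls issued by `make_knock_request` and `knock_room_helper_remote` earlier in this patch. (The endpoint paths are quoted from the Matrix specification, not from this diff.)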
.ruma_route(&client::join_room_by_id_or_alias_route) .ruma_route(&client::joined_members_route) + .ruma_route(&client::knock_room_route) .ruma_route(&client::leave_room_route) .ruma_route(&client::forget_room_route) .ruma_route(&client::joined_rooms_route) @@ -204,8 +205,10 @@ pub fn build(router: Router, server: &Server) -> Router { .ruma_route(&server::get_room_state_route) .ruma_route(&server::get_room_state_ids_route) .ruma_route(&server::create_leave_event_template_route) + .ruma_route(&server::create_knock_event_template_route) .ruma_route(&server::create_leave_event_v1_route) .ruma_route(&server::create_leave_event_v2_route) + .ruma_route(&server::create_knock_event_v1_route) .ruma_route(&server::create_join_event_template_route) .ruma_route(&server::create_join_event_v1_route) .ruma_route(&server::create_join_event_v2_route) diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index 6d3be04c..1fea268b 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -6,8 +6,9 @@ use ruma::{ api::{client::error::ErrorKind, federation::membership::create_invite}, events::room::member::{MembershipState, RoomMemberEventContent}, serde::JsonObject, - CanonicalJsonValue, OwnedEventId, OwnedUserId, UserId, + CanonicalJsonValue, OwnedUserId, UserId, }; +use service::pdu::gen_event_id; use crate::Ruma; @@ -86,12 +87,7 @@ pub(crate) async fn create_invite_route( .map_err(|e| err!(Request(InvalidParam("Failed to sign event: {e}"))))?; // Generate event id - let event_id = OwnedEventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&signed_event, &body.room_version) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); + let event_id = gen_event_id(&signed_event, &body.room_version)?; // Add event_id back signed_event.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.to_string())); @@ -115,12 +111,12 @@ pub(crate) async fn create_invite_route( let mut invite_state = body.invite_room_state.clone(); let mut event: JsonObject = serde_json::from_str(body.event.get()) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event bytes."))?; + .map_err(|e| err!(Request(BadJson("Invalid invite event PDU: {e}"))))?; event.insert("event_id".to_owned(), "$placeholder".into()); let pdu: PduEvent = serde_json::from_value(event.into()) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event."))?; + .map_err(|e| err!(Request(BadJson("Invalid invite event PDU: {e}"))))?; invite_state.push(pdu.to_stripped_state_event()); diff --git a/src/api/server/make_knock.rs b/src/api/server/make_knock.rs index 6d9d6d55..90b9b629 100644 --- a/src/api/server/make_knock.rs +++ b/src/api/server/make_knock.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduwuit::Err; +use conduwuit::{debug_warn, Err}; use ruma::{ api::{client::error::ErrorKind, federation::knock::create_knock_event_template}, events::room::member::{MembershipState, RoomMemberEventContent}, @@ -15,7 +15,8 @@ use crate::{service::pdu::PduBuilder, Error, Result, Ruma}; /// /// Creates a knock template. 
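The handler below is the resident-server counterpart of the `make_knock_request` helper added earlier in this patch: it checks that the room exists, that neither the origin nor the room's server name is forbidden, that the room version supports knocking (versions 1 through 6 are rejected), and, newly in this change, that the remote user is not banned, before returning an unsigned knock membership event template. The knocking server then fills in `origin`, `origin_server_ts` and `content`, hashes and signs the stub, derives the event ID, and submits it via `send_knock`, as shown in `knock_room_helper_remote` above.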
pub(crate) async fn create_knock_event_template_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if !services.rooms.metadata.exists(&body.room_id).await { return Err!(Request(NotFound("Room is unknown to this server."))); @@ -39,8 +40,8 @@ pub(crate) async fn create_knock_event_template_route( .contains(body.origin()) { warn!( - "Server {} for remote user {} tried knocking room ID {} which has a server name that is globally \ - forbidden. Rejecting.", + "Server {} for remote user {} tried knocking room ID {} which has a server name \ + that is globally forbidden. Rejecting.", body.origin(), &body.user_id, &body.room_id, @@ -63,29 +64,44 @@ pub(crate) async fn create_knock_event_template_route( if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6) { return Err(Error::BadRequest( - ErrorKind::IncompatibleRoomVersion { - room_version: room_version_id, - }, + ErrorKind::IncompatibleRoomVersion { room_version: room_version_id }, "Room version does not support knocking.", )); } if !body.ver.contains(&room_version_id) { return Err(Error::BadRequest( - ErrorKind::IncompatibleRoomVersion { - room_version: room_version_id, - }, + ErrorKind::IncompatibleRoomVersion { room_version: room_version_id }, "Your homeserver does not support the features required to knock on this room.", )); } let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; + if let Ok(membership) = services + .rooms + .state_accessor + .get_member(&body.room_id, &body.user_id) + .await + { + if membership.membership == MembershipState::Ban { + debug_warn!( + "Remote user {} is banned from {} but attempted to knock", + &body.user_id, + &body.room_id + ); + return Err!(Request(Forbidden("You cannot knock on a room you are banned from."))); + } + } + let (_pdu, mut pdu_json) = services .rooms .timeline .create_hash_and_sign_event( - PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent::new(MembershipState::Knock)), + PduBuilder::state( + body.user_id.to_string(), + &RoomMemberEventContent::new(MembershipState::Knock), + ), &body.user_id, &body.room_id, &state_lock, diff --git a/src/api/server/make_leave.rs b/src/api/server/make_leave.rs index 746a4858..936e0fbb 100644 --- a/src/api/server/make_leave.rs +++ b/src/api/server/make_leave.rs @@ -9,7 +9,7 @@ use serde_json::value::to_raw_value; use super::make_join::maybe_strip_event_id; use crate::{service::pdu::PduBuilder, Ruma}; -/// # `PUT /_matrix/federation/v1/make_leave/{roomId}/{eventId}` +/// # `GET /_matrix/federation/v1/make_leave/{roomId}/{eventId}` /// /// Creates a leave template. pub(crate) async fn create_leave_event_template_route( @@ -21,7 +21,9 @@ pub(crate) async fn create_leave_event_template_route( } if body.user_id.server_name() != body.origin() { - return Err!(Request(BadJson("Not allowed to leave on behalf of another server/user."))); + return Err!(Request(Forbidden( + "Not allowed to leave on behalf of another server/user." 
+ ))); } // ACL check origin diff --git a/src/api/server/mod.rs b/src/api/server/mod.rs index 9b7d91cb..5c1ff3f7 100644 --- a/src/api/server/mod.rs +++ b/src/api/server/mod.rs @@ -6,6 +6,7 @@ pub(super) mod hierarchy; pub(super) mod invite; pub(super) mod key; pub(super) mod make_join; +pub(super) mod make_knock; pub(super) mod make_leave; pub(super) mod media; pub(super) mod openid; @@ -13,6 +14,7 @@ pub(super) mod publicrooms; pub(super) mod query; pub(super) mod send; pub(super) mod send_join; +pub(super) mod send_knock; pub(super) mod send_leave; pub(super) mod state; pub(super) mod state_ids; @@ -28,6 +30,7 @@ pub(super) use hierarchy::*; pub(super) use invite::*; pub(super) use key::*; pub(super) use make_join::*; +pub(super) use make_knock::*; pub(super) use make_leave::*; pub(super) use media::*; pub(super) use openid::*; @@ -35,6 +38,7 @@ pub(super) use publicrooms::*; pub(super) use query::*; pub(super) use send::*; pub(super) use send_join::*; +pub(super) use send_knock::*; pub(super) use send_leave::*; pub(super) use state::*; pub(super) use state_ids::*; diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index 6cbe5143..97a65bf8 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -186,14 +186,13 @@ async fn create_join_event( .map_err(|e| err!(Request(InvalidParam(warn!("Failed to sign send_join event: {e}")))))?; let origin: OwnedServerName = serde_json::from_value( - serde_json::to_value( - value - .get("origin") - .ok_or_else(|| err!(Request(BadJson("Event missing origin property."))))?, - ) - .expect("CanonicalJson is valid json value"), + value + .get("origin") + .ok_or_else(|| err!(Request(BadJson("Event does not have an origin server name."))))? + .clone() + .into(), ) - .map_err(|e| err!(Request(BadJson(warn!("origin field is not a valid server name: {e}")))))?; + .map_err(|e| err!(Request(BadJson("Event has an invalid origin server name: {e}"))))?; let mutex_lock = services .rooms diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index 49ec4bf8..95478081 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -1,7 +1,8 @@ use axum::extract::State; -use conduwuit::{err, pdu::gen_event_id_canonical_json, warn, Err, Error, PduEvent, Result}; +use conduwuit::{err, pdu::gen_event_id_canonical_json, warn, Err, PduEvent, Result}; +use futures::FutureExt; use ruma::{ - api::{client::error::ErrorKind, federation::knock::send_knock}, + api::federation::knock::send_knock, events::{ room::member::{MembershipState, RoomMemberEventContent}, StateEventType, @@ -17,7 +18,8 @@ use crate::Ruma; /// /// Submits a signed knock event. pub(crate) async fn create_knock_event_v1_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if services .globals @@ -26,7 +28,8 @@ pub(crate) async fn create_knock_event_v1_route( .contains(body.origin()) { warn!( - "Server {} tried knocking room ID {} who has a server name that is globally forbidden. Rejecting.", + "Server {} tried knocking room ID {} who has a server name that is globally \ + forbidden. Rejecting.", body.origin(), &body.room_id, ); @@ -41,7 +44,8 @@ pub(crate) async fn create_knock_event_v1_route( .contains(&server.to_owned()) { warn!( - "Server {} tried knocking room ID {} which has a server name that is globally forbidden. Rejecting.", + "Server {} tried knocking room ID {} which has a server name that is globally \ + forbidden. 
Rejecting.", body.origin(), &body.room_id, ); @@ -50,7 +54,7 @@ pub(crate) async fn create_knock_event_v1_route( } if !services.rooms.metadata.exists(&body.room_id).await { - return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server.")); + return Err!(Request(NotFound("Room is unknown to this server."))); } // ACL check origin server @@ -74,44 +78,42 @@ pub(crate) async fn create_knock_event_v1_route( let event_type: StateEventType = serde_json::from_value( value .get("type") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing type property."))? + .ok_or_else(|| err!(Request(InvalidParam("Event has no event type."))))? .clone() .into(), ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event has invalid event type."))?; + .map_err(|e| err!(Request(InvalidParam("Event has invalid event type: {e}"))))?; if event_type != StateEventType::RoomMember { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, + return Err!(Request(InvalidParam( "Not allowed to send non-membership state event to knock endpoint.", - )); + ))); } let content: RoomMemberEventContent = serde_json::from_value( value .get("content") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing content property"))? + .ok_or_else(|| err!(Request(InvalidParam("Membership event has no content"))))? .clone() .into(), ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event content is empty or invalid"))?; + .map_err(|e| err!(Request(InvalidParam("Event has invalid membership content: {e}"))))?; if content.membership != MembershipState::Knock { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Not allowed to send a non-knock membership event to knock endpoint.", - )); + return Err!(Request(InvalidParam( + "Not allowed to send a non-knock membership event to knock endpoint." + ))); } // ACL check sender server name let sender: OwnedUserId = serde_json::from_value( value .get("sender") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing sender property."))? + .ok_or_else(|| err!(Request(InvalidParam("Event has no sender user ID."))))? .clone() .into(), ) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "sender is not a valid user ID."))?; + .map_err(|e| err!(Request(BadJson("Event sender is not a valid user ID: {e}"))))?; services .rooms @@ -127,36 +129,32 @@ pub(crate) async fn create_knock_event_v1_route( let state_key: OwnedUserId = serde_json::from_value( value .get("state_key") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing state_key property."))? + .ok_or_else(|| err!(Request(InvalidParam("Event does not have a state_key"))))? .clone() .into(), ) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "state_key is invalid or not a user ID."))?; + .map_err(|e| err!(Request(BadJson("Event does not have a valid state_key: {e}"))))?; if state_key != sender { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "State key does not match sender user", - )); + return Err!(Request(InvalidParam("state_key does not match sender user of event."))); }; let origin: OwnedServerName = serde_json::from_value( - serde_json::to_value( - value - .get("origin") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing origin property."))?, - ) - .expect("CanonicalJson is valid json value"), + value + .get("origin") + .ok_or_else(|| err!(Request(BadJson("Event does not have an origin server name."))))? 
+ .clone() + .into(), ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "origin is not a server name."))?; + .map_err(|e| err!(Request(BadJson("Event has an invalid origin server name: {e}"))))?; let mut event: JsonObject = serde_json::from_str(body.pdu.get()) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid knock event PDU."))?; + .map_err(|e| err!(Request(InvalidParam("Invalid knock event PDU: {e}"))))?; event.insert("event_id".to_owned(), "$placeholder".into()); let pdu: PduEvent = serde_json::from_value(event.into()) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid knock event PDU."))?; + .map_err(|e| err!(Request(InvalidParam("Invalid knock event PDU: {e}"))))?; let mutex_lock = services .rooms @@ -169,19 +167,18 @@ pub(crate) async fn create_knock_event_v1_route( .rooms .event_handler .handle_incoming_pdu(&origin, &body.room_id, &event_id, value.clone(), true) + .boxed() .await? .ok_or_else(|| err!(Request(InvalidParam("Could not accept as timeline event."))))?; drop(mutex_lock); - let knock_room_state = services.rooms.state.summary_stripped(&pdu).await; - services .sending .send_pdu_room(&body.room_id, &pdu_id) .await?; - Ok(send_knock::v1::Response { - knock_room_state, - }) + let knock_room_state = services.rooms.state.summary_stripped(&pdu).await; + + Ok(send_knock::v1::Response { knock_room_state }) } diff --git a/src/api/server/utils.rs b/src/api/server/utils.rs index 112cf858..4f3fa245 100644 --- a/src/api/server/utils.rs +++ b/src/api/server/utils.rs @@ -1,6 +1,6 @@ use conduwuit::{implement, is_false, Err, Result}; use conduwuit_service::Services; -use futures::{future::OptionFuture, join, FutureExt}; +use futures::{future::OptionFuture, join, FutureExt, StreamExt}; use ruma::{EventId, RoomId, ServerName}; pub(super) struct AccessCheck<'a> { @@ -31,6 +31,15 @@ pub(super) async fn check(&self) -> Result { .state_cache .server_in_room(self.origin, self.room_id); + // if any user on our homeserver is trying to knock this room, we'll need to + // acknowledge bans or leaves + let user_is_knocking = self + .services + .rooms + .state_cache + .room_members_knocked(self.room_id) + .count(); + let server_can_see: OptionFuture<_> = self .event_id .map(|event_id| { @@ -42,14 +51,14 @@ pub(super) async fn check(&self) -> Result { }) .into(); - let (world_readable, server_in_room, server_can_see, acl_check) = - join!(world_readable, server_in_room, server_can_see, acl_check); + let (world_readable, server_in_room, server_can_see, acl_check, user_is_knocking) = + join!(world_readable, server_in_room, server_can_see, acl_check, user_is_knocking); if !acl_check { return Err!(Request(Forbidden("Server access denied."))); } - if !world_readable && !server_in_room { + if !world_readable && !server_in_room && user_is_knocking == 0 { return Err!(Request(Forbidden("Server is not in room."))); } diff --git a/src/database/maps.rs b/src/database/maps.rs index e9b26818..bc409919 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -184,6 +184,10 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "roomuserid_leftcount", ..descriptor::RANDOM }, + Descriptor { + name: "roomuserid_knockedcount", + ..descriptor::RANDOM_SMALL + }, Descriptor { name: "roomuserid_privateread", ..descriptor::RANDOM_SMALL @@ -377,6 +381,10 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "userroomid_leftstate", ..descriptor::RANDOM }, + Descriptor { + name: "userroomid_knockedstate", + ..descriptor::RANDOM_SMALL + }, Descriptor { name: "userroomid_notificationcount", 
..descriptor::RANDOM diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 89421dfd..0d25142d 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -10,7 +10,7 @@ use conduwuit::{ warn, Result, }; use database::{serialize_key, Deserialized, Ignore, Interfix, Json, Map}; -use futures::{future::join4, pin_mut, stream::iter, Stream, StreamExt}; +use futures::{future::join5, pin_mut, stream::iter, Stream, StreamExt}; use itertools::Itertools; use ruma::{ events::{ @@ -51,11 +51,13 @@ struct Data { roomuserid_invitecount: Arc, roomuserid_joined: Arc, roomuserid_leftcount: Arc, + roomuserid_knockedcount: Arc, roomuseroncejoinedids: Arc, serverroomids: Arc, userroomid_invitestate: Arc, userroomid_joined: Arc, userroomid_leftstate: Arc, + userroomid_knockedstate: Arc, } type AppServiceInRoomCache = RwLock>>; @@ -81,11 +83,13 @@ impl crate::Service for Service { roomuserid_invitecount: args.db["roomuserid_invitecount"].clone(), roomuserid_joined: args.db["roomuserid_joined"].clone(), roomuserid_leftcount: args.db["roomuserid_leftcount"].clone(), + roomuserid_knockedcount: args.db["roomuserid_knockedcount"].clone(), roomuseroncejoinedids: args.db["roomuseroncejoinedids"].clone(), serverroomids: args.db["serverroomids"].clone(), userroomid_invitestate: args.db["userroomid_invitestate"].clone(), userroomid_joined: args.db["userroomid_joined"].clone(), userroomid_leftstate: args.db["userroomid_leftstate"].clone(), + userroomid_knockedstate: args.db["userroomid_knockedstate"].clone(), }, })) } @@ -336,6 +340,9 @@ impl Service { self.db.userroomid_leftstate.remove(&userroom_id); self.db.roomuserid_leftcount.remove(&roomuser_id); + self.db.userroomid_knockedstate.remove(&userroom_id); + self.db.roomuserid_knockedcount.remove(&roomuser_id); + self.db.roomid_inviteviaservers.remove(room_id); } @@ -352,12 +359,13 @@ impl Service { // (timo) TODO let leftstate = Vec::>::new(); - let count = self.services.globals.next_count().unwrap(); self.db .userroomid_leftstate .raw_put(&userroom_id, Json(leftstate)); - self.db.roomuserid_leftcount.raw_put(&roomuser_id, count); + self.db + .roomuserid_leftcount + .raw_aput::<8, _, _>(&roomuser_id, self.services.globals.next_count().unwrap()); self.db.userroomid_joined.remove(&userroom_id); self.db.roomuserid_joined.remove(&roomuser_id); @@ -365,6 +373,44 @@ impl Service { self.db.userroomid_invitestate.remove(&userroom_id); self.db.roomuserid_invitecount.remove(&roomuser_id); + self.db.userroomid_knockedstate.remove(&userroom_id); + self.db.roomuserid_knockedcount.remove(&roomuser_id); + + self.db.roomid_inviteviaservers.remove(room_id); + } + + /// Direct DB function to directly mark a user as knocked. It is not + /// recommended to use this directly. 
You most likely should use + /// `update_membership` instead + #[tracing::instrument(skip(self), level = "debug")] + pub fn mark_as_knocked( + &self, + user_id: &UserId, + room_id: &RoomId, + knocked_state: Option>>, + ) { + let userroom_id = (user_id, room_id); + let userroom_id = serialize_key(userroom_id).expect("failed to serialize userroom_id"); + + let roomuser_id = (room_id, user_id); + let roomuser_id = serialize_key(roomuser_id).expect("failed to serialize roomuser_id"); + + self.db + .userroomid_knockedstate + .raw_put(&userroom_id, Json(knocked_state.unwrap_or_default())); + self.db + .roomuserid_knockedcount + .raw_aput::<8, _, _>(&roomuser_id, self.services.globals.next_count().unwrap()); + + self.db.userroomid_joined.remove(&userroom_id); + self.db.roomuserid_joined.remove(&roomuser_id); + + self.db.userroomid_invitestate.remove(&userroom_id); + self.db.roomuserid_invitecount.remove(&roomuser_id); + + self.db.userroomid_leftstate.remove(&userroom_id); + self.db.roomuserid_leftcount.remove(&roomuser_id); + self.db.roomid_inviteviaservers.remove(room_id); } @@ -528,6 +574,20 @@ impl Service { .map(|(_, user_id): (Ignore, &UserId)| user_id) } + /// Returns an iterator over all knocked members of a room. + #[tracing::instrument(skip(self), level = "debug")] + pub fn room_members_knocked<'a>( + &'a self, + room_id: &'a RoomId, + ) -> impl Stream + Send + 'a { + let prefix = (room_id, Interfix); + self.db + .roomuserid_knockedcount + .keys_prefix(&prefix) + .ignore_err() + .map(|(_, user_id): (Ignore, &UserId)| user_id) + } + #[tracing::instrument(skip(self), level = "trace")] pub async fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { let key = (room_id, user_id); @@ -538,6 +598,16 @@ impl Service { .deserialized() } + #[tracing::instrument(skip(self), level = "trace")] + pub async fn get_knock_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { + let key = (room_id, user_id); + self.db + .roomuserid_knockedcount + .qry(&key) + .await + .deserialized() + } + #[tracing::instrument(skip(self), level = "trace")] pub async fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { let key = (room_id, user_id); @@ -576,6 +646,25 @@ impl Service { .ignore_err() } + /// Returns an iterator over all rooms a user is currently knocking. 
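A minimal usage sketch of the accessor declared just below, mirroring how the sliding-sync handlers elsewhere in this series collect knocked room IDs; it assumes `services` and `user_id` are in scope, `futures::StreamExt` is imported, and only the room IDs are kept while the stripped state is discarded:

    // Collect the IDs of all rooms this user currently has a pending knock in.
    let knocked_room_ids: Vec<OwnedRoomId> = services
        .rooms
        .state_cache
        .rooms_knocked(user_id)
        .map(|(room_id, _stripped_state)| room_id)
        .collect()
        .await;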
+ #[tracing::instrument(skip(self), level = "trace")] + pub fn rooms_knocked<'a>( + &'a self, + user_id: &'a UserId, + ) -> impl Stream + Send + 'a { + type KeyVal<'a> = (Key<'a>, Raw>); + type Key<'a> = (&'a UserId, &'a RoomId); + + let prefix = (user_id, Interfix); + self.db + .userroomid_knockedstate + .stream_prefix(&prefix) + .ignore_err() + .map(|((_, room_id), state): KeyVal<'_>| (room_id.to_owned(), state)) + .map(|(room_id, state)| Ok((room_id, state.deserialize_as()?))) + .ignore_err() + } + #[tracing::instrument(skip(self), level = "trace")] pub async fn invite_state( &self, @@ -593,6 +682,23 @@ impl Service { }) } + #[tracing::instrument(skip(self), level = "trace")] + pub async fn knock_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>> { + let key = (user_id, room_id); + self.db + .userroomid_knockedstate + .qry(&key) + .await + .deserialized() + .and_then(|val: Raw>| { + val.deserialize_as().map_err(Into::into) + }) + } + #[tracing::instrument(skip(self), level = "trace")] pub async fn left_state( &self, @@ -641,6 +747,12 @@ impl Service { self.db.userroomid_joined.qry(&key).await.is_ok() } + #[tracing::instrument(skip(self), level = "trace")] + pub async fn is_knocked<'a>(&'a self, user_id: &'a UserId, room_id: &'a RoomId) -> bool { + let key = (user_id, room_id); + self.db.userroomid_knockedstate.qry(&key).await.is_ok() + } + #[tracing::instrument(skip(self), level = "trace")] pub async fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> bool { let key = (user_id, room_id); @@ -659,9 +771,10 @@ impl Service { user_id: &UserId, room_id: &RoomId, ) -> Option { - let states = join4( + let states = join5( self.is_joined(user_id, room_id), self.is_left(user_id, room_id), + self.is_knocked(user_id, room_id), self.is_invited(user_id, room_id), self.once_joined(user_id, room_id), ) @@ -670,8 +783,9 @@ impl Service { match states { | (true, ..) => Some(MembershipState::Join), | (_, true, ..) => Some(MembershipState::Leave), - | (_, _, true, ..) => Some(MembershipState::Invite), - | (false, false, false, true) => Some(MembershipState::Ban), + | (_, _, true, ..) => Some(MembershipState::Knock), + | (_, _, _, true, ..) 
=> Some(MembershipState::Invite), + | (false, false, false, false, true) => Some(MembershipState::Ban), | _ => None, } } @@ -747,6 +861,7 @@ impl Service { pub async fn update_joined_count(&self, room_id: &RoomId) { let mut joinedcount = 0_u64; let mut invitedcount = 0_u64; + let mut knockedcount = 0_u64; let mut joined_servers = HashSet::new(); self.room_members(room_id) @@ -764,8 +879,19 @@ impl Service { .unwrap_or(0), ); + knockedcount = knockedcount.saturating_add( + self.room_members_knocked(room_id) + .count() + .await + .try_into() + .unwrap_or(0), + ); + self.db.roomid_joinedcount.raw_put(room_id, joinedcount); self.db.roomid_invitedcount.raw_put(room_id, invitedcount); + self.db + .roomuserid_knockedcount + .raw_put(room_id, knockedcount); self.room_servers(room_id) .ready_for_each(|old_joined_server| { @@ -820,7 +946,6 @@ impl Service { self.db .userroomid_invitestate .raw_put(&userroom_id, Json(last_state.unwrap_or_default())); - self.db .roomuserid_invitecount .raw_aput::<8, _, _>(&roomuser_id, self.services.globals.next_count().unwrap()); @@ -831,6 +956,9 @@ impl Service { self.db.userroomid_leftstate.remove(&userroom_id); self.db.roomuserid_leftcount.remove(&roomuser_id); + self.db.userroomid_knockedstate.remove(&userroom_id); + self.db.roomuserid_knockedcount.remove(&roomuser_id); + if let Some(servers) = invite_via.filter(is_not_empty!()) { self.add_servers_invite_via(room_id, servers).await; } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index fe7f885a..3ebc432f 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -498,14 +498,15 @@ impl Service { .expect("This state_key was previously validated"); let content: RoomMemberEventContent = pdu.get_content()?; - let invite_state = match content.membership { - | MembershipState::Invite => + let stripped_state = match content.membership { + | MembershipState::Invite | MembershipState::Knock => self.services.state.summary_stripped(pdu).await.into(), | _ => None, }; - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth + // Update our membership info, we do this here incase a user is invited or + // knocked and immediately leaves we need the DB to record the invite or + // knock event for auth self.services .state_cache .update_membership( @@ -513,7 +514,7 @@ impl Service { target_user_id, content, &pdu.sender, - invite_state, + stripped_state, None, true, ) From 9dd058de60cc2a07a61a0c783b8967a779d7881c Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 12 Jan 2025 21:02:03 -0500 Subject: [PATCH 015/328] update complement results Signed-off-by: strawberry --- .../complement/test_results.jsonl | 68 +++++++++---------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 9b4d2838..11339049 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -1,12 +1,12 @@ {"Action":"pass","Test":"TestACLs"} {"Action":"pass","Test":"TestBannedUserCannotSendJoin"} -{"Action":"fail","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room"} -{"Action":"fail","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/event_with_mismatched_state_key"} -{"Action":"fail","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/invite_event"} 
-{"Action":"fail","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/join_event"} -{"Action":"fail","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/leave_event"} -{"Action":"fail","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/non-state_membership_event"} -{"Action":"fail","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/regular_event"} +{"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room"} +{"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/event_with_mismatched_state_key"} +{"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/invite_event"} +{"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/join_event"} +{"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/leave_event"} +{"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/non-state_membership_event"} +{"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/regular_event"} {"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV1"} {"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV1/event_with_mismatched_state_key"} {"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV1/invite_event"} @@ -21,13 +21,13 @@ {"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV2/leave_event"} {"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV2/non-state_membership_event"} {"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV2/regular_event"} -{"Action":"fail","Test":"TestCannotSendNonKnockViaSendKnock"} -{"Action":"fail","Test":"TestCannotSendNonKnockViaSendKnock/event_with_mismatched_state_key"} -{"Action":"fail","Test":"TestCannotSendNonKnockViaSendKnock/invite_event"} -{"Action":"fail","Test":"TestCannotSendNonKnockViaSendKnock/join_event"} -{"Action":"fail","Test":"TestCannotSendNonKnockViaSendKnock/leave_event"} -{"Action":"fail","Test":"TestCannotSendNonKnockViaSendKnock/non-state_membership_event"} -{"Action":"fail","Test":"TestCannotSendNonKnockViaSendKnock/regular_event"} +{"Action":"pass","Test":"TestCannotSendNonKnockViaSendKnock"} +{"Action":"pass","Test":"TestCannotSendNonKnockViaSendKnock/event_with_mismatched_state_key"} +{"Action":"pass","Test":"TestCannotSendNonKnockViaSendKnock/invite_event"} +{"Action":"pass","Test":"TestCannotSendNonKnockViaSendKnock/join_event"} +{"Action":"pass","Test":"TestCannotSendNonKnockViaSendKnock/leave_event"} +{"Action":"pass","Test":"TestCannotSendNonKnockViaSendKnock/non-state_membership_event"} +{"Action":"pass","Test":"TestCannotSendNonKnockViaSendKnock/regular_event"} {"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV1"} {"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV1/event_with_mismatched_state_key"} {"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV1/invite_event"} @@ -90,10 +90,10 @@ {"Action":"fail","Test":"TestKnocking"} {"Action":"fail","Test":"TestKnocking/A_user_can_knock_on_a_room_without_a_reason"} {"Action":"fail","Test":"TestKnocking/A_user_can_knock_on_a_room_without_a_reason#01"} -{"Action":"fail","Test":"TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in"} -{"Action":"fail","Test":"TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in#01"} -{"Action":"fail","Test":"TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to"} -{"Action":"fail","Test":"TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01"} +{"Action":"pass","Test":"TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in"} 
+{"Action":"pass","Test":"TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in#01"} +{"Action":"pass","Test":"TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to"} +{"Action":"pass","Test":"TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01"} {"Action":"pass","Test":"TestKnocking/A_user_in_the_room_can_accept_a_knock"} {"Action":"pass","Test":"TestKnocking/A_user_in_the_room_can_accept_a_knock#01"} {"Action":"fail","Test":"TestKnocking/A_user_in_the_room_can_reject_a_knock"} @@ -101,25 +101,25 @@ {"Action":"fail","Test":"TestKnocking/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room"} {"Action":"fail","Test":"TestKnocking/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room#01"} {"Action":"fail","Test":"TestKnocking/A_user_that_has_knocked_on_a_local_room_can_rescind_their_knock_and_then_knock_again"} -{"Action":"fail","Test":"TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it"} -{"Action":"fail","Test":"TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01"} +{"Action":"pass","Test":"TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it"} +{"Action":"pass","Test":"TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01"} {"Action":"pass","Test":"TestKnocking/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail"} {"Action":"pass","Test":"TestKnocking/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail#01"} {"Action":"pass","Test":"TestKnocking/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'"} {"Action":"pass","Test":"TestKnocking/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'#01"} -{"Action":"fail","Test":"TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail"} -{"Action":"fail","Test":"TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01"} +{"Action":"pass","Test":"TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail"} +{"Action":"pass","Test":"TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01"} {"Action":"fail","Test":"TestKnocking/Knocking_on_a_room_with_join_rule_'knock'_should_succeed"} {"Action":"fail","Test":"TestKnocking/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01"} -{"Action":"fail","Test":"TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock"} -{"Action":"fail","Test":"TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"} +{"Action":"pass","Test":"TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock"} +{"Action":"pass","Test":"TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"} {"Action":"fail","Test":"TestKnockingInMSC3787Room"} {"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_can_knock_on_a_room_without_a_reason"} {"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_can_knock_on_a_room_without_a_reason#01"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in#01"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01"} 
+{"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in"} +{"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in#01"} +{"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to"} +{"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_in_the_room_can_accept_a_knock"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_in_the_room_can_accept_a_knock#01"} {"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_in_the_room_can_reject_a_knock"} @@ -127,18 +127,18 @@ {"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room"} {"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room#01"} {"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_that_has_knocked_on_a_local_room_can_rescind_their_knock_and_then_knock_again"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01"} +{"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it"} +{"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail#01"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'#01"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01"} +{"Action":"pass","Test":"TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail"} +{"Action":"pass","Test":"TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01"} {"Action":"fail","Test":"TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed"} {"Action":"fail","Test":"TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"} +{"Action":"pass","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock"} +{"Action":"pass","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"} {"Action":"pass","Test":"TestLocalPngThumbnail"} {"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/client/v1/media_endpoint"} {"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/media/v3_endpoint"} From be16f84410c09db478eaa7998f451952bbe0fabe Mon Sep 17 00:00:00 2001 From: morguldir 
Date: Wed, 6 Nov 2024 03:17:50 +0100 Subject: [PATCH 016/328] syncv3: use a function for repeated pattern of fetching sticky params --- src/service/sync/mod.rs | 124 +++++++++++++++++----------------------- 1 file changed, 52 insertions(+), 72 deletions(-) diff --git a/src/service/sync/mod.rs b/src/service/sync/mod.rs index 97f4ce9c..02658a70 100644 --- a/src/service/sync/mod.rs +++ b/src/service/sync/mod.rs @@ -85,6 +85,17 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } +fn list_or_sticky(target: &mut Vec, cached: &Vec) { + if target.is_empty() { + target.clone_from(cached); + } +} +fn some_or_sticky(target: &mut Option, cached: Option) { + if target.is_none() { + *target = cached; + } +} + impl Service { pub fn remembered( &self, @@ -136,57 +147,27 @@ impl Service { for (list_id, list) in &mut request.lists { if let Some(cached_list) = cached.lists.get(list_id) { - if list.sort.is_empty() { - list.sort.clone_from(&cached_list.sort); - }; - if list.room_details.required_state.is_empty() { - list.room_details - .required_state - .clone_from(&cached_list.room_details.required_state); - }; - list.room_details.timeline_limit = list - .room_details - .timeline_limit - .or(cached_list.room_details.timeline_limit); - list.include_old_rooms = list - .include_old_rooms - .clone() - .or_else(|| cached_list.include_old_rooms.clone()); + list_or_sticky(&mut list.sort, &cached_list.sort); + list_or_sticky(&mut list.room_details.required_state, &cached_list.room_details.required_state); + some_or_sticky(&mut list.room_details.timeline_limit, cached_list.room_details.timeline_limit); + some_or_sticky(&mut list.include_old_rooms, cached_list.include_old_rooms.clone()); match (&mut list.filters, cached_list.filters.clone()) { - | (Some(list_filters), Some(cached_filters)) => { - list_filters.is_dm = list_filters.is_dm.or(cached_filters.is_dm); - if list_filters.spaces.is_empty() { - list_filters.spaces = cached_filters.spaces; - } - list_filters.is_encrypted = - list_filters.is_encrypted.or(cached_filters.is_encrypted); - list_filters.is_invite = - list_filters.is_invite.or(cached_filters.is_invite); - if list_filters.room_types.is_empty() { - list_filters.room_types = cached_filters.room_types; - } - if list_filters.not_room_types.is_empty() { - list_filters.not_room_types = cached_filters.not_room_types; - } - list_filters.room_name_like = list_filters - .room_name_like - .clone() - .or(cached_filters.room_name_like); - if list_filters.tags.is_empty() { - list_filters.tags = cached_filters.tags; - } - if list_filters.not_tags.is_empty() { - list_filters.not_tags = cached_filters.not_tags; - } + (Some(list_filters), Some(cached_filters)) => { + some_or_sticky(&mut list_filters.is_dm, cached_filters.is_dm); + list_or_sticky(&mut list_filters.spaces, &cached_filters.spaces); + some_or_sticky(&mut list_filters.is_encrypted, cached_filters.is_encrypted); + some_or_sticky(&mut list_filters.is_invite, cached_filters.is_invite); + list_or_sticky(&mut list_filters.room_types, &cached_filters.room_types); + list_or_sticky(&mut list_filters.not_room_types, &cached_filters.not_room_types); + some_or_sticky(&mut list_filters.room_name_like, cached_filters.room_name_like); + list_or_sticky(&mut list_filters.tags, &cached_filters.tags); + list_or_sticky(&mut list_filters.not_tags, &cached_filters.not_tags); }, | (_, Some(cached_filters)) => list.filters = Some(cached_filters), | (Some(list_filters), _) => list.filters = Some(list_filters.clone()), | (..) 
=> {}, } - if list.bump_event_types.is_empty() { - list.bump_event_types - .clone_from(&cached_list.bump_event_types); - }; + list_or_sticky(&mut list.bump_event_types, &cached_list.bump_event_types); } cached.lists.insert(list_id.clone(), list.clone()); } @@ -241,16 +222,18 @@ impl Service { subscriptions: BTreeMap, ) { let mut cache = self.connections.lock().expect("locked"); - let cached = Arc::clone(cache.entry((user_id, device_id, conn_id)).or_insert_with( - || { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }, - )); + let cached = Arc::clone( + cache + .entry((user_id, device_id, conn_id)) + .or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + }), + ); let cached = &mut cached.lock().expect("locked"); drop(cache); @@ -258,25 +241,22 @@ impl Service { } pub fn update_sync_known_rooms( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: String, - list_id: String, - new_cached_rooms: BTreeSet, - globalsince: u64, + &self, user_id: OwnedUserId, device_id: OwnedDeviceId, conn_id: String, list_id: String, + new_cached_rooms: BTreeSet, globalsince: u64, ) { let mut cache = self.connections.lock().expect("locked"); - let cached = Arc::clone(cache.entry((user_id, device_id, conn_id)).or_insert_with( - || { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }, - )); + let cached = Arc::clone( + cache + .entry((user_id, device_id, conn_id)) + .or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + }), + ); let cached = &mut cached.lock().expect("locked"); drop(cache); From 6cb3275be0828d7d64da30bacb759b8c796c3c99 Mon Sep 17 00:00:00 2001 From: morguldir Date: Fri, 3 Jan 2025 08:32:54 +0100 Subject: [PATCH 017/328] Add initial MSC4186 (Simplified Sliding Sync) implementation Signed-off-by: morguldir Signed-off-by: strawberry --- src/api/client/sync/mod.rs | 51 +- src/api/client/sync/v4.rs | 94 +-- src/api/client/sync/v5.rs | 886 ++++++++++++++++++++++++++ src/api/client/unversioned.rs | 1 + src/api/router.rs | 1 + src/service/rooms/read_receipt/mod.rs | 1 + src/service/sync/mod.rs | 273 +++++++- 7 files changed, 1214 insertions(+), 93 deletions(-) create mode 100644 src/api/client/sync/v5.rs diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index 79e4b1ca..1967f4a2 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -1,16 +1,31 @@ mod v3; mod v4; +mod v5; use conduwuit::{ - utils::stream::{BroadbandExt, ReadyExt, TryIgnore}, + utils::{ + stream::{BroadbandExt, ReadyExt, TryIgnore}, + IterStream, + }, PduCount, }; use futures::{pin_mut, StreamExt}; -use ruma::{RoomId, UserId}; +use ruma::{ + directory::RoomTypeFilter, + events::TimelineEventType::{ + self, Beacon, CallInvite, PollStart, RoomEncrypted, RoomMessage, Sticker, + }, + RoomId, UserId, +}; -pub(crate) use self::{v3::sync_events_route, v4::sync_events_v4_route}; +pub(crate) use self::{ + v3::sync_events_route, v4::sync_events_v4_route, v5::sync_events_v5_route, +}; use crate::{service::Services, Error, PduEvent, 
Result}; +pub(crate) const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] = + &[CallInvite, PollStart, Beacon, RoomEncrypted, RoomMessage, Sticker]; + async fn load_timeline( services: &Services, sender_user: &UserId, @@ -69,3 +84,33 @@ async fn share_encrypted_room( }) .await } + +pub(crate) async fn filter_rooms<'a>( + services: &Services, + rooms: &[&'a RoomId], + filter: &[RoomTypeFilter], + negate: bool, +) -> Vec<&'a RoomId> { + rooms + .iter() + .stream() + .filter_map(|r| async move { + let room_type = services.rooms.state_accessor.get_room_type(r).await; + + if room_type.as_ref().is_err_and(|e| !e.is_not_found()) { + return None; + } + + let room_type_filter = RoomTypeFilter::from(room_type.ok()); + + let include = if negate { + !filter.contains(&room_type_filter) + } else { + filter.is_empty() || filter.contains(&room_type_filter) + }; + + include.then_some(r) + }) + .collect() + .await +} diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 24c7e286..a82e9309 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -23,24 +23,23 @@ use ruma::{ DeviceLists, UnreadNotificationsCount, }, }, - directory::RoomTypeFilter, events::{ room::member::{MembershipState, RoomMemberEventContent}, AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, - TimelineEventType::{self, *}, + TimelineEventType::*, }, serde::Raw, - uint, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, OwnedUserId, UInt, + uint, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UInt, }; -use service::{rooms::read_receipt::pack_receipts, Services}; +use service::rooms::read_receipt::pack_receipts; use super::{load_timeline, share_encrypted_room}; -use crate::{client::ignored_filter, Ruma}; +use crate::{ + client::{filter_rooms, ignored_filter, sync::v5::TodoRooms, DEFAULT_BUMP_TYPES}, + Ruma, +}; -const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync"; - -const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] = - &[CallInvite, PollStart, Beacon, RoomEncrypted, RoomMessage, Sticker]; +pub(crate) const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync"; /// POST `/_matrix/client/unstable/org.matrix.msc3575/sync` /// @@ -121,13 +120,19 @@ pub(crate) async fn sync_events_v4_route( .collect() .await; - let all_rooms = all_joined_rooms + let all_invited_rooms: Vec<&RoomId> = all_invited_rooms.iter().map(AsRef::as_ref).collect(); + let all_knocked_rooms: Vec<&RoomId> = all_knocked_rooms.iter().map(AsRef::as_ref).collect(); + + let all_rooms: Vec<&RoomId> = all_joined_rooms .iter() - .chain(all_invited_rooms.iter()) - .chain(all_knocked_rooms.iter()) - .map(Clone::clone) + .map(AsRef::as_ref) + .chain(all_invited_rooms.iter().map(AsRef::as_ref)) + .chain(all_knocked_rooms.iter().map(AsRef::as_ref)) .collect(); + let all_joined_rooms = all_joined_rooms.iter().map(AsRef::as_ref).collect(); + let all_invited_rooms = all_invited_rooms.iter().map(AsRef::as_ref).collect(); + if body.extensions.to_device.enabled.unwrap_or(false) { services .users @@ -180,6 +185,7 @@ pub(crate) async fn sync_events_v4_route( ); for room_id in &all_joined_rooms { + let room_id: &&RoomId = room_id; let Ok(current_shortstatehash) = services.rooms.state.get_room_shortstatehash(room_id).await else { @@ -332,7 +338,7 @@ pub(crate) async fn sync_events_v4_route( } let mut lists = BTreeMap::new(); - let mut todo_rooms = BTreeMap::new(); // and required state + let mut todo_rooms: TodoRooms = BTreeMap::new(); // and required state for (list_id, list) in &body.lists { let 
active_rooms = match list.filters.clone().and_then(|f| f.is_invite) { @@ -353,7 +359,7 @@ pub(crate) async fn sync_events_v4_route( | None => active_rooms, }; - let mut new_known_rooms = BTreeSet::new(); + let mut new_known_rooms: BTreeSet = BTreeSet::new(); let ranges = list.ranges.clone(); lists.insert(list_id.clone(), sync_events::v4::SyncList { @@ -375,9 +381,9 @@ pub(crate) async fn sync_events_v4_route( Vec::new() }; - new_known_rooms.extend(room_ids.iter().cloned()); + new_known_rooms.extend(room_ids.clone().into_iter().map(ToOwned::to_owned)); for room_id in &room_ids { - let todo_room = todo_rooms.entry(room_id.clone()).or_insert(( + let todo_room = todo_rooms.entry((*room_id).to_owned()).or_insert(( BTreeSet::new(), 0_usize, u64::MAX, @@ -399,7 +405,7 @@ pub(crate) async fn sync_events_v4_route( todo_room.2 = todo_room.2.min( known_rooms .get(list_id.as_str()) - .and_then(|k| k.get(room_id)) + .and_then(|k| k.get(*room_id)) .copied() .unwrap_or(0), ); @@ -408,7 +414,7 @@ pub(crate) async fn sync_events_v4_route( op: SlidingOp::Sync, range: Some(r), index: None, - room_ids, + room_ids: room_ids.into_iter().map(ToOwned::to_owned).collect(), room_id: None, } }) @@ -418,8 +424,8 @@ pub(crate) async fn sync_events_v4_route( if let Some(conn_id) = &body.conn_id { services.sync.update_sync_known_rooms( - sender_user.clone(), - sender_device.clone(), + sender_user, + &sender_device, conn_id.clone(), list_id.clone(), new_known_rooms, @@ -464,8 +470,8 @@ pub(crate) async fn sync_events_v4_route( if let Some(conn_id) = &body.conn_id { services.sync.update_sync_known_rooms( - sender_user.clone(), - sender_device.clone(), + sender_user, + &sender_device, conn_id.clone(), "subscriptions".to_owned(), known_subscription_rooms, @@ -489,7 +495,8 @@ pub(crate) async fn sync_events_v4_route( let mut timestamp: Option<_> = None; let mut invite_state = None; let (timeline_pdus, limited); - if all_invited_rooms.contains(room_id) { + let new_room_id: &RoomId = (*room_id).as_ref(); + if all_invited_rooms.contains(&new_room_id) { // TODO: figure out a timestamp we can use for remote invites invite_state = services .rooms @@ -519,7 +526,7 @@ pub(crate) async fn sync_events_v4_route( } account_data.rooms.insert( - room_id.clone(), + room_id.to_owned(), services .account_data .changes_since(Some(room_id), sender_user, *roomsince) @@ -749,10 +756,9 @@ pub(crate) async fn sync_events_v4_route( }); } - if rooms - .iter() - .all(|(_, r)| r.timeline.is_empty() && r.required_state.is_empty()) - { + if rooms.iter().all(|(id, r)| { + r.timeline.is_empty() && r.required_state.is_empty() && !receipts.rooms.contains_key(id) + }) { // Hang a few seconds so requests are not spammed // Stop hanging if new info arrives let default = Duration::from_secs(30); @@ -798,33 +804,3 @@ pub(crate) async fn sync_events_v4_route( delta_token: None, }) } - -async fn filter_rooms( - services: &Services, - rooms: &[OwnedRoomId], - filter: &[RoomTypeFilter], - negate: bool, -) -> Vec { - rooms - .iter() - .stream() - .filter_map(|r| async move { - let room_type = services.rooms.state_accessor.get_room_type(r).await; - - if room_type.as_ref().is_err_and(|e| !e.is_not_found()) { - return None; - } - - let room_type_filter = RoomTypeFilter::from(room_type.ok()); - - let include = if negate { - !filter.contains(&room_type_filter) - } else { - filter.is_empty() || filter.contains(&room_type_filter) - }; - - include.then_some(r.to_owned()) - }) - .collect() - .await -} diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs new file 
mode 100644 index 00000000..1c4f3504 --- /dev/null +++ b/src/api/client/sync/v5.rs @@ -0,0 +1,886 @@ +use std::{ + cmp::{self, Ordering}, + collections::{BTreeMap, BTreeSet, HashMap, HashSet}, + time::Duration, +}; + +use axum::extract::State; +use conduwuit::{ + debug, error, extract_variant, trace, + utils::{ + math::{ruma_from_usize, usize_from_ruma}, + BoolExt, IterStream, ReadyExt, TryFutureExtExt, + }, + warn, Error, Result, +}; +use futures::{FutureExt, StreamExt, TryFutureExt}; +use ruma::{ + api::client::{ + error::ErrorKind, + sync::sync_events::{self, DeviceLists, UnreadNotificationsCount}, + }, + events::{ + room::member::{MembershipState, RoomMemberEventContent}, + AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType, + }, + serde::Raw, + state_res::TypeStateKey, + uint, DeviceId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UInt, UserId, +}; +use service::{rooms::read_receipt::pack_receipts, PduCount}; + +use super::{filter_rooms, share_encrypted_room}; +use crate::{ + client::{ignored_filter, sync::load_timeline, DEFAULT_BUMP_TYPES}, + Ruma, +}; + +type SyncInfo<'a> = (&'a UserId, &'a DeviceId, u64, &'a sync_events::v5::Request); + +/// `POST /_matrix/client/unstable/org.matrix.simplified_msc3575/sync` +/// ([MSC4186]) +/// +/// A simplified version of sliding sync ([MSC3575]). +/// +/// Get all new events in a sliding window of rooms since the last sync or a +/// given point in time. +/// +/// [MSC3575]: https://github.com/matrix-org/matrix-spec-proposals/pull/3575 +/// [MSC4186]: https://github.com/matrix-org/matrix-spec-proposals/pull/4186 +pub(crate) async fn sync_events_v5_route( + State(services): State, + body: Ruma, +) -> Result { + debug_assert!(DEFAULT_BUMP_TYPES.is_sorted(), "DEFAULT_BUMP_TYPES is not sorted"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let mut body = body.body; + + // Setup watchers, so if there's no response, we can wait for them + let watcher = services.sync.watch(sender_user, sender_device); + + let next_batch = services.globals.next_count()?; + + let conn_id = body.conn_id.clone(); + + let globalsince = body + .pos + .as_ref() + .and_then(|string| string.parse().ok()) + .unwrap_or(0); + + if globalsince != 0 + && !services.sync.snake_connection_cached( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + ) { + debug!("Restarting sync stream because it was gone from the database"); + return Err(Error::Request( + ErrorKind::UnknownPos, + "Connection data lost since last time".into(), + http::StatusCode::BAD_REQUEST, + )); + } + + // Client / User requested an initial sync + if globalsince == 0 { + services.sync.forget_snake_sync_connection( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + ); + } + + // Get sticky parameters from cache + let known_rooms = services.sync.update_snake_sync_request_with_cache( + sender_user.clone(), + sender_device.clone(), + &mut body, + ); + + let all_joined_rooms: Vec<_> = services + .rooms + .state_cache + .rooms_joined(sender_user) + .map(ToOwned::to_owned) + .collect() + .await; + + let all_invited_rooms: Vec<_> = services + .rooms + .state_cache + .rooms_invited(sender_user) + .map(|r| r.0) + .collect() + .await; + + let all_knocked_rooms: Vec<_> = services + .rooms + .state_cache + .rooms_knocked(sender_user) + .map(|r| r.0) + .collect() + .await; + + let all_rooms: Vec<&RoomId> = all_joined_rooms + .iter() + 
.map(AsRef::as_ref) + .chain(all_invited_rooms.iter().map(AsRef::as_ref)) + .chain(all_knocked_rooms.iter().map(AsRef::as_ref)) + .collect(); + + let all_joined_rooms = all_joined_rooms.iter().map(AsRef::as_ref).collect(); + let all_invited_rooms = all_invited_rooms.iter().map(AsRef::as_ref).collect(); + + let pos = next_batch.clone().to_string(); + + let mut todo_rooms: TodoRooms = BTreeMap::new(); + + let sync_info: SyncInfo<'_> = (sender_user, sender_device, globalsince, &body); + let mut response = sync_events::v5::Response { + txn_id: body.txn_id.clone(), + pos, + lists: BTreeMap::new(), + rooms: BTreeMap::new(), + extensions: sync_events::v5::response::Extensions { + account_data: collect_account_data(services, sync_info).await, + e2ee: collect_e2ee(services, sync_info, &all_joined_rooms).await?, + to_device: collect_to_device(services, sync_info, next_batch).await, + receipts: collect_receipts(services).await, + typing: sync_events::v5::response::Typing::default(), + }, + }; + + handle_lists( + services, + sync_info, + &all_invited_rooms, + &all_joined_rooms, + &all_rooms, + &mut todo_rooms, + &known_rooms, + &mut response, + ) + .await; + + fetch_subscriptions(services, sync_info, &known_rooms, &mut todo_rooms).await; + + response.rooms = process_rooms( + services, + sender_user, + next_batch, + &all_invited_rooms, + &todo_rooms, + &mut response, + &body, + ) + .await?; + + if response.rooms.iter().all(|(id, r)| { + r.timeline.is_empty() + && r.required_state.is_empty() + && !response.extensions.receipts.rooms.contains_key(id) + }) && response + .extensions + .to_device + .clone() + .is_none_or(|to| to.events.is_empty()) + { + // Hang a few seconds so requests are not spammed + // Stop hanging if new info arrives + let default = Duration::from_secs(30); + let duration = cmp::min(body.timeout.unwrap_or(default), default); + _ = tokio::time::timeout(duration, watcher).await; + } + + trace!( + rooms=?response.rooms.len(), + account_data=?response.extensions.account_data.rooms.len(), + receipts=?response.extensions.receipts.rooms.len(), + "responding to request with" + ); + Ok(response) +} + +type KnownRooms = BTreeMap>; +pub(crate) type TodoRooms = BTreeMap, usize, u64)>; + +async fn fetch_subscriptions( + services: crate::State, + (sender_user, sender_device, globalsince, body): SyncInfo<'_>, + known_rooms: &KnownRooms, + todo_rooms: &mut TodoRooms, +) { + let mut known_subscription_rooms = BTreeSet::new(); + for (room_id, room) in &body.room_subscriptions { + if !services.rooms.metadata.exists(room_id).await { + continue; + } + let todo_room = + todo_rooms + .entry(room_id.clone()) + .or_insert((BTreeSet::new(), 0_usize, u64::MAX)); + + let limit: UInt = room.timeline_limit; + + todo_room.0.extend(room.required_state.iter().cloned()); + todo_room.1 = todo_room.1.max(usize_from_ruma(limit)); + // 0 means unknown because it got out of date + todo_room.2 = todo_room.2.min( + known_rooms + .get("subscriptions") + .and_then(|k| k.get(room_id)) + .copied() + .unwrap_or(0), + ); + known_subscription_rooms.insert(room_id.clone()); + } + // where this went (protomsc says it was removed) + //for r in body.unsubscribe_rooms { + // known_subscription_rooms.remove(&r); + // body.room_subscriptions.remove(&r); + //} + + if let Some(conn_id) = &body.conn_id { + services.sync.update_snake_sync_known_rooms( + sender_user, + sender_device, + conn_id.clone(), + "subscriptions".to_owned(), + known_subscription_rooms, + globalsince, + ); + } +} + +#[allow(clippy::too_many_arguments)] +async fn 
handle_lists<'a>( + services: crate::State, + (sender_user, sender_device, globalsince, body): SyncInfo<'_>, + all_invited_rooms: &Vec<&'a RoomId>, + all_joined_rooms: &Vec<&'a RoomId>, + all_rooms: &Vec<&'a RoomId>, + todo_rooms: &'a mut TodoRooms, + known_rooms: &'a KnownRooms, + response: &'_ mut sync_events::v5::Response, +) -> KnownRooms { + for (list_id, list) in &body.lists { + let active_rooms = match list.filters.clone().and_then(|f| f.is_invite) { + | Some(true) => all_invited_rooms, + | Some(false) => all_joined_rooms, + | None => all_rooms, + }; + + let active_rooms = match list.filters.clone().map(|f| f.not_room_types) { + | Some(filter) if filter.is_empty() => active_rooms, + | Some(value) => &filter_rooms(&services, active_rooms, &value, true).await, + | None => active_rooms, + }; + + let mut new_known_rooms: BTreeSet = BTreeSet::new(); + + let ranges = list.ranges.clone(); + + for mut range in ranges { + range.0 = uint!(0); + range.1 = range + .1 + .clamp(range.0, UInt::try_from(active_rooms.len()).unwrap_or(UInt::MAX)); + + let room_ids = + active_rooms[usize_from_ruma(range.0)..usize_from_ruma(range.1)].to_vec(); + + let new_rooms: BTreeSet = + room_ids.clone().into_iter().map(From::from).collect(); + new_known_rooms.extend(new_rooms); + //new_known_rooms.extend(room_ids..cloned()); + for room_id in room_ids { + let todo_room = todo_rooms.entry(room_id.to_owned()).or_insert(( + BTreeSet::new(), + 0_usize, + u64::MAX, + )); + + let limit: usize = usize_from_ruma(list.room_details.timeline_limit).min(100); + + todo_room + .0 + .extend(list.room_details.required_state.iter().cloned()); + + todo_room.1 = todo_room.1.max(limit); + // 0 means unknown because it got out of date + todo_room.2 = todo_room.2.min( + known_rooms + .get(list_id.as_str()) + .and_then(|k| k.get(room_id)) + .copied() + .unwrap_or(0), + ); + } + } + response + .lists + .insert(list_id.clone(), sync_events::v5::response::List { + count: ruma_from_usize(active_rooms.len()), + }); + + if let Some(conn_id) = &body.conn_id { + services.sync.update_snake_sync_known_rooms( + sender_user, + sender_device, + conn_id.clone(), + list_id.clone(), + new_known_rooms, + globalsince, + ); + } + } + BTreeMap::default() +} + +async fn process_rooms( + services: crate::State, + sender_user: &UserId, + next_batch: u64, + all_invited_rooms: &[&RoomId], + todo_rooms: &TodoRooms, + response: &mut sync_events::v5::Response, + body: &sync_events::v5::Request, +) -> Result> { + let mut rooms = BTreeMap::new(); + for (room_id, (required_state_request, timeline_limit, roomsince)) in todo_rooms { + let roomsincecount = PduCount::Normal(*roomsince); + + let mut timestamp: Option<_> = None; + let mut invite_state = None; + let (timeline_pdus, limited); + let new_room_id: &RoomId = (*room_id).as_ref(); + if all_invited_rooms.contains(&new_room_id) { + // TODO: figure out a timestamp we can use for remote invites + invite_state = services + .rooms + .state_cache + .invite_state(sender_user, room_id) + .await + .ok(); + + (timeline_pdus, limited) = (Vec::new(), true); + } else { + (timeline_pdus, limited) = match load_timeline( + &services, + sender_user, + room_id, + roomsincecount, + Some(PduCount::from(next_batch)), + *timeline_limit, + ) + .await + { + | Ok(value) => value, + | Err(err) => { + warn!("Encountered missing timeline in {}, error {}", room_id, err); + continue; + }, + }; + } + + if body.extensions.account_data.enabled == Some(true) { + response.extensions.account_data.rooms.insert( + room_id.to_owned(), + services + 
.account_data + .changes_since(Some(room_id), sender_user, *roomsince) + .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) + .collect() + .await, + ); + } + + let last_privateread_update = services + .rooms + .read_receipt + .last_privateread_update(sender_user, room_id) + .await > *roomsince; + + let private_read_event = if last_privateread_update { + services + .rooms + .read_receipt + .private_read_get(room_id, sender_user) + .await + .ok() + } else { + None + }; + + let mut receipts: Vec> = services + .rooms + .read_receipt + .readreceipts_since(room_id, *roomsince) + .filter_map(|(read_user, _ts, v)| async move { + services + .users + .user_is_ignored(read_user, sender_user) + .await + .or_some(v) + }) + .collect() + .await; + + if let Some(private_read_event) = private_read_event { + receipts.push(private_read_event); + } + + let receipt_size = receipts.len(); + + if receipt_size > 0 { + response + .extensions + .receipts + .rooms + .insert(room_id.clone(), pack_receipts(Box::new(receipts.into_iter()))); + } + + if roomsince != &0 + && timeline_pdus.is_empty() + && response + .extensions + .account_data + .rooms + .get(room_id) + .is_none_or(Vec::is_empty) + && receipt_size == 0 + { + continue; + } + + let prev_batch = timeline_pdus + .first() + .map_or(Ok::<_, Error>(None), |(pdu_count, _)| { + Ok(Some(match pdu_count { + | PduCount::Backfilled(_) => { + error!("timeline in backfill state?!"); + "0".to_owned() + }, + | PduCount::Normal(c) => c.to_string(), + })) + })? + .or_else(|| { + if roomsince != &0 { + Some(roomsince.to_string()) + } else { + None + } + }); + + let room_events: Vec<_> = timeline_pdus + .iter() + .stream() + .filter_map(|item| ignored_filter(&services, item.clone(), sender_user)) + .map(|(_, pdu)| pdu.to_sync_room_event()) + .collect() + .await; + + for (_, pdu) in timeline_pdus { + let ts = pdu.origin_server_ts; + if DEFAULT_BUMP_TYPES.binary_search(&pdu.kind).is_ok() + && timestamp.is_none_or(|time| time <= ts) + { + timestamp = Some(ts); + } + } + + let required_state = required_state_request + .iter() + .stream() + .filter_map(|state| async move { + services + .rooms + .state_accessor + .room_state_get(room_id, &state.0, &state.1) + .await + .map(|s| s.to_sync_state_event()) + .ok() + }) + .collect() + .await; + + // Heroes + let heroes: Vec<_> = services + .rooms + .state_cache + .room_members(room_id) + .ready_filter(|member| *member != sender_user) + .filter_map(|user_id| { + services + .rooms + .state_accessor + .get_member(room_id, user_id) + .map_ok(|memberevent| sync_events::v5::response::Hero { + user_id: user_id.into(), + name: memberevent.displayname, + avatar: memberevent.avatar_url, + }) + .ok() + }) + .take(5) + .collect() + .await; + + let name = match heroes.len().cmp(&(1_usize)) { + | Ordering::Greater => { + let firsts = heroes[1..] 
+ .iter() + .map(|h| h.name.clone().unwrap_or_else(|| h.user_id.to_string())) + .collect::>() + .join(", "); + + let last = heroes[0] + .name + .clone() + .unwrap_or_else(|| heroes[0].user_id.to_string()); + + Some(format!("{firsts} and {last}")) + }, + | Ordering::Equal => Some( + heroes[0] + .name + .clone() + .unwrap_or_else(|| heroes[0].user_id.to_string()), + ), + | Ordering::Less => None, + }; + + let heroes_avatar = if heroes.len() == 1 { + heroes[0].avatar.clone() + } else { + None + }; + + rooms.insert(room_id.clone(), sync_events::v5::response::Room { + name: services + .rooms + .state_accessor + .get_name(room_id) + .await + .ok() + .or(name), + avatar: if let Some(heroes_avatar) = heroes_avatar { + ruma::JsOption::Some(heroes_avatar) + } else { + match services.rooms.state_accessor.get_avatar(room_id).await { + | ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url), + | ruma::JsOption::Null => ruma::JsOption::Null, + | ruma::JsOption::Undefined => ruma::JsOption::Undefined, + } + }, + initial: Some(roomsince == &0), + is_dm: None, + invite_state, + unread_notifications: UnreadNotificationsCount { + highlight_count: Some( + services + .rooms + .user + .highlight_count(sender_user, room_id) + .await + .try_into() + .expect("notification count can't go that high"), + ), + notification_count: Some( + services + .rooms + .user + .notification_count(sender_user, room_id) + .await + .try_into() + .expect("notification count can't go that high"), + ), + }, + timeline: room_events, + required_state, + prev_batch, + limited, + joined_count: Some( + services + .rooms + .state_cache + .room_joined_count(room_id) + .await + .unwrap_or(0) + .try_into() + .unwrap_or_else(|_| uint!(0)), + ), + invited_count: Some( + services + .rooms + .state_cache + .room_invited_count(room_id) + .await + .unwrap_or(0) + .try_into() + .unwrap_or_else(|_| uint!(0)), + ), + num_live: None, // Count events in timeline greater than global sync counter + bump_stamp: timestamp, + heroes: Some(heroes), + }); + } + Ok(rooms) +} +async fn collect_account_data( + services: crate::State, + (sender_user, _, globalsince, body): (&UserId, &DeviceId, u64, &sync_events::v5::Request), +) -> sync_events::v5::response::AccountData { + let mut account_data = sync_events::v5::response::AccountData { + global: Vec::new(), + rooms: BTreeMap::new(), + }; + + if !body.extensions.account_data.enabled.unwrap_or(false) { + return sync_events::v5::response::AccountData::default(); + } + + account_data.global = services + .account_data + .changes_since(None, sender_user, globalsince) + .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) + .collect() + .await; + + if let Some(rooms) = &body.extensions.account_data.rooms { + for room in rooms { + account_data.rooms.insert( + room.clone(), + services + .account_data + .changes_since(Some(room), sender_user, globalsince) + .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) + .collect() + .await, + ); + } + } + + account_data +} + +async fn collect_e2ee<'a>( + services: crate::State, + (sender_user, sender_device, globalsince, body): ( + &UserId, + &DeviceId, + u64, + &sync_events::v5::Request, + ), + all_joined_rooms: &'a Vec<&'a RoomId>, +) -> Result { + if !body.extensions.e2ee.enabled.unwrap_or(false) { + return Ok(sync_events::v5::response::E2EE::default()); + } + let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in + let mut device_list_changes = HashSet::new(); + let 
mut device_list_left = HashSet::new(); + // Look for device list updates of this account + device_list_changes.extend( + services + .users + .keys_changed(sender_user, globalsince, None) + .map(ToOwned::to_owned) + .collect::>() + .await, + ); + + for room_id in all_joined_rooms { + let Ok(current_shortstatehash) = + services.rooms.state.get_room_shortstatehash(room_id).await + else { + error!("Room {room_id} has no state"); + continue; + }; + + let since_shortstatehash = services + .rooms + .user + .get_token_shortstatehash(room_id, globalsince) + .await + .ok(); + + let encrypted_room = services + .rooms + .state_accessor + .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") + .await + .is_ok(); + + if let Some(since_shortstatehash) = since_shortstatehash { + // Skip if there are only timeline changes + if since_shortstatehash == current_shortstatehash { + continue; + } + + let since_encryption = services + .rooms + .state_accessor + .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "") + .await; + + let since_sender_member: Option = services + .rooms + .state_accessor + .state_get_content( + since_shortstatehash, + &StateEventType::RoomMember, + sender_user.as_str(), + ) + .ok() + .await; + + let joined_since_last_sync = since_sender_member + .as_ref() + .is_none_or(|member| member.membership != MembershipState::Join); + + let new_encrypted_room = encrypted_room && since_encryption.is_err(); + + if encrypted_room { + let current_state_ids: HashMap<_, OwnedEventId> = services + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + + let since_state_ids = services + .rooms + .state_accessor + .state_full_ids(since_shortstatehash) + .await?; + + for (key, id) in current_state_ids { + if since_state_ids.get(&key) != Some(&id) { + let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else { + error!("Pdu in state not found: {id}"); + continue; + }; + if pdu.kind == TimelineEventType::RoomMember { + if let Some(state_key) = &pdu.state_key { + let user_id = + OwnedUserId::parse(state_key.clone()).map_err(|_| { + Error::bad_database("Invalid UserId in member PDU.") + })?; + + if user_id == *sender_user { + continue; + } + + let content: RoomMemberEventContent = pdu.get_content()?; + match content.membership { + | MembershipState::Join => { + // A new user joined an encrypted room + if !share_encrypted_room( + &services, + sender_user, + &user_id, + Some(room_id), + ) + .await + { + device_list_changes.insert(user_id); + } + }, + | MembershipState::Leave => { + // Write down users that have left encrypted rooms we + // are in + left_encrypted_users.insert(user_id); + }, + | _ => {}, + } + } + } + } + } + if joined_since_last_sync || new_encrypted_room { + // If the user is in a new encrypted room, give them all joined users + device_list_changes.extend( + services + .rooms + .state_cache + .room_members(room_id) + // Don't send key updates from the sender to the sender + .ready_filter(|user_id| sender_user != *user_id) + // Only send keys if the sender doesn't share an encrypted room with the target + // already + .filter_map(|user_id| { + share_encrypted_room(&services, sender_user, user_id, Some(room_id)) + .map(|res| res.or_some(user_id.to_owned())) + }) + .collect::>() + .await, + ); + } + } + } + // Look for device list updates in this room + device_list_changes.extend( + services + .users + .room_keys_changed(room_id, globalsince, None) + .map(|(user_id, _)| user_id) + .map(ToOwned::to_owned) + .collect::>() + .await, + ); + 
} + + for user_id in left_encrypted_users { + let dont_share_encrypted_room = + !share_encrypted_room(&services, sender_user, &user_id, None).await; + + // If the user doesn't share an encrypted room with the target anymore, we need + // to tell them + if dont_share_encrypted_room { + device_list_left.insert(user_id); + } + } + + Ok(sync_events::v5::response::E2EE { + device_lists: DeviceLists { + changed: device_list_changes.into_iter().collect(), + left: device_list_left.into_iter().collect(), + }, + device_one_time_keys_count: services + .users + .count_one_time_keys(sender_user, sender_device) + .await, + device_unused_fallback_key_types: None, + }) +} + +async fn collect_to_device( + services: crate::State, + (sender_user, sender_device, globalsince, body): SyncInfo<'_>, + next_batch: u64, +) -> Option { + if !body.extensions.to_device.enabled.unwrap_or(false) { + return None; + } + + services + .users + .remove_to_device_events(sender_user, sender_device, globalsince) + .await; + + Some(sync_events::v5::response::ToDevice { + next_batch: next_batch.to_string(), + events: services + .users + .get_to_device_events(sender_user, sender_device) + .collect() + .await, + }) +} + +async fn collect_receipts(_services: crate::State) -> sync_events::v5::response::Receipts { + sync_events::v5::response::Receipts { rooms: BTreeMap::new() } + // TODO: get explicitly requested read receipts +} diff --git a/src/api/client/unversioned.rs b/src/api/client/unversioned.rs index b4856d72..904f1d2f 100644 --- a/src/api/client/unversioned.rs +++ b/src/api/client/unversioned.rs @@ -52,6 +52,7 @@ pub(crate) async fn get_supported_versions_route( ("org.matrix.msc4180".to_owned(), true), /* stable flag for 3916 (https://github.com/matrix-org/matrix-spec-proposals/pull/4180) */ ("uk.tcpip.msc4133".to_owned(), true), /* Extending User Profile API with Key:Value Pairs (https://github.com/matrix-org/matrix-spec-proposals/pull/4133) */ ("us.cloke.msc4175".to_owned(), true), /* Profile field for user time zone (https://github.com/matrix-org/matrix-spec-proposals/pull/4175) */ + ("org.matrix.simplified_msc3575".to_owned(), true), /* Simplified Sliding sync (https://github.com/matrix-org/matrix-spec-proposals/pull/4186) */ ]), }; diff --git a/src/api/router.rs b/src/api/router.rs index 1d42fc5e..e7cd368d 100644 --- a/src/api/router.rs +++ b/src/api/router.rs @@ -145,6 +145,7 @@ pub fn build(router: Router, server: &Server) -> Router { ) .ruma_route(&client::sync_events_route) .ruma_route(&client::sync_events_v4_route) + .ruma_route(&client::sync_events_v5_route) .ruma_route(&client::get_context_route) .ruma_route(&client::get_message_events_route) .ruma_route(&client::search_events_route) diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index 9777faeb..2bc21355 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -155,6 +155,7 @@ where } let content = ReceiptEventContent::from_iter(json); + conduwuit::trace!(?content); Raw::from_json( serde_json::value::to_raw_value(&SyncEphemeralRoomEvent { content }) .expect("received valid json"), diff --git a/src/service/sync/mod.rs b/src/service/sync/mod.rs index 02658a70..0b86377a 100644 --- a/src/service/sync/mod.rs +++ b/src/service/sync/mod.rs @@ -11,8 +11,9 @@ use ruma::{ api::client::sync::sync_events::{ self, v4::{ExtensionsConfig, SyncRequestList}, + v5, }, - OwnedDeviceId, OwnedRoomId, OwnedUserId, + DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, UserId, }; use crate::{rooms, Dep}; 
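The service-side diff that follows introduces a second connection cache for simplified sliding sync ("snake" sync) alongside the existing v4 sliding-sync cache. As a reading aid, here is a minimal sketch of how the new cache types and the sticky-parameter helpers line up once their generic parameters are spelled out; the concrete ruma `v5::request::List` and `v5::request::RoomSubscription` item types are assumptions inferred from how the fields are used in the hunks below, not taken verbatim from the patch.

use std::{
    collections::BTreeMap,
    sync::{Arc, Mutex},
};

use ruma::{api::client::sync::sync_events::v5, OwnedDeviceId, OwnedRoomId, OwnedUserId};

// One cache entry per (user, device, optional conn_id) tuple; the request item
// types for `lists` and `subscriptions` are assumed from their usage below.
#[derive(Default)]
struct SnakeSyncCache {
    lists: BTreeMap<String, v5::request::List>,
    subscriptions: BTreeMap<OwnedRoomId, v5::request::RoomSubscription>,
    known_rooms: BTreeMap<String, BTreeMap<OwnedRoomId, u64>>,
    extensions: v5::request::Extensions,
}

// Generic over key/value so one alias can serve both the v4 sliding-sync cache
// and the new snake-sync cache; the snake key carries an Option<String> conn_id.
type DbConnections<K, V> = Mutex<BTreeMap<K, V>>;
type SnakeConnectionsKey = (OwnedUserId, OwnedDeviceId, Option<String>);
type SnakeConnectionsVal = Arc<Mutex<SnakeSyncCache>>;

// "Sticky" request parameters: keep the value the client sent in this request,
// otherwise fall back to whatever was cached from an earlier request.
fn list_or_sticky<T: Clone>(target: &mut Vec<T>, cached: &Vec<T>) {
    if target.is_empty() {
        target.clone_from(cached);
    }
}

fn some_or_sticky<T>(target: &mut Option<T>, cached: Option<T>) {
    if target.is_none() {
        *target = cached;
    }
}

With this shape, `update_snake_sync_request_with_cache` can merge a partial v5 request into the cached one and return the cached `known_rooms` map, which is what the route handler uses to decide which rooms are already known to the client.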
@@ -20,7 +21,8 @@ use crate::{rooms, Dep}; pub struct Service { db: Data, services: Services, - connections: DbConnections, + connections: DbConnections, + snake_connections: DbConnections, } pub struct Data { @@ -52,9 +54,19 @@ struct SlidingSyncCache { extensions: ExtensionsConfig, } -type DbConnections = Mutex>; +#[derive(Default)] +struct SnakeSyncCache { + lists: BTreeMap, + subscriptions: BTreeMap, + known_rooms: BTreeMap>, + extensions: v5::request::Extensions, +} + +type DbConnections = Mutex>; type DbConnectionsKey = (OwnedUserId, OwnedDeviceId, String); type DbConnectionsVal = Arc>; +type SnakeConnectionsKey = (OwnedUserId, OwnedDeviceId, Option); +type SnakeConnectionsVal = Arc>; impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { @@ -79,12 +91,15 @@ impl crate::Service for Service { typing: args.depend::("rooms::typing"), }, connections: StdMutex::new(BTreeMap::new()), + snake_connections: StdMutex::new(BTreeMap::new()), })) } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } +/// load params from cache if body doesn't contain it, as long as it's allowed +/// in some cases we may need to allow an empty list as an actual value fn list_or_sticky(target: &mut Vec, cached: &Vec) { if target.is_empty() { target.clone_from(cached); @@ -97,6 +112,30 @@ fn some_or_sticky(target: &mut Option, cached: Option) { } impl Service { + pub fn snake_connection_cached( + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, + conn_id: Option, + ) -> bool { + self.snake_connections + .lock() + .unwrap() + .contains_key(&(user_id, device_id, conn_id)) + } + + pub fn forget_snake_sync_connection( + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, + conn_id: Option, + ) { + self.snake_connections + .lock() + .expect("locked") + .remove(&(user_id, device_id, conn_id)); + } + pub fn remembered( &self, user_id: OwnedUserId, @@ -121,6 +160,112 @@ impl Service { .remove(&(user_id, device_id, conn_id)); } + pub fn update_snake_sync_request_with_cache( + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, + request: &mut v5::Request, + ) -> BTreeMap> { + let conn_id = request.conn_id.clone(); + let mut cache = self.snake_connections.lock().expect("locked"); + let cached = Arc::clone( + cache + .entry((user_id, device_id, conn_id)) + .or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))), + ); + let cached = &mut cached.lock().expect("locked"); + drop(cache); + + //v5::Request::try_from_http_request(req, path_args); + for (list_id, list) in &mut request.lists { + if let Some(cached_list) = cached.lists.get(list_id) { + list_or_sticky( + &mut list.room_details.required_state, + &cached_list.room_details.required_state, + ); + some_or_sticky(&mut list.include_heroes, cached_list.include_heroes); + + match (&mut list.filters, cached_list.filters.clone()) { + | (Some(filters), Some(cached_filters)) => { + some_or_sticky(&mut filters.is_invite, cached_filters.is_invite); + // TODO (morguldir): Find out how a client can unset this, probably need + // to change into an option inside ruma + list_or_sticky( + &mut filters.not_room_types, + &cached_filters.not_room_types, + ); + }, + | (_, Some(cached_filters)) => list.filters = Some(cached_filters), + | (Some(list_filters), _) => list.filters = Some(list_filters.clone()), + | (..) 
=> {}, + } + } + cached.lists.insert(list_id.clone(), list.clone()); + } + + cached + .subscriptions + .extend(request.room_subscriptions.clone()); + request + .room_subscriptions + .extend(cached.subscriptions.clone()); + + request.extensions.e2ee.enabled = request + .extensions + .e2ee + .enabled + .or(cached.extensions.e2ee.enabled); + + request.extensions.to_device.enabled = request + .extensions + .to_device + .enabled + .or(cached.extensions.to_device.enabled); + + request.extensions.account_data.enabled = request + .extensions + .account_data + .enabled + .or(cached.extensions.account_data.enabled); + request.extensions.account_data.lists = request + .extensions + .account_data + .lists + .clone() + .or_else(|| cached.extensions.account_data.lists.clone()); + request.extensions.account_data.rooms = request + .extensions + .account_data + .rooms + .clone() + .or_else(|| cached.extensions.account_data.rooms.clone()); + + some_or_sticky(&mut request.extensions.typing.enabled, cached.extensions.typing.enabled); + some_or_sticky( + &mut request.extensions.typing.rooms, + cached.extensions.typing.rooms.clone(), + ); + some_or_sticky( + &mut request.extensions.typing.lists, + cached.extensions.typing.lists.clone(), + ); + some_or_sticky( + &mut request.extensions.receipts.enabled, + cached.extensions.receipts.enabled, + ); + some_or_sticky( + &mut request.extensions.receipts.rooms, + cached.extensions.receipts.rooms.clone(), + ); + some_or_sticky( + &mut request.extensions.receipts.lists, + cached.extensions.receipts.lists.clone(), + ); + + cached.extensions = request.extensions.clone(); + cached.known_rooms.clone() + } + pub fn update_sync_request_with_cache( &self, user_id: OwnedUserId, @@ -148,20 +293,30 @@ impl Service { for (list_id, list) in &mut request.lists { if let Some(cached_list) = cached.lists.get(list_id) { list_or_sticky(&mut list.sort, &cached_list.sort); - list_or_sticky(&mut list.room_details.required_state, &cached_list.room_details.required_state); - some_or_sticky(&mut list.room_details.timeline_limit, cached_list.room_details.timeline_limit); - some_or_sticky(&mut list.include_old_rooms, cached_list.include_old_rooms.clone()); + list_or_sticky( + &mut list.room_details.required_state, + &cached_list.room_details.required_state, + ); + some_or_sticky( + &mut list.room_details.timeline_limit, + cached_list.room_details.timeline_limit, + ); + some_or_sticky( + &mut list.include_old_rooms, + cached_list.include_old_rooms.clone(), + ); match (&mut list.filters, cached_list.filters.clone()) { - (Some(list_filters), Some(cached_filters)) => { - some_or_sticky(&mut list_filters.is_dm, cached_filters.is_dm); - list_or_sticky(&mut list_filters.spaces, &cached_filters.spaces); - some_or_sticky(&mut list_filters.is_encrypted, cached_filters.is_encrypted); - some_or_sticky(&mut list_filters.is_invite, cached_filters.is_invite); - list_or_sticky(&mut list_filters.room_types, &cached_filters.room_types); - list_or_sticky(&mut list_filters.not_room_types, &cached_filters.not_room_types); - some_or_sticky(&mut list_filters.room_name_like, cached_filters.room_name_like); - list_or_sticky(&mut list_filters.tags, &cached_filters.tags); - list_or_sticky(&mut list_filters.not_tags, &cached_filters.not_tags); + | (Some(filter), Some(cached_filter)) => { + some_or_sticky(&mut filter.is_dm, cached_filter.is_dm); + list_or_sticky(&mut filter.spaces, &cached_filter.spaces); + some_or_sticky(&mut filter.is_encrypted, cached_filter.is_encrypted); + some_or_sticky(&mut filter.is_invite, 
cached_filter.is_invite); + list_or_sticky(&mut filter.room_types, &cached_filter.room_types); + // Should be made possible to change + list_or_sticky(&mut filter.not_room_types, &cached_filter.not_room_types); + some_or_sticky(&mut filter.room_name_like, cached_filter.room_name_like); + list_or_sticky(&mut filter.tags, &cached_filter.tags); + list_or_sticky(&mut filter.not_tags, &cached_filter.not_tags); }, | (_, Some(cached_filters)) => list.filters = Some(cached_filters), | (Some(list_filters), _) => list.filters = Some(list_filters.clone()), @@ -222,18 +377,16 @@ impl Service { subscriptions: BTreeMap, ) { let mut cache = self.connections.lock().expect("locked"); - let cached = Arc::clone( - cache - .entry((user_id, device_id, conn_id)) - .or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }), - ); + let cached = Arc::clone(cache.entry((user_id, device_id, conn_id)).or_insert_with( + || { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + }, + )); let cached = &mut cached.lock().expect("locked"); drop(cache); @@ -241,13 +394,18 @@ impl Service { } pub fn update_sync_known_rooms( - &self, user_id: OwnedUserId, device_id: OwnedDeviceId, conn_id: String, list_id: String, - new_cached_rooms: BTreeSet, globalsince: u64, + &self, + user_id: &UserId, + device_id: &DeviceId, + conn_id: String, + list_id: String, + new_cached_rooms: BTreeSet, + globalsince: u64, ) { let mut cache = self.connections.lock().expect("locked"); let cached = Arc::clone( cache - .entry((user_id, device_id, conn_id)) + .entry((user_id.to_owned(), device_id.to_owned(), conn_id)) .or_insert_with(|| { Arc::new(Mutex::new(SlidingSyncCache { lists: BTreeMap::new(), @@ -275,4 +433,57 @@ impl Service { list.insert(roomid, globalsince); } } + + pub fn update_snake_sync_known_rooms( + &self, + user_id: &UserId, + device_id: &DeviceId, + conn_id: String, + list_id: String, + new_cached_rooms: BTreeSet, + globalsince: u64, + ) { + let mut cache = self.snake_connections.lock().expect("locked"); + let cached = Arc::clone( + cache + .entry((user_id.to_owned(), device_id.to_owned(), Some(conn_id))) + .or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))), + ); + let cached = &mut cached.lock().expect("locked"); + drop(cache); + + for (roomid, lastsince) in cached + .known_rooms + .entry(list_id.clone()) + .or_default() + .iter_mut() + { + if !new_cached_rooms.contains(roomid) { + *lastsince = 0; + } + } + let list = cached.known_rooms.entry(list_id).or_default(); + for roomid in new_cached_rooms { + list.insert(roomid, globalsince); + } + } + + pub fn update_snake_sync_subscriptions( + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, + conn_id: Option, + subscriptions: BTreeMap, + ) { + let mut cache = self.snake_connections.lock().expect("locked"); + let cached = Arc::clone( + cache + .entry((user_id, device_id, conn_id)) + .or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))), + ); + let cached = &mut cached.lock().expect("locked"); + drop(cache); + + cached.subscriptions = subscriptions; + } } From f59e3d8850bfa93244f855e757fa69d610d16e2b Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 14 Jan 2025 13:05:25 -0500 Subject: [PATCH 018/328] bump nix lockfile, cargo.lock, rust to 1.84, and fix 
tracing fork Signed-off-by: June Clementine Strawberry --- Cargo.lock | 457 ++++++++++++++++++++++---------------------- Cargo.toml | 10 +- flake.lock | 30 +-- flake.nix | 2 +- rust-toolchain.toml | 2 +- 5 files changed, 252 insertions(+), 249 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d25197e0..f777a50c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -49,9 +49,9 @@ checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anyhow" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" +checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" [[package]] name = "arc-swap" @@ -138,18 +138,18 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] name = "async-trait" -version = "0.1.83" +version = "0.1.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -175,9 +175,9 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "aws-lc-rs" -version = "1.11.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f47bb8cc16b669d267eeccf585aea077d0882f4777b1c1f740217885d6e6e5a3" +checksum = "f409eb70b561706bf8abba8ca9c112729c481595893fd06a2dd9af8ed8441148" dependencies = [ "aws-lc-sys", "paste", @@ -186,16 +186,15 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.23.1" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2101df3813227bbaaaa0b04cd61c534c7954b22bd68d399b440be937dc63ff7" +checksum = "923ded50f602b3007e5e63e3f094c479d9c8a9b42d7f4034e4afe456aa48bfd2" dependencies = [ "bindgen", "cc", "cmake", "dunce", "fs_extra", - "libc", "paste", ] @@ -225,9 +224,9 @@ dependencies = [ "serde_json", "serde_path_to_error", "serde_urlencoded", - "sync_wrapper 1.0.2", + "sync_wrapper", "tokio", - "tower 0.5.1", + "tower 0.5.2", "tower-layer", "tower-service", "tracing", @@ -259,7 +258,7 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper 1.0.2", + "sync_wrapper", "tower-layer", "tower-service", "tracing", @@ -282,7 +281,7 @@ dependencies = [ "mime", "pin-project-lite", "serde", - "tower 0.5.1", + "tower 0.5.2", "tower-layer", "tower-service", ] @@ -369,7 +368,7 @@ version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "cexpr", "clang-sys", "itertools 0.12.1", @@ -382,7 +381,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.90", + "syn 2.0.96", "which", ] @@ -394,9 +393,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +checksum = "1be3f42a67d6d345ecd59f675f3f012d6974981560836e938c22b424b85ce1be" [[package]] name = "blake2" @@ -445,9 +444,9 @@ 
checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytemuck" -version = "1.20.0" +version = "1.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a" +checksum = "ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" [[package]] name = "byteorder" @@ -496,9 +495,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.3" +version = "1.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27f657647bcff5394bf56c7317665bbf790a137a50eaaa5c6bfbb9e27a518f2d" +checksum = "c8293772165d9345bdaaa39b45b2109591e63fe5e6fbc23c6ff930a048aa310b" dependencies = [ "jobserver", "libc", @@ -557,9 +556,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.23" +version = "4.5.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" +checksum = "a8eb5e908ef3a6efbe1ed62520fb7287959888c88485abe072543190ecc66783" dependencies = [ "clap_builder", "clap_derive", @@ -567,9 +566,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.23" +version = "4.5.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" +checksum = "96b01801b5fc6a0a232407abc821660c9c6d25a1cafc0d4f85f29fb8d9afc121" dependencies = [ "anstyle", "clap_lex", @@ -577,14 +576,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.18" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -740,7 +739,7 @@ dependencies = [ "serde_json", "serde_regex", "serde_yaml", - "thiserror 2.0.7", + "thiserror 2.0.11", "tikv-jemalloc-ctl", "tikv-jemalloc-sys", "tikv-jemallocator", @@ -778,7 +777,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -809,7 +808,7 @@ dependencies = [ "sentry-tracing", "serde_json", "tokio", - "tower 0.5.1", + "tower 0.5.2", "tower-http", "tracing", ] @@ -905,9 +904,9 @@ checksum = "3618cccc083bb987a415d85c02ca6c9994ea5b44731ec28b9ecf09658655fba9" [[package]] name = "const_panic" -version = "0.2.10" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "013b6c2c3a14d678f38cd23994b02da3a1a1b6a5d1eedddfe63a5a5f11b13a81" +checksum = "2459fc9262a1aa204eb4b5764ad4f189caec88aea9634389c0a25f8be7f6265e" [[package]] name = "coolor" @@ -1003,18 +1002,18 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.13" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ "crossbeam-epoch", "crossbeam-utils", @@ 
-1031,18 +1030,18 @@ dependencies = [ [[package]] name = "crossbeam-queue" -version = "0.3.11" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35" +checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-utils" -version = "0.8.20" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crossterm" @@ -1050,7 +1049,7 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "crossterm_winapi", "futures-core", "mio", @@ -1087,7 +1086,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" dependencies = [ "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1114,7 +1113,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1183,7 +1182,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1235,7 +1234,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1421,7 +1420,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1495,9 +1494,9 @@ checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "glob" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "h2" @@ -1647,11 +1646,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.9" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -1687,7 +1686,7 @@ dependencies = [ "markup5ever", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1753,9 +1752,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "1.5.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" +checksum = "256fb8d4bd6413123cc9d91832d78325c48ff41677595be797d90f42969beae0" dependencies = [ "bytes", "futures-channel", @@ -1774,9 +1773,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.3" +version = "0.27.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" +checksum = 
"2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" dependencies = [ "futures-util", "http", @@ -1939,7 +1938,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1982,9 +1981,9 @@ dependencies = [ [[package]] name = "image-webp" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e031e8e3d94711a9ccb5d6ea357439ef3dcbed361798bd4071dc4d9793fbe22f" +checksum = "b77d01e822461baa8409e156015a1d91735549f0f2c17691bd2d996bef238f7f" dependencies = [ "byteorder-lite", "quick-error 2.0.1", @@ -2090,9 +2089,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.76" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" dependencies = [ "once_cell", "wasm-bindgen", @@ -2131,9 +2130,9 @@ dependencies = [ [[package]] name = "konst" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "298ddf99f06a97c1ecd0e910932662b7842855046234b0d0376d35d93add087f" +checksum = "4381b9b00c55f251f2ebe9473aef7c117e96828def1a7cb3bd3f0f903c6894e9" dependencies = [ "const_panic", "konst_kernel", @@ -2151,9 +2150,9 @@ dependencies = [ [[package]] name = "lazy-regex" -version = "3.3.0" +version = "3.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d8e41c97e6bc7ecb552016274b99fbb5d035e8de288c582d9b933af6677bfda" +checksum = "60c7310b93682b36b98fa7ea4de998d3463ccbebd94d935d6b48ba5b6ffa7126" dependencies = [ "lazy-regex-proc_macros", "once_cell", @@ -2162,14 +2161,14 @@ dependencies = [ [[package]] name = "lazy-regex-proc_macros" -version = "3.3.0" +version = "3.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76e1d8b05d672c53cb9c7b920bbba8783845ae4f0b076e02a3db1d02c81b4163" +checksum = "4ba01db5ef81e17eb10a5e0f2109d1b3a3e29bac3070fdbd7d156bf7dbd206a1" dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -2186,9 +2185,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.168" +version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" [[package]] name = "libloading" @@ -2202,9 +2201,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.20" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" +checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" dependencies = [ "cc", "pkg-config", @@ -2219,9 +2218,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.4.14" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "litemap" @@ -2241,9 +2240,9 @@ dependencies = [ [[package]] name = "log" 
-version = "0.4.22" +version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" [[package]] name = "loole" @@ -2362,9 +2361,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.0" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +checksum = "b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924" dependencies = [ "adler2", "simd-adler32", @@ -2394,7 +2393,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "cfg-if", "cfg_aliases", "libc", @@ -2517,9 +2516,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.5" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "memchr", ] @@ -2591,7 +2590,7 @@ dependencies = [ "glob", "once_cell", "opentelemetry", - "ordered-float 4.5.0", + "ordered-float 4.6.0", "percent-encoding", "rand", "thiserror 1.0.69", @@ -2610,18 +2609,18 @@ dependencies = [ [[package]] name = "ordered-float" -version = "4.5.0" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c65ee1f9701bf938026630b455d5315f490640234259037edb259798b3bcf85e" +checksum = "7bb71e1b3fa6ca1c61f383464aaf2bb0e2f8e772a1f01d486832464de363b951" dependencies = [ "num-traits", ] [[package]] name = "os_info" -version = "3.9.0" +version = "3.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ca711d8b83edbb00b44d504503cd247c9c0bd8b0fa2694f2a1a3d8165379ce" +checksum = "6e6520c8cc998c5741ee68ec1dc369fc47e5f0ea5320018ecf2a1ccd6328f48b" dependencies = [ "log", "serde", @@ -2700,7 +2699,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -2711,21 +2710,21 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "phf" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" dependencies = [ - "phf_shared 0.11.2", + "phf_shared 0.11.3", ] [[package]] name = "phf_codegen" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8d39688d359e6b34654d328e262234662d16cc0f60ec8dcbe5e718709342a5a" +checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a" dependencies = [ - "phf_generator 0.11.2", - "phf_shared 0.11.2", + "phf_generator 0.11.3", + "phf_shared 0.11.3", ] [[package]] @@ -2740,11 +2739,11 @@ dependencies = [ [[package]] name = "phf_generator" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" +checksum = 
"3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ - "phf_shared 0.11.2", + "phf_shared 0.11.3", "rand", ] @@ -2754,43 +2753,43 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096" dependencies = [ - "siphasher", + "siphasher 0.3.11", ] [[package]] name = "phf_shared" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" dependencies = [ - "siphasher", + "siphasher 1.0.1", ] [[package]] name = "pin-project" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" +checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" +checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] name = "pin-project-lite" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -2816,9 +2815,9 @@ checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "png" -version = "0.17.15" +version = "0.17.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67582bd5b65bdff614270e2ea89a1cf15bef71245cc1e5f7ea126977144211d" +checksum = "82151a2fc869e011c153adc57cf2789ccb8d9906ce52c0b39a6b5697749d7526" dependencies = [ "bitflags 1.3.2", "crc32fast", @@ -2850,12 +2849,12 @@ checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" [[package]] name = "prettyplease" -version = "0.2.25" +version = "0.2.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" +checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" dependencies = [ "proc-macro2", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -2869,9 +2868,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.92" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" +checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" dependencies = [ "unicode-ident", ] @@ -2884,7 +2883,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", "version_check", "yansi", ] @@ -2909,7 +2908,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -2927,7 +2926,7 @@ version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f86ba2052aebccc42cbbb3ed234b8b13ce76f75c3551a303cb2bcffcff12bb14" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "memchr", "pulldown-cmark-escape", "unicase", @@ -2964,7 +2963,7 @@ dependencies = [ "rustc-hash 2.1.0", "rustls", "socket2", - "thiserror 2.0.7", + "thiserror 2.0.11", "tokio", "tracing", ] @@ -2983,7 +2982,7 @@ dependencies = [ "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.7", + "thiserror 2.0.11", "tinyvec", "tracing", "web-time 1.1.0", @@ -2991,9 +2990,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52cd4b1eff68bf27940dd39811292c49e007f4d0b4c357358dc9b0197be6b527" +checksum = "1c40286217b4ba3a71d644d752e6a0b71f13f1b6a2c5311acfcbe0c2418ed904" dependencies = [ "cfg_aliases", "libc", @@ -3005,9 +3004,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" dependencies = [ "proc-macro2", ] @@ -3044,11 +3043,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", ] [[package]] @@ -3130,7 +3129,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 1.0.2", + "sync_wrapper", "tokio", "tokio-rustls", "tokio-socks", @@ -3221,7 +3220,7 @@ dependencies = [ "serde", "serde_html_form", "serde_json", - "thiserror 2.0.7", + "thiserror 2.0.11", "url", "web-time 1.1.0", ] @@ -3248,7 +3247,7 @@ dependencies = [ "serde_html_form", "serde_json", "smallvec", - "thiserror 2.0.7", + "thiserror 2.0.11", "time", "tracing", "url", @@ -3275,7 +3274,7 @@ dependencies = [ "serde", "serde_json", "smallvec", - "thiserror 2.0.7", + "thiserror 2.0.11", "tracing", "url", "web-time 1.1.0", @@ -3306,7 +3305,7 @@ version = "0.9.5" source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" dependencies = [ "js_int", - "thiserror 2.0.7", + "thiserror 2.0.11", ] [[package]] @@ -3330,7 +3329,7 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn 2.0.90", + "syn 2.0.96", "toml", ] @@ -3355,7 +3354,7 @@ dependencies = [ "http", "http-auth", "ruma-common", - "thiserror 2.0.7", + "thiserror 2.0.11", "tracing", ] @@ -3372,7 +3371,7 @@ dependencies = [ "serde_json", "sha2", "subslice", - "thiserror 2.0.7", + "thiserror 2.0.11", ] [[package]] @@ -3386,7 +3385,7 @@ dependencies = [ "ruma-events", "serde", "serde_json", - "thiserror 2.0.7", + "thiserror 2.0.11", "tracing", ] @@ -3453,11 +3452,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.42" +version = "0.38.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" +checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "errno", "libc", "linux-raw-sys", @@ -3466,9 +3465,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.19" +version = 
"0.23.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" +checksum = "8f287924602bf649d949c63dc8ac8b235fa5387d394020705b80c4eb597ce5b8" dependencies = [ "aws-lc-rs", "log", @@ -3503,9 +3502,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +checksum = "d2bf47e6ff922db3825eb750c4e2ff784c6ff8fb9e13046ef6a1d1c5401b0b37" dependencies = [ "web-time 1.1.0", ] @@ -3524,9 +3523,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" +checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" [[package]] name = "rustyline-async" @@ -3538,7 +3537,7 @@ dependencies = [ "futures-util", "pin-project", "thingbuf", - "thiserror 2.0.7", + "thiserror 2.0.11", "unicode-segmentation", "unicode-width 0.2.0", ] @@ -3581,11 +3580,11 @@ checksum = "1be20c5f7f393ee700f8b2f28ea35812e4e212f40774b550cd2a93ea91684451" [[package]] name = "security-framework" -version = "3.0.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1415a607e92bec364ea2cf9264646dcce0f91e6d65281bd6f2819cca3bf39c8" +checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "core-foundation", "core-foundation-sys", "libc", @@ -3594,9 +3593,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.12.1" +version = "2.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" dependencies = [ "core-foundation-sys", "libc", @@ -3604,9 +3603,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" [[package]] name = "sentry" @@ -3745,29 +3744,29 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.216" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.216" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] name = "serde_html_form" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de514ef58196f1fc96dcaef80fe6170a1ce6215df9687a93fe8300e773fefc5" +checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4" dependencies = [ "form_urlencoded", "indexmap 2.7.0", @@ 
-3778,9 +3777,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.133" +version = "1.0.135" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" +checksum = "2b0d7ba2887406110130a978386c4e1befb98c674b4fba677954e4db976630d9" dependencies = [ "itoa", "memchr", @@ -3930,6 +3929,12 @@ version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" +[[package]] +name = "siphasher" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" + [[package]] name = "slab" version = "0.4.9" @@ -4040,21 +4045,15 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.90" +version = "2.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" +checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] -[[package]] -name = "sync_wrapper" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" - [[package]] name = "sync_wrapper" version = "1.0.2" @@ -4072,7 +4071,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -4123,11 +4122,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.7" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93605438cbd668185516ab499d589afb7ee1859ea3d5fc8f6b0755e1c7443767" +checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" dependencies = [ - "thiserror-impl 2.0.7", + "thiserror-impl 2.0.11", ] [[package]] @@ -4138,18 +4137,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] name = "thiserror-impl" -version = "2.0.7" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d8749b4531af2117677a5fcd12b1348a3fe2b81e36e61ffeac5c4aa3273e36" +checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -4255,9 +4254,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" dependencies = [ "tinyvec_macros", ] @@ -4270,9 +4269,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.42.0" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" +checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" dependencies = [ "backtrace", "bytes", @@ -4288,13 +4287,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.4.0" +version = "2.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -4441,14 +4440,14 @@ dependencies = [ [[package]] name = "tower" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", "pin-project-lite", - "sync_wrapper 0.1.2", + "sync_wrapper", "tokio", "tower-layer", "tower-service", @@ -4461,7 +4460,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "async-compression", - "bitflags 2.6.0", + "bitflags 2.7.0", "bytes", "futures-core", "futures-util", @@ -4471,7 +4470,7 @@ dependencies = [ "pin-project-lite", "tokio", "tokio-util", - "tower 0.5.1", + "tower 0.5.2", "tower-layer", "tower-service", "tracing", @@ -4492,7 +4491,7 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" version = "0.1.41" -source = "git+https://github.com/girlbossceo/tracing?rev=ccc4fbd8238c2d5ba354e61ec17ac610af11401d#ccc4fbd8238c2d5ba354e61ec17ac610af11401d" +source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" dependencies = [ "log", "pin-project-lite", @@ -4503,17 +4502,17 @@ dependencies = [ [[package]] name = "tracing-attributes" version = "0.1.28" -source = "git+https://github.com/girlbossceo/tracing?rev=ccc4fbd8238c2d5ba354e61ec17ac610af11401d#ccc4fbd8238c2d5ba354e61ec17ac610af11401d" +source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] name = "tracing-core" version = "0.1.33" -source = "git+https://github.com/girlbossceo/tracing?rev=ccc4fbd8238c2d5ba354e61ec17ac610af11401d#ccc4fbd8238c2d5ba354e61ec17ac610af11401d" +source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" dependencies = [ "once_cell", "valuable", @@ -4533,7 +4532,7 @@ dependencies = [ [[package]] name = "tracing-log" version = "0.2.0" -source = "git+https://github.com/girlbossceo/tracing?rev=ccc4fbd8238c2d5ba354e61ec17ac610af11401d#ccc4fbd8238c2d5ba354e61ec17ac610af11401d" +source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" dependencies = [ "log", "once_cell", @@ -4561,7 +4560,7 @@ dependencies = [ [[package]] name = "tracing-subscriber" version = "0.3.18" -source = "git+https://github.com/girlbossceo/tracing?rev=ccc4fbd8238c2d5ba354e61ec17ac610af11401d#ccc4fbd8238c2d5ba354e61ec17ac610af11401d" +source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" dependencies = [ "matchers", "nu-ansi-term", @@ -4622,9 +4621,9 @@ dependencies = [ [[package]] name = "unicase" -version = "2.8.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" @@ -4715,9 +4714,9 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "uuid" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" +checksum = "744018581f9a3454a9e15beb8a33b017183f1e7c0cd170232a2d1453b23a51c4" dependencies = [ "getrandom", "serde", @@ -4758,34 +4757,35 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.99" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" dependencies = [ "cfg-if", "once_cell", + "rustversion", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.99" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" dependencies = [ "bumpalo", "log", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.49" +version = "0.4.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38176d9b44ea84e9184eff0bc34cc167ed044f816accfe5922e54d84cf48eca2" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" dependencies = [ "cfg-if", "js-sys", @@ -4796,9 +4796,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.99" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4806,28 +4806,31 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.99" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.99" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] [[package]] name = "web-sys" -version = "0.3.76" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04dd7223427d52553d3702c004d3b2fe07c148165faa56313cb00211e31c12bc" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" dependencies = [ "js-sys", "wasm-bindgen", @@ -5125,9 +5128,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = 
"0.6.20" +version = "0.6.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +checksum = "c8d71a593cc5c42ad7876e2c1fda56f314f3754c084128833e64f1345ff8a03a" dependencies = [ "memchr", ] @@ -5191,7 +5194,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", "synstructure", ] @@ -5213,7 +5216,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -5233,7 +5236,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", "synstructure", ] @@ -5262,7 +5265,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -5301,9 +5304,9 @@ checksum = "3f423a2c17029964870cfaabb1f13dfab7d092a62a29a89264f4d36990ca414a" [[package]] name = "zune-jpeg" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16099418600b4d8f028622f73ff6e3deaabdff330fb9a2a131dea781ee8b0768" +checksum = "99a5bab8d7dedf81405c4bb1f2b83ea057643d9cb28778cea9eecddeedd2e028" dependencies = [ "zune-core", ] diff --git a/Cargo.toml b/Cargo.toml index 76acda80..855b8dda 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ license = "Apache-2.0" # See also `rust-toolchain.toml` readme = "README.md" repository = "https://github.com/girlbossceo/conduwuit" -rust-version = "1.83.0" +rust-version = "1.84.0" version = "0.5.0" [workspace.metadata.crane] @@ -513,16 +513,16 @@ version = "0.2" # https://github.com/girlbossceo/tracing/commit/b348dca742af641c47bc390261f60711c2af573c [patch.crates-io.tracing-subscriber] git = "https://github.com/girlbossceo/tracing" -rev = "ccc4fbd8238c2d5ba354e61ec17ac610af11401d" +rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" [patch.crates-io.tracing] git = "https://github.com/girlbossceo/tracing" -rev = "ccc4fbd8238c2d5ba354e61ec17ac610af11401d" +rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" [patch.crates-io.tracing-core] git = "https://github.com/girlbossceo/tracing" -rev = "ccc4fbd8238c2d5ba354e61ec17ac610af11401d" +rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" [patch.crates-io.tracing-log] git = "https://github.com/girlbossceo/tracing" -rev = "ccc4fbd8238c2d5ba354e61ec17ac610af11401d" +rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" # adds a tab completion callback: https://github.com/girlbossceo/rustyline-async/commit/de26100b0db03e419a3d8e1dd26895d170d1fe50 # adds event for CTRL+\: https://github.com/girlbossceo/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b diff --git a/flake.lock b/flake.lock index 35029076..210e8e08 100644 --- a/flake.lock +++ b/flake.lock @@ -117,11 +117,11 @@ }, "crane_2": { "locked": { - "lastModified": 1734808813, - "narHash": "sha256-3aH/0Y6ajIlfy7j52FGZ+s4icVX0oHhqBzRdlOeztqg=", + "lastModified": 1736566337, + "narHash": "sha256-SC0eDcZPqISVt6R0UfGPyQLrI0+BppjjtQ3wcSlk0oI=", "owner": "ipetkov", "repo": "crane", - "rev": "72e2d02dbac80c8c86bf6bf3e785536acf8ee926", + "rev": "9172acc1ee6c7e1cbafc3044ff850c568c75a5a3", "type": "github" }, "original": { @@ -170,11 +170,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1735799625, - "narHash": 
"sha256-lFadwWDvVIub11bwfZhsh2WUByf9LOi6yjsSUMmE0xk=", + "lastModified": 1736836313, + "narHash": "sha256-zdZ7/T6yG0/hzoVOiNpDiR/sW3zR6oSMrfIFJK2BrrE=", "owner": "nix-community", "repo": "fenix", - "rev": "a9d84a1545814910cb4ab0515ed6921e8b07ee95", + "rev": "056c9393c821a4df356df6ce7f14c722dc8717ec", "type": "github" }, "original": { @@ -364,11 +364,11 @@ "liburing": { "flake": false, "locked": { - "lastModified": 1733603756, - "narHash": "sha256-eTKnZDZ1Ex++v+BI0DBcUBmCXAO/tE8hxK9MiyztZkU=", + "lastModified": 1736719310, + "narHash": "sha256-Turvx60THwzTiUHb49WV3upUgsPuktr7tVy2Lwu2xJg=", "owner": "axboe", "repo": "liburing", - "rev": "c3d5d6270cd5ed48d817fc1e8e95f7c8b222f2ff", + "rev": "3124a4619e4daf26b06d48ccf0186a947070c415", "type": "github" }, "original": { @@ -550,11 +550,11 @@ }, "nixpkgs_5": { "locked": { - "lastModified": 1735685343, - "narHash": "sha256-h1CpBzdJDNtSUb5QMyfFHKHocTTky+4McgQEBQBM+xA=", + "lastModified": 1736817698, + "narHash": "sha256-1m+JP9RUsbeLVv/tF1DX3Ew9Vl/fatXnlh/g5k3jcSk=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "81934660d6e9ea54d2f0cdee821e8533b10c221a", + "rev": "2b1fca3296ddd1602d2c4f104a4050e006f4b0cb", "type": "github" }, "original": { @@ -599,11 +599,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1735742096, - "narHash": "sha256-q3a80h8Jf8wfmPURUgRR46nQCB3I5fhZ+/swulTF5HY=", + "lastModified": 1736690231, + "narHash": "sha256-g9gyxX+F6CrkT5gRIMKPnCPom0o9ZDzYnzzeNF86D6Q=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "7e639ee3dda6ed9cecc79d41f6d38235121e483d", + "rev": "8364ef299790cb6ec22b9e09e873c97dbe9f2cb5", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index fb40cae7..920d3d14 100644 --- a/flake.nix +++ b/flake.nix @@ -26,7 +26,7 @@ file = ./rust-toolchain.toml; # See also `rust-toolchain.toml` - sha256 = "sha256-s1RPtyvDGJaX/BisLT+ifVfuhDT1nZkZ1NcK8sbwELM="; + sha256 = "sha256-lMLAupxng4Fd9F1oDw8gx+qA0RuF7ou7xhNU8wgs0PU="; }; mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: { diff --git a/rust-toolchain.toml b/rust-toolchain.toml index ddd952a2..97e33c91 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -9,7 +9,7 @@ # If you're having trouble making the relevant changes, bug a maintainer. 
[toolchain] -channel = "1.83.0" +channel = "1.84.0" profile = "minimal" components = [ # For rust-analyzer From 9ebb39ca4f35789e54b73cd33805943b362819ae Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 15 Jan 2025 14:34:21 -0500 Subject: [PATCH 019/328] add missing cfg_attr on deserialisation db test Signed-off-by: strawberry --- src/database/tests.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/database/tests.rs b/src/database/tests.rs index 0c9fb41a..0a42ad60 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -219,7 +219,10 @@ fn de_tuple_incomplete_with_sep() { } #[test] -#[should_panic(expected = "deserialization failed to consume trailing bytes")] +#[cfg_attr( + debug_assertions, + should_panic(expected = "deserialization failed to consume trailing bytes") +)] fn de_tuple_unfinished() { let user_id: &UserId = "@user:example.com".try_into().unwrap(); let room_id: &RoomId = "!room:example.com".try_into().unwrap(); From afe9e5536bc8afded76c30304dc782deeda3c9c4 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 11 Jan 2025 16:04:19 +0000 Subject: [PATCH 020/328] remove undocumented jwt token login --- Cargo.lock | 15 --------------- Cargo.toml | 4 ---- conduwuit-example.toml | 4 ---- src/api/Cargo.toml | 1 - src/api/client/session.rs | 38 ++++---------------------------------- src/core/config/mod.rs | 6 ------ src/service/Cargo.toml | 1 - src/service/globals/mod.rs | 11 ----------- 8 files changed, 4 insertions(+), 76 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f777a50c..18bd7aab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -685,7 +685,6 @@ dependencies = [ "http-body-util", "hyper", "ipaddress", - "jsonwebtoken", "log", "rand", "reqwest", @@ -831,7 +830,6 @@ dependencies = [ "image", "ipaddress", "itertools 0.13.0", - "jsonwebtoken", "log", "loole", "lru-cache", @@ -2115,19 +2113,6 @@ dependencies = [ "serde", ] -[[package]] -name = "jsonwebtoken" -version = "9.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" -dependencies = [ - "base64 0.21.7", - "js-sys", - "ring", - "serde", - "serde_json", -] - [[package]] name = "konst" version = "0.3.16" diff --git a/Cargo.toml b/Cargo.toml index 855b8dda..c0b31a69 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -58,10 +58,6 @@ features = ["parse"] [workspace.dependencies.sanitize-filename] version = "0.6.0" -[workspace.dependencies.jsonwebtoken] -version = "9.3.0" -default-features = false - [workspace.dependencies.base64] version = "0.22.1" default-features = false diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 9eefedbb..28e7012b 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -563,10 +563,6 @@ # #proxy = "none" -# This item is undocumented. Please contribute documentation for it. -# -#jwt_secret = - # Servers listed here will be used to gather public keys of other servers # (notary trusted key servers). 
# diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 1bc73624..1b463fbc 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -50,7 +50,6 @@ http.workspace = true http-body-util.workspace = true hyper.workspace = true ipaddress.workspace = true -jsonwebtoken.workspace = true log.workspace = true rand.workspace = true reqwest.workspace = true diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 26377c55..21b8786c 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -20,17 +20,10 @@ use ruma::{ }, OwnedUserId, UserId, }; -use serde::Deserialize; use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; use crate::{utils, utils::hash, Error, Result, Ruma}; -#[derive(Debug, Deserialize)] -struct Claims { - sub: String, - //exp: usize, -} - /// # `GET /_matrix/client/v3/login` /// /// Get the supported login types of this server. One of these should be used as @@ -106,34 +99,11 @@ pub(crate) async fn login_route( user_id }, - | login::v3::LoginInfo::Token(login::v3::Token { token }) => { + | login::v3::LoginInfo::Token(login::v3::Token { token: _ }) => { debug!("Got token login type"); - if let Some(jwt_decoding_key) = services.globals.jwt_decoding_key() { - let token = jsonwebtoken::decode::( - token, - jwt_decoding_key, - &jsonwebtoken::Validation::default(), - ) - .map_err(|e| { - warn!("Failed to parse JWT token from user logging in: {e}"); - Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid.") - })?; - - let username = token.claims.sub.to_lowercase(); - - UserId::parse_with_server_name(username, services.globals.server_name()).map_err( - |e| { - err!(Request(InvalidUsername(debug_error!( - ?e, - "Failed to parse login username" - )))) - }, - )? - } else { - return Err!(Request(Unknown( - "Token login is not supported (server has no jwt decoding key)." - ))); - } + return Err!(Request(Unknown( + "Token login is not supported." + ))); }, #[allow(deprecated)] | login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService { diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 97ecbeaf..d65d3812 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -671,8 +671,6 @@ pub struct Config { #[serde(default)] pub proxy: ProxyConfig, - pub jwt_secret: Option, - /// Servers listed here will be used to gather public keys of other servers /// (notary trusted key servers). 
/// @@ -2005,10 +2003,6 @@ impl fmt::Display for Config { "Lockdown public room directory (only allow admins to publish)", &self.lockdown_public_room_directory.to_string(), ); - line("JWT secret", match self.jwt_secret { - | Some(_) => "set", - | None => "not set", - }); line( "Trusted key servers", &self diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml index 4708ff4e..21fbb417 100644 --- a/src/service/Cargo.toml +++ b/src/service/Cargo.toml @@ -61,7 +61,6 @@ image.workspace = true image.optional = true ipaddress.workspace = true itertools.workspace = true -jsonwebtoken.workspace = true log.workspace = true loole.workspace = true lru-cache.workspace = true diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 38d7f786..f6ff2b09 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -18,7 +18,6 @@ pub struct Service { pub db: Data, pub config: Config, - jwt_decoding_key: Option, pub bad_event_ratelimiter: Arc>>, pub server_user: OwnedUserId, pub admin_alias: OwnedRoomAliasId, @@ -33,11 +32,6 @@ impl crate::Service for Service { let db = Data::new(&args); let config = &args.server.config; - let jwt_decoding_key = config - .jwt_secret - .as_ref() - .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes())); - let turn_secret = config .turn_secret_file @@ -66,7 +60,6 @@ impl crate::Service for Service { let mut s = Self { db, config: config.clone(), - jwt_decoding_key, bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), admin_alias: OwnedRoomAliasId::try_from(format!("#admins:{}", &config.server_name)) .expect("#admins:server_name is valid alias name"), @@ -158,10 +151,6 @@ impl Service { pub fn trusted_servers(&self) -> &[OwnedServerName] { &self.config.trusted_servers } - pub fn jwt_decoding_key(&self) -> Option<&jsonwebtoken::DecodingKey> { - self.jwt_decoding_key.as_ref() - } - pub fn turn_password(&self) -> &String { &self.config.turn_password } pub fn turn_ttl(&self) -> u64 { self.config.turn_ttl } From 2cc6ad8df32610531eb56a5e3bf06320afafde97 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 11 Jan 2025 18:49:21 +0000 Subject: [PATCH 021/328] implement `/login/get_token` (MSC3882) --- conduwuit-example.toml | 16 +++++ src/api/client/capabilities.rs | 5 +- src/api/client/session.rs | 124 +++++++++++++++++++++++++++------ src/api/router.rs | 1 + src/core/config/mod.rs | 20 ++++++ src/database/maps.rs | 4 ++ src/service/users/mod.rs | 50 +++++++++++++ 7 files changed, 196 insertions(+), 24 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 28e7012b..96578da3 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -645,6 +645,22 @@ # #openid_token_ttl = 3600 +# Allow an existing session to mint a login token for another client. +# This requires interactive authentication, but has security ramifications +# as a malicious client could use the mechanism to spawn more than one +# session. +# Enabled by default. +# +#login_via_existing_session = true + +# Login token expiration/TTL in milliseconds. +# +# These are short-lived tokens for the m.login.token endpoint. +# This is used to allow existing sessions to create new sessions. +# see login_via_existing_session. +# +#login_token_ttl = 120000 + # Static TURN username to provide the client if not using a shared secret # ("turn_secret"), It is recommended to use a shared secret over static # credentials. 
diff --git a/src/api/client/capabilities.rs b/src/api/client/capabilities.rs index e122611f..87cdb43d 100644 --- a/src/api/client/capabilities.rs +++ b/src/api/client/capabilities.rs @@ -32,8 +32,9 @@ pub(crate) async fn get_capabilities_route( // we do not implement 3PID stuff capabilities.thirdparty_id_changes = ThirdPartyIdChangesCapability { enabled: false }; - // we dont support generating tokens yet - capabilities.get_login_token = GetLoginTokenCapability { enabled: false }; + capabilities.get_login_token = GetLoginTokenCapability { + enabled: services.server.config.login_via_existing_session, + }; // MSC4133 capability capabilities diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 21b8786c..4881ade7 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -1,3 +1,5 @@ +use std::time::Duration; + use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{debug, err, info, utils::ReadyExt, warn, Err}; @@ -6,9 +8,10 @@ use ruma::{ api::client::{ error::ErrorKind, session::{ + get_login_token, get_login_types::{ self, - v3::{ApplicationServiceLoginType, PasswordLoginType}, + v3::{ApplicationServiceLoginType, PasswordLoginType, TokenLoginType}, }, login::{ self, @@ -16,10 +19,11 @@ use ruma::{ }, logout, logout_all, }, - uiaa::UserIdentifier, + uiaa, }, OwnedUserId, UserId, }; +use service::uiaa::SESSION_ID_LENGTH; use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; use crate::{utils, utils::hash, Error, Result, Ruma}; @@ -30,12 +34,16 @@ use crate::{utils, utils::hash, Error, Result, Ruma}; /// the `type` field when logging in. #[tracing::instrument(skip_all, fields(%client), name = "login")] pub(crate) async fn get_login_types_route( + State(services): State, InsecureClientIp(client): InsecureClientIp, _body: Ruma, ) -> Result { Ok(get_login_types::v3::Response::new(vec![ get_login_types::v3::LoginType::Password(PasswordLoginType::default()), get_login_types::v3::LoginType::ApplicationService(ApplicationServiceLoginType::default()), + get_login_types::v3::LoginType::Token(TokenLoginType { + get_login_token: services.server.config.login_via_existing_session, + }), ])) } @@ -70,7 +78,9 @@ pub(crate) async fn login_route( .. }) => { debug!("Got password login type"); - let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { + let user_id = if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = + identifier + { UserId::parse_with_server_name( user_id.to_lowercase(), services.globals.server_name(), @@ -99,11 +109,12 @@ pub(crate) async fn login_route( user_id }, - | login::v3::LoginInfo::Token(login::v3::Token { token: _ }) => { + | login::v3::LoginInfo::Token(login::v3::Token { token }) => { debug!("Got token login type"); - return Err!(Request(Unknown( - "Token login is not supported." - ))); + if !services.server.config.login_via_existing_session { + return Err!(Request(Unknown("Token login is not enabled."))); + } + services.users.find_from_login_token(token).await? 
}, #[allow(deprecated)] | login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService { @@ -111,21 +122,22 @@ pub(crate) async fn login_route( user, }) => { debug!("Got appservice login type"); - let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { - UserId::parse_with_server_name( - user_id.to_lowercase(), - services.globals.server_name(), - ) - } else if let Some(user) = user { - OwnedUserId::parse(user) - } else { - warn!("Bad login type: {:?}", &body.login_info); - return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type.")); - } - .map_err(|e| { - warn!("Failed to parse username from appservice logging in: {e}"); - Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") - })?; + let user_id = + if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { + UserId::parse_with_server_name( + user_id.to_lowercase(), + services.globals.server_name(), + ) + } else if let Some(user) = user { + OwnedUserId::parse(user) + } else { + warn!("Bad login type: {:?}", &body.login_info); + return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type.")); + } + .map_err(|e| { + warn!("Failed to parse username from appservice logging in: {e}"); + Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") + })?; if let Some(ref info) = body.appservice_info { if !info.is_user_match(&user_id) { @@ -217,6 +229,74 @@ pub(crate) async fn login_route( }) } +/// # `POST /_matrix/client/v1/login/get_token` +/// +/// Allows a logged-in user to get a short-lived token which can be used +/// to log in with the m.login.token flow. +/// +/// +#[tracing::instrument(skip_all, fields(%client), name = "login_token")] +pub(crate) async fn login_token_route( + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result { + if !services.server.config.login_via_existing_session { + return Err!(Request(Unknown("Login via an existing session is not enabled"))); + } + // Authentication for this endpoint was made optional, but we need + // authentication. + let sender_user = body + .sender_user + .as_ref() + .ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?; + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + + // This route SHOULD have UIA + // TODO: How do we make only UIA sessions that have not been used before valid? + + let mut uiaainfo = uiaa::UiaaInfo { + flows: vec![uiaa::AuthFlow { stages: vec![uiaa::AuthType::Password] }], + completed: Vec::new(), + params: Box::default(), + session: None, + auth_error: None, + }; + + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; + + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + + // Success! + } else if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json); + + return Err(Error::Uiaa(uiaainfo)); + } else { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + } + + let login_token = utils::random_string(TOKEN_LENGTH); + + let expires_in = services + .users + .create_login_token(sender_user, &login_token)?; + + Ok(get_login_token::v1::Response { + expires_in: Duration::from_millis(expires_in), + login_token, + }) +} + /// # `POST /_matrix/client/v3/logout` /// /// Log out the current device. 
diff --git a/src/api/router.rs b/src/api/router.rs index e7cd368d..7855ddfa 100644 --- a/src/api/router.rs +++ b/src/api/router.rs @@ -34,6 +34,7 @@ pub fn build(router: Router, server: &Server) -> Router { .ruma_route(&client::register_route) .ruma_route(&client::get_login_types_route) .ruma_route(&client::login_route) + .ruma_route(&client::login_token_route) .ruma_route(&client::whoami_route) .ruma_route(&client::logout_route) .ruma_route(&client::logout_all_route) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index d65d3812..84b88c7c 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -767,6 +767,24 @@ pub struct Config { #[serde(default = "default_openid_token_ttl")] pub openid_token_ttl: u64, + /// Allow an existing session to mint a login token for another client. + /// This requires interactive authentication, but has security ramifications + /// as a malicious client could use the mechanism to spawn more than one + /// session. + /// Enabled by default. + #[serde(default = "true_fn")] + pub login_via_existing_session: bool, + + /// Login token expiration/TTL in milliseconds. + /// + /// These are short-lived tokens for the m.login.token endpoint. + /// This is used to allow existing sessions to create new sessions. + /// see login_via_existing_session. + /// + /// default: 120000 + #[serde(default = "default_login_token_ttl")] + pub login_token_ttl: u64, + /// Static TURN username to provide the client if not using a shared secret /// ("turn_secret"), It is recommended to use a shared secret over static /// credentials. @@ -2373,6 +2391,8 @@ fn default_notification_push_path() -> String { "/_matrix/push/v1/notify".to_own fn default_openid_token_ttl() -> u64 { 60 * 60 } +fn default_login_token_ttl() -> u64 { 2 * 60 * 1000 } + fn default_turn_ttl() -> u64 { 60 * 60 * 24 } fn default_presence_idle_timeout_s() -> u64 { 5 * 60 } diff --git a/src/database/maps.rs b/src/database/maps.rs index bc409919..19e19955 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -365,6 +365,10 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "openidtoken_expiresatuserid", ..descriptor::RANDOM_SMALL }, + Descriptor { + name: "logintoken_expiresatuserid", + ..descriptor::RANDOM_SMALL + }, Descriptor { name: "userroomid_highlightcount", ..descriptor::RANDOM diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index fe064d9c..971cea7c 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -41,6 +41,7 @@ struct Data { keyid_key: Arc, onetimekeyid_onetimekeys: Arc, openidtoken_expiresatuserid: Arc, + logintoken_expiresatuserid: Arc, todeviceid_events: Arc, token_userdeviceid: Arc, userdeviceid_metadata: Arc, @@ -76,6 +77,7 @@ impl crate::Service for Service { keyid_key: args.db["keyid_key"].clone(), onetimekeyid_onetimekeys: args.db["onetimekeyid_onetimekeys"].clone(), openidtoken_expiresatuserid: args.db["openidtoken_expiresatuserid"].clone(), + logintoken_expiresatuserid: args.db["logintoken_expiresatuserid"].clone(), todeviceid_events: args.db["todeviceid_events"].clone(), token_userdeviceid: args.db["token_userdeviceid"].clone(), userdeviceid_metadata: args.db["userdeviceid_metadata"].clone(), @@ -941,6 +943,54 @@ impl Service { .map_err(|e| err!(Database("User ID in openid_userid is invalid. {e}"))) } + /// Creates a short-lived login token, which can be used to log in using the + /// `m.login.token` mechanism. 
+ pub fn create_login_token(&self, user_id: &UserId, token: &str) -> Result { + use std::num::Saturating as Sat; + + let expires_in = self.services.server.config.login_token_ttl; + let expires_at = Sat(utils::millis_since_unix_epoch()) + Sat(expires_in); + + let mut value = expires_at.0.to_be_bytes().to_vec(); + value.extend_from_slice(user_id.as_bytes()); + + self.db + .logintoken_expiresatuserid + .insert(token.as_bytes(), value.as_slice()); + + Ok(expires_in) + } + + /// Find out which user a login token belongs to. + /// Removes the token to prevent double-use attacks. + pub async fn find_from_login_token(&self, token: &str) -> Result { + let Ok(value) = self.db.logintoken_expiresatuserid.get(token).await else { + return Err!(Request(Unauthorized("Login token is unrecognised"))); + }; + + let (expires_at_bytes, user_bytes) = value.split_at(0_u64.to_be_bytes().len()); + let expires_at = u64::from_be_bytes( + expires_at_bytes + .try_into() + .map_err(|e| err!(Database("expires_at in login_userid is invalid u64. {e}")))?, + ); + + if expires_at < utils::millis_since_unix_epoch() { + debug_warn!("Login token is expired, removing"); + self.db.openidtoken_expiresatuserid.remove(token.as_bytes()); + + return Err!(Request(Unauthorized("Login token is expired"))); + } + + self.db.openidtoken_expiresatuserid.remove(token.as_bytes()); + + let user_string = utils::string_from_bytes(user_bytes) + .map_err(|e| err!(Database("User ID in login_userid is invalid unicode. {e}")))?; + + OwnedUserId::try_from(user_string) + .map_err(|e| err!(Database("User ID in login_userid is invalid. {e}"))) + } + /// Gets a specific user profile key pub async fn profile_key( &self, From 5b8464252c2c03edf65e43153be026dbb768a12a Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 17 Jan 2025 00:01:47 -0500 Subject: [PATCH 022/328] cleanup+fix login get_token code, use db ser/deser instead Signed-off-by: strawberry --- src/api/client/session.rs | 23 ++++++++-------------- src/service/users/mod.rs | 41 +++++++++++++-------------------------- 2 files changed, 22 insertions(+), 42 deletions(-) diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 4881ade7..7155351c 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -242,15 +242,11 @@ pub(crate) async fn login_token_route( body: Ruma, ) -> Result { if !services.server.config.login_via_existing_session { - return Err!(Request(Unknown("Login via an existing session is not enabled"))); + return Err!(Request(Forbidden("Login via an existing session is not enabled"))); } - // Authentication for this endpoint was made optional, but we need - // authentication. - let sender_user = body - .sender_user - .as_ref() - .ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?; - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + + let sender_user = body.sender_user(); + let sender_device = body.sender_device(); // This route SHOULD have UIA // TODO: How do we make only UIA sessions that have not been used before valid? @@ -274,22 +270,19 @@ pub(crate) async fn login_token_route( } // Success! 
- } else if let Some(json) = body.json_body { + } else if let Some(json) = body.json_body.as_ref() { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + .create(sender_user, sender_device, &uiaainfo, json); return Err(Error::Uiaa(uiaainfo)); } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err!(Request(NotJson("No JSON body was sent when required."))); } let login_token = utils::random_string(TOKEN_LENGTH); - - let expires_in = services - .users - .create_login_token(sender_user, &login_token)?; + let expires_in = services.users.create_login_token(sender_user, &login_token); Ok(get_login_token::v1::Response { expires_in: Duration::from_millis(expires_in), diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 971cea7c..b2d3a94a 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,8 +1,8 @@ use std::{collections::BTreeMap, mem, mem::size_of, sync::Arc}; use conduwuit::{ - debug_warn, err, utils, - utils::{stream::TryIgnore, string::Unquoted, ReadyExt}, + debug_warn, err, trace, + utils::{self, stream::TryIgnore, string::Unquoted, ReadyExt}, Err, Error, Result, Server, }; use database::{Database, Deserialized, Ignore, Interfix, Json, Map}; @@ -945,50 +945,37 @@ impl Service { /// Creates a short-lived login token, which can be used to log in using the /// `m.login.token` mechanism. - pub fn create_login_token(&self, user_id: &UserId, token: &str) -> Result { + pub fn create_login_token(&self, user_id: &UserId, token: &str) -> u64 { use std::num::Saturating as Sat; let expires_in = self.services.server.config.login_token_ttl; let expires_at = Sat(utils::millis_since_unix_epoch()) + Sat(expires_in); - let mut value = expires_at.0.to_be_bytes().to_vec(); - value.extend_from_slice(user_id.as_bytes()); + let value = (expires_at.0, user_id); + self.db.logintoken_expiresatuserid.raw_put(token, value); - self.db - .logintoken_expiresatuserid - .insert(token.as_bytes(), value.as_slice()); - - Ok(expires_in) + expires_in } /// Find out which user a login token belongs to. /// Removes the token to prevent double-use attacks. pub async fn find_from_login_token(&self, token: &str) -> Result { let Ok(value) = self.db.logintoken_expiresatuserid.get(token).await else { - return Err!(Request(Unauthorized("Login token is unrecognised"))); + return Err!(Request(Forbidden("Login token is unrecognised"))); }; - - let (expires_at_bytes, user_bytes) = value.split_at(0_u64.to_be_bytes().len()); - let expires_at = u64::from_be_bytes( - expires_at_bytes - .try_into() - .map_err(|e| err!(Database("expires_at in login_userid is invalid u64. {e}")))?, - ); + let (expires_at, user_id): (u64, OwnedUserId) = value.deserialized()?; if expires_at < utils::millis_since_unix_epoch() { - debug_warn!("Login token is expired, removing"); - self.db.openidtoken_expiresatuserid.remove(token.as_bytes()); + trace!(?user_id, ?token, "Removing expired login token"); - return Err!(Request(Unauthorized("Login token is expired"))); + self.db.logintoken_expiresatuserid.remove(token); + + return Err!(Request(Forbidden("Login token is expired"))); } - self.db.openidtoken_expiresatuserid.remove(token.as_bytes()); + self.db.logintoken_expiresatuserid.remove(token); - let user_string = utils::string_from_bytes(user_bytes) - .map_err(|e| err!(Database("User ID in login_userid is invalid unicode. 
{e}")))?; - - OwnedUserId::try_from(user_string) - .map_err(|e| err!(Database("User ID in login_userid is invalid. {e}"))) + Ok(user_id) } /// Gets a specific user profile key From afcd0bfeef8e68232dd92aeddfd20397493a409e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 14 Jan 2025 05:55:49 +0000 Subject: [PATCH 023/328] add deref_at macro util Signed-off-by: Jason Volk --- src/core/utils/mod.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 631b2820..2bbadb50 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -90,6 +90,13 @@ macro_rules! ref_at { }; } +#[macro_export] +macro_rules! deref_at { + ($idx:tt) => { + |t| *t.$idx + }; +} + /// Functor for equality i.e. .is_some_and(is_equal!(2)) #[macro_export] macro_rules! is_equal_to { From e56d3c6cb3939bfe9b10c5a18a62104f85a02fef Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 14 Jan 2025 05:56:06 +0000 Subject: [PATCH 024/328] add multi_get_statekey_from_short Signed-off-by: Jason Volk --- src/service/rooms/short/mod.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index b645f9f1..4a591592 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -196,6 +196,20 @@ pub async fn get_statekey_from_short( }) } +#[implement(Service)] +pub fn multi_get_statekey_from_short<'a, S>( + &'a self, + shortstatekey: S, +) -> impl Stream> + Send + 'a +where + S: Stream + Send + 'a, +{ + self.db + .shortstatekey_statekey + .qry_batch(shortstatekey) + .map(Deserialized::deserialized) +} + /// Returns (shortstatehash, already_existed) #[implement(Service)] pub async fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> (ShortStateHash, bool) { From 5167e1f06dce7bbf2cb521dbfb5ca28c15b2a547 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 9 Jan 2025 21:01:58 +0000 Subject: [PATCH 025/328] add option to disable listeners Signed-off-by: Jason Volk --- conduwuit-example.toml | 5 +++++ src/core/config/mod.rs | 5 +++++ src/router/serve/mod.rs | 14 ++++++++++---- 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 96578da3..54143ced 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1503,6 +1503,11 @@ # #sender_workers = 0 +# Enables listener sockets; can be set to false to disable listening. This +# option is intended for developer/diagnostic purposes only. +# +#listening = true + [global.tls] # Path to a valid TLS certificate file. diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 84b88c7c..cb42940b 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1710,6 +1710,11 @@ pub struct Config { #[serde(default)] pub sender_workers: usize, + /// Enables listener sockets; can be set to false to disable listening. This + /// option is intended for developer/diagnostic purposes only. 
+ #[serde(default = "true_fn")] + pub listening: bool, + #[serde(flatten)] #[allow(clippy::zero_sized_map_values)] // this is a catchall, the map shouldn't be zero at runtime diff --git a/src/router/serve/mod.rs b/src/router/serve/mod.rs index f6262202..5c822f2b 100644 --- a/src/router/serve/mod.rs +++ b/src/router/serve/mod.rs @@ -6,7 +6,7 @@ mod unix; use std::sync::Arc; use axum_server::Handle as ServerHandle; -use conduwuit::Result; +use conduwuit::{err, Result}; use conduwuit_service::Services; use tokio::sync::broadcast; @@ -16,13 +16,19 @@ use super::layers; pub(super) async fn serve( services: Arc, handle: ServerHandle, - shutdown: broadcast::Receiver<()>, -) -> Result<()> { + mut shutdown: broadcast::Receiver<()>, +) -> Result { let server = &services.server; let config = &server.config; + if !config.listening { + return shutdown + .recv() + .await + .map_err(|e| err!(error!("channel error: {e}"))); + } + let addrs = config.get_bind_addrs(); let (app, _guard) = layers::build(&services)?; - if cfg!(unix) && config.unix_socket_path.is_some() { unix::serve(server, app, shutdown).await } else if config.tls.certs.is_some() { From 98d8e5c63cc7019d6ecbe235f380a3c954b9e6b5 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 16 Jan 2025 08:00:01 +0000 Subject: [PATCH 026/328] add standard error trait and thread access error conversions Signed-off-by: Jason Volk --- src/core/error/mod.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index ffa829d9..2468811e 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -4,7 +4,7 @@ mod panic; mod response; mod serde; -use std::{any::Any, borrow::Cow, convert::Infallible, fmt, sync::PoisonError}; +use std::{any::Any, borrow::Cow, convert::Infallible, sync::PoisonError}; pub use self::{err::visit, log::*}; @@ -17,7 +17,7 @@ pub enum Error { // std #[error(transparent)] - Fmt(#[from] fmt::Error), + Fmt(#[from] std::fmt::Error), #[error(transparent)] FromUtf8(#[from] std::string::FromUtf8Error), #[error("I/O error: {0}")] @@ -27,6 +27,10 @@ pub enum Error { #[error(transparent)] ParseInt(#[from] std::num::ParseIntError), #[error(transparent)] + Std(#[from] Box), + #[error(transparent)] + ThreadAccessError(#[from] std::thread::AccessError), + #[error(transparent)] TryFromInt(#[from] std::num::TryFromIntError), #[error(transparent)] TryFromSlice(#[from] std::array::TryFromSliceError), @@ -189,8 +193,10 @@ impl Error { pub fn is_not_found(&self) -> bool { self.status_code() == http::StatusCode::NOT_FOUND } } -impl fmt::Debug for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.message()) } +impl std::fmt::Debug for Error { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.message()) + } } impl From> for Error { From 80832cb0bb2fc725164370bb808e192ee3480172 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 16 Jan 2025 12:04:31 +0000 Subject: [PATCH 027/328] add checked math wrapper Signed-off-by: Jason Volk --- src/core/utils/math.rs | 3 ++- src/core/utils/math/tried.rs | 47 ++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 1 deletion(-) create mode 100644 src/core/utils/math/tried.rs diff --git a/src/core/utils/math.rs b/src/core/utils/math.rs index a08cb206..ed157daf 100644 --- a/src/core/utils/math.rs +++ b/src/core/utils/math.rs @@ -1,10 +1,11 @@ mod expected; +mod tried; use std::{cmp, convert::TryFrom}; pub use checked_ops::checked_ops; -pub use 
self::expected::Expected; +pub use self::{expected::Expected, tried::Tried}; use crate::{debug::type_name, err, Err, Error, Result}; /// Checked arithmetic expression. Returns a Result diff --git a/src/core/utils/math/tried.rs b/src/core/utils/math/tried.rs new file mode 100644 index 00000000..2006d2d5 --- /dev/null +++ b/src/core/utils/math/tried.rs @@ -0,0 +1,47 @@ +use num_traits::ops::checked::{CheckedAdd, CheckedDiv, CheckedMul, CheckedRem, CheckedSub}; + +use crate::{checked, Result}; + +pub trait Tried { + #[inline] + fn try_add(self, rhs: Self) -> Result + where + Self: CheckedAdd + Sized, + { + checked!(self + rhs) + } + + #[inline] + fn try_sub(self, rhs: Self) -> Result + where + Self: CheckedSub + Sized, + { + checked!(self - rhs) + } + + #[inline] + fn try_mul(self, rhs: Self) -> Result + where + Self: CheckedMul + Sized, + { + checked!(self * rhs) + } + + #[inline] + fn try_div(self, rhs: Self) -> Result + where + Self: CheckedDiv + Sized, + { + checked!(self / rhs) + } + + #[inline] + fn try_rem(self, rhs: Self) -> Result + where + Self: CheckedRem + Sized, + { + checked!(self % rhs) + } +} + +impl Tried for T {} From 7a8ca8842af65435239ae9358587fdabb18ee6c0 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 16 Jan 2025 08:58:40 +0000 Subject: [PATCH 028/328] add jemallctl base; add trim to interface w/ console cmd Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 9 +++ src/admin/debug/mod.rs | 3 + src/core/alloc/default.rs | 3 + src/core/alloc/hardened.rs | 2 + src/core/alloc/je.rs | 127 +++++++++++++++++++++++++++++++++++- src/core/alloc/mod.rs | 6 +- 6 files changed, 144 insertions(+), 6 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 07daaf0a..d027fa73 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -923,3 +923,12 @@ pub(super) async fn database_stats( Ok(RoomMessageEventContent::notice_markdown(out)) } + +#[admin_command] +pub(super) async fn trim_memory(&self) -> Result { + conduwuit::alloc::trim()?; + + writeln!(self, "done").await?; + + Ok(RoomMessageEventContent::notice_plain("")) +} diff --git a/src/admin/debug/mod.rs b/src/admin/debug/mod.rs index c87dbb0a..cc2a8ddd 100644 --- a/src/admin/debug/mod.rs +++ b/src/admin/debug/mod.rs @@ -207,6 +207,9 @@ pub(super) enum DebugCommand { map: Option, }, + /// - Trim memory usage + TrimMemory, + /// - Developer test stubs #[command(subcommand)] #[allow(non_snake_case)] diff --git a/src/core/alloc/default.rs b/src/core/alloc/default.rs index 83bfca7d..5db02884 100644 --- a/src/core/alloc/default.rs +++ b/src/core/alloc/default.rs @@ -1,5 +1,8 @@ //! Default allocator with no special features +/// Always returns Ok +pub fn trim() -> crate::Result { Ok(()) } + /// Always returns None #[must_use] pub fn memory_stats() -> Option { None } diff --git a/src/core/alloc/hardened.rs b/src/core/alloc/hardened.rs index 335a3307..e2d9b28e 100644 --- a/src/core/alloc/hardened.rs +++ b/src/core/alloc/hardened.rs @@ -3,6 +3,8 @@ #[global_allocator] static HMALLOC: hardened_malloc_rs::HardenedMalloc = hardened_malloc_rs::HardenedMalloc; +pub fn trim() -> crate::Result { Ok(()) } + #[must_use] //TODO: get usage pub fn memory_usage() -> Option { None } diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 423f5408..b2c1fe85 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -1,18 +1,45 @@ //! 
jemalloc allocator -use std::ffi::{c_char, c_void}; +use std::{ + cell::OnceCell, + ffi::{c_char, c_void}, + fmt::{Debug, Write}, +}; +use arrayvec::ArrayVec; +use tikv_jemalloc_ctl as mallctl; use tikv_jemalloc_sys as ffi; use tikv_jemallocator as jemalloc; +use crate::{err, is_equal_to, utils::math::Tried, Result}; + +#[cfg(feature = "jemalloc_conf")] +#[no_mangle] +pub static malloc_conf: &[u8] = b"\ +metadata_thp:always\ +,percpu_arena:percpu\ +,background_thread:true\ +,max_background_threads:-1\ +,lg_extent_max_active_fit:4\ +,oversize_threshold:33554432\ +,tcache_max:2097152\ +,dirty_decay_ms:16000\ +,muzzy_decay_ms:144000\ +\0"; + #[global_allocator] static JEMALLOC: jemalloc::Jemalloc = jemalloc::Jemalloc; +type Key = ArrayVec; +type Name = ArrayVec; + +const KEY_SEGS: usize = 8; +const NAME_MAX: usize = 128; + #[must_use] #[cfg(feature = "jemalloc_stats")] pub fn memory_usage() -> Option { use mallctl::stats; - use tikv_jemalloc_ctl as mallctl; let mibs = |input: Result| { let input = input.unwrap_or_default(); @@ -62,7 +89,12 @@ pub fn memory_stats() -> Option { unsafe extern "C" fn malloc_stats_cb(opaque: *mut c_void, msg: *const c_char) { // SAFETY: we have to trust the opaque points to our String - let res: &mut String = unsafe { opaque.cast::().as_mut().unwrap() }; + let res: &mut String = unsafe { + opaque + .cast::() + .as_mut() + .expect("failed to cast void* to &mut String") + }; // SAFETY: we have to trust the string is null terminated. let msg = unsafe { std::ffi::CStr::from_ptr(msg) }; @@ -70,3 +102,92 @@ unsafe extern "C" fn malloc_stats_cb(opaque: *mut c_void, msg: *const c_char) { let msg = String::from_utf8_lossy(msg.to_bytes()); res.push_str(msg.as_ref()); } + +macro_rules! mallctl { + ($name:literal) => {{ + thread_local! { + static KEY: OnceCell = OnceCell::default(); + }; + + KEY.with(|once| { + once.get_or_init(move || key($name).expect("failed to translate name into mib key")) + .clone() + }) + }}; +} + +pub fn trim() -> Result { set(&mallctl!("arena.4096.purge"), ()) } + +pub fn decay() -> Result { set(&mallctl!("arena.4096.purge"), ()) } + +pub fn set_by_name(name: &str, val: T) -> Result { set(&key(name)?, val) } + +pub fn get_by_name(name: &str) -> Result { get(&key(name)?) } + +pub mod this_thread { + use super::{get, key, set, Key, OnceCell, Result}; + + pub fn trim() -> Result { + let mut key = mallctl!("arena.0.purge"); + key[1] = arena_id()?.try_into()?; + set(&key, ()) + } + + pub fn decay() -> Result { + let mut key = mallctl!("arena.0.decay"); + key[1] = arena_id()?.try_into()?; + set(&key, ()) + } + + pub fn cache(enable: bool) -> Result { + set(&mallctl!("thread.tcache.enabled"), u8::from(enable)) + } + + pub fn flush() -> Result { set(&mallctl!("thread.tcache.flush"), ()) } + + pub fn allocated() -> Result { get::(&mallctl!("thread.allocated")) } + + pub fn deallocated() -> Result { get::(&mallctl!("thread.deallocated")) } + + pub fn arena_id() -> Result { get::(&mallctl!("thread.arena")) } +} + +fn set(key: &Key, val: T) -> Result +where + T: Copy + Debug, +{ + // SAFETY: T must be the exact expected type. + unsafe { mallctl::raw::write_mib(key.as_slice(), val) }.map_err(map_err) +} + +fn get(key: &Key) -> Result +where + T: Copy + Debug, +{ + // SAFETY: T must be perfectly valid to receive value. + unsafe { mallctl::raw::read_mib(key.as_slice()) }.map_err(map_err) +} + +fn key(name: &str) -> Result { + // tikv asserts the output buffer length is tight to the number of required mibs + // so we slice that down here. 
+ let segs = name.chars().filter(is_equal_to!(&'.')).count().try_add(1)?; + + let name = self::name(name)?; + let mut buf = [0_usize; KEY_SEGS]; + mallctl::raw::name_to_mib(name.as_slice(), &mut buf[0..segs]) + .map_err(map_err) + .map(move |()| buf.into_iter().take(segs).collect()) +} + +fn name(name: &str) -> Result { + let mut buf = Name::new(); + buf.try_extend_from_slice(name.as_bytes())?; + buf.try_extend_from_slice(b"\0")?; + + Ok(buf) +} + +fn map_err(error: tikv_jemalloc_ctl::Error) -> crate::Error { + err!("mallctl: {}", error.to_string()) +} diff --git a/src/core/alloc/mod.rs b/src/core/alloc/mod.rs index 31eb033c..0ed1b1a6 100644 --- a/src/core/alloc/mod.rs +++ b/src/core/alloc/mod.rs @@ -4,7 +4,7 @@ #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] pub mod je; #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] -pub use je::{memory_stats, memory_usage}; +pub use je::{memory_stats, memory_usage, trim}; #[cfg(all(not(target_env = "msvc"), feature = "hardened_malloc", not(feature = "jemalloc")))] pub mod hardened; @@ -13,7 +13,7 @@ pub mod hardened; feature = "hardened_malloc", not(feature = "jemalloc") ))] -pub use hardened::{memory_stats, memory_usage}; +pub use hardened::{memory_stats, memory_usage, trim}; #[cfg(any( target_env = "msvc", @@ -24,4 +24,4 @@ pub mod default; target_env = "msvc", all(not(feature = "hardened_malloc"), not(feature = "jemalloc")) ))] -pub use default::{memory_stats, memory_usage}; +pub use default::{memory_stats, memory_usage, trim}; From 77d8e26efe3b46e73325386837a0f95107213842 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 16 Jan 2025 19:08:54 +0000 Subject: [PATCH 029/328] integrate trim-on-park memory reclamation to runtime Signed-off-by: Jason Volk --- src/main/clap.rs | 12 ++++++++++++ src/main/runtime.rs | 22 +++++++++++++++++++++- 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/src/main/clap.rs b/src/main/clap.rs index ad5c815a..d3d40491 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -80,6 +80,18 @@ pub(crate) struct Args { default_missing_value = "true", )] pub(crate) worker_affinity: bool, + + /// Toggles feature to promote memory reclamation by the operating system + /// when tokio worker runs out of work. 
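	// (Illustrative aside: this option is read in runtime.rs further down,
	// where the park hook, presumably registered via Builder::on_thread_park,
	// calls conduwuit::alloc::je::this_thread::decay() so jemalloc can hand
	// the idle worker's clean pages back to the operating system.)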
+ #[arg( + long, + hide(true), + env = "CONDUWUIT_RUNTIME_GC_ON_PARK", + action = ArgAction::Set, + num_args = 0..=1, + require_equals(false), + )] + pub(crate) gc_on_park: Option, } /// Parse commandline arguments into structured data diff --git a/src/main/runtime.rs b/src/main/runtime.rs index 3039ef1b..315336b0 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -9,6 +9,7 @@ use std::{ }; use conduwuit::{ + result::LogErr, utils::sys::compute::{nth_core_available, set_affinity}, Result, }; @@ -22,12 +23,17 @@ const WORKER_KEEPALIVE: u64 = 36; const MAX_BLOCKING_THREADS: usize = 1024; static WORKER_AFFINITY: OnceLock = OnceLock::new(); +static GC_ON_PARK: OnceLock> = OnceLock::new(); pub(super) fn new(args: &Args) -> Result { WORKER_AFFINITY .set(args.worker_affinity) .expect("set WORKER_AFFINITY from program argument"); + GC_ON_PARK + .set(args.gc_on_park) + .expect("set GC_ON_PARK from program argument"); + let mut builder = Builder::new_multi_thread(); builder .enable_io() @@ -138,7 +144,21 @@ fn thread_unpark() {} name = %thread::current().name().unwrap_or("None"), ), )] -fn thread_park() {} +fn thread_park() { + match GC_ON_PARK + .get() + .as_ref() + .expect("GC_ON_PARK initialized by runtime::new()") + { + | Some(true) | None if cfg!(feature = "jemalloc_conf") => gc_on_park(), + | _ => (), + } +} + +fn gc_on_park() { + #[cfg(feature = "jemalloc")] + conduwuit::alloc::je::this_thread::decay().log_err().ok(); +} #[cfg(tokio_unstable)] #[tracing::instrument( From 3759d1be6ca01a687b29df9e68c24c7723a55427 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 11 Jan 2025 00:08:35 +0000 Subject: [PATCH 030/328] tweak per-column write_buffer down from default Signed-off-by: Jason Volk --- src/database/engine/cf_opts.rs | 4 +--- src/database/engine/descriptor.rs | 8 ++++++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index a68eb8b6..8cb659ac 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -32,9 +32,7 @@ fn descriptor_cf_options( opts.set_min_write_buffer_number(1); opts.set_max_write_buffer_number(2); - if let Some(write_size) = desc.write_size { - opts.set_write_buffer_size(write_size); - } + opts.set_write_buffer_size(desc.write_size); opts.set_target_file_size_base(desc.file_size); opts.set_target_file_size_multiplier(desc.file_shape[0]); diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index 9cf57d8f..ef08945e 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -20,7 +20,7 @@ pub(crate) struct Descriptor { pub(crate) val_size_hint: Option, pub(crate) block_size: usize, pub(crate) index_size: usize, - pub(crate) write_size: Option, + pub(crate) write_size: usize, pub(crate) cache_size: usize, pub(crate) level_size: u64, pub(crate) level_shape: [i32; 7], @@ -46,7 +46,7 @@ pub(crate) static BASE: Descriptor = Descriptor { val_size_hint: None, block_size: 1024 * 4, index_size: 1024 * 4, - write_size: None, + write_size: 1024 * 1024 * 2, cache_size: 1024 * 1024 * 4, level_size: 1024 * 1024 * 8, level_shape: [1, 1, 1, 3, 7, 15, 31], @@ -66,11 +66,13 @@ pub(crate) static BASE: Descriptor = Descriptor { pub(crate) static RANDOM: Descriptor = Descriptor { compaction_pri: CompactionPri::OldestSmallestSeqFirst, + write_size: 1024 * 1024 * 32, ..BASE }; pub(crate) static SEQUENTIAL: Descriptor = Descriptor { compaction_pri: CompactionPri::OldestLargestSeqFirst, + write_size: 1024 * 1024 * 64, 
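	// (Note: write_size feeds opts.set_write_buffer_size() in cf_opts.rs, i.e.
	// how large a column's memtable may grow before being flushed to an SST;
	// the sequential-write columns get the largest per-column buffer.)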
level_size: 1024 * 1024 * 32, file_size: 1024 * 1024 * 2, ..BASE @@ -78,6 +80,7 @@ pub(crate) static SEQUENTIAL: Descriptor = Descriptor { pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { compaction: CompactionStyle::Universal, + write_size: 1024 * 1024 * 16, level_size: 1024 * 512, file_size: 1024 * 128, ..RANDOM @@ -85,6 +88,7 @@ pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { compaction: CompactionStyle::Universal, + write_size: 1024 * 1024 * 16, level_size: 1024 * 1024, file_size: 1024 * 512, ..SEQUENTIAL From aad42bdaa0734d76ae390b7c865673a1bf68c4ca Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 16 Jan 2025 20:47:00 +0000 Subject: [PATCH 031/328] reduce block size on small tables Signed-off-by: Jason Volk --- src/database/engine/descriptor.rs | 5 +++++ src/database/maps.rs | 17 ++++++++++++++++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index ef08945e..06e1a29b 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -83,6 +83,9 @@ pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { write_size: 1024 * 1024 * 16, level_size: 1024 * 512, file_size: 1024 * 128, + index_size: 512, + block_size: 512, + cache_shards: 64, ..RANDOM }; @@ -91,5 +94,7 @@ pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { write_size: 1024 * 1024 * 16, level_size: 1024 * 1024, file_size: 1024 * 512, + block_size: 512, + cache_shards: 64, ..SEQUENTIAL }; diff --git a/src/database/maps.rs b/src/database/maps.rs index 19e19955..82c5a4a8 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -58,6 +58,8 @@ pub(super) static MAPS: &[Descriptor] = &[ cache_disp: CacheDisp::SharedWith("pduid_pdu"), key_size_hint: Some(48), val_size_hint: Some(1488), + block_size: 1024, + index_size: 512, ..descriptor::RANDOM }, Descriptor { @@ -65,6 +67,8 @@ pub(super) static MAPS: &[Descriptor] = &[ cache_disp: CacheDisp::Unique, key_size_hint: Some(48), val_size_hint: Some(16), + block_size: 512, + index_size: 512, ..descriptor::RANDOM }, Descriptor { @@ -72,6 +76,8 @@ pub(super) static MAPS: &[Descriptor] = &[ cache_disp: CacheDisp::Unique, key_size_hint: Some(48), val_size_hint: Some(8), + block_size: 512, + index_size: 512, ..descriptor::RANDOM }, Descriptor { @@ -111,6 +117,8 @@ pub(super) static MAPS: &[Descriptor] = &[ cache_disp: CacheDisp::SharedWith("eventid_outlierpdu"), key_size_hint: Some(16), val_size_hint: Some(1520), + block_size: 2048, + index_size: 512, ..descriptor::SEQUENTIAL }, Descriptor { @@ -162,6 +170,7 @@ pub(super) static MAPS: &[Descriptor] = &[ Descriptor { name: "roomsynctoken_shortstatehash", val_size_hint: Some(8), + block_size: 512, ..descriptor::SEQUENTIAL }, Descriptor { @@ -243,6 +252,8 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "shorteventid_shortstatehash", key_size_hint: Some(8), val_size_hint: Some(8), + block_size: 512, + index_size: 512, ..descriptor::SEQUENTIAL }, Descriptor { @@ -292,7 +303,11 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "token_userdeviceid", ..descriptor::RANDOM_SMALL }, - Descriptor { name: "tokenids", ..descriptor::RANDOM }, + Descriptor { + name: "tokenids", + block_size: 512, + ..descriptor::RANDOM + }, Descriptor { name: "url_previews", ..descriptor::RANDOM From bab40a374707f9a50e2b440ab830d1a4456dd678 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 17 Jan 2025 20:34:56 +0000 Subject: [PATCH 032/328] enable hashing on 
large-block indexes Signed-off-by: Jason Volk --- src/database/engine/cf_opts.rs | 16 +++++++--------- src/database/engine/descriptor.rs | 5 +++-- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index 8cb659ac..158fb3c8 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -1,8 +1,4 @@ -use conduwuit::{ - err, - utils::{math::Expected, BoolExt}, - Config, Result, -}; +use conduwuit::{err, utils::math::Expected, Config, Result}; use rocksdb::{ BlockBasedIndexType, BlockBasedOptions, BlockBasedPinningTier, Cache, DBCompressionType as CompressionType, DataBlockIndexType, LruCacheOptions, Options, @@ -133,10 +129,12 @@ fn table_options(desc: &Descriptor, has_cache: bool) -> BlockBasedOptions { opts.set_partition_filters(true); opts.set_use_delta_encoding(false); opts.set_index_type(BlockBasedIndexType::TwoLevelIndexSearch); - opts.set_data_block_index_type( - desc.block_index_hashing - .map_or(DataBlockIndexType::BinarySearch, || DataBlockIndexType::BinaryAndHash), - ); + + opts.set_data_block_index_type(match desc.block_index_hashing { + | None if desc.index_size > 512 => DataBlockIndexType::BinaryAndHash, + | Some(enable) if enable => DataBlockIndexType::BinaryAndHash, + | Some(_) | None => DataBlockIndexType::BinarySearch, + }); opts } diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index 06e1a29b..d668862b 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -34,7 +34,7 @@ pub(crate) struct Descriptor { pub(crate) compression: CompressionType, pub(crate) compression_level: i32, pub(crate) bottommost_level: Option, - pub(crate) block_index_hashing: bool, + pub(crate) block_index_hashing: Option, pub(crate) cache_shards: u32, } @@ -60,7 +60,7 @@ pub(crate) static BASE: Descriptor = Descriptor { compression: CompressionType::Zstd, compression_level: 32767, bottommost_level: Some(32767), - block_index_hashing: false, + block_index_hashing: None, cache_shards: 64, }; @@ -96,5 +96,6 @@ pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { file_size: 1024 * 512, block_size: 512, cache_shards: 64, + block_index_hashing: Some(false), ..SEQUENTIAL }; From 819e35f81fdfb6d7045b24f7fdf7d647e75ffb22 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 14 Jan 2025 08:01:12 +0000 Subject: [PATCH 033/328] remove mutex lock/unlock during sync iteration. 
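
The removed lines only ever acquired the per-room insert mutex and dropped it
again immediately, as a barrier to wait out any in-flight timeline append
before building that room's portion of the sync response. A minimal sketch of
the idiom with simplified types (the helper name here is invented for
illustration; the real field is a keyed mutex on the timeline service, locked
by room id):

    use tokio::sync::Mutex;

    async fn wait_for_pending_appends(insert_mutex: &Mutex<()>) {
        // Locking and immediately dropping the guard returns only once any
        // current holder (an in-flight append) has released the lock, so it
        // acts purely as a barrier rather than protecting shared data.
        drop(insert_mutex.lock().await);
    }

Dropping the barrier avoids a serializing lock acquisition per room on every
sync iteration.
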
Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index a4dc0205..d6b9f15c 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -242,10 +242,6 @@ pub(crate) async fn build_sync_events( .state_cache .rooms_invited(sender_user) .fold_default(|mut invited_rooms: BTreeMap<_, _>, (room_id, invite_state)| async move { - // Get and drop the lock to wait for remaining operations to finish - let insert_lock = services.rooms.timeline.mutex_insert.lock(&room_id).await; - drop(insert_lock); - let invite_count = services .rooms .state_cache @@ -271,10 +267,6 @@ pub(crate) async fn build_sync_events( .state_cache .rooms_knocked(sender_user) .fold_default(|mut knocked_rooms: BTreeMap<_, _>, (room_id, knock_state)| async move { - // Get and drop the lock to wait for remaining operations to finish - let insert_lock = services.rooms.timeline.mutex_insert.lock(&room_id).await; - drop(insert_lock); - let knock_count = services .rooms .state_cache @@ -470,10 +462,6 @@ async fn handle_left_room( full_state: bool, lazy_load_enabled: bool, ) -> Result> { - // Get and drop the lock to wait for remaining operations to finish - let insert_lock = services.rooms.timeline.mutex_insert.lock(room_id).await; - drop(insert_lock); - let left_count = services .rooms .state_cache @@ -627,11 +615,6 @@ async fn load_joined_room( lazy_load_send_redundant: bool, full_state: bool, ) -> Result<(JoinedRoom, HashSet, HashSet)> { - // Get and drop the lock to wait for remaining operations to finish - // This will make sure the we have all events until next_batch - let insert_lock = services.rooms.timeline.mutex_insert.lock(room_id).await; - drop(insert_lock); - let sincecount = PduCount::Normal(since); let next_batchcount = PduCount::Normal(next_batch); From fc1170e12a17f47cb1c0b60fc873dcfae127ccea Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 14 Jan 2025 05:20:42 +0000 Subject: [PATCH 034/328] additional tracing span tweaks Signed-off-by: Jason Volk --- src/api/server/send.rs | 10 ++++- src/database/pool.rs | 5 +-- src/service/rooms/auth_chain/mod.rs | 4 +- src/service/rooms/event_handler/fetch_prev.rs | 2 +- .../rooms/event_handler/fetch_state.rs | 2 +- .../event_handler/handle_incoming_pdu.rs | 2 +- .../rooms/event_handler/handle_prev_pdu.rs | 2 +- .../rooms/event_handler/resolve_state.rs | 2 +- .../rooms/event_handler/state_at_incoming.rs | 4 +- src/service/rooms/state_compressor/mod.rs | 43 +++++++++++-------- src/service/rooms/timeline/mod.rs | 4 +- 11 files changed, 47 insertions(+), 33 deletions(-) diff --git a/src/api/server/send.rs b/src/api/server/send.rs index c0c8a0c9..56a17c22 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -39,7 +39,15 @@ type ResolvedMap = BTreeMap>; /// # `PUT /_matrix/federation/v1/send/{txnId}` /// /// Push EDUs and PDUs to this server. 
-#[tracing::instrument(skip_all, fields(%client, origin = body.origin().as_str()), name = "send")] +#[tracing::instrument( + name = "send", + level = "debug", + skip_all, + fields( + %client, + origin = body.origin().as_str() + ), +)] pub(crate) async fn send_transaction_message_route( State(services): State, InsecureClientIp(client): InsecureClientIp, diff --git a/src/database/pool.rs b/src/database/pool.rs index b972e763..f5600c36 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -12,7 +12,7 @@ use std::{ use async_channel::{QueueStrategy, Receiver, RecvError, Sender}; use conduwuit::{ - debug, debug_warn, defer, err, error, implement, + debug, debug_warn, err, error, implement, result::DebugInspect, trace, utils::sys::compute::{get_affinity, nth_core_available, set_affinity}, @@ -271,9 +271,6 @@ async fn execute(&self, queue: &Sender, cmd: Cmd) -> Result { ), )] fn worker(self: Arc, id: usize, recv: Receiver) { - defer! {{ trace!("worker finished"); }} - trace!("worker spawned"); - self.worker_init(id); self.worker_loop(&recv); } diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index f6534825..74064701 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -79,7 +79,7 @@ impl Service { Ok(event_ids) } - #[tracing::instrument(skip_all, name = "auth_chain")] + #[tracing::instrument(name = "auth_chain", level = "debug", skip_all)] pub async fn get_auth_chain<'a, I>( &'a self, room_id: &RoomId, @@ -179,7 +179,7 @@ impl Service { Ok(full_auth_chain) } - #[tracing::instrument(skip(self, room_id))] + #[tracing::instrument(name = "inner", level = "trace", skip(self, room_id))] async fn get_auth_chain_inner( &self, room_id: &RoomId, diff --git a/src/service/rooms/event_handler/fetch_prev.rs b/src/service/rooms/event_handler/fetch_prev.rs index 0d64e98e..5966aeba 100644 --- a/src/service/rooms/event_handler/fetch_prev.rs +++ b/src/service/rooms/event_handler/fetch_prev.rs @@ -16,7 +16,7 @@ use super::check_room_id; #[implement(super::Service)] #[tracing::instrument( - level = "warn", + level = "debug", skip_all, fields(%origin), )] diff --git a/src/service/rooms/event_handler/fetch_state.rs b/src/service/rooms/event_handler/fetch_state.rs index cc4a3e46..0892655e 100644 --- a/src/service/rooms/event_handler/fetch_state.rs +++ b/src/service/rooms/event_handler/fetch_state.rs @@ -14,7 +14,7 @@ use crate::rooms::short::ShortStateKey; /// on the events #[implement(super::Service)] #[tracing::instrument( - level = "warn", + level = "debug", skip_all, fields(%origin), )] diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index c2e6ccc9..4e6f0b0c 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -41,7 +41,7 @@ use crate::rooms::timeline::RawPduId; #[implement(super::Service)] #[tracing::instrument( name = "pdu", - level = "warn", + level = "debug", skip_all, fields(%room_id, %event_id), )] diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index ad71c173..2bec4eba 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -14,7 +14,7 @@ use ruma::{CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName}; #[allow(clippy::too_many_arguments)] #[tracing::instrument( name = "prev", - level = "warn", + level = "debug", skip_all, 
fields(%prev_id), )] diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index f21f7b66..8640c582 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -18,7 +18,7 @@ use ruma::{ use crate::rooms::state_compressor::CompressedStateEvent; #[implement(super::Service)] -#[tracing::instrument(skip_all, name = "resolve")] +#[tracing::instrument(name = "resolve", level = "debug", skip_all)] pub async fn resolve_state( &self, room_id: &RoomId, diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index fa2ce1cd..9e7f8d2a 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -16,7 +16,7 @@ use ruma::{state_res::StateMap, OwnedEventId, RoomId, RoomVersionId}; // TODO: if we know the prev_events of the incoming event we can avoid the #[implement(super::Service)] // request and build the state from a known point and resolve if > 1 prev_event -#[tracing::instrument(skip_all, name = "state")] +#[tracing::instrument(name = "state", level = "debug", skip_all)] pub(super) async fn state_at_incoming_degree_one( &self, incoming_pdu: &Arc, @@ -66,7 +66,7 @@ pub(super) async fn state_at_incoming_degree_one( } #[implement(super::Service)] -#[tracing::instrument(skip_all, name = "state")] +#[tracing::instrument(name = "state", level = "debug", skip_all)] pub(super) async fn state_at_incoming_resolved( &self, incoming_pdu: &Arc, diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index a61a66a1..532df360 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -7,7 +7,7 @@ use std::{ use arrayvec::ArrayVec; use conduwuit::{ - at, checked, debug, err, expected, utils, + at, checked, err, expected, utils, utils::{bytes, math::usize_from_f64, stream::IterStream}, Result, }; @@ -117,35 +117,44 @@ impl crate::Service for Service { impl Service { /// Returns a stack with info on shortstatehash, full state, added diff and /// removed diff for the selected shortstatehash and each parent layer. + #[tracing::instrument(name = "load", level = "debug", skip(self))] pub async fn load_shortstatehash_info( &self, shortstatehash: ShortStateHash, ) -> Result { - if let Some(r) = self - .stateinfo_cache - .lock() - .expect("locked") - .get_mut(&shortstatehash) - { + if let Some(r) = self.stateinfo_cache.lock()?.get_mut(&shortstatehash) { return Ok(r.clone()); } let stack = self.new_shortstatehash_info(shortstatehash).await?; - debug!( - ?shortstatehash, - len = %stack.len(), - "cache update" - ); - - self.stateinfo_cache - .lock() - .expect("locked") - .insert(shortstatehash, stack.clone()); + self.cache_shortstatehash_info(shortstatehash, stack.clone()) + .await?; Ok(stack) } + /// Returns a stack with info on shortstatehash, full state, added diff and + /// removed diff for the selected shortstatehash and each parent layer. 
+ #[tracing::instrument( + name = "cache", + level = "debug", + skip_all, + fields( + ?shortstatehash, + stack = stack.len(), + ), + )] + async fn cache_shortstatehash_info( + &self, + shortstatehash: ShortStateHash, + stack: ShortStateInfoVec, + ) -> Result { + self.stateinfo_cache.lock()?.insert(shortstatehash, stack); + + Ok(()) + } + async fn new_shortstatehash_info( &self, shortstatehash: ShortStateHash, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 3ebc432f..bd60e40e 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -259,7 +259,7 @@ impl Service { /// happens in `append_pdu`. /// /// Returns pdu id - #[tracing::instrument(skip_all)] + #[tracing::instrument(level = "debug", skip_all)] pub async fn append_pdu( &self, pdu: &PduEvent, @@ -942,7 +942,7 @@ impl Service { /// Append the incoming event setting the state snapshot to the state from /// the server that sent the event. - #[tracing::instrument(skip_all)] + #[tracing::instrument(level = "debug", skip_all)] pub async fn append_incoming_pdu( &self, pdu: &PduEvent, From 96e85adc32f68e2080da9cf0088d9da84858747e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 18 Jan 2025 01:34:14 +0000 Subject: [PATCH 035/328] use cache builder for row and table cache options add cache check using multi-get path Signed-off-by: Jason Volk --- src/database/engine/cf_opts.rs | 2 +- src/database/engine/context.rs | 19 +++++++--- src/database/engine/descriptor.rs | 2 ++ src/database/map/get.rs | 60 +++++++++++++++++++++---------- src/database/map/get_batch.rs | 41 ++++++++++++++++----- 5 files changed, 90 insertions(+), 34 deletions(-) diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index 158fb3c8..7b3a1d49 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -178,7 +178,7 @@ fn get_cache(ctx: &Context, desc: &Descriptor) -> Option { .try_into() .expect("u32 to i32 conversion"); - debug_assert!(shard_bits <= 6, "cache shards limited to 64"); + debug_assert!(shard_bits <= 10, "cache shards probably too large"); let mut cache_opts = LruCacheOptions::default(); cache_opts.set_num_shard_bits(shard_bits); cache_opts.set_capacity(size); diff --git a/src/database/engine/context.rs b/src/database/engine/context.rs index 76238f7d..04e08854 100644 --- a/src/database/engine/context.rs +++ b/src/database/engine/context.rs @@ -4,7 +4,7 @@ use std::{ }; use conduwuit::{debug, utils::math::usize_from_f64, Result, Server}; -use rocksdb::{Cache, Env}; +use rocksdb::{Cache, Env, LruCacheOptions}; use crate::{or_else, pool::Pool}; @@ -25,12 +25,21 @@ impl Context { let config = &server.config; let cache_capacity_bytes = config.db_cache_capacity_mb * 1024.0 * 1024.0; - let row_cache_capacity_bytes = usize_from_f64(cache_capacity_bytes * 0.50)?; - let row_cache = Cache::new_lru_cache(row_cache_capacity_bytes); - + let col_shard_bits = 7; let col_cache_capacity_bytes = usize_from_f64(cache_capacity_bytes * 0.50)?; - let col_cache = Cache::new_lru_cache(col_cache_capacity_bytes); + let row_shard_bits = 7; + let row_cache_capacity_bytes = usize_from_f64(cache_capacity_bytes * 0.50)?; + + let mut row_cache_opts = LruCacheOptions::default(); + row_cache_opts.set_num_shard_bits(row_shard_bits); + row_cache_opts.set_capacity(row_cache_capacity_bytes); + let row_cache = Cache::new_lru_cache_opts(&row_cache_opts); + + let mut col_cache_opts = LruCacheOptions::default(); + col_cache_opts.set_num_shard_bits(col_shard_bits); + 
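		// (Note: 7 shard bits splits this LRU cache into 2^7 = 128 shards, each
		// guarded by its own lock, which reduces contention when many threads
		// hit the row and column caches concurrently.)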
col_cache_opts.set_capacity(col_cache_capacity_bytes); + let col_cache = Cache::new_lru_cache_opts(&col_cache_opts); let col_cache: BTreeMap<_, _> = [("Shared".to_owned(), col_cache)].into(); let mut env = Env::new().or_else(or_else)?; diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index d668862b..234ca2bf 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -67,6 +67,7 @@ pub(crate) static BASE: Descriptor = Descriptor { pub(crate) static RANDOM: Descriptor = Descriptor { compaction_pri: CompactionPri::OldestSmallestSeqFirst, write_size: 1024 * 1024 * 32, + cache_shards: 128, ..BASE }; @@ -75,6 +76,7 @@ pub(crate) static SEQUENTIAL: Descriptor = Descriptor { write_size: 1024 * 1024 * 64, level_size: 1024 * 1024 * 32, file_size: 1024 * 1024 * 2, + cache_shards: 128, ..BASE }; diff --git a/src/database/map/get.rs b/src/database/map/get.rs index e64ef2ec..73182042 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -3,6 +3,7 @@ use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc}; use arrayvec::ArrayVec; use conduwuit::{err, implement, utils::result::MapExpect, Err, Result}; use futures::{future::ready, Future, FutureExt, TryFutureExt}; +use rocksdb::{DBPinnableSlice, ReadOptions}; use serde::Serialize; use tokio::task; @@ -90,6 +91,17 @@ where .boxed() } +/// Fetch a value from the cache without I/O. +#[implement(super::Map)] +#[tracing::instrument(skip(self, key), name = "cache", level = "trace")] +pub(crate) fn get_cached(&self, key: &K) -> Result>> +where + K: AsRef<[u8]> + Debug + ?Sized, +{ + let res = self.get_blocking_opts(key, &self.cache_read_options); + cached_handle_from(res) +} + /// Fetch a value from the database into cache, returning a reference-handle. /// The key is referenced directly to perform the query. This is a thread- /// blocking call. @@ -99,37 +111,47 @@ pub fn get_blocking(&self, key: &K) -> Result> where K: AsRef<[u8]> + ?Sized, { - self.db - .db - .get_pinned_cf_opt(&self.cf(), key, &self.read_options) + let res = self.get_blocking_opts(key, &self.read_options); + handle_from(res) +} + +#[implement(super::Map)] +fn get_blocking_opts( + &self, + key: &K, + read_options: &ReadOptions, +) -> Result>, rocksdb::Error> +where + K: AsRef<[u8]> + ?Sized, +{ + self.db.db.get_pinned_cf_opt(&self.cf(), key, read_options) +} + +#[inline] +pub(super) fn handle_from( + result: Result>, rocksdb::Error>, +) -> Result> { + result .map_err(map_err)? .map(Handle::from) .ok_or(err!(Request(NotFound("Not found in database")))) } -/// Fetch a value from the cache without I/O. 
-#[implement(super::Map)] -#[tracing::instrument(skip(self, key), name = "cache", level = "trace")] -pub(crate) fn get_cached(&self, key: &K) -> Result>> -where - K: AsRef<[u8]> + Debug + ?Sized, -{ - let res = self - .db - .db - .get_pinned_cf_opt(&self.cf(), key, &self.cache_read_options); - - match res { +#[inline] +pub(super) fn cached_handle_from( + result: Result>, rocksdb::Error>, +) -> Result>> { + match result { // cache hit; not found | Ok(None) => Err!(Request(NotFound("Not found in database"))), // cache hit; value found - | Ok(Some(res)) => Ok(Some(Handle::from(res))), + | Ok(Some(result)) => Ok(Some(Handle::from(result))), // cache miss; unknown - | Err(e) if is_incomplete(&e) => Ok(None), + | Err(error) if is_incomplete(&error) => Ok(None), // some other error occurred - | Err(e) => or_else(e), + | Err(error) => or_else(error), } } diff --git a/src/database/map/get_batch.rs b/src/database/map/get_batch.rs index 452697f1..ee9269e3 100644 --- a/src/database/map/get_batch.rs +++ b/src/database/map/get_batch.rs @@ -1,7 +1,7 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; use conduwuit::{ - err, implement, + implement, utils::{ stream::{automatic_amplification, automatic_width, WidebandExt}, IterStream, @@ -9,9 +9,11 @@ use conduwuit::{ Result, }; use futures::{Stream, StreamExt, TryStreamExt}; +use rocksdb::{DBPinnableSlice, ReadOptions}; use serde::Serialize; -use crate::{keyval::KeyBuf, ser, util::map_err, Handle}; +use super::get::{cached_handle_from, handle_from}; +use crate::{keyval::KeyBuf, ser, Handle}; #[implement(super::Map)] #[tracing::instrument(skip(self, keys), level = "trace")] @@ -66,12 +68,40 @@ where .try_flatten() } +#[implement(super::Map)] +#[tracing::instrument(name = "batch_cached", level = "trace", skip_all)] +pub(crate) fn get_batch_cached<'a, I, K>( + &self, + keys: I, +) -> impl Iterator>>> + Send +where + I: Iterator + ExactSizeIterator + Send, + K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, +{ + self.get_batch_blocking_opts(keys, &self.cache_read_options) + .map(cached_handle_from) +} + #[implement(super::Map)] #[tracing::instrument(name = "batch_blocking", level = "trace", skip_all)] pub(crate) fn get_batch_blocking<'a, I, K>( &self, keys: I, ) -> impl Iterator>> + Send +where + I: Iterator + ExactSizeIterator + Send, + K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, +{ + self.get_batch_blocking_opts(keys, &self.read_options) + .map(handle_from) +} + +#[implement(super::Map)] +fn get_batch_blocking_opts<'a, I, K>( + &self, + keys: I, + read_options: &ReadOptions, +) -> impl Iterator>, rocksdb::Error>> + Send where I: Iterator + ExactSizeIterator + Send, K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, @@ -80,15 +110,8 @@ where // comparator**. const SORTED: bool = false; - let read_options = &self.read_options; self.db .db .batched_multi_get_cf_opt(&self.cf(), keys, SORTED, read_options) .into_iter() - .map(|result| { - result - .map_err(map_err)? 
- .map(Handle::from) - .ok_or(err!(Request(NotFound("Not found in database")))) - }) } From abf33013e37e89e1581c8ca9ab9b1411d51d7513 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 18 Jan 2025 01:38:09 +0000 Subject: [PATCH 036/328] check-in additional database test related Signed-off-by: Jason Volk --- src/database/tests.rs | 97 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) diff --git a/src/database/tests.rs b/src/database/tests.rs index 0a42ad60..2f143698 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -35,6 +35,29 @@ fn ser_tuple() { assert_eq!(a, b); } +#[test] +fn ser_tuple_option() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + + let mut a = Vec::::new(); + a.push(0xFF); + a.extend_from_slice(user_id.as_bytes()); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + let b: (Option<&RoomId>, &UserId) = (None, user_id); + let b = serialize_to_vec(&b).expect("failed to serialize tuple"); + assert_eq!(a, b); + + let bb: (Option<&RoomId>, &UserId) = (Some(room_id), user_id); + let bb = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bb); +} + #[test] #[should_panic(expected = "I/O error: failed to write whole buffer")] fn ser_overflow() { @@ -284,6 +307,8 @@ fn ser_array() { let b: u64 = 987_654; let arr: &[u64] = &[a, b]; + let vec: Vec = vec![a, b]; + let arv: ArrayVec = [a, b].into(); let mut v = Vec::new(); v.extend_from_slice(&a.to_be_bytes()); @@ -291,4 +316,76 @@ fn ser_array() { let s = serialize_to_vec(arr).expect("failed to serialize"); assert_eq!(&s, &v, "serialization does not match"); + + let s = serialize_to_vec(arv.as_slice()).expect("failed to serialize arrayvec"); + assert_eq!(&s, &v, "arrayvec serialization does not match"); + + let s = serialize_to_vec(&vec).expect("failed to serialize vec"); + assert_eq!(&s, &v, "vec serialization does not match"); +} + +#[cfg(todo)] +#[test] +fn de_array() { + let a: u64 = 123_456; + let b: u64 = 987_654; + + let mut v: Vec = Vec::new(); + v.extend_from_slice(&a.to_be_bytes()); + v.extend_from_slice(&b.to_be_bytes()); + + let arv: ArrayVec = de::from_slice::>(v.as_slice()) + .map(TryInto::try_into) + .expect("failed to deserialize to arrayvec") + .expect("failed to deserialize into"); + + assert_eq!(arv[0], a, "deserialized arv [0] does not match"); + assert_eq!(arv[1], b, "deserialized arv [1] does not match"); + + let arr: [u64; 2] = de::from_slice::<[u64; 2]>(v.as_slice()) + .map(TryInto::try_into) + .expect("failed to deserialize to array") + .expect("failed to deserialize into"); + + assert_eq!(arr[0], a, "deserialized arr [0] does not match"); + assert_eq!(arr[1], b, "deserialized arr [1] does not match"); + + let vec: Vec = de::from_slice(v.as_slice()).expect("failed to deserialize to vec"); + + assert_eq!(vec[0], a, "deserialized vec [0] does not match"); + assert_eq!(vec[1], b, "deserialized vec [1] does not match"); +} + +#[cfg(todo)] +#[test] +fn de_complex() { + type Key<'a> = (&'a UserId, ArrayVec, &'a RoomId); + + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + let a: u64 = 123_456; + let b: u64 = 987_654; + + let mut v = Vec::new(); + v.extend_from_slice(user_id.as_bytes()); + v.extend_from_slice(b"\xFF"); + v.extend_from_slice(&a.to_be_bytes()); + 
v.extend_from_slice(&b.to_be_bytes()); + v.extend_from_slice(b"\xFF"); + v.extend_from_slice(room_id.as_bytes()); + + let arr: &[u64] = &[a, b]; + let key = (user_id, arr, room_id); + let s = serialize_to_vec(&key).expect("failed to serialize"); + + assert_eq!(&s, &v, "serialization does not match"); + + let key = (user_id, [a, b].into(), room_id); + let arr: Key<'_> = de::from_slice(&v).expect("failed to deserialize"); + + assert_eq!(arr, key, "deserialization does not match"); + + let arr: Key<'_> = de::from_slice(&s).expect("failed to deserialize"); + + assert_eq!(arr, key, "deserialization of serialization does not match"); } From 8141ca34448452fd8fd910c85626a2568a3ebe55 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 4 Jan 2025 16:57:07 +0000 Subject: [PATCH 037/328] refactor admin outputs to asyncwrite Signed-off-by: Jason Volk --- src/admin/admin.rs | 12 +- src/admin/check/mod.rs | 15 +- src/admin/command.rs | 30 +- src/admin/debug/commands.rs | 39 ++- src/admin/debug/mod.rs | 12 +- src/admin/debug/tester.rs | 2 +- src/admin/processor.rs | 43 ++- src/admin/query/account_data.rs | 74 ++--- src/admin/query/appservice.rs | 15 +- src/admin/query/globals.rs | 24 +- src/admin/query/mod.rs | 20 +- src/admin/query/presence.rs | 16 +- src/admin/query/pusher.rs | 12 +- src/admin/query/raw.rs | 457 ++++++++++++++++++++++++++++ src/admin/query/resolver.rs | 82 ++--- src/admin/query/room_alias.rs | 20 +- src/admin/query/room_state_cache.rs | 47 +-- src/admin/query/sending.rs | 9 +- src/admin/query/short.rs | 45 +++ src/admin/query/users.rs | 46 +++ src/admin/room/alias.rs | 9 +- src/admin/room/directory.rs | 8 +- src/admin/user/commands.rs | 51 ++-- src/macros/admin.rs | 16 +- 24 files changed, 877 insertions(+), 227 deletions(-) create mode 100644 src/admin/query/raw.rs create mode 100644 src/admin/query/short.rs diff --git a/src/admin/admin.rs b/src/admin/admin.rs index 9097a613..b6de1ec6 100644 --- a/src/admin/admin.rs +++ b/src/admin/admin.rs @@ -1,6 +1,5 @@ use clap::Parser; use conduwuit::Result; -use ruma::events::room::message::RoomMessageEventContent; use crate::{ appservice, appservice::AppserviceCommand, check, check::CheckCommand, command::Command, @@ -50,13 +49,10 @@ pub(super) enum AdminCommand { } #[tracing::instrument(skip_all, name = "command")] -pub(super) async fn process( - command: AdminCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(command: AdminCommand, context: &Command<'_>) -> Result { use AdminCommand::*; - Ok(match command { + match command { | Appservices(command) => appservice::process(command, context).await?, | Media(command) => media::process(command, context).await?, | Users(command) => user::process(command, context).await?, @@ -66,5 +62,7 @@ pub(super) async fn process( | Debug(command) => debug::process(command, context).await?, | Query(command) => query::process(command, context).await?, | Check(command) => check::process(command, context).await?, - }) + }; + + Ok(()) } diff --git a/src/admin/check/mod.rs b/src/admin/check/mod.rs index 4790a6de..30b335c4 100644 --- a/src/admin/check/mod.rs +++ b/src/admin/check/mod.rs @@ -2,20 +2,11 @@ mod commands; use clap::Subcommand; use conduwuit::Result; -use ruma::events::room::message::RoomMessageEventContent; -use crate::Command; +use crate::admin_command_dispatch; +#[admin_command_dispatch] #[derive(Debug, Subcommand)] pub(super) enum CheckCommand { - AllUsers, -} - -pub(super) async fn process( - command: CheckCommand, - context: &Command<'_>, -) -> Result { - Ok(match command { 
- | CheckCommand::AllUsers => context.check_all_users().await?, - }) + CheckAllUsers, } diff --git a/src/admin/command.rs b/src/admin/command.rs index 5277b976..5ad9e581 100644 --- a/src/admin/command.rs +++ b/src/admin/command.rs @@ -1,6 +1,12 @@ -use std::time::SystemTime; +use std::{fmt, time::SystemTime}; +use conduwuit::Result; use conduwuit_service::Services; +use futures::{ + io::{AsyncWriteExt, BufWriter}, + lock::Mutex, + Future, FutureExt, +}; use ruma::EventId; pub(crate) struct Command<'a> { @@ -8,4 +14,26 @@ pub(crate) struct Command<'a> { pub(crate) body: &'a [&'a str], pub(crate) timer: SystemTime, pub(crate) reply_id: Option<&'a EventId>, + pub(crate) output: Mutex>>, +} + +impl Command<'_> { + pub(crate) fn write_fmt( + &self, + arguments: fmt::Arguments<'_>, + ) -> impl Future + Send + '_ { + let buf = format!("{arguments}"); + self.output.lock().then(|mut output| async move { + output.write_all(buf.as_bytes()).await.map_err(Into::into) + }) + } + + pub(crate) fn write_str<'a>( + &'a self, + s: &'a str, + ) -> impl Future + Send + 'a { + self.output.lock().then(move |mut output| async move { + output.write_all(s.as_bytes()).await.map_err(Into::into) + }) + } } diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index d027fa73..b6189f6a 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -6,7 +6,8 @@ use std::{ }; use conduwuit::{ - debug_error, err, info, trace, utils, utils::string::EMPTY, warn, Error, PduEvent, Result, + debug_error, err, info, trace, utils, utils::string::EMPTY, warn, Error, PduEvent, PduId, + RawPduId, Result, }; use futures::{FutureExt, StreamExt}; use ruma::{ @@ -15,7 +16,10 @@ use ruma::{ CanonicalJsonObject, EventId, OwnedEventId, OwnedRoomOrAliasId, RoomId, RoomVersionId, ServerName, }; -use service::rooms::state_compressor::HashSetCompressStateEvent; +use service::rooms::{ + short::{ShortEventId, ShortRoomId}, + state_compressor::HashSetCompressStateEvent, +}; use tracing_subscriber::EnvFilter; use crate::admin_command; @@ -131,6 +135,35 @@ pub(super) async fn get_pdu(&self, event_id: Box) -> Result Result { + let pdu_id: RawPduId = PduId { + shortroomid, + shorteventid: shorteventid.into(), + } + .into(); + + let pdu_json = self + .services + .rooms + .timeline + .get_pdu_json_from_id(&pdu_id) + .await; + + match pdu_json { + | Ok(json) => { + let json_text = + serde_json::to_string_pretty(&json).expect("canonical json is valid json"); + Ok(RoomMessageEventContent::notice_markdown(format!("```json\n{json_text}\n```",))) + }, + | Err(_) => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")), + } +} + #[admin_command] pub(super) async fn get_remote_pdu_list( &self, @@ -895,7 +928,7 @@ pub(super) async fn list_dependencies(&self, names: bool) -> Result, }, + /// - Retrieve and print a PDU by PduId from the conduwuit database + GetShortPdu { + /// Shortroomid integer + shortroomid: ShortRoomId, + + /// Shorteventid integer + shorteventid: ShortEventId, + }, + /// - Attempts to retrieve a PDU from a remote server. Inserts it into our /// database/timeline if found and we do not have this PDU already /// (following normal event auth rules, handles it as an incoming PDU). 
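
An illustrative aside on the output plumbing the admin commands above rely on,
simplified from the Command type in command.rs (the Ctx type and its method are
stand-ins, and the crate's own error type is replaced with std::io::Result, so
treat this as a sketch rather than the exact code). Because write_fmt returns a
future, the std write!/writeln! macros can target the command context directly
and call sites simply await the result:

    use std::fmt;
    use futures::{
        io::{AsyncWriteExt, BufWriter},
        lock::Mutex,
        Future, FutureExt,
    };

    struct Ctx {
        output: Mutex<BufWriter<Vec<u8>>>,
    }

    impl Ctx {
        // write!(ctx, "...") expands to ctx.write_fmt(format_args!(...)), so
        // returning a future lets call sites write `write!(ctx, "...").await?`.
        fn write_fmt(
            &self,
            args: fmt::Arguments<'_>,
        ) -> impl Future<Output = std::io::Result<()>> + Send + '_ {
            let buf = format!("{args}");
            self.output
                .lock()
                .then(move |mut out| async move { out.write_all(buf.as_bytes()).await })
        }
    }
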
diff --git a/src/admin/debug/tester.rs b/src/admin/debug/tester.rs index 5f922ece..5200fa0d 100644 --- a/src/admin/debug/tester.rs +++ b/src/admin/debug/tester.rs @@ -31,7 +31,7 @@ async fn failure(&self) -> Result { #[admin_command] async fn tester(&self) -> Result { - Ok(RoomMessageEventContent::notice_plain("completed")) + Ok(RoomMessageEventContent::notice_plain("legacy")) } #[inline(never)] diff --git a/src/admin/processor.rs b/src/admin/processor.rs index ed7d5ed1..eefcdcd6 100644 --- a/src/admin/processor.rs +++ b/src/admin/processor.rs @@ -1,5 +1,6 @@ use std::{ fmt::Write, + mem::take, panic::AssertUnwindSafe, sync::{Arc, Mutex}, time::SystemTime, @@ -17,7 +18,7 @@ use conduwuit::{ utils::string::{collect_stream, common_prefix}, warn, Error, Result, }; -use futures::future::FutureExt; +use futures::{future::FutureExt, io::BufWriter, AsyncWriteExt}; use ruma::{ events::{ relation::InReplyTo, @@ -62,9 +63,32 @@ async fn process_command(services: Arc, input: &CommandInput) -> Proce body: &body, timer: SystemTime::now(), reply_id: input.reply_id.as_deref(), + output: BufWriter::new(Vec::new()).into(), }; - process(&context, command, &args).await + let (result, mut logs) = process(&context, command, &args).await; + + let output = &mut context.output.lock().await; + output.flush().await.expect("final flush of output stream"); + + let output = + String::from_utf8(take(output.get_mut())).expect("invalid utf8 in command output stream"); + + match result { + | Ok(()) if logs.is_empty() => + Ok(Some(reply(RoomMessageEventContent::notice_markdown(output), context.reply_id))), + + | Ok(()) => { + logs.write_str(output.as_str()).expect("output buffer"); + Ok(Some(reply(RoomMessageEventContent::notice_markdown(logs), context.reply_id))) + }, + | Err(error) => { + write!(&mut logs, "Command failed with error:\n```\n{error:#?}\n```") + .expect("output buffer"); + + Err(reply(RoomMessageEventContent::notice_markdown(logs), context.reply_id)) + }, + } } fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult { @@ -81,7 +105,7 @@ async fn process( context: &Command<'_>, command: AdminCommand, args: &[String], -) -> ProcessorResult { +) -> (Result, String) { let (capture, logs) = capture_create(context); let capture_scope = capture.start(); @@ -104,18 +128,7 @@ async fn process( } drop(logs); - match result { - | Ok(content) => { - write!(&mut output, "{0}", content.body()) - .expect("failed to format command result to output buffer"); - Ok(Some(reply(RoomMessageEventContent::notice_markdown(output), context.reply_id))) - }, - | Err(error) => { - write!(&mut output, "Command failed with error:\n```\n{error:#?}\n```") - .expect("failed to format command result to output"); - Err(reply(RoomMessageEventContent::notice_markdown(output), context.reply_id)) - }, - } + (result, output) } fn capture_create(context: &Command<'_>) -> (Arc, Arc>) { diff --git a/src/admin/query/account_data.rs b/src/admin/query/account_data.rs index 43762789..b75d8234 100644 --- a/src/admin/query/account_data.rs +++ b/src/admin/query/account_data.rs @@ -3,8 +3,9 @@ use conduwuit::Result; use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, RoomId, UserId}; -use crate::Command; +use crate::{admin_command, admin_command_dispatch}; +#[admin_command_dispatch] #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/account_data.rs pub(crate) enum AccountDataCommand { @@ -19,7 +20,7 @@ pub(crate) enum AccountDataCommand { }, /// - Searches the account 
data for a specific kind. - Get { + AccountDataGet { /// Full user ID user_id: Box, /// Account data event type @@ -29,38 +30,43 @@ pub(crate) enum AccountDataCommand { }, } -/// All the getters and iterators from src/database/key_value/account_data.rs -pub(super) async fn process( - subcommand: AccountDataCommand, - context: &Command<'_>, +#[admin_command] +async fn changes_since( + &self, + user_id: Box, + since: u64, + room_id: Option>, ) -> Result { - let services = context.services; + let timer = tokio::time::Instant::now(); + let results: Vec<_> = self + .services + .account_data + .changes_since(room_id.as_deref(), &user_id, since) + .collect() + .await; + let query_time = timer.elapsed(); - match subcommand { - | AccountDataCommand::ChangesSince { user_id, since, room_id } => { - let timer = tokio::time::Instant::now(); - let results: Vec<_> = services - .account_data - .changes_since(room_id.as_deref(), &user_id, since) - .collect() - .await; - let query_time = timer.elapsed(); - - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) - }, - | AccountDataCommand::Get { user_id, kind, room_id } => { - let timer = tokio::time::Instant::now(); - let results = services - .account_data - .get_raw(room_id.as_deref(), &user_id, &kind) - .await; - let query_time = timer.elapsed(); - - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) - }, - } + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + ))) +} + +#[admin_command] +async fn account_data_get( + &self, + user_id: Box, + kind: String, + room_id: Option>, +) -> Result { + let timer = tokio::time::Instant::now(); + let results = self + .services + .account_data + .get_raw(room_id.as_deref(), &user_id, &kind) + .await; + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + ))) } diff --git a/src/admin/query/appservice.rs b/src/admin/query/appservice.rs index fe4861bc..f9e1fd2c 100644 --- a/src/admin/query/appservice.rs +++ b/src/admin/query/appservice.rs @@ -1,6 +1,5 @@ use clap::Subcommand; use conduwuit::Result; -use ruma::events::room::message::RoomMessageEventContent; use crate::Command; @@ -18,10 +17,7 @@ pub(crate) enum AppserviceCommand { } /// All the getters and iterators from src/database/key_value/appservice.rs -pub(super) async fn process( - subcommand: AppserviceCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_>) -> Result { let services = context.services; match subcommand { @@ -31,18 +27,15 @@ pub(super) async fn process( let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, | AppserviceCommand::All => { let timer = tokio::time::Instant::now(); let results = services.appservice.all().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, } + .await } diff --git a/src/admin/query/globals.rs b/src/admin/query/globals.rs index 
e356453f..1642f7cd 100644 --- a/src/admin/query/globals.rs +++ b/src/admin/query/globals.rs @@ -1,6 +1,6 @@ use clap::Subcommand; use conduwuit::Result; -use ruma::{events::room::message::RoomMessageEventContent, ServerName}; +use ruma::ServerName; use crate::Command; @@ -21,10 +21,7 @@ pub(crate) enum GlobalsCommand { } /// All the getters and iterators from src/database/key_value/globals.rs -pub(super) async fn process( - subcommand: GlobalsCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) -> Result { let services = context.services; match subcommand { @@ -33,36 +30,29 @@ pub(super) async fn process( let results = services.globals.db.database_version().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, | GlobalsCommand::CurrentCount => { let timer = tokio::time::Instant::now(); let results = services.globals.db.current_count(); let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, | GlobalsCommand::LastCheckForUpdatesId => { let timer = tokio::time::Instant::now(); let results = services.updates.last_check_for_updates_id().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, | GlobalsCommand::SigningKeysFor { origin } => { let timer = tokio::time::Instant::now(); let results = services.server_keys.verify_keys_for(&origin).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, } + .await } diff --git a/src/admin/query/mod.rs b/src/admin/query/mod.rs index ab269a40..da27eb1d 100644 --- a/src/admin/query/mod.rs +++ b/src/admin/query/mod.rs @@ -3,10 +3,13 @@ mod appservice; mod globals; mod presence; mod pusher; +mod raw; mod resolver; mod room_alias; mod room_state_cache; +mod room_timeline; mod sending; +mod short; mod users; use clap::Subcommand; @@ -14,9 +17,10 @@ use conduwuit::Result; use self::{ account_data::AccountDataCommand, appservice::AppserviceCommand, globals::GlobalsCommand, - presence::PresenceCommand, pusher::PusherCommand, resolver::ResolverCommand, + presence::PresenceCommand, pusher::PusherCommand, raw::RawCommand, resolver::ResolverCommand, room_alias::RoomAliasCommand, room_state_cache::RoomStateCacheCommand, - sending::SendingCommand, users::UsersCommand, + room_timeline::RoomTimelineCommand, sending::SendingCommand, short::ShortCommand, + users::UsersCommand, }; use crate::admin_command_dispatch; @@ -44,6 +48,10 @@ pub(super) enum QueryCommand { #[command(subcommand)] RoomStateCache(RoomStateCacheCommand), + /// - rooms/timeline iterators and getters + #[command(subcommand)] + RoomTimeline(RoomTimelineCommand), + /// - globals.rs iterators and getters #[command(subcommand)] Globals(GlobalsCommand), @@ -63,4 +71,12 @@ pub(super) enum QueryCommand { /// - pusher service #[command(subcommand)] 
Pusher(PusherCommand), + + /// - short service + #[command(subcommand)] + Short(ShortCommand), + + /// - raw service + #[command(subcommand)] + Raw(RawCommand), } diff --git a/src/admin/query/presence.rs b/src/admin/query/presence.rs index 0de6b696..38272749 100644 --- a/src/admin/query/presence.rs +++ b/src/admin/query/presence.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, UserId}; +use ruma::UserId; use crate::Command; @@ -23,10 +23,7 @@ pub(crate) enum PresenceCommand { } /// All the getters and iterators in key_value/presence.rs -pub(super) async fn process( - subcommand: PresenceCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(subcommand: PresenceCommand, context: &Command<'_>) -> Result { let services = context.services; match subcommand { @@ -35,9 +32,7 @@ pub(super) async fn process( let results = services.presence.get_presence(&user_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, | PresenceCommand::PresenceSince { since } => { let timer = tokio::time::Instant::now(); @@ -49,9 +44,8 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, } + .await } diff --git a/src/admin/query/pusher.rs b/src/admin/query/pusher.rs index 55532e54..34edf4db 100644 --- a/src/admin/query/pusher.rs +++ b/src/admin/query/pusher.rs @@ -1,6 +1,6 @@ use clap::Subcommand; use conduwuit::Result; -use ruma::{events::room::message::RoomMessageEventContent, UserId}; +use ruma::UserId; use crate::Command; @@ -13,10 +13,7 @@ pub(crate) enum PusherCommand { }, } -pub(super) async fn process( - subcommand: PusherCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(subcommand: PusherCommand, context: &Command<'_>) -> Result { let services = context.services; match subcommand { @@ -25,9 +22,8 @@ pub(super) async fn process( let results = services.pusher.get_pushers(&user_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, } + .await } diff --git a/src/admin/query/raw.rs b/src/admin/query/raw.rs new file mode 100644 index 00000000..678d21c9 --- /dev/null +++ b/src/admin/query/raw.rs @@ -0,0 +1,457 @@ +use std::{borrow::Cow, collections::BTreeMap, ops::Deref}; + +use clap::Subcommand; +use conduwuit::{ + apply, at, + utils::{ + stream::{ReadyExt, TryIgnore}, + string::EMPTY, + IterStream, + }, + Result, +}; +use futures::{FutureExt, StreamExt, TryStreamExt}; +use ruma::events::room::message::RoomMessageEventContent; +use tokio::time::Instant; + +use crate::{admin_command, admin_command_dispatch}; + +#[admin_command_dispatch] +#[derive(Debug, Subcommand)] +#[allow(clippy::enum_variant_names)] +/// Query tables from database +pub(crate) enum RawCommand { + /// - List database maps + RawMaps, + + /// - Raw database query + RawGet { + /// Map name + map: String, + + /// Key + key: String, + }, + + /// - Raw database keys 
iteration + RawKeys { + /// Map name + map: String, + + /// Key prefix + prefix: Option, + }, + + /// - Raw database key size breakdown + RawKeysSizes { + /// Map name + map: Option, + + /// Key prefix + prefix: Option, + }, + + /// - Raw database keys total bytes + RawKeysTotal { + /// Map name + map: Option, + + /// Key prefix + prefix: Option, + }, + + /// - Raw database values size breakdown + RawValsSizes { + /// Map name + map: Option, + + /// Key prefix + prefix: Option, + }, + + /// - Raw database values total bytes + RawValsTotal { + /// Map name + map: Option, + + /// Key prefix + prefix: Option, + }, + + /// - Raw database items iteration + RawIter { + /// Map name + map: String, + + /// Key prefix + prefix: Option, + }, + + /// - Raw database keys iteration + RawKeysFrom { + /// Map name + map: String, + + /// Lower-bound + start: String, + + /// Limit + #[arg(short, long)] + limit: Option, + }, + + /// - Raw database items iteration + RawIterFrom { + /// Map name + map: String, + + /// Lower-bound + start: String, + + /// Limit + #[arg(short, long)] + limit: Option, + }, + + /// - Raw database record count + RawCount { + /// Map name + map: Option, + + /// Key prefix + prefix: Option, + }, +} + +#[admin_command] +pub(super) async fn raw_count( + &self, + map: Option, + prefix: Option, +) -> Result { + let prefix = prefix.as_deref().unwrap_or(EMPTY); + + let default_all_maps = map + .is_none() + .then(|| self.services.db.keys().map(Deref::deref)) + .into_iter() + .flatten(); + + let maps: Vec<_> = map + .iter() + .map(String::as_str) + .chain(default_all_maps) + .map(|map| self.services.db.get(map)) + .filter_map(Result::ok) + .cloned() + .collect(); + + let timer = Instant::now(); + let count = maps + .iter() + .stream() + .then(|map| map.raw_count_prefix(&prefix)) + .ready_fold(0_usize, usize::saturating_add) + .await; + + let query_time = timer.elapsed(); + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{count:#?}\n```")) + .await?; + + Ok(RoomMessageEventContent::text_plain("")) +} + +#[admin_command] +pub(super) async fn raw_keys( + &self, + map: String, + prefix: Option, +) -> Result { + writeln!(self, "```").boxed().await?; + + let map = self.services.db.get(map.as_str())?; + let timer = Instant::now(); + prefix + .as_deref() + .map_or_else(|| map.raw_keys().boxed(), |prefix| map.raw_keys_prefix(prefix).boxed()) + .map_ok(String::from_utf8_lossy) + .try_for_each(|str| writeln!(self, "{str:?}")) + .boxed() + .await?; + + let query_time = timer.elapsed(); + let out = format!("\n```\n\nQuery completed in {query_time:?}"); + self.write_str(out.as_str()).await?; + + Ok(RoomMessageEventContent::text_plain("")) +} + +#[admin_command] +pub(super) async fn raw_keys_sizes( + &self, + map: Option, + prefix: Option, +) -> Result { + let prefix = prefix.as_deref().unwrap_or(EMPTY); + + let default_all_maps = map + .is_none() + .then(|| self.services.db.keys().map(Deref::deref)) + .into_iter() + .flatten(); + + let maps: Vec<_> = map + .iter() + .map(String::as_str) + .chain(default_all_maps) + .map(|map| self.services.db.get(map)) + .filter_map(Result::ok) + .cloned() + .collect(); + + let timer = Instant::now(); + let result = maps + .iter() + .stream() + .map(|map| map.raw_keys_prefix(&prefix)) + .flatten() + .ignore_err() + .map(<[u8]>::len) + .ready_fold_default(|mut map: BTreeMap<_, usize>, len| { + let entry = map.entry(len).or_default(); + *entry = entry.saturating_add(1); + map + }) + .await; + + let query_time = timer.elapsed(); + let result = 
format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}"); + self.write_str(result.as_str()).await?; + + Ok(RoomMessageEventContent::text_plain("")) +} + +#[admin_command] +pub(super) async fn raw_keys_total( + &self, + map: Option, + prefix: Option, +) -> Result { + let prefix = prefix.as_deref().unwrap_or(EMPTY); + + let default_all_maps = map + .is_none() + .then(|| self.services.db.keys().map(Deref::deref)) + .into_iter() + .flatten(); + + let maps: Vec<_> = map + .iter() + .map(String::as_str) + .chain(default_all_maps) + .map(|map| self.services.db.get(map)) + .filter_map(Result::ok) + .cloned() + .collect(); + + let timer = Instant::now(); + let result = maps + .iter() + .stream() + .map(|map| map.raw_keys_prefix(&prefix)) + .flatten() + .ignore_err() + .map(<[u8]>::len) + .ready_fold_default(|acc: usize, len| acc.saturating_add(len)) + .await; + + let query_time = timer.elapsed(); + + self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}")) + .await?; + + Ok(RoomMessageEventContent::text_plain("")) +} + +#[admin_command] +pub(super) async fn raw_vals_sizes( + &self, + map: Option, + prefix: Option, +) -> Result { + let prefix = prefix.as_deref().unwrap_or(EMPTY); + + let default_all_maps = map + .is_none() + .then(|| self.services.db.keys().map(Deref::deref)) + .into_iter() + .flatten(); + + let maps: Vec<_> = map + .iter() + .map(String::as_str) + .chain(default_all_maps) + .map(|map| self.services.db.get(map)) + .filter_map(Result::ok) + .cloned() + .collect(); + + let timer = Instant::now(); + let result = maps + .iter() + .stream() + .map(|map| map.raw_stream_prefix(&prefix)) + .flatten() + .ignore_err() + .map(at!(1)) + .map(<[u8]>::len) + .ready_fold_default(|mut map: BTreeMap<_, usize>, len| { + let entry = map.entry(len).or_default(); + *entry = entry.saturating_add(1); + map + }) + .await; + + let query_time = timer.elapsed(); + let result = format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}"); + self.write_str(result.as_str()).await?; + + Ok(RoomMessageEventContent::text_plain("")) +} + +#[admin_command] +pub(super) async fn raw_vals_total( + &self, + map: Option, + prefix: Option, +) -> Result { + let prefix = prefix.as_deref().unwrap_or(EMPTY); + + let default_all_maps = map + .is_none() + .then(|| self.services.db.keys().map(Deref::deref)) + .into_iter() + .flatten(); + + let maps: Vec<_> = map + .iter() + .map(String::as_str) + .chain(default_all_maps) + .map(|map| self.services.db.get(map)) + .filter_map(Result::ok) + .cloned() + .collect(); + + let timer = Instant::now(); + let result = maps + .iter() + .stream() + .map(|map| map.raw_stream_prefix(&prefix)) + .flatten() + .ignore_err() + .map(at!(1)) + .map(<[u8]>::len) + .ready_fold_default(|acc: usize, len| acc.saturating_add(len)) + .await; + + let query_time = timer.elapsed(); + + self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}")) + .await?; + + Ok(RoomMessageEventContent::text_plain("")) +} + +#[admin_command] +pub(super) async fn raw_iter( + &self, + map: String, + prefix: Option, +) -> Result { + writeln!(self, "```").await?; + + let map = self.services.db.get(&map)?; + let timer = Instant::now(); + prefix + .as_deref() + .map_or_else(|| map.raw_stream().boxed(), |prefix| map.raw_stream_prefix(prefix).boxed()) + .map_ok(apply!(2, String::from_utf8_lossy)) + .map_ok(apply!(2, Cow::into_owned)) + .try_for_each(|keyval| writeln!(self, "{keyval:?}")) + .boxed() + .await?; + + let query_time = timer.elapsed(); + 
self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}")) + .await?; + + Ok(RoomMessageEventContent::text_plain("")) +} + +#[admin_command] +pub(super) async fn raw_keys_from( + &self, + map: String, + start: String, + limit: Option, +) -> Result { + writeln!(self, "```").await?; + + let map = self.services.db.get(&map)?; + let timer = Instant::now(); + map.raw_keys_from(&start) + .map_ok(String::from_utf8_lossy) + .take(limit.unwrap_or(usize::MAX)) + .try_for_each(|str| writeln!(self, "{str:?}")) + .boxed() + .await?; + + let query_time = timer.elapsed(); + self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}")) + .await?; + + Ok(RoomMessageEventContent::text_plain("")) +} + +#[admin_command] +pub(super) async fn raw_iter_from( + &self, + map: String, + start: String, + limit: Option, +) -> Result { + let map = self.services.db.get(&map)?; + let timer = Instant::now(); + let result = map + .raw_stream_from(&start) + .map_ok(apply!(2, String::from_utf8_lossy)) + .map_ok(apply!(2, Cow::into_owned)) + .take(limit.unwrap_or(usize::MAX)) + .try_collect::>() + .await?; + + let query_time = timer.elapsed(); + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" + ))) +} + +#[admin_command] +pub(super) async fn raw_get(&self, map: String, key: String) -> Result { + let map = self.services.db.get(&map)?; + let timer = Instant::now(); + let handle = map.get(&key).await?; + let query_time = timer.elapsed(); + let result = String::from_utf8_lossy(&handle); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```" + ))) +} + +#[admin_command] +pub(super) async fn raw_maps(&self) -> Result { + let list: Vec<_> = self.services.db.iter().map(at!(0)).copied().collect(); + + Ok(RoomMessageEventContent::notice_markdown(format!("{list:#?}"))) +} diff --git a/src/admin/query/resolver.rs b/src/admin/query/resolver.rs index 3b950d13..b53661fc 100644 --- a/src/admin/query/resolver.rs +++ b/src/admin/query/resolver.rs @@ -28,56 +28,66 @@ async fn destinations_cache( ) -> Result { use service::resolver::cache::CachedDest; + writeln!(self, "| Server Name | Destination | Hostname | Expires |").await?; + writeln!(self, "| ----------- | ----------- | -------- | ------- |").await?; + let mut out = String::new(); - writeln!(out, "| Server Name | Destination | Hostname | Expires |")?; - writeln!(out, "| ----------- | ----------- | -------- | ------- |")?; - let row = |(name, &CachedDest { ref dest, ref host, expire })| { - let expire = time::format(expire, "%+"); - writeln!(out, "| {name} | {dest} | {host} | {expire} |").expect("wrote line"); - }; + { + let map = self + .services + .resolver + .cache + .destinations + .read() + .expect("locked"); - let map = self - .services - .resolver - .cache - .destinations - .read() - .expect("locked"); + for (name, &CachedDest { ref dest, ref host, expire }) in map.iter() { + if let Some(server_name) = server_name.as_ref() { + if name != server_name { + continue; + } + } - if let Some(server_name) = server_name.as_ref() { - map.get_key_value(server_name).map(row); - } else { - map.iter().for_each(row); + let expire = time::format(expire, "%+"); + writeln!(out, "| {name} | {dest} | {host} | {expire} |")?; + } } - Ok(RoomMessageEventContent::notice_markdown(out)) + self.write_str(out.as_str()).await?; + + Ok(RoomMessageEventContent::notice_plain("")) } #[admin_command] async fn overrides_cache(&self, server_name: Option) -> 
Result { use service::resolver::cache::CachedOverride; + writeln!(self, "| Server Name | IP | Port | Expires |").await?; + writeln!(self, "| ----------- | --- | ----:| ------- |").await?; + let mut out = String::new(); - writeln!(out, "| Server Name | IP | Port | Expires |")?; - writeln!(out, "| ----------- | --- | ----:| ------- |")?; - let row = |(name, &CachedOverride { ref ips, port, expire })| { - let expire = time::format(expire, "%+"); - writeln!(out, "| {name} | {ips:?} | {port} | {expire} |").expect("wrote line"); - }; + { + let map = self + .services + .resolver + .cache + .overrides + .read() + .expect("locked"); - let map = self - .services - .resolver - .cache - .overrides - .read() - .expect("locked"); + for (name, &CachedOverride { ref ips, port, expire }) in map.iter() { + if let Some(server_name) = server_name.as_ref() { + if name != server_name { + continue; + } + } - if let Some(server_name) = server_name.as_ref() { - map.get_key_value(server_name).map(row); - } else { - map.iter().for_each(row); + let expire = time::format(expire, "%+"); + writeln!(out, "| {name} | {ips:?} | {port} | {expire} |")?; + } } - Ok(RoomMessageEventContent::notice_markdown(out)) + self.write_str(out.as_str()).await?; + + Ok(RoomMessageEventContent::notice_plain("")) } diff --git a/src/admin/query/room_alias.rs b/src/admin/query/room_alias.rs index e1bf1622..2d4d8104 100644 --- a/src/admin/query/room_alias.rs +++ b/src/admin/query/room_alias.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, RoomAliasId, RoomId}; +use ruma::{RoomAliasId, RoomId}; use crate::Command; @@ -24,10 +24,7 @@ pub(crate) enum RoomAliasCommand { } /// All the getters and iterators in src/database/key_value/rooms/alias.rs -pub(super) async fn process( - subcommand: RoomAliasCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(subcommand: RoomAliasCommand, context: &Command<'_>) -> Result { let services = context.services; match subcommand { @@ -36,9 +33,7 @@ pub(super) async fn process( let results = services.rooms.alias.resolve_local_alias(&alias).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, | RoomAliasCommand::LocalAliasesForRoom { room_id } => { let timer = tokio::time::Instant::now(); @@ -51,9 +46,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{aliases:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{aliases:#?}\n```") }, | RoomAliasCommand::AllLocalAliases => { let timer = tokio::time::Instant::now(); @@ -66,9 +59,8 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{aliases:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{aliases:#?}\n```") }, } + .await } diff --git a/src/admin/query/room_state_cache.rs b/src/admin/query/room_state_cache.rs index cd7f5af7..71dadc99 100644 --- a/src/admin/query/room_state_cache.rs +++ b/src/admin/query/room_state_cache.rs @@ -1,5 +1,5 @@ use clap::Subcommand; -use conduwuit::Result; +use conduwuit::{Error, Result}; use futures::StreamExt; 
use ruma::{events::room::message::RoomMessageEventContent, RoomId, ServerName, UserId}; @@ -76,13 +76,10 @@ pub(crate) enum RoomStateCacheCommand { }, } -pub(super) async fn process( - subcommand: RoomStateCacheCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command<'_>) -> Result { let services = context.services; - match subcommand { + let c = match subcommand { | RoomStateCacheCommand::ServerInRoom { server, room_id } => { let timer = tokio::time::Instant::now(); let result = services @@ -92,7 +89,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" ))) }, @@ -107,7 +104,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -122,7 +119,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -137,7 +134,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -152,7 +149,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -167,7 +164,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -176,7 +173,7 @@ pub(super) async fn process( let results = services.rooms.state_cache.room_joined_count(&room_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -189,7 +186,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -204,7 +201,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -219,7 +216,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in 
{query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -232,7 +229,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -245,7 +242,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -260,7 +257,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -274,7 +271,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -288,7 +285,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -301,9 +298,13 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - } + }?; + + context.write_str(c.body()).await?; + + Ok(()) } diff --git a/src/admin/query/sending.rs b/src/admin/query/sending.rs index 3edbbe87..8c6fb25f 100644 --- a/src/admin/query/sending.rs +++ b/src/admin/query/sending.rs @@ -62,7 +62,14 @@ pub(crate) enum SendingCommand { } /// All the getters and iterators in key_value/sending.rs -pub(super) async fn process( +pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) -> Result { + let c = reprocess(subcommand, context).await?; + context.write_str(c.body()).await?; + Ok(()) +} + +/// All the getters and iterators in key_value/sending.rs +pub(super) async fn reprocess( subcommand: SendingCommand, context: &Command<'_>, ) -> Result { diff --git a/src/admin/query/short.rs b/src/admin/query/short.rs new file mode 100644 index 00000000..7f0f3449 --- /dev/null +++ b/src/admin/query/short.rs @@ -0,0 +1,45 @@ +use clap::Subcommand; +use conduwuit::Result; +use ruma::{events::room::message::RoomMessageEventContent, OwnedEventId, OwnedRoomOrAliasId}; + +use crate::{admin_command, admin_command_dispatch}; + +#[admin_command_dispatch] +#[derive(Debug, Subcommand)] +/// Query tables from database +pub(crate) enum ShortCommand { + ShortEventId { + event_id: OwnedEventId, + }, + + ShortRoomId { + room_id: OwnedRoomOrAliasId, + }, +} + +#[admin_command] +pub(super) async fn short_event_id( + &self, + event_id: OwnedEventId, +) -> Result { + let shortid = self + .services + .rooms + .short + .get_shorteventid(&event_id) + .await?; + + Ok(RoomMessageEventContent::notice_markdown(format!("{shortid:#?}"))) +} + +#[admin_command] +pub(super) async fn short_room_id( + &self, + room_id: OwnedRoomOrAliasId, +) -> Result { + let room_id = 
self.services.rooms.alias.resolve(&room_id).await?; + + let shortid = self.services.rooms.short.get_shortroomid(&room_id).await?; + + Ok(RoomMessageEventContent::notice_markdown(format!("{shortid:#?}"))) +} diff --git a/src/admin/query/users.rs b/src/admin/query/users.rs index 2149a103..3715ac25 100644 --- a/src/admin/query/users.rs +++ b/src/admin/query/users.rs @@ -15,6 +15,8 @@ pub(crate) enum UsersCommand { IterUsers, + IterUsers2, + PasswordHash { user_id: OwnedUserId, }, @@ -89,6 +91,33 @@ pub(crate) enum UsersCommand { room_id: OwnedRoomId, session_id: String, }, + + GetSharedRooms { + user_a: OwnedUserId, + user_b: OwnedUserId, + }, +} + +#[admin_command] +async fn get_shared_rooms( + &self, + user_a: OwnedUserId, + user_b: OwnedUserId, +) -> Result { + let timer = tokio::time::Instant::now(); + let result: Vec<_> = self + .services + .rooms + .state_cache + .get_shared_rooms(&user_a, &user_b) + .map(ToOwned::to_owned) + .collect() + .await; + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" + ))) } #[admin_command] @@ -207,6 +236,23 @@ async fn iter_users(&self) -> Result { ))) } +#[admin_command] +async fn iter_users2(&self) -> Result { + let timer = tokio::time::Instant::now(); + let result: Vec<_> = self.services.users.stream().collect().await; + let result: Vec<_> = result + .into_iter() + .map(ruma::UserId::as_bytes) + .map(String::from_utf8_lossy) + .collect(); + + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```" + ))) +} + #[admin_command] async fn count_users(&self) -> Result { let timer = tokio::time::Instant::now(); diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index 4490600d..9710cfc8 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -44,7 +44,14 @@ pub(crate) enum RoomAliasCommand { }, } -pub(super) async fn process( +pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) -> Result { + let c = reprocess(command, context).await?; + context.write_str(c.body()).await?; + + Ok(()) +} + +pub(super) async fn reprocess( command: RoomAliasCommand, context: &Command<'_>, ) -> Result { diff --git a/src/admin/room/directory.rs b/src/admin/room/directory.rs index 81f25478..791b9204 100644 --- a/src/admin/room/directory.rs +++ b/src/admin/room/directory.rs @@ -25,7 +25,13 @@ pub(crate) enum RoomDirectoryCommand { }, } -pub(super) async fn process( +pub(super) async fn process(command: RoomDirectoryCommand, context: &Command<'_>) -> Result { + let c = reprocess(command, context).await?; + context.write_str(c.body()).await?; + Ok(()) +} + +pub(super) async fn reprocess( command: RoomDirectoryCommand, context: &Command<'_>, ) -> Result { diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 5758d937..57aedd9c 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -31,19 +31,21 @@ const BULK_JOIN_REASON: &str = "Bulk force joining this room as initiated by the #[admin_command] pub(super) async fn list_users(&self) -> Result { - let users = self + let users: Vec<_> = self .services .users .list_local_users() .map(ToString::to_string) - .collect::>() + .collect() .await; let mut plain_msg = format!("Found {} local user account(s):\n```\n", users.len()); plain_msg += users.join("\n").as_str(); plain_msg += "\n```"; - Ok(RoomMessageEventContent::notice_markdown(plain_msg)) + 
self.write_str(plain_msg.as_str()).await?; + + Ok(RoomMessageEventContent::text_plain("")) } #[admin_command] @@ -912,29 +914,30 @@ pub(super) async fn redact_event( self.services.globals.server_name() ); - let state_lock = self.services.rooms.state.mutex.lock(&room_id).await; + let redaction_event_id = { + let state_lock = self.services.rooms.state.mutex.lock(&room_id).await; - let redaction_event_id = self - .services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - redacts: Some(event.event_id.clone()), - ..PduBuilder::timeline(&RoomRedactionEventContent { + self.services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { redacts: Some(event.event_id.clone()), - reason: Some(reason), - }) - }, - &sender_user, - &room_id, - &state_lock, - ) - .await?; + ..PduBuilder::timeline(&RoomRedactionEventContent { + redacts: Some(event.event_id.clone()), + reason: Some(reason), + }) + }, + &sender_user, + &room_id, + &state_lock, + ) + .await? + }; - drop(state_lock); + let out = format!("Successfully redacted event. Redaction event ID: {redaction_event_id}"); - Ok(RoomMessageEventContent::text_plain(format!( - "Successfully redacted event. Redaction event ID: {redaction_event_id}" - ))) + self.write_str(out.as_str()).await?; + + Ok(RoomMessageEventContent::text_plain("")) } diff --git a/src/macros/admin.rs b/src/macros/admin.rs index e98e914c..e35bd586 100644 --- a/src/macros/admin.rs +++ b/src/macros/admin.rs @@ -22,7 +22,7 @@ pub(super) fn command_dispatch(item: ItemEnum, _args: &[Meta]) -> Result - ) -> Result { + ) -> Result { use #name::*; #[allow(non_snake_case)] Ok(match command { @@ -46,7 +46,10 @@ fn dispatch_arm(v: &Variant) -> Result { let field = fields.named.iter().filter_map(|f| f.ident.as_ref()); let arg = field.clone(); quote! { - #name { #( #field ),* } => Box::pin(context.#handler(#( #arg ),*)).await?, + #name { #( #field ),* } => { + let c = Box::pin(context.#handler(#( #arg ),*)).await?; + Box::pin(context.write_str(c.body())).await?; + }, } }, | Fields::Unnamed(fields) => { @@ -54,12 +57,17 @@ fn dispatch_arm(v: &Variant) -> Result { return Err(Error::new(Span::call_site().into(), "One unnamed field required")); }; quote! { - #name ( #field ) => Box::pin(#handler::process(#field, context)).await?, + #name ( #field ) => { + Box::pin(#handler::process(#field, context)).await?; + } } }, | Fields::Unit => { quote! 
{ - #name => Box::pin(context.#handler()).await?, + #name => { + let c = Box::pin(context.#handler()).await?; + Box::pin(context.write_str(c.body())).await?; + }, } }, }; From f9e76d6239632bd3e74cd0b1c76dd72dbc24dc7a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 18 Jan 2025 01:32:37 +0000 Subject: [PATCH 038/328] improve debug memory-stats options Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 30 +++++++++++++++++++----------- src/admin/debug/mod.rs | 8 +++++++- src/core/alloc/default.rs | 2 +- src/core/alloc/hardened.rs | 4 ++-- src/core/alloc/je.rs | 14 ++++++-------- 5 files changed, 35 insertions(+), 23 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index b6189f6a..ad61440c 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -843,19 +843,27 @@ pub(super) async fn resolve_true_destination( } #[admin_command] -pub(super) async fn memory_stats(&self) -> Result { - let html_body = conduwuit::alloc::memory_stats(); +pub(super) async fn memory_stats(&self, opts: Option) -> Result { + const OPTS: &str = "abcdefghijklmnopqrstuvwxyz"; - if html_body.is_none() { - return Ok(RoomMessageEventContent::text_plain( - "malloc stats are not supported on your compiled malloc.", - )); - } + let opts: String = OPTS + .chars() + .filter(|&c| { + let allow_any = opts.as_ref().is_some_and(|opts| opts == "*"); - Ok(RoomMessageEventContent::text_html( - "This command's output can only be viewed by clients that render HTML.".to_owned(), - html_body.expect("string result"), - )) + let allow = allow_any || opts.as_ref().is_some_and(|opts| opts.contains(c)); + + !allow + }) + .collect(); + + let stats = conduwuit::alloc::memory_stats(&opts).unwrap_or_default(); + + self.write_str("```\n").await?; + self.write_str(&stats).await?; + self.write_str("\n```").await?; + + Ok(RoomMessageEventContent::text_plain("")) } #[cfg(tokio_unstable)] diff --git a/src/admin/debug/mod.rs b/src/admin/debug/mod.rs index 2367f80d..07f7296b 100644 --- a/src/admin/debug/mod.rs +++ b/src/admin/debug/mod.rs @@ -191,7 +191,13 @@ pub(super) enum DebugCommand { }, /// - Print extended memory usage - MemoryStats, + /// + /// Optional argument is a character mask (a sequence of characters in any + /// order) which enable additional extended statistics. Known characters are + /// "abdeglmx". For convenience, a '*' will enable everything. + MemoryStats { + opts: Option, + }, /// - Print general tokio runtime metric totals. 
RuntimeMetrics, diff --git a/src/core/alloc/default.rs b/src/core/alloc/default.rs index 5db02884..56e8c407 100644 --- a/src/core/alloc/default.rs +++ b/src/core/alloc/default.rs @@ -5,7 +5,7 @@ pub fn trim() -> crate::Result { Ok(()) } /// Always returns None #[must_use] -pub fn memory_stats() -> Option { None } +pub fn memory_stats(_opts: &str) -> Option { None } /// Always returns None #[must_use] diff --git a/src/core/alloc/hardened.rs b/src/core/alloc/hardened.rs index e2d9b28e..ff10cf2b 100644 --- a/src/core/alloc/hardened.rs +++ b/src/core/alloc/hardened.rs @@ -7,9 +7,9 @@ pub fn trim() -> crate::Result { Ok(()) } #[must_use] //TODO: get usage -pub fn memory_usage() -> Option { None } +pub fn memory_usage() -> Option { None } #[must_use] -pub fn memory_stats() -> Option { +pub fn memory_stats(_opts: &str) -> Option { Some("Extended statistics are not available from hardened_malloc.".to_owned()) } diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index b2c1fe85..ccb213c9 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -3,7 +3,7 @@ use std::{ cell::OnceCell, ffi::{c_char, c_void}, - fmt::{Debug, Write}, + fmt::Debug, }; use arrayvec::ArrayVec; @@ -66,15 +66,12 @@ pub fn memory_usage() -> Option { #[cfg(not(feature = "jemalloc_stats"))] pub fn memory_usage() -> Option { None } -#[must_use] -pub fn memory_stats() -> Option { - const MAX_LENGTH: usize = 65536 - 4096; +pub fn memory_stats(opts: &str) -> Option { + const MAX_LENGTH: usize = 1_048_576; - let opts_s = "d"; let mut str = String::new(); - let opaque = std::ptr::from_mut(&mut str).cast::(); - let opts_p: *const c_char = std::ffi::CString::new(opts_s) + let opts_p: *const c_char = std::ffi::CString::new(opts) .expect("cstring") .into_raw() .cast_const(); @@ -84,7 +81,8 @@ pub fn memory_stats() -> Option { unsafe { ffi::malloc_stats_print(Some(malloc_stats_cb), opaque, opts_p) }; str.truncate(MAX_LENGTH); - Some(format!("
{str}
")) + + Some(str) } unsafe extern "C" fn malloc_stats_cb(opaque: *mut c_void, msg: *const c_char) { From afdf5a07b52bd7102d5b98c4d3b6aa1b1fc905ce Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 18 Jan 2025 05:32:17 +0000 Subject: [PATCH 039/328] abstract hidden line related in config generator macro --- src/macros/config.rs | 70 +++++++++++++++++++++----------------------- 1 file changed, 34 insertions(+), 36 deletions(-) diff --git a/src/macros/config.rs b/src/macros/config.rs index 0fb79728..452abd20 100644 --- a/src/macros/config.rs +++ b/src/macros/config.rs @@ -15,6 +15,8 @@ use crate::{ const UNDOCUMENTED: &str = "# This item is undocumented. Please contribute documentation for it."; +const HIDDEN: &[&str] = &["default"]; + #[allow(clippy::needless_pass_by_value)] pub(super) fn example_generator(input: ItemStruct, args: &[Meta]) -> Result { if is_cargo_build() && !is_cargo_test() { @@ -93,7 +95,7 @@ fn generate_example(input: &ItemStruct, args: &[Meta]) -> Result<()> { format!("{doc}\n#\n") }; - let default = get_doc_default(field) + let default = get_doc_comment_line(field, "default") .or_else(|| get_default(field)) .unwrap_or_default(); @@ -163,40 +165,40 @@ fn get_default(field: &Field) -> Option { None } -fn get_doc_default(field: &Field) -> Option { - for attr in &field.attrs { - let Meta::NameValue(MetaNameValue { path, value, .. }) = &attr.meta else { - continue; - }; +fn get_doc_comment(field: &Field) -> Option { + let comment = get_doc_comment_full(field)?; - if path.segments.iter().next().is_none_or(|s| s.ident != "doc") { - continue; - } + let out = comment + .lines() + .filter(|line| { + !HIDDEN.iter().any(|key| { + line.trim().starts_with(key) && line.trim().chars().nth(key.len()) == Some(':') + }) + }) + .fold(String::new(), |full, line| full + "#" + line + "\n"); - let Expr::Lit(ExprLit { lit, .. }) = &value else { - continue; - }; - - let Lit::Str(token) = &lit else { - continue; - }; - - let value = token.value(); - if !value.trim().starts_with("default:") { - continue; - } - - return value - .split_once(':') - .map(|(_, v)| v) - .map(str::trim) - .map(ToOwned::to_owned); - } - - None + (!out.is_empty()).then_some(out) } -fn get_doc_comment(field: &Field) -> Option { +fn get_doc_comment_line(field: &Field, label: &str) -> Option { + let comment = get_doc_comment_full(field)?; + + comment + .lines() + .map(str::trim) + .filter(|line| line.starts_with(label)) + .filter(|line| line.chars().nth(label.len()) == Some(':')) + .map(|line| { + line.split_once(':') + .map(|(_, v)| v) + .map(str::trim) + .map(ToOwned::to_owned) + }) + .next() + .flatten() +} + +fn get_doc_comment_full(field: &Field) -> Option { let mut out = String::new(); for attr in &field.attrs { let Meta::NameValue(MetaNameValue { path, value, .. 
}) = &attr.meta else { @@ -216,11 +218,7 @@ fn get_doc_comment(field: &Field) -> Option { }; let value = token.value(); - if value.trim().starts_with("default:") { - continue; - } - - writeln!(&mut out, "#{value}").expect("wrote to output string buffer"); + writeln!(&mut out, "{value}").expect("wrote to output string buffer"); } (!out.is_empty()).then_some(out) From c6ae6adc80e562a44f96e10f03eb4d14dc312984 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 18 Jan 2025 09:47:17 +0000 Subject: [PATCH 040/328] pre-allocate some amount of media read buffer Signed-off-by: Jason Volk --- src/service/media/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 7e77090c..0d98853d 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -157,7 +157,7 @@ impl Service { if let Ok(Metadata { content_disposition, content_type, key }) = self.db.search_file_metadata(mxc, &Dim::default()).await { - let mut content = Vec::new(); + let mut content = Vec::with_capacity(8192); let path = self.get_media_file(&key); BufReader::new(fs::File::open(path).await?) .read_to_end(&mut content) From 7045481fae69150eea84983a55b762ecfaa04e2f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 18 Jan 2025 21:58:49 +0000 Subject: [PATCH 041/328] add from_errno construction to Error Signed-off-by: Jason Volk --- src/core/error/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index 2468811e..88ac6d09 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -131,6 +131,10 @@ pub enum Error { } impl Error { + #[inline] + #[must_use] + pub fn from_errno() -> Self { Self::Io(std::io::Error::last_os_error()) } + //#[deprecated] pub fn bad_database(message: &'static str) -> Self { crate::err!(Database(error!("{message}"))) From df3eb95d4f18f61839b296f48401ca75f61ad750 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 4 Jan 2025 01:32:45 +0000 Subject: [PATCH 042/328] additional affinity utils Signed-off-by: Jason Volk --- src/core/utils/mod.rs | 2 +- src/core/utils/sys.rs | 2 +- src/core/utils/sys/compute.rs | 145 ++++++++++++++++++++++++++-------- 3 files changed, 116 insertions(+), 33 deletions(-) diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 2bbadb50..4b5330ed 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -37,7 +37,7 @@ pub use self::{ rand::{shuffle, string as random_string}, stream::{IterStream, ReadyExt, Tools as StreamTools, TryReadyExt}, string::{str_from_bytes, string_from_bytes}, - sys::compute::parallelism as available_parallelism, + sys::compute::available_parallelism, time::{ exponential_backoff::{continue_exponential_backoff, continue_exponential_backoff_secs}, now_millis as millis_since_unix_epoch, timepoint_ago, timepoint_from_now, diff --git a/src/core/utils/sys.rs b/src/core/utils/sys.rs index 5c5564c4..a0d5be52 100644 --- a/src/core/utils/sys.rs +++ b/src/core/utils/sys.rs @@ -3,7 +3,7 @@ pub mod storage; use std::path::PathBuf; -pub use compute::parallelism as available_parallelism; +pub use compute::available_parallelism; use crate::{debug, Result}; diff --git a/src/core/utils/sys/compute.rs b/src/core/utils/sys/compute.rs index 9e90fc90..ce2aa504 100644 --- a/src/core/utils/sys/compute.rs +++ b/src/core/utils/sys/compute.rs @@ -1,23 +1,31 @@ //! 
System utilities related to compute/processing -use std::{cell::Cell, fmt::Debug, sync::LazyLock}; +use std::{cell::Cell, fmt::Debug, path::PathBuf, sync::LazyLock}; -use crate::is_equal_to; +use crate::{is_equal_to, Result}; -/// The list of cores available to the process (at startup) -static CORES_AVAILABLE: LazyLock = LazyLock::new(|| { - core_affinity::get_core_ids() - .unwrap_or_default() - .into_iter() - .map(|core_id| core_id.id) - .inspect(|&id| debug_assert!(id < 128, "Core ID must be < 128 at least for now")) - .fold(0_u128, |mask, id| mask | (1 << id)) -}); +type Id = usize; + +type Mask = u128; +type Masks = [Mask; MASK_BITS]; + +const MASK_BITS: usize = 128; + +/// The mask of logical cores available to the process (at startup). +static CORES_AVAILABLE: LazyLock = LazyLock::new(|| into_mask(query_cores_available())); + +/// Stores the mask of logical-cores with thread/HT/SMT association. Each group +/// here makes up a physical-core. +static SMT_TOPOLOGY: LazyLock = LazyLock::new(init_smt_topology); + +/// Stores the mask of logical-core associations on a node/socket. Bits are set +/// for all logical cores within all physical cores of the node. +static NODE_TOPOLOGY: LazyLock = LazyLock::new(init_node_topology); thread_local! { /// Tracks the affinity for this thread. This is updated when affinities /// are set via our set_affinity() interface. - static CORE_AFFINITY: Cell = Cell::default(); + static CORE_AFFINITY: Cell = Cell::default(); } /// Set the core affinity for this thread. The ID should be listed in @@ -28,19 +36,19 @@ thread_local! { fields( id = ?std::thread::current().id(), name = %std::thread::current().name().unwrap_or("None"), - set = ?ids.by_ref().collect::>(), + set = ?ids.clone().collect::>(), CURRENT = %format!("[b{:b}]", CORE_AFFINITY.get()), AVAILABLE = %format!("[b{:b}]", *CORES_AVAILABLE), ), )] pub fn set_affinity(mut ids: I) where - I: Iterator + Clone + Debug, + I: Iterator + Clone + Debug, { use core_affinity::{set_each_for_current, set_for_current, CoreId}; let n = ids.clone().count(); - let mask: u128 = ids.clone().fold(0, |mask, id| { + let mask: Mask = ids.clone().fold(0, |mask, id| { debug_assert!(is_core_available(id), "setting affinity to unavailable core"); mask | (1 << id) }); @@ -57,35 +65,110 @@ where } /// Get the core affinity for this thread. -pub fn get_affinity() -> impl Iterator { iter_bits(CORE_AFFINITY.get()) } +pub fn get_affinity() -> impl Iterator { from_mask(CORE_AFFINITY.get()) } + +/// List the cores sharing SMT-tier resources +pub fn smt_siblings() -> impl Iterator { + from_mask(get_affinity().fold(0_u128, |mask, id| { + mask | SMT_TOPOLOGY.get(id).expect("ID must not exceed max cpus") + })) +} + +/// List the cores sharing Node-tier resources relative to this threads current +/// affinity. +pub fn node_siblings() -> impl Iterator { + from_mask(get_affinity().fold(0_u128, |mask, id| { + mask | NODE_TOPOLOGY.get(id).expect("Id must not exceed max cpus") + })) +} + +/// Get the cores sharing SMT resources relative to id. +#[inline] +pub fn smt_affinity(id: Id) -> impl Iterator { + from_mask(*SMT_TOPOLOGY.get(id).expect("ID must not exceed max cpus")) +} + +/// Get the cores sharing Node resources relative to id. +#[inline] +pub fn node_affinity(id: Id) -> impl Iterator { + from_mask(*NODE_TOPOLOGY.get(id).expect("ID must not exceed max cpus")) +} + +/// Get the number of threads which could execute in parallel based on hardware +/// constraints of this system. 
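As a standalone illustration (not code from this patch) of the mask encoding these helpers share, where each available logical core sets one bit of a u128, the sketch below mirrors the into_mask/from_mask helpers defined at the end of this file and shows how a parallelism count falls out of counting the set bits:

    type Id = usize;
    type Mask = u128;

    /// Fold core IDs into a bitmask; one set bit per available logical core.
    fn into_mask<I: Iterator<Item = Id>>(ids: I) -> Mask {
        ids.fold(0, |mask, id| {
            debug_assert!(id < Mask::BITS as usize, "core ID must fit in the mask");
            mask | (1 << id)
        })
    }

    /// Expand a bitmask back into the core IDs it encodes.
    fn from_mask(mask: Mask) -> impl Iterator<Item = Id> {
        (0..Mask::BITS as usize).filter(move |&i| (mask & (1 << i)) != 0)
    }

    fn main() {
        // Suppose logical cores 0, 1 and 5 are available to the process.
        let mask = into_mask([0usize, 1, 5].into_iter());
        assert_eq!(mask, 0b10_0011);

        // Counting the set bits gives the parallelism figure.
        assert_eq!(from_mask(mask).count(), 3);
        assert_eq!(from_mask(mask).collect::<Vec<_>>(), vec![0, 1, 5]);
    }

The fixed 128-bit width keeps the whole topology in plain integers, which is why the patch asserts that core IDs stay below MASK_BITS.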
+#[inline] +#[must_use] +pub fn available_parallelism() -> usize { cores_available().count() } /// Gets the ID of the nth core available. This bijects our sequence of cores to /// actual ID's which may have gaps for cores which are not available. #[inline] #[must_use] -pub fn nth_core_available(i: usize) -> Option { cores_available().nth(i) } +pub fn nth_core_available(i: usize) -> Option { cores_available().nth(i) } /// Determine if core (by id) is available to the process. #[inline] #[must_use] -pub fn is_core_available(id: usize) -> bool { cores_available().any(is_equal_to!(id)) } +pub fn is_core_available(id: Id) -> bool { cores_available().any(is_equal_to!(id)) } /// Get the list of cores available. The values were recorded at program start. #[inline] -pub fn cores_available() -> impl Iterator { iter_bits(*CORES_AVAILABLE) } +pub fn cores_available() -> impl Iterator { from_mask(*CORES_AVAILABLE) } -/// Get the number of threads which could execute in parallel based on the -/// hardware and administrative constraints of this system. This value should be -/// used to hint the size of thread-pools and divide-and-conquer algorithms. -/// -/// * -#[must_use] -pub fn parallelism() -> usize { - std::thread::available_parallelism() - .expect("Unable to query for available parallelism.") - .get() +#[cfg(target_os = "linux")] +#[inline] +pub fn getcpu() -> Result { + use crate::{utils::math, Error}; + + // SAFETY: This is part of an interface with many low-level calls taking many + // raw params, but it's unclear why this specific call is unsafe. Nevertheless + // the value obtained here is semantically unsafe because it can change on the + // instruction boundary trailing its own acquisition and also any other time. + let ret: i32 = unsafe { libc::sched_getcpu() }; + + #[cfg(target_os = "linux")] + // SAFETY: On modern linux systems with a vdso if we can optimize away the branch checking + // for error (see getcpu(2)) then this system call becomes a memory access. 
+ unsafe { + std::hint::assert_unchecked(ret >= 0); + }; + + if ret == -1 { + return Err(Error::from_errno()); + } + + math::try_into(ret) } -fn iter_bits(v: u128) -> impl Iterator { - (0..128).filter(move |&i| (v & (1 << i)) != 0) +#[cfg(not(target_os = "linux"))] +#[inline] +pub fn getcpu() -> Result { Err(crate::Error::Io(std::io::ErrorKind::Unsupported.into())) } + +fn query_cores_available() -> impl Iterator { + core_affinity::get_core_ids() + .unwrap_or_default() + .into_iter() + .map(|core_id| core_id.id) +} + +fn init_smt_topology() -> [Mask; MASK_BITS] { [Mask::default(); MASK_BITS] } + +fn init_node_topology() -> [Mask; MASK_BITS] { [Mask::default(); MASK_BITS] } + +fn into_mask(ids: I) -> Mask +where + I: Iterator, +{ + ids.inspect(|&id| { + debug_assert!(id < MASK_BITS, "Core ID must be < Mask::BITS at least for now"); + }) + .fold(Mask::default(), |mask, id| mask | (1 << id)) +} + +fn from_mask(v: Mask) -> impl Iterator { + (0..MASK_BITS).filter(move |&i| (v & (1 << i)) != 0) +} + +fn _sys_path(id: usize, suffix: &str) -> PathBuf { + format!("/sys/devices/system/cpu/cpu{id}/{suffix}").into() } From 4fbbfe5d3056669982becae1ae7ec0f3edd80439 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 18 Jan 2025 19:56:16 +0000 Subject: [PATCH 043/328] add alt argument format for detecting cargo build phase Signed-off-by: Jason Volk --- src/macros/utils.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/macros/utils.rs b/src/macros/utils.rs index e33ee8b4..af2519a7 100644 --- a/src/macros/utils.rs +++ b/src/macros/utils.rs @@ -23,6 +23,16 @@ pub(crate) fn get_simple_settings(args: &[Meta]) -> HashMap { } pub(crate) fn is_cargo_build() -> bool { + legacy_is_cargo_build() + || std::env::args() + .skip_while(|flag| !flag.starts_with("--emit")) + .nth(1) + .iter() + .flat_map(|flag| flag.split(',')) + .any(|elem| elem == "link") +} + +pub(crate) fn legacy_is_cargo_build() -> bool { std::env::args() .find(|flag| flag.starts_with("--emit")) .as_ref() From 3eed408b2975564ed2c0b103a665f1a022e150b8 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 20 Jan 2025 04:42:01 +0000 Subject: [PATCH 044/328] additional util macros and reorg Signed-off-by: Jason Volk --- src/core/utils/mod.rs | 85 ++++++++++++++++++++++++++----------------- 1 file changed, 52 insertions(+), 33 deletions(-) diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 4b5330ed..1a4b52da 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -57,6 +57,14 @@ macro_rules! extract_variant { }; } +/// Functor for !is_empty() +#[macro_export] +macro_rules! is_not_empty { + () => { + |x| !x.is_empty() + }; +} + #[macro_export] macro_rules! apply { (1, $($idx:tt)+) => { @@ -76,24 +84,35 @@ macro_rules! apply { }; } +/// Functor for truthy #[macro_export] -macro_rules! at { - ($idx:tt) => { - |t| t.$idx +macro_rules! is_true { + () => { + |x| !!x }; } +/// Functor for falsy #[macro_export] -macro_rules! ref_at { - ($idx:tt) => { - |ref t| &t.$idx +macro_rules! is_false { + () => { + |x| !x }; } +/// Functor for equality to non-zero #[macro_export] -macro_rules! deref_at { - ($idx:tt) => { - |t| *t.$idx +macro_rules! is_nonzero { + () => { + |x| x != 0 + }; +} + +/// Functor for equality to zero +#[macro_export] +macro_rules! is_zero { + () => { + $crate::is_matching!(0) }; } @@ -121,14 +140,6 @@ macro_rules! is_less_than { }; } -/// Functor for equality to zero -#[macro_export] -macro_rules! is_zero { - () => { - $crate::is_matching!(0) - }; -} - /// Functor for matches! i.e. 
.is_some_and(is_matching!('A'..='Z')) #[macro_export] macro_rules! is_matching { @@ -141,14 +152,6 @@ macro_rules! is_matching { }; } -/// Functor for !is_empty() -#[macro_export] -macro_rules! is_not_empty { - () => { - |x| !x.is_empty() - }; -} - /// Functor for equality i.e. (a, b).map(is_equal!()) #[macro_export] macro_rules! is_equal { @@ -157,18 +160,34 @@ macro_rules! is_equal { }; } -/// Functor for truthy +/// Functor for |x| *x.$i #[macro_export] -macro_rules! is_true { - () => { - |x| !!x +macro_rules! deref_at { + ($idx:tt) => { + |t| *t.$idx }; } -/// Functor for falsy +/// Functor for |ref x| x.$i #[macro_export] -macro_rules! is_false { - () => { - |x| !x +macro_rules! ref_at { + ($idx:tt) => { + |ref t| &t.$idx + }; +} + +/// Functor for |&x| x.$i +#[macro_export] +macro_rules! val_at { + ($idx:tt) => { + |&t| t.$idx + }; +} + +/// Functor for |x| x.$i +#[macro_export] +macro_rules! at { + ($idx:tt) => { + |t| t.$idx }; } From 3dae02b886a3428c58d0a11e1cc19271722b4b47 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 18 Jan 2025 01:30:41 +0000 Subject: [PATCH 045/328] add preferred jemalloc config add muzzy/dirty configuration mallctl interface add program argument for --gc-muzzy=false Signed-off-by: Jason Volk --- Cargo.lock | 57 +++++----- Cargo.toml | 16 ++- deps/rust-rocksdb/Cargo.toml | 2 +- src/admin/debug/commands.rs | 2 +- src/core/Cargo.toml | 1 + src/core/alloc/default.rs | 2 +- src/core/alloc/hardened.rs | 2 +- src/core/alloc/je.rs | 194 +++++++++++++++++++++++++++++------ src/database/pool.rs | 16 ++- src/main/Cargo.toml | 4 + src/main/clap.rs | 16 +++ src/main/runtime.rs | 59 ++++++++--- 12 files changed, 289 insertions(+), 82 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 18bd7aab..8de3abf4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -175,9 +175,9 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "aws-lc-rs" -version = "1.12.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f409eb70b561706bf8abba8ca9c112729c481595893fd06a2dd9af8ed8441148" +checksum = "1ea835662a0af02443aa1396d39be523bbf8f11ee6fad20329607c480bea48c3" dependencies = [ "aws-lc-sys", "paste", @@ -186,9 +186,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.24.1" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "923ded50f602b3007e5e63e3f094c479d9c8a9b42d7f4034e4afe456aa48bfd2" +checksum = "71b2ddd3ada61a305e1d8bb6c005d1eaa7d14d903681edfc400406d523a9b491" dependencies = [ "bindgen", "cc", @@ -368,7 +368,7 @@ version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.8.0", "cexpr", "clang-sys", "itertools 0.12.1", @@ -393,9 +393,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1be3f42a67d6d345ecd59f675f3f012d6974981560836e938c22b424b85ce1be" +checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" [[package]] name = "blake2" @@ -495,9 +495,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.9" +version = "1.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8293772165d9345bdaaa39b45b2109591e63fe5e6fbc23c6ff930a048aa310b" +checksum = 
"13208fcbb66eaeffe09b99fffbe1af420f00a7b35aa99ad683dfc1aa76145229" dependencies = [ "jobserver", "libc", @@ -1047,7 +1047,7 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.8.0", "crossterm_winapi", "futures-core", "mio", @@ -1122,9 +1122,9 @@ checksum = "817fa642fb0ee7fe42e95783e00e0969927b96091bdd4b9b1af082acd943913b" [[package]] name = "data-encoding" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" +checksum = "0e60eed09d8c01d3cee5b7d30acb059b76614c918fa0f992e0dd6eeb10daad6f" [[package]] name = "date_header" @@ -2378,7 +2378,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.8.0", "cfg-if", "cfg_aliases", "libc", @@ -2911,7 +2911,7 @@ version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f86ba2052aebccc42cbbb3ed234b8b13ce76f75c3551a303cb2bcffcff12bb14" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.8.0", "memchr", "pulldown-cmark-escape", "unicase", @@ -3032,7 +3032,7 @@ version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.8.0", ] [[package]] @@ -3377,7 +3377,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.31.0+9.9.3" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=123d6302fed23fc706344becb2f19623265a83f8#123d6302fed23fc706344becb2f19623265a83f8" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2d31cf323df7c6d95396ef0213e28936c2218bd6#2d31cf323df7c6d95396ef0213e28936c2218bd6" dependencies = [ "bindgen", "bzip2-sys", @@ -3394,7 +3394,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.35.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=123d6302fed23fc706344becb2f19623265a83f8#123d6302fed23fc706344becb2f19623265a83f8" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2d31cf323df7c6d95396ef0213e28936c2218bd6#2d31cf323df7c6d95396ef0213e28936c2218bd6" dependencies = [ "libc", "rust-librocksdb-sys", @@ -3441,7 +3441,7 @@ version = "0.38.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.8.0", "errno", "libc", "linux-raw-sys", @@ -3559,9 +3559,12 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sd-notify" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1be20c5f7f393ee700f8b2f28ea35812e4e212f40774b550cd2a93ea91684451" +checksum = "561e6b346a5e59e0b8a07894004897d7160567e3352d2ebd6c3741d4e086b6f5" +dependencies = [ + "libc", +] [[package]] name = "security-framework" @@ -3569,7 +3572,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.8.0", "core-foundation", 
"core-foundation-sys", "libc", @@ -4171,7 +4174,7 @@ dependencies = [ [[package]] name = "tikv-jemalloc-ctl" version = "0.6.0" -source = "git+https://github.com/girlbossceo/jemallocator?rev=d87938bfddc26377dd7fdf14bbcd345f3ab19442#d87938bfddc26377dd7fdf14bbcd345f3ab19442" +source = "git+https://github.com/girlbossceo/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" dependencies = [ "libc", "paste", @@ -4181,7 +4184,7 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" -source = "git+https://github.com/girlbossceo/jemallocator?rev=d87938bfddc26377dd7fdf14bbcd345f3ab19442#d87938bfddc26377dd7fdf14bbcd345f3ab19442" +source = "git+https://github.com/girlbossceo/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" dependencies = [ "cc", "libc", @@ -4190,7 +4193,7 @@ dependencies = [ [[package]] name = "tikv-jemallocator" version = "0.6.0" -source = "git+https://github.com/girlbossceo/jemallocator?rev=d87938bfddc26377dd7fdf14bbcd345f3ab19442#d87938bfddc26377dd7fdf14bbcd345f3ab19442" +source = "git+https://github.com/girlbossceo/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" dependencies = [ "libc", "tikv-jemalloc-sys", @@ -4445,7 +4448,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "async-compression", - "bitflags 2.7.0", + "bitflags 2.8.0", "bytes", "futures-core", "futures-util", @@ -4709,9 +4712,9 @@ dependencies = [ [[package]] name = "valuable" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "vcpkg" diff --git a/Cargo.toml b/Cargo.toml index c0b31a69..4d738a11 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -431,17 +431,23 @@ version = "0.35.0" # jemalloc usage [workspace.dependencies.tikv-jemalloc-sys] git = "https://github.com/girlbossceo/jemallocator" -rev = "d87938bfddc26377dd7fdf14bbcd345f3ab19442" +rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" default-features = false -features = ["unprefixed_malloc_on_supported_platforms"] +features = [ + "background_threads_runtime_support", + "unprefixed_malloc_on_supported_platforms", +] [workspace.dependencies.tikv-jemallocator] git = "https://github.com/girlbossceo/jemallocator" -rev = "d87938bfddc26377dd7fdf14bbcd345f3ab19442" +rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" default-features = false -features = ["unprefixed_malloc_on_supported_platforms"] +features = [ + "background_threads_runtime_support", + "unprefixed_malloc_on_supported_platforms", +] [workspace.dependencies.tikv-jemalloc-ctl] git = "https://github.com/girlbossceo/jemallocator" -rev = "d87938bfddc26377dd7fdf14bbcd345f3ab19442" +rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" default-features = false features = ["use_std"] diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index f06c44e8..96554aed 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -27,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "123d6302fed23fc706344becb2f19623265a83f8" +rev 
= "2d31cf323df7c6d95396ef0213e28936c2218bd6" #branch = "master" default-features = false diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index ad61440c..a77587b0 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -967,7 +967,7 @@ pub(super) async fn database_stats( #[admin_command] pub(super) async fn trim_memory(&self) -> Result { - conduwuit::alloc::trim()?; + conduwuit::alloc::trim(None)?; writeln!(self, "done").await?; diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index c716e9c2..ef2df4ff 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -36,6 +36,7 @@ jemalloc_stats = [ "tikv-jemalloc-ctl/stats", "tikv-jemallocator/stats", ] +jemalloc_conf = [] hardened_malloc = [ "dep:hardened_malloc-rs" ] diff --git a/src/core/alloc/default.rs b/src/core/alloc/default.rs index 56e8c407..65354b7d 100644 --- a/src/core/alloc/default.rs +++ b/src/core/alloc/default.rs @@ -1,7 +1,7 @@ //! Default allocator with no special features /// Always returns Ok -pub fn trim() -> crate::Result { Ok(()) } +pub fn trim>>(_: I) -> crate::Result { Ok(()) } /// Always returns None #[must_use] diff --git a/src/core/alloc/hardened.rs b/src/core/alloc/hardened.rs index ff10cf2b..5f850673 100644 --- a/src/core/alloc/hardened.rs +++ b/src/core/alloc/hardened.rs @@ -3,7 +3,7 @@ #[global_allocator] static HMALLOC: hardened_malloc_rs::HardenedMalloc = hardened_malloc_rs::HardenedMalloc; -pub fn trim() -> crate::Result { Ok(()) } +pub fn trim>>(_: I) -> crate::Result { Ok(()) } #[must_use] //TODO: get usage diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index ccb213c9..119ff45e 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -2,8 +2,9 @@ use std::{ cell::OnceCell, - ffi::{c_char, c_void}, + ffi::{c_char, c_void, CStr}, fmt::Debug, + sync::RwLock, }; use arrayvec::ArrayVec; @@ -11,10 +12,14 @@ use tikv_jemalloc_ctl as mallctl; use tikv_jemalloc_sys as ffi; use tikv_jemallocator as jemalloc; -use crate::{err, is_equal_to, utils::math::Tried, Result}; +use crate::{ + err, is_equal_to, is_nonzero, + utils::{math, math::Tried}, + Result, +}; #[cfg(feature = "jemalloc_conf")] -#[no_mangle] +#[unsafe(no_mangle)] pub static malloc_conf: &[u8] = b"\ metadata_thp:always\ ,percpu_arena:percpu\ @@ -22,19 +27,26 @@ metadata_thp:always\ ,max_background_threads:-1\ ,lg_extent_max_active_fit:4\ ,oversize_threshold:33554432\ -,tcache_max:2097152\ +,tcache_max:1048576\ ,dirty_decay_ms:16000\ ,muzzy_decay_ms:144000\ \0"; #[global_allocator] static JEMALLOC: jemalloc::Jemalloc = jemalloc::Jemalloc; +static CONTROL: RwLock<()> = RwLock::new(()); -type Key = ArrayVec; type Name = ArrayVec; +type Key = ArrayVec; -const KEY_SEGS: usize = 8; const NAME_MAX: usize = 128; +const KEY_SEGS: usize = 8; + +#[crate::ctor] +fn _static_initialization() { + acq_epoch().expect("pre-initialization of jemalloc failed"); + acq_epoch().expect("pre-initialization of jemalloc failed"); +} #[must_use] #[cfg(feature = "jemalloc_stats")] @@ -49,6 +61,9 @@ pub fn memory_usage() -> Option { kibs / 1024.0 }; + // Acquire the epoch; ensure latest stats are pulled in + acq_epoch().ok()?; + let allocated = mibs(stats::allocated::read()); let active = mibs(stats::active::read()); let mapped = mibs(stats::mapped::read()); @@ -76,6 +91,9 @@ pub fn memory_stats(opts: &str) -> Option { .into_raw() .cast_const(); + // Acquire the epoch; ensure latest stats are pulled in + acq_epoch().ok()?; + // SAFETY: calls malloc_stats_print() with our string instance which must remain // in this 
frame. https://docs.rs/tikv-jemalloc-sys/latest/tikv_jemalloc_sys/fn.malloc_stats_print.html unsafe { ffi::malloc_stats_print(Some(malloc_stats_cb), opaque, opts_p) }; @@ -95,7 +113,7 @@ unsafe extern "C" fn malloc_stats_cb(opaque: *mut c_void, msg: *const c_char) { }; // SAFETY: we have to trust the string is null terminated. - let msg = unsafe { std::ffi::CStr::from_ptr(msg) }; + let msg = unsafe { CStr::from_ptr(msg) }; let msg = String::from_utf8_lossy(msg.to_bytes()); res.push_str(msg.as_ref()); @@ -114,58 +132,168 @@ macro_rules! mallctl { }}; } -pub fn trim() -> Result { set(&mallctl!("arena.4096.purge"), ()) } - -pub fn decay() -> Result { set(&mallctl!("arena.4096.purge"), ()) } - -pub fn set_by_name(name: &str, val: T) -> Result { set(&key(name)?, val) } - -pub fn get_by_name(name: &str) -> Result { get(&key(name)?) } - pub mod this_thread { - use super::{get, key, set, Key, OnceCell, Result}; + use super::{is_nonzero, key, math, Debug, Key, OnceCell, Result}; - pub fn trim() -> Result { - let mut key = mallctl!("arena.0.purge"); - key[1] = arena_id()?.try_into()?; - set(&key, ()) + pub fn trim() -> Result { notify(mallctl!("arena.0.purge")) } + + pub fn decay() -> Result { notify(mallctl!("arena.0.decay")) } + + pub fn flush() -> Result { super::notify(&mallctl!("thread.tcache.flush")) } + + pub fn set_muzzy_decay(decay_ms: isize) -> Result { + set(mallctl!("arena.0.muzzy_decay_ms"), decay_ms) } - pub fn decay() -> Result { - let mut key = mallctl!("arena.0.decay"); - key[1] = arena_id()?.try_into()?; - set(&key, ()) + pub fn get_muzzy_decay() -> Result { get(mallctl!("arena.0.muzzy_decay_ms")) } + + pub fn set_dirty_decay(decay_ms: isize) -> Result { + set(mallctl!("arena.0.dirty_decay_ms"), decay_ms) } - pub fn cache(enable: bool) -> Result { - set(&mallctl!("thread.tcache.enabled"), u8::from(enable)) + pub fn get_dirty_decay() -> Result { get(mallctl!("arena.0.dirty_decay_ms")) } + + pub fn enable_cache(enable: bool) -> Result { + super::set::(&mallctl!("thread.tcache.enabled"), enable.into()).map(is_nonzero!()) } - pub fn flush() -> Result { set(&mallctl!("thread.tcache.flush"), ()) } + pub fn is_cache_enabled() -> Result { + super::get::(&mallctl!("thread.tcache.enabled")).map(is_nonzero!()) + } - pub fn allocated() -> Result { get::(&mallctl!("thread.allocated")) } + pub fn set_arena(id: usize) -> Result { + super::set::(&mallctl!("thread.arena"), id.try_into()?).and_then(math::try_into) + } - pub fn deallocated() -> Result { get::(&mallctl!("thread.deallocated")) } + pub fn arena_id() -> Result { + super::get::(&mallctl!("thread.arena")).and_then(math::try_into) + } - pub fn arena_id() -> Result { get::(&mallctl!("thread.arena")) } + pub fn allocated() -> Result { super::get(&mallctl!("thread.allocated")) } + + pub fn deallocated() -> Result { super::get(&mallctl!("thread.deallocated")) } + + fn notify(key: Key) -> Result { super::notify_by_arena(Some(arena_id()?), key) } + + fn set(key: Key, val: T) -> Result + where + T: Copy + Debug, + { + super::set_by_arena(Some(arena_id()?), key, val) + } + + fn get(key: Key) -> Result + where + T: Copy + Debug, + { + super::get_by_arena(Some(arena_id()?), key) + } } -fn set(key: &Key, val: T) -> Result +pub fn trim>>(arena: I) -> Result { + notify_by_arena(arena.into(), mallctl!("arena.4096.purge")) +} + +pub fn decay>>(arena: I) -> Result { + notify_by_arena(arena.into(), mallctl!("arena.4096.decay")) +} + +pub fn set_muzzy_decay>>(arena: I, decay_ms: isize) -> Result { + if let Some(arena) = arena.into() { + 
set_by_arena(Some(arena), mallctl!("arena.4096.muzzy_decay_ms"), decay_ms) + } else { + set(&mallctl!("arenas.muzzy_decay_ms"), decay_ms) + } +} + +pub fn set_dirty_decay>>(arena: I, decay_ms: isize) -> Result { + if let Some(arena) = arena.into() { + set_by_arena(Some(arena), mallctl!("arena.4096.dirty_decay_ms"), decay_ms) + } else { + set(&mallctl!("arenas.dirty_decay_ms"), decay_ms) + } +} + +#[inline] +#[must_use] +pub fn is_affine_arena() -> bool { is_percpu_arena() || is_phycpu_arena() } + +#[inline] +#[must_use] +pub fn is_percpu_arena() -> bool { percpu_arenas().is_ok_and(is_equal_to!("percpu")) } + +#[inline] +#[must_use] +pub fn is_phycpu_arena() -> bool { percpu_arenas().is_ok_and(is_equal_to!("phycpu")) } + +pub fn percpu_arenas() -> Result<&'static str> { + let ptr = get::<*const c_char>(&mallctl!("opt.percpu_arena"))?; + //SAFETY: ptr points to a null-terminated string returned for opt.percpu_arena. + let cstr = unsafe { CStr::from_ptr(ptr) }; + cstr.to_str().map_err(Into::into) +} + +pub fn arenas() -> Result { + get::(&mallctl!("arenas.narenas")).and_then(math::try_into) +} + +pub fn inc_epoch() -> Result { xchg(&mallctl!("epoch"), 1_u64) } + +pub fn acq_epoch() -> Result { xchg(&mallctl!("epoch"), 0_u64) } + +fn notify_by_arena(id: Option, mut key: Key) -> Result { + key[1] = id.unwrap_or(4096); + notify(&key) +} + +fn set_by_arena(id: Option, mut key: Key, val: T) -> Result where T: Copy + Debug, { - // SAFETY: T must be the exact expected type. - unsafe { mallctl::raw::write_mib(key.as_slice(), val) }.map_err(map_err) + key[1] = id.unwrap_or(4096); + set(&key, val) +} + +fn get_by_arena(id: Option, mut key: Key) -> Result +where + T: Copy + Debug, +{ + key[1] = id.unwrap_or(4096); + get(&key) +} + +fn notify(key: &Key) -> Result { xchg(key, ()) } + +fn set(key: &Key, val: T) -> Result +where + T: Copy + Debug, +{ + let _lock = CONTROL.write()?; + let res = xchg(key, val)?; + inc_epoch()?; + + Ok(res) } fn get(key: &Key) -> Result where T: Copy + Debug, { + acq_epoch()?; + acq_epoch()?; + // SAFETY: T must be perfectly valid to receive value. unsafe { mallctl::raw::read_mib(key.as_slice()) }.map_err(map_err) } +fn xchg(key: &Key, val: T) -> Result +where + T: Copy + Debug, +{ + // SAFETY: T must be the exact expected type. + unsafe { mallctl::raw::update_mib(key.as_slice(), val) }.map_err(map_err) +} + fn key(name: &str) -> Result { // tikv asserts the output buffer length is tight to the number of required mibs // so we slice that down here. 
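[Reviewer note, not part of the patch] A rough usage sketch of the reworked allocator
controls above, for review context only. It assumes the "jemalloc" feature is enabled
and reuses names exactly as they appear in the hunks above (alloc::trim,
je::is_affine_arena, je::this_thread::{set_arena, set_muzzy_decay}); the generic bounds
on trim() and the Result type parameters are elided by the diff rendering here, so the
precise signatures are an assumption rather than a quotation.

    use conduwuit::alloc::{self, je::{is_affine_arena, this_thread}};

    // Hypothetical helper (illustrative only): pin the current thread to the
    // arena matching its core, relax muzzy decay there, then purge dirty pages.
    fn pin_worker_arena(core_id: usize) -> conduwuit::Result {
        if is_affine_arena() {
            // bind this thread's allocations to the arena for its CPU
            this_thread::set_arena(core_id)?;
            // -1 disables muzzy decay for this thread's arena
            this_thread::set_muzzy_decay(-1)?;
        }

        // None falls through to the arena.4096 key used in the hunks above
        alloc::trim(None)
    }

The pool.rs and runtime.rs hunks below in this same patch use these helpers in the
same way once worker affinity is enabled.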
diff --git a/src/database/pool.rs b/src/database/pool.rs index f5600c36..86516c31 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -13,7 +13,7 @@ use std::{ use async_channel::{QueueStrategy, Receiver, RecvError, Sender}; use conduwuit::{ debug, debug_warn, err, error, implement, - result::DebugInspect, + result::{DebugInspect, LogDebugErr}, trace, utils::sys::compute::{get_affinity, nth_core_available, set_affinity}, Error, Result, Server, @@ -289,6 +289,20 @@ fn worker_init(&self, id: usize) { // affinity is empty (no-op) if there's only one queue set_affinity(affinity.clone()); + + #[cfg(feature = "jemalloc")] + if affinity.clone().count() == 1 && conduwuit::alloc::je::is_affine_arena() { + use conduwuit::alloc::je::this_thread::{arena_id, set_arena}; + + let id = affinity.clone().next().expect("at least one id"); + + if let Ok(arena) = arena_id() { + if arena != id { + set_arena(id).log_debug_err().ok(); + } + } + } + debug!( ?group, affinity = ?affinity.collect::>(), diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index baf5336f..f774c37a 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -41,6 +41,7 @@ default = [ "gzip_compression", "io_uring", "jemalloc", + "jemalloc_conf", "media_thumbnail", "release_max_log_level", "systemd", @@ -85,6 +86,9 @@ jemalloc_prof = [ jemalloc_stats = [ "conduwuit-core/jemalloc_stats", ] +jemalloc_conf = [ + "conduwuit-core/jemalloc_conf", +] media_thumbnail = [ "conduwuit-service/media_thumbnail", ] diff --git a/src/main/clap.rs b/src/main/clap.rs index d3d40491..2bb6f3f2 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -92,6 +92,22 @@ pub(crate) struct Args { require_equals(false), )] pub(crate) gc_on_park: Option, + + /// Toggles muzzy decay for jemalloc arenas associated with a tokio + /// worker (when worker-affinity is enabled). Setting to false releases + /// memory to the operating system using MADV_FREE without MADV_DONTNEED. + /// Setting to false increases performance by reducing pagefaults, but + /// resident memory usage appears high until there is memory pressure. The + /// default is true unless the system has four or more cores. 
+ #[arg( + long, + hide(true), + env = "CONDUWUIT_RUNTIME_GC_MUZZY", + action = ArgAction::Set, + num_args = 0..=1, + require_equals(false), + )] + pub(crate) gc_muzzy: Option, } /// Parse commandline arguments into structured data diff --git a/src/main/runtime.rs b/src/main/runtime.rs index 315336b0..9f4f60f8 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -9,8 +9,12 @@ use std::{ }; use conduwuit::{ - result::LogErr, - utils::sys::compute::{nth_core_available, set_affinity}, + is_true, + result::LogDebugErr, + utils::{ + available_parallelism, + sys::compute::{nth_core_available, set_affinity}, + }, Result, }; use tokio::runtime::Builder; @@ -21,9 +25,11 @@ const WORKER_NAME: &str = "conduwuit:worker"; const WORKER_MIN: usize = 2; const WORKER_KEEPALIVE: u64 = 36; const MAX_BLOCKING_THREADS: usize = 1024; +const DISABLE_MUZZY_THRESHOLD: usize = 4; static WORKER_AFFINITY: OnceLock = OnceLock::new(); static GC_ON_PARK: OnceLock> = OnceLock::new(); +static GC_MUZZY: OnceLock> = OnceLock::new(); pub(super) fn new(args: &Args) -> Result { WORKER_AFFINITY @@ -34,6 +40,10 @@ pub(super) fn new(args: &Args) -> Result { .set(args.gc_on_park) .expect("set GC_ON_PARK from program argument"); + GC_MUZZY + .set(args.gc_muzzy) + .expect("set GC_MUZZY from program argument"); + let mut builder = Builder::new_multi_thread(); builder .enable_io() @@ -83,11 +93,13 @@ fn enable_histogram(builder: &mut Builder, args: &Args) { ), )] fn thread_start() { - if WORKER_AFFINITY - .get() - .copied() - .expect("WORKER_AFFINITY initialized by runtime::new()") - { + debug_assert_eq!( + Some(WORKER_NAME), + thread::current().name(), + "tokio worker name mismatch at thread start" + ); + + if WORKER_AFFINITY.get().is_some_and(is_true!()) { set_worker_affinity(); } } @@ -95,10 +107,6 @@ fn thread_start() { fn set_worker_affinity() { static CORES_OCCUPIED: AtomicUsize = AtomicUsize::new(0); - if thread::current().name() != Some(WORKER_NAME) { - return; - } - let handle = tokio::runtime::Handle::current(); let num_workers = handle.metrics().num_workers(); let i = CORES_OCCUPIED.fetch_add(1, Ordering::Relaxed); @@ -111,8 +119,33 @@ fn set_worker_affinity() { }; set_affinity(once(id)); + set_worker_mallctl(id); } +#[cfg(feature = "jemalloc")] +fn set_worker_mallctl(id: usize) { + use conduwuit::alloc::je::{ + is_affine_arena, + this_thread::{set_arena, set_muzzy_decay}, + }; + + if is_affine_arena() { + set_arena(id).log_debug_err().ok(); + } + + let muzzy_option = GC_MUZZY + .get() + .expect("GC_MUZZY initialized by runtime::new()"); + + let muzzy_auto_disable = available_parallelism() >= DISABLE_MUZZY_THRESHOLD; + if matches!(muzzy_option, Some(false) | None if muzzy_auto_disable) { + set_muzzy_decay(-1).log_debug_err().ok(); + } +} + +#[cfg(not(feature = "jemalloc"))] +fn set_worker_mallctl(_: usize) {} + #[tracing::instrument( name = "join", level = "debug", @@ -157,7 +190,9 @@ fn thread_park() { fn gc_on_park() { #[cfg(feature = "jemalloc")] - conduwuit::alloc::je::this_thread::decay().log_err().ok(); + conduwuit::alloc::je::this_thread::decay() + .log_debug_err() + .ok(); } #[cfg(tokio_unstable)] From ac944496c15bc476bc9964e034b7fcea737cc733 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 14 Jan 2025 19:17:45 +0000 Subject: [PATCH 046/328] optimize statekey-from-short loopsite Signed-off-by: Jason Volk --- .../rooms/event_handler/resolve_state.rs | 25 ++++++++++--------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/src/service/rooms/event_handler/resolve_state.rs 
b/src/service/rooms/event_handler/resolve_state.rs index 8640c582..edce880d 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -6,10 +6,10 @@ use std::{ use conduwuit::{ debug, err, implement, - utils::stream::{automatic_width, IterStream, WidebandExt}, + utils::stream::{automatic_width, IterStream, ReadyExt, WidebandExt}, Result, }; -use futures::{FutureExt, StreamExt, TryFutureExt}; +use futures::{FutureExt, StreamExt}; use ruma::{ state_res::{self, StateMap}, OwnedEventId, RoomId, RoomVersionId, @@ -59,17 +59,18 @@ pub async fn resolve_state( let fork_states: Vec> = fork_states .into_iter() .stream() - .wide_then(|fork_state| { - fork_state - .into_iter() - .stream() - .wide_filter_map(|(k, id)| { - self.services - .short - .get_statekey_from_short(k) - .map_ok_or_else(|_| None, move |(ty, st_key)| Some(((ty, st_key), id))) - }) + .wide_then(|fork_state| async move { + let shortstatekeys = fork_state.keys().copied().stream(); + + let event_ids = fork_state.values().cloned().stream().boxed(); + + self.services + .short + .multi_get_statekey_from_short(shortstatekeys) + .zip(event_ids) + .ready_filter_map(|(ty_sk, id)| Some((ty_sk.ok()?, id))) .collect() + .await }) .collect() .await; From 388730d6dd7dc69e1243218e556946bb35fd7461 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 20 Jan 2025 07:38:19 +0000 Subject: [PATCH 047/328] add TryWideband trait to similar to TryBroadband Signed-off-by: Jason Volk --- src/core/utils/stream/mod.rs | 2 + src/core/utils/stream/try_wideband.rs | 57 +++++++++++++++++++++++++++ 2 files changed, 59 insertions(+) create mode 100644 src/core/utils/stream/try_wideband.rs diff --git a/src/core/utils/stream/mod.rs b/src/core/utils/stream/mod.rs index 0fee0a3a..c7bfa021 100644 --- a/src/core/utils/stream/mod.rs +++ b/src/core/utils/stream/mod.rs @@ -9,6 +9,7 @@ mod tools; mod try_broadband; mod try_ready; mod try_tools; +mod try_wideband; mod wideband; pub use band::{ @@ -25,4 +26,5 @@ pub use tools::Tools; pub use try_broadband::TryBroadbandExt; pub use try_ready::TryReadyExt; pub use try_tools::TryTools; +pub use try_wideband::TryWidebandExt; pub use wideband::WidebandExt; diff --git a/src/core/utils/stream/try_wideband.rs b/src/core/utils/stream/try_wideband.rs new file mode 100644 index 00000000..0af3c8ec --- /dev/null +++ b/src/core/utils/stream/try_wideband.rs @@ -0,0 +1,57 @@ +//! Synchronous combinator extensions to futures::TryStream + +use futures::{TryFuture, TryStream, TryStreamExt}; + +use super::automatic_width; +use crate::Result; + +/// Concurrency extensions to augment futures::TryStreamExt. 
wide_ combinators +/// produce in-order results +pub trait TryWidebandExt +where + Self: TryStream> + Send + Sized, +{ + fn widen_and_then( + self, + n: N, + f: F, + ) -> impl TryStream> + Send + where + N: Into>, + F: Fn(Self::Ok) -> Fut + Send, + Fut: TryFuture> + Send, + U: Send; + + fn wide_and_then( + self, + f: F, + ) -> impl TryStream> + Send + where + F: Fn(Self::Ok) -> Fut + Send, + Fut: TryFuture> + Send, + U: Send, + { + self.widen_and_then(None, f) + } +} + +impl TryWidebandExt for S +where + S: TryStream> + Send + Sized, + E: Send, +{ + fn widen_and_then( + self, + n: N, + f: F, + ) -> impl TryStream> + Send + where + N: Into>, + F: Fn(Self::Ok) -> Fut + Send, + Fut: TryFuture> + Send, + U: Send, + { + self.map_ok(f) + .try_buffered(n.into().unwrap_or_else(automatic_width)) + } +} From ea25dc04b26fb4c3dffb456fd1c8e4b7c066b95f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 20 Jan 2025 07:38:32 +0000 Subject: [PATCH 048/328] parallelize current and incoming fork-state fetch Signed-off-by: Jason Volk --- .../rooms/event_handler/resolve_state.rs | 32 +++++++++++-------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index edce880d..0526d31c 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -6,10 +6,10 @@ use std::{ use conduwuit::{ debug, err, implement, - utils::stream::{automatic_width, IterStream, ReadyExt, WidebandExt}, + utils::stream::{automatic_width, IterStream, ReadyExt, TryWidebandExt, WidebandExt}, Result, }; -use futures::{FutureExt, StreamExt}; +use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ state_res::{self, StateMap}, OwnedEventId, RoomId, RoomVersionId, @@ -40,20 +40,24 @@ pub async fn resolve_state( .await?; let fork_states = [current_state_ids, incoming_state]; - let mut auth_chain_sets = Vec::with_capacity(fork_states.len()); - for state in &fork_states { - let starting_events = state.values().map(Borrow::borrow); + let auth_chain_sets: Vec> = fork_states + .iter() + .try_stream() + .wide_and_then(|state| async move { + let starting_events = state.values().map(Borrow::borrow); - let auth_chain: HashSet = self - .services - .auth_chain - .get_event_ids(room_id, starting_events) - .await? - .into_iter() - .collect(); + let auth_chain = self + .services + .auth_chain + .get_event_ids(room_id, starting_events) + .await? 
+ .into_iter() + .collect(); - auth_chain_sets.push(auth_chain); - } + Ok(auth_chain) + }) + .try_collect() + .await?; debug!("Loading fork states"); let fork_states: Vec> = fork_states From 4c0ae8c2f708cc2d950f6a8269844ae42069d55a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 20 Jan 2025 09:02:50 +0000 Subject: [PATCH 049/328] parallelize get_auth_chain outer Signed-off-by: Jason Volk --- src/service/rooms/auth_chain/mod.rs | 112 ++++++++++++++-------------- 1 file changed, 55 insertions(+), 57 deletions(-) diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 74064701..fb7b6163 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -7,11 +7,14 @@ use std::{ }; use conduwuit::{ - debug, debug_error, trace, - utils::{stream::ReadyExt, IterStream}, + at, debug, debug_error, trace, + utils::{ + stream::{ReadyExt, TryBroadbandExt}, + IterStream, + }, validated, warn, Err, Result, }; -use futures::{Stream, StreamExt}; +use futures::{Stream, StreamExt, TryStreamExt}; use ruma::{EventId, OwnedEventId, RoomId}; use self::data::Data; @@ -112,66 +115,61 @@ impl Service { "start", ); - let mut hits: usize = 0; - let mut misses: usize = 0; - let mut full_auth_chain = Vec::with_capacity(buckets.len()); - for chunk in buckets { - if chunk.is_empty() { - continue; - } + let full_auth_chain: Vec<_> = buckets + .into_iter() + .try_stream() + .broad_and_then(|chunk| async move { + let chunk_key: Vec = chunk.iter().map(at!(0)).collect(); - let chunk_key: Vec = - chunk.iter().map(|(short, _)| short).copied().collect(); - if let Ok(cached) = self.get_cached_eventid_authchain(&chunk_key).await { - trace!("Found cache entry for whole chunk"); - full_auth_chain.extend(cached.iter().copied()); - hits = hits.saturating_add(1); - continue; - } + if chunk_key.is_empty() { + return Ok(Vec::new()); + } - let mut hits2: usize = 0; - let mut misses2: usize = 0; - let mut chunk_cache = Vec::with_capacity(chunk.len()); - for (sevent_id, event_id) in chunk { - if let Ok(cached) = self.get_cached_eventid_authchain(&[sevent_id]).await { - trace!(?event_id, "Found cache entry for event"); - chunk_cache.extend(cached.iter().copied()); - hits2 = hits2.saturating_add(1); - } else { - let auth_chain = self.get_auth_chain_inner(room_id, event_id).await?; - self.cache_auth_chain(vec![sevent_id], &auth_chain); - chunk_cache.extend(auth_chain.iter()); - misses2 = misses2.saturating_add(1); - debug!( - event_id = ?event_id, - chain_length = ?auth_chain.len(), - chunk_cache_length = ?chunk_cache.len(), - elapsed = ?started.elapsed(), - "Cache missed event" - ); - }; - } + if let Ok(cached) = self.get_cached_eventid_authchain(&chunk_key).await { + return Ok(cached.to_vec()); + } - chunk_cache.sort_unstable(); - chunk_cache.dedup(); - self.cache_auth_chain_vec(chunk_key, &chunk_cache); - full_auth_chain.extend(chunk_cache.iter()); - misses = misses.saturating_add(1); - debug!( - chunk_cache_length = ?chunk_cache.len(), - hits = ?hits2, - misses = ?misses2, - elapsed = ?started.elapsed(), - "Chunk missed", - ); - } + let chunk_cache: Vec<_> = chunk + .into_iter() + .try_stream() + .broad_and_then(|(shortid, event_id)| async move { + if let Ok(cached) = self.get_cached_eventid_authchain(&[shortid]).await { + return Ok(cached.to_vec()); + } + let auth_chain = self.get_auth_chain_inner(room_id, event_id).await?; + self.cache_auth_chain_vec(vec![shortid], auth_chain.as_slice()); + debug!( + ?event_id, + elapsed = ?started.elapsed(), + "Cache missed event" + 
); + + Ok(auth_chain) + }) + .try_collect() + .await?; + + let mut chunk_cache: Vec<_> = chunk_cache.into_iter().flatten().collect(); + chunk_cache.sort_unstable(); + chunk_cache.dedup(); + self.cache_auth_chain_vec(chunk_key, chunk_cache.as_slice()); + debug!( + chunk_cache_length = ?chunk_cache.len(), + elapsed = ?started.elapsed(), + "Cache missed chunk", + ); + + Ok(chunk_cache) + }) + .try_collect() + .await?; + + let mut full_auth_chain: Vec<_> = full_auth_chain.into_iter().flatten().collect(); full_auth_chain.sort_unstable(); full_auth_chain.dedup(); debug!( chain_length = ?full_auth_chain.len(), - hits = ?hits, - misses = ?misses, elapsed = ?started.elapsed(), "done", ); @@ -184,7 +182,7 @@ impl Service { &self, room_id: &RoomId, event_id: &EventId, - ) -> Result> { + ) -> Result> { let mut todo: VecDeque<_> = [event_id.to_owned()].into(); let mut found = HashSet::new(); @@ -226,7 +224,7 @@ impl Service { } } - Ok(found) + Ok(found.into_iter().collect()) } #[inline] From 610129d16265f702b7bfbf0ada019fc77766e10f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 20 Jan 2025 09:05:49 +0000 Subject: [PATCH 050/328] outdent auth_chain Service impl Signed-off-by: Jason Volk --- src/service/rooms/auth_chain/mod.rs | 429 ++++++++++++++-------------- 1 file changed, 219 insertions(+), 210 deletions(-) diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index fb7b6163..df2663b2 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -7,14 +7,14 @@ use std::{ }; use conduwuit::{ - at, debug, debug_error, trace, + at, debug, debug_error, implement, trace, utils::{ stream::{ReadyExt, TryBroadbandExt}, IterStream, }, validated, warn, Err, Result, }; -use futures::{Stream, StreamExt, TryStreamExt}; +use futures::{Stream, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{EventId, OwnedEventId, RoomId}; use self::data::Data; @@ -44,213 +44,222 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -impl Service { - pub async fn event_ids_iter<'a, I>( - &'a self, - room_id: &RoomId, - starting_events: I, - ) -> Result + Send + '_> - where - I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, - { - let stream = self - .get_event_ids(room_id, starting_events) - .await? - .into_iter() - .stream(); +#[implement(Service)] +pub async fn event_ids_iter<'a, I>( + &'a self, + room_id: &RoomId, + starting_events: I, +) -> Result + Send + '_> +where + I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, +{ + let stream = self + .get_event_ids(room_id, starting_events) + .await? + .into_iter() + .stream(); - Ok(stream) - } - - pub async fn get_event_ids<'a, I>( - &'a self, - room_id: &RoomId, - starting_events: I, - ) -> Result> - where - I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, - { - let chain = self.get_auth_chain(room_id, starting_events).await?; - let event_ids = self - .services - .short - .multi_get_eventid_from_short(chain.into_iter().stream()) - .ready_filter_map(Result::ok) - .collect() - .await; - - Ok(event_ids) - } - - #[tracing::instrument(name = "auth_chain", level = "debug", skip_all)] - pub async fn get_auth_chain<'a, I>( - &'a self, - room_id: &RoomId, - starting_events: I, - ) -> Result> - where - I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, - { - const NUM_BUCKETS: usize = 50; //TODO: change possible w/o disrupting db? 
- const BUCKET: BTreeSet<(u64, &EventId)> = BTreeSet::new(); - - let started = std::time::Instant::now(); - let mut starting_ids = self - .services - .short - .multi_get_or_create_shorteventid(starting_events.clone()) - .zip(starting_events.clone().stream()) - .boxed(); - - let mut buckets = [BUCKET; NUM_BUCKETS]; - while let Some((short, starting_event)) = starting_ids.next().await { - let bucket: usize = short.try_into()?; - let bucket: usize = validated!(bucket % NUM_BUCKETS); - buckets[bucket].insert((short, starting_event)); - } - - debug!( - starting_events = ?starting_events.count(), - elapsed = ?started.elapsed(), - "start", - ); - - let full_auth_chain: Vec<_> = buckets - .into_iter() - .try_stream() - .broad_and_then(|chunk| async move { - let chunk_key: Vec = chunk.iter().map(at!(0)).collect(); - - if chunk_key.is_empty() { - return Ok(Vec::new()); - } - - if let Ok(cached) = self.get_cached_eventid_authchain(&chunk_key).await { - return Ok(cached.to_vec()); - } - - let chunk_cache: Vec<_> = chunk - .into_iter() - .try_stream() - .broad_and_then(|(shortid, event_id)| async move { - if let Ok(cached) = self.get_cached_eventid_authchain(&[shortid]).await { - return Ok(cached.to_vec()); - } - - let auth_chain = self.get_auth_chain_inner(room_id, event_id).await?; - self.cache_auth_chain_vec(vec![shortid], auth_chain.as_slice()); - debug!( - ?event_id, - elapsed = ?started.elapsed(), - "Cache missed event" - ); - - Ok(auth_chain) - }) - .try_collect() - .await?; - - let mut chunk_cache: Vec<_> = chunk_cache.into_iter().flatten().collect(); - chunk_cache.sort_unstable(); - chunk_cache.dedup(); - self.cache_auth_chain_vec(chunk_key, chunk_cache.as_slice()); - debug!( - chunk_cache_length = ?chunk_cache.len(), - elapsed = ?started.elapsed(), - "Cache missed chunk", - ); - - Ok(chunk_cache) - }) - .try_collect() - .await?; - - let mut full_auth_chain: Vec<_> = full_auth_chain.into_iter().flatten().collect(); - full_auth_chain.sort_unstable(); - full_auth_chain.dedup(); - debug!( - chain_length = ?full_auth_chain.len(), - elapsed = ?started.elapsed(), - "done", - ); - - Ok(full_auth_chain) - } - - #[tracing::instrument(name = "inner", level = "trace", skip(self, room_id))] - async fn get_auth_chain_inner( - &self, - room_id: &RoomId, - event_id: &EventId, - ) -> Result> { - let mut todo: VecDeque<_> = [event_id.to_owned()].into(); - let mut found = HashSet::new(); - - while let Some(event_id) = todo.pop_front() { - trace!(?event_id, "processing auth event"); - - match self.services.timeline.get_pdu(&event_id).await { - | Err(e) => { - debug_error!(?event_id, ?e, "Could not find pdu mentioned in auth events"); - }, - | Ok(pdu) => { - if pdu.room_id != room_id { - return Err!(Request(Forbidden(error!( - ?event_id, - ?room_id, - wrong_room_id = ?pdu.room_id, - "auth event for incorrect room" - )))); - } - - for auth_event in &pdu.auth_events { - let sauthevent = self - .services - .short - .get_or_create_shorteventid(auth_event) - .await; - - if found.insert(sauthevent) { - trace!( - ?event_id, - ?auth_event, - "adding auth event to processing queue" - ); - - todo.push_back(auth_event.clone()); - } - } - }, - } - } - - Ok(found.into_iter().collect()) - } - - #[inline] - pub async fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result> { - self.db.get_cached_eventid_authchain(key).await - } - - #[tracing::instrument(skip_all, level = "debug")] - pub fn cache_auth_chain(&self, key: Vec, auth_chain: &HashSet) { - let val: Arc<[ShortEventId]> = auth_chain.iter().copied().collect(); - - 
self.db.cache_auth_chain(key, val); - } - - #[tracing::instrument(skip_all, level = "debug")] - pub fn cache_auth_chain_vec(&self, key: Vec, auth_chain: &[ShortEventId]) { - let val: Arc<[ShortEventId]> = auth_chain.iter().copied().collect(); - - self.db.cache_auth_chain(key, val); - } - - pub fn get_cache_usage(&self) -> (usize, usize) { - let cache = self.db.auth_chain_cache.lock().expect("locked"); - - (cache.len(), cache.capacity()) - } - - pub fn clear_cache(&self) { self.db.auth_chain_cache.lock().expect("locked").clear(); } + Ok(stream) } + +#[implement(Service)] +pub async fn get_event_ids<'a, I>( + &'a self, + room_id: &RoomId, + starting_events: I, +) -> Result> +where + I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, +{ + let chain = self.get_auth_chain(room_id, starting_events).await?; + let event_ids = self + .services + .short + .multi_get_eventid_from_short(chain.into_iter().stream()) + .ready_filter_map(Result::ok) + .collect() + .await; + + Ok(event_ids) +} + +#[implement(Service)] +#[tracing::instrument(name = "auth_chain", level = "debug", skip_all)] +pub async fn get_auth_chain<'a, I>( + &'a self, + room_id: &RoomId, + starting_events: I, +) -> Result> +where + I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, +{ + const NUM_BUCKETS: usize = 50; //TODO: change possible w/o disrupting db? + const BUCKET: BTreeSet<(u64, &EventId)> = BTreeSet::new(); + + let started = std::time::Instant::now(); + let mut starting_ids = self + .services + .short + .multi_get_or_create_shorteventid(starting_events.clone()) + .zip(starting_events.clone().stream()) + .boxed(); + + let mut buckets = [BUCKET; NUM_BUCKETS]; + while let Some((short, starting_event)) = starting_ids.next().await { + let bucket: usize = short.try_into()?; + let bucket: usize = validated!(bucket % NUM_BUCKETS); + buckets[bucket].insert((short, starting_event)); + } + + debug!( + starting_events = ?starting_events.count(), + elapsed = ?started.elapsed(), + "start", + ); + + let full_auth_chain: Vec = buckets + .into_iter() + .try_stream() + .broad_and_then(|chunk| async move { + let chunk_key: Vec = chunk.iter().map(at!(0)).collect(); + + if chunk_key.is_empty() { + return Ok(Vec::new()); + } + + if let Ok(cached) = self.get_cached_eventid_authchain(&chunk_key).await { + return Ok(cached.to_vec()); + } + + let chunk_cache: Vec<_> = chunk + .into_iter() + .try_stream() + .broad_and_then(|(shortid, event_id)| async move { + if let Ok(cached) = self.get_cached_eventid_authchain(&[shortid]).await { + return Ok(cached.to_vec()); + } + + let auth_chain = self.get_auth_chain_inner(room_id, event_id).await?; + self.cache_auth_chain_vec(vec![shortid], auth_chain.as_slice()); + debug!( + ?event_id, + elapsed = ?started.elapsed(), + "Cache missed event" + ); + + Ok(auth_chain) + }) + .try_collect() + .map_ok(|chunk_cache: Vec<_>| chunk_cache.into_iter().flatten().collect()) + .map_ok(|mut chunk_cache: Vec<_>| { + chunk_cache.sort_unstable(); + chunk_cache.dedup(); + chunk_cache + }) + .await?; + + self.cache_auth_chain_vec(chunk_key, chunk_cache.as_slice()); + debug!( + chunk_cache_length = ?chunk_cache.len(), + elapsed = ?started.elapsed(), + "Cache missed chunk", + ); + + Ok(chunk_cache) + }) + .try_collect() + .map_ok(|auth_chain: Vec<_>| auth_chain.into_iter().flatten().collect()) + .map_ok(|mut full_auth_chain: Vec<_>| { + full_auth_chain.sort_unstable(); + full_auth_chain.dedup(); + full_auth_chain + }) + .await?; + + debug!( + chain_length = ?full_auth_chain.len(), + elapsed = ?started.elapsed(), + 
"done", + ); + + Ok(full_auth_chain) +} + +#[implement(Service)] +#[tracing::instrument(name = "inner", level = "trace", skip(self, room_id))] +async fn get_auth_chain_inner( + &self, + room_id: &RoomId, + event_id: &EventId, +) -> Result> { + let mut todo: VecDeque<_> = [event_id.to_owned()].into(); + let mut found = HashSet::new(); + + while let Some(event_id) = todo.pop_front() { + trace!(?event_id, "processing auth event"); + + match self.services.timeline.get_pdu(&event_id).await { + | Err(e) => { + debug_error!(?event_id, ?e, "Could not find pdu mentioned in auth events"); + }, + | Ok(pdu) => { + if pdu.room_id != room_id { + return Err!(Request(Forbidden(error!( + ?event_id, + ?room_id, + wrong_room_id = ?pdu.room_id, + "auth event for incorrect room" + )))); + } + + for auth_event in &pdu.auth_events { + let sauthevent = self + .services + .short + .get_or_create_shorteventid(auth_event) + .await; + + if found.insert(sauthevent) { + trace!(?event_id, ?auth_event, "adding auth event to processing queue"); + + todo.push_back(auth_event.clone()); + } + } + }, + } + } + + Ok(found.into_iter().collect()) +} + +#[implement(Service)] +#[inline] +pub async fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result> { + self.db.get_cached_eventid_authchain(key).await +} + +#[implement(Service)] +#[tracing::instrument(skip_all, level = "debug")] +pub fn cache_auth_chain(&self, key: Vec, auth_chain: &HashSet) { + let val: Arc<[ShortEventId]> = auth_chain.iter().copied().collect(); + + self.db.cache_auth_chain(key, val); +} + +#[implement(Service)] +#[tracing::instrument(skip_all, level = "debug")] +pub fn cache_auth_chain_vec(&self, key: Vec, auth_chain: &[ShortEventId]) { + let val: Arc<[ShortEventId]> = auth_chain.iter().copied().collect(); + + self.db.cache_auth_chain(key, val); +} + +#[implement(Service)] +pub fn get_cache_usage(&self) -> (usize, usize) { + let cache = self.db.auth_chain_cache.lock().expect("locked"); + + (cache.len(), cache.capacity()) +} + +#[implement(Service)] +pub fn clear_cache(&self) { self.db.auth_chain_cache.lock().expect("locked").clear(); } From 277b4951e8e7f28e6319a17e91229e77c9db090d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 20 Jan 2025 11:50:17 +0000 Subject: [PATCH 051/328] add compression-shaping; tweak default compression levels Signed-off-by: Jason Volk --- conduwuit-example.toml | 8 +++++++- src/core/config/mod.rs | 8 +++++++- src/database/engine/cf_opts.rs | 32 +++++++++++++++++++++++++++---- src/database/engine/descriptor.rs | 19 ++++++++++++++++-- 4 files changed, 59 insertions(+), 8 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 54143ced..79efbd14 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -818,6 +818,9 @@ # magic number and translated to the library's default compression level # as they all differ. See their `kDefaultCompressionLevel`. # +# Note when using the default value we may override it with a setting +# tailored specifically conduwuit. +# #rocksdb_compression_level = 32767 # Level of compression the specified compression algorithm for the @@ -831,6 +834,9 @@ # less likely for this data to be used. Research your chosen compression # algorithm. # +# Note when using the default value we may override it with a setting +# tailored specifically conduwuit. +# #rocksdb_bottommost_compression_level = 32767 # Whether to enable RocksDB's "bottommost_compression". @@ -842,7 +848,7 @@ # # See https://github.com/facebook/rocksdb/wiki/Compression for more details. 
# -#rocksdb_bottommost_compression = false +#rocksdb_bottommost_compression = true # Database recovery mode (for RocksDB WAL corruption). # diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index cb42940b..5cfed0b9 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -958,6 +958,9 @@ pub struct Config { /// magic number and translated to the library's default compression level /// as they all differ. See their `kDefaultCompressionLevel`. /// + /// Note when using the default value we may override it with a setting + /// tailored specifically conduwuit. + /// /// default: 32767 #[serde(default = "default_rocksdb_compression_level")] pub rocksdb_compression_level: i32, @@ -973,6 +976,9 @@ pub struct Config { /// less likely for this data to be used. Research your chosen compression /// algorithm. /// + /// Note when using the default value we may override it with a setting + /// tailored specifically conduwuit. + /// /// default: 32767 #[serde(default = "default_rocksdb_bottommost_compression_level")] pub rocksdb_bottommost_compression_level: i32, @@ -985,7 +991,7 @@ pub struct Config { /// if you're trying to reduce storage usage from the database. /// /// See https://github.com/facebook/rocksdb/wiki/Compression for more details. - #[serde(default)] + #[serde(default = "true_fn")] pub rocksdb_bottommost_compression: bool, /// Database recovery mode (for RocksDB WAL corruption). diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index 7b3a1d49..da636718 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -8,6 +8,8 @@ use rocksdb::{ use super::descriptor::{CacheDisp, Descriptor}; use crate::{util::map_err, Context}; +pub(super) const SENTINEL_COMPRESSION_LEVEL: i32 = 32767; + /// Adjust options for the specific column by name. Provide the result of /// db_options() as the argument to this function and use the return value in /// the arguments to open the specific column. @@ -45,7 +47,15 @@ fn descriptor_cf_options( opts.set_compaction_pri(desc.compaction_pri); opts.set_universal_compaction_options(&uc_options(&desc)); + let compression_shape: Vec<_> = desc + .compression_shape + .into_iter() + .map(|val| (val > 0).then_some(desc.compression)) + .map(|val| val.unwrap_or(CompressionType::None)) + .collect(); + opts.set_compression_type(desc.compression); + opts.set_compression_per_level(compression_shape.as_slice()); opts.set_compression_options(-14, desc.compression_level, 0, 0); // -14 w_bits used by zlib. 
if let Some(&bottommost_level) = desc.bottommost_level.as_ref() { opts.set_bottommost_compression_type(desc.compression); @@ -95,10 +105,24 @@ fn set_compression(desc: &mut Descriptor, config: &Config) { | _ => CompressionType::Zstd, }; - desc.compression_level = config.rocksdb_compression_level; - desc.bottommost_level = config - .rocksdb_bottommost_compression - .then_some(config.rocksdb_bottommost_compression_level); + let can_override_level = config.rocksdb_compression_level == SENTINEL_COMPRESSION_LEVEL + && desc.compression == CompressionType::Zstd; + + if !can_override_level { + desc.compression_level = config.rocksdb_compression_level; + } + + let can_override_bottom = config.rocksdb_bottommost_compression_level + == SENTINEL_COMPRESSION_LEVEL + && desc.compression == CompressionType::Zstd; + + if !can_override_bottom { + desc.bottommost_level = Some(config.rocksdb_bottommost_compression_level); + } + + if !config.rocksdb_bottommost_compression { + desc.bottommost_level = None; + } } fn uc_options(desc: &Descriptor) -> UniversalCompactOptions { diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index 234ca2bf..2c84ac53 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -4,6 +4,8 @@ use rocksdb::{ DBCompressionType as CompressionType, }; +use super::cf_opts::SENTINEL_COMPRESSION_LEVEL; + #[derive(Debug, Clone, Copy)] pub(crate) enum CacheDisp { Unique, @@ -32,6 +34,7 @@ pub(crate) struct Descriptor { pub(crate) compaction: CompactionStyle, pub(crate) compaction_pri: CompactionPri, pub(crate) compression: CompressionType, + pub(crate) compression_shape: [i32; 7], pub(crate) compression_level: i32, pub(crate) bottommost_level: Option, pub(crate) block_index_hashing: Option, @@ -58,8 +61,9 @@ pub(crate) static BASE: Descriptor = Descriptor { compaction: CompactionStyle::Level, compaction_pri: CompactionPri::MinOverlappingRatio, compression: CompressionType::Zstd, - compression_level: 32767, - bottommost_level: Some(32767), + compression_shape: [0, 0, 0, 1, 1, 1, 1], + compression_level: SENTINEL_COMPRESSION_LEVEL, + bottommost_level: Some(SENTINEL_COMPRESSION_LEVEL), block_index_hashing: None, cache_shards: 64, }; @@ -68,6 +72,8 @@ pub(crate) static RANDOM: Descriptor = Descriptor { compaction_pri: CompactionPri::OldestSmallestSeqFirst, write_size: 1024 * 1024 * 32, cache_shards: 128, + compression_level: -3, + bottommost_level: Some(4), ..BASE }; @@ -77,6 +83,9 @@ pub(crate) static SEQUENTIAL: Descriptor = Descriptor { level_size: 1024 * 1024 * 32, file_size: 1024 * 1024 * 2, cache_shards: 128, + compression_level: -1, + bottommost_level: Some(6), + compression_shape: [0, 0, 1, 1, 1, 1, 1], ..BASE }; @@ -88,6 +97,9 @@ pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { index_size: 512, block_size: 512, cache_shards: 64, + compression_level: -4, + bottommost_level: Some(1), + compression_shape: [0, 0, 0, 0, 0, 1, 1], ..RANDOM }; @@ -99,5 +111,8 @@ pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { block_size: 512, cache_shards: 64, block_index_hashing: Some(false), + compression_level: -2, + bottommost_level: Some(4), + compression_shape: [0, 0, 0, 0, 1, 1, 1], ..SEQUENTIAL }; From 19f6d9d0e1dd2e40bb710bdc0e876e4f2fc02917 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 20 Jan 2025 12:12:44 +0000 Subject: [PATCH 052/328] add index-compression and auto-readahead to descriptor Signed-off-by: Jason Volk --- src/database/engine/cf_opts.rs | 16 +++++++++++----- src/database/engine/descriptor.rs | 12 
++++++++++++ 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index da636718..1230081c 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -83,11 +83,17 @@ fn set_table_options(opts: &mut Options, desc: &Descriptor, cache: Option<&Cache table.disable_cache(); } - opts.set_options_from_string( - "{{block_based_table_factory={num_file_reads_for_auto_readahead=0;\ - max_auto_readahead_size=524288;initial_auto_readahead_size=16384}}}", - ) - .map_err(map_err)?; + let string = format!( + "{{block_based_table_factory={{num_file_reads_for_auto_readahead={0};\ + max_auto_readahead_size={1};initial_auto_readahead_size={2};\ + enable_index_compression={3}}}}}", + desc.auto_readahead_thresh, + desc.auto_readahead_max, + desc.auto_readahead_init, + desc.compressed_index, + ); + + opts.set_options_from_string(&string).map_err(map_err)?; opts.set_block_based_table_factory(&table); diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index 2c84ac53..6ce8b5ad 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -34,11 +34,15 @@ pub(crate) struct Descriptor { pub(crate) compaction: CompactionStyle, pub(crate) compaction_pri: CompactionPri, pub(crate) compression: CompressionType, + pub(crate) compressed_index: bool, pub(crate) compression_shape: [i32; 7], pub(crate) compression_level: i32, pub(crate) bottommost_level: Option, pub(crate) block_index_hashing: Option, pub(crate) cache_shards: u32, + pub(crate) auto_readahead_thresh: u32, + pub(crate) auto_readahead_init: usize, + pub(crate) auto_readahead_max: usize, } pub(crate) static BASE: Descriptor = Descriptor { @@ -61,11 +65,15 @@ pub(crate) static BASE: Descriptor = Descriptor { compaction: CompactionStyle::Level, compaction_pri: CompactionPri::MinOverlappingRatio, compression: CompressionType::Zstd, + compressed_index: true, compression_shape: [0, 0, 0, 1, 1, 1, 1], compression_level: SENTINEL_COMPRESSION_LEVEL, bottommost_level: Some(SENTINEL_COMPRESSION_LEVEL), block_index_hashing: None, cache_shards: 64, + auto_readahead_thresh: 0, + auto_readahead_init: 1024 * 16, + auto_readahead_max: 1024 * 1024 * 2, }; pub(crate) static RANDOM: Descriptor = Descriptor { @@ -74,6 +82,7 @@ pub(crate) static RANDOM: Descriptor = Descriptor { cache_shards: 128, compression_level: -3, bottommost_level: Some(4), + compressed_index: true, ..BASE }; @@ -86,6 +95,7 @@ pub(crate) static SEQUENTIAL: Descriptor = Descriptor { compression_level: -1, bottommost_level: Some(6), compression_shape: [0, 0, 1, 1, 1, 1, 1], + compressed_index: false, ..BASE }; @@ -100,6 +110,7 @@ pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { compression_level: -4, bottommost_level: Some(1), compression_shape: [0, 0, 0, 0, 0, 1, 1], + compressed_index: false, ..RANDOM }; @@ -114,5 +125,6 @@ pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { compression_level: -2, bottommost_level: Some(4), compression_shape: [0, 0, 0, 0, 1, 1, 1], + compressed_index: false, ..SEQUENTIAL }; From 8ab825b12c08324977898b26e9513b197750b9bb Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 21 Jan 2025 20:43:38 +0000 Subject: [PATCH 053/328] add stream parallelism extension combinators Signed-off-by: Jason Volk --- src/core/utils/stream/mod.rs | 2 + src/core/utils/stream/try_broadband.rs | 6 +-- src/core/utils/stream/try_parallel.rs | 71 ++++++++++++++++++++++++++ 3 files changed, 76 insertions(+), 3 deletions(-) create 
mode 100644 src/core/utils/stream/try_parallel.rs diff --git a/src/core/utils/stream/mod.rs b/src/core/utils/stream/mod.rs index c7bfa021..23455322 100644 --- a/src/core/utils/stream/mod.rs +++ b/src/core/utils/stream/mod.rs @@ -7,6 +7,7 @@ mod iter_stream; mod ready; mod tools; mod try_broadband; +mod try_parallel; mod try_ready; mod try_tools; mod try_wideband; @@ -24,6 +25,7 @@ pub use iter_stream::IterStream; pub use ready::ReadyExt; pub use tools::Tools; pub use try_broadband::TryBroadbandExt; +pub use try_parallel::TryParallelExt; pub use try_ready::TryReadyExt; pub use try_tools::TryTools; pub use try_wideband::TryWidebandExt; diff --git a/src/core/utils/stream/try_broadband.rs b/src/core/utils/stream/try_broadband.rs index c72fcc2c..361b4a92 100644 --- a/src/core/utils/stream/try_broadband.rs +++ b/src/core/utils/stream/try_broadband.rs @@ -18,7 +18,7 @@ where ) -> impl TryStream> + Send where N: Into>, - F: Fn(Self::Ok) -> Fut + Send + Sync, + F: Fn(Self::Ok) -> Fut + Send, Fut: TryFuture> + Send; fn broad_and_then( @@ -26,7 +26,7 @@ where f: F, ) -> impl TryStream> + Send where - F: Fn(Self::Ok) -> Fut + Send + Sync, + F: Fn(Self::Ok) -> Fut + Send, Fut: TryFuture> + Send, { self.broadn_and_then(None, f) @@ -44,7 +44,7 @@ where ) -> impl TryStream> + Send where N: Into>, - F: Fn(Self::Ok) -> Fut + Send + Sync, + F: Fn(Self::Ok) -> Fut + Send, Fut: TryFuture> + Send, { self.map_ok(f) diff --git a/src/core/utils/stream/try_parallel.rs b/src/core/utils/stream/try_parallel.rs new file mode 100644 index 00000000..7f8a63b1 --- /dev/null +++ b/src/core/utils/stream/try_parallel.rs @@ -0,0 +1,71 @@ +//! Parallelism stream combinator extensions to futures::Stream + +use futures::{stream::TryStream, TryFutureExt}; +use tokio::{runtime, task::JoinError}; + +use super::TryBroadbandExt; +use crate::{utils::sys::available_parallelism, Error, Result}; + +/// Parallelism extensions to augment futures::StreamExt. These combinators are +/// for computation-oriented workloads, unlike -band combinators for I/O +/// workloads; these default to the available compute parallelism for the +/// system. Threads are currently drawn from the tokio-spawn pool. Results are +/// unordered. +pub trait TryParallelExt +where + Self: TryStream> + Send + Sized, + E: From + From + Send + 'static, + T: Send + 'static, +{ + fn paralleln_and_then( + self, + h: H, + n: N, + f: F, + ) -> impl TryStream> + Send + where + N: Into>, + H: Into>, + F: Fn(Self::Ok) -> Result + Clone + Send + 'static, + U: Send + 'static; + + fn parallel_and_then( + self, + h: H, + f: F, + ) -> impl TryStream> + Send + where + H: Into>, + F: Fn(Self::Ok) -> Result + Clone + Send + 'static, + U: Send + 'static, + { + self.paralleln_and_then(h, None, f) + } +} + +impl TryParallelExt for S +where + S: TryStream> + Send + Sized, + E: From + From + Send + 'static, + T: Send + 'static, +{ + fn paralleln_and_then( + self, + h: H, + n: N, + f: F, + ) -> impl TryStream> + Send + where + N: Into>, + H: Into>, + F: Fn(Self::Ok) -> Result + Clone + Send + 'static, + U: Send + 'static, + { + let n = n.into().unwrap_or_else(available_parallelism); + let h = h.into().unwrap_or_else(runtime::Handle::current); + self.broadn_and_then(n, move |val| { + let (h, f) = (h.clone(), f.clone()); + async move { h.spawn_blocking(move || f(val)).map_err(E::from).await? 
} + }) + } +} From dda27ffcb1a6d9f1ff6dafebb6203cb9cb8c2f22 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 18 Jan 2025 12:05:07 +0000 Subject: [PATCH 054/328] add some compaction related interfaces Signed-off-by: Jason Volk --- Cargo.toml | 1 + src/admin/mod.rs | 1 + src/admin/query/raw.rs | 104 ++++++++++++++++++++++++++++++++++-- src/database/engine.rs | 23 ++++++-- src/database/map.rs | 1 + src/database/map/compact.rs | 62 +++++++++++++++++++++ src/database/mod.rs | 4 +- 7 files changed, 188 insertions(+), 8 deletions(-) create mode 100644 src/database/map/compact.rs diff --git a/Cargo.toml b/Cargo.toml index 4d738a11..f9e3b6db 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -875,6 +875,7 @@ enum_glob_use = { level = "allow", priority = 1 } if_not_else = { level = "allow", priority = 1 } if_then_some_else_none = { level = "allow", priority = 1 } inline_always = { level = "allow", priority = 1 } +match_bool = { level = "allow", priority = 1 } missing_docs_in_private_items = { level = "allow", priority = 1 } missing_errors_doc = { level = "allow", priority = 1 } missing_panics_doc = { level = "allow", priority = 1 } diff --git a/src/admin/mod.rs b/src/admin/mod.rs index ac51104a..695155e8 100644 --- a/src/admin/mod.rs +++ b/src/admin/mod.rs @@ -1,6 +1,7 @@ #![recursion_limit = "192"] #![allow(clippy::wildcard_imports)] #![allow(clippy::enum_glob_use)] +#![allow(clippy::too_many_arguments)] pub(crate) mod admin; pub(crate) mod command; diff --git a/src/admin/query/raw.rs b/src/admin/query/raw.rs index 678d21c9..ac5e8976 100644 --- a/src/admin/query/raw.rs +++ b/src/admin/query/raw.rs @@ -2,13 +2,13 @@ use std::{borrow::Cow, collections::BTreeMap, ops::Deref}; use clap::Subcommand; use conduwuit::{ - apply, at, + apply, at, is_zero, utils::{ - stream::{ReadyExt, TryIgnore}, + stream::{ReadyExt, TryIgnore, TryParallelExt}, string::EMPTY, IterStream, }, - Result, + Err, Result, }; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::events::room::message::RoomMessageEventContent; @@ -121,6 +121,104 @@ pub(crate) enum RawCommand { /// Key prefix prefix: Option, }, + + /// - Compact database + Compact { + #[arg(short, long, alias("column"))] + map: Option>, + + #[arg(long)] + start: Option, + + #[arg(long)] + stop: Option, + + #[arg(long)] + from: Option, + + #[arg(long)] + into: Option, + + /// There is one compaction job per column; then this controls how many + /// columns are compacted in parallel. If zero, one compaction job is + /// still run at a time here, but in exclusive-mode blocking any other + /// automatic compaction jobs until complete. + #[arg(long)] + parallelism: Option, + + #[arg(long, default_value("false"))] + exhaustive: bool, + }, +} + +#[admin_command] +pub(super) async fn compact( + &self, + map: Option>, + start: Option, + stop: Option, + from: Option, + into: Option, + parallelism: Option, + exhaustive: bool, +) -> Result { + use conduwuit_database::compact::Options; + + let default_all_maps = map + .is_none() + .then(|| { + self.services + .db + .keys() + .map(Deref::deref) + .map(ToOwned::to_owned) + }) + .into_iter() + .flatten(); + + let maps: Vec<_> = map + .unwrap_or_default() + .into_iter() + .chain(default_all_maps) + .map(|map| self.services.db.get(&map)) + .filter_map(Result::ok) + .cloned() + .collect(); + + if maps.is_empty() { + return Err!("--map argument invalid. 
not found in database"); + } + + let range = ( + start.as_ref().map(String::as_bytes).map(Into::into), + stop.as_ref().map(String::as_bytes).map(Into::into), + ); + + let options = Options { + range, + level: (from, into), + exclusive: parallelism.is_some_and(is_zero!()), + exhaustive, + }; + + let runtime = self.services.server.runtime().clone(); + let parallelism = parallelism.unwrap_or(1); + let results = maps + .into_iter() + .try_stream() + .paralleln_and_then(runtime, parallelism, move |map| { + map.compact_blocking(options.clone())?; + Ok(map.name().to_owned()) + }) + .collect::>(); + + let timer = Instant::now(); + let results = results.await; + let query_time = timer.elapsed(); + self.write_str(&format!("Jobs completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")) + .await?; + + Ok(RoomMessageEventContent::text_plain("")) } #[admin_command] diff --git a/src/database/engine.rs b/src/database/engine.rs index 2958f73f..8be9eecc 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -18,9 +18,16 @@ use std::{ }; use conduwuit::{debug, info, warn, Err, Result}; -use rocksdb::{AsColumnFamilyRef, BoundColumnFamily, DBCommon, DBWithThreadMode, MultiThreaded}; +use rocksdb::{ + AsColumnFamilyRef, BoundColumnFamily, DBCommon, DBWithThreadMode, MultiThreaded, + WaitForCompactOptions, +}; -use crate::{pool::Pool, result, Context}; +use crate::{ + pool::Pool, + util::{map_err, result}, + Context, +}; pub struct Engine { pub(super) read_only: bool, @@ -55,12 +62,22 @@ impl Engine { #[tracing::instrument(skip(self), level = "debug")] pub fn flush(&self) -> Result { result(DBCommon::flush_wal(&self.db, false)) } - #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(skip(self), level = "info")] pub fn sort(&self) -> Result { let flushoptions = rocksdb::FlushOptions::default(); result(DBCommon::flush_opt(&self.db, &flushoptions)) } + #[tracing::instrument(skip(self), level = "info")] + pub fn wait_compactions(&self) -> Result { + let mut opts = WaitForCompactOptions::default(); + opts.set_abort_on_pause(true); + opts.set_flush(false); + opts.set_timeout(0); + + self.db.wait_for_compact(&opts).map_err(map_err) + } + /// Query for database property by null-terminated name which is expected to /// have a result with an integer representation. This is intended for /// low-overhead programmatic use. diff --git a/src/database/map.rs b/src/database/map.rs index 60d66585..33cae594 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -1,3 +1,4 @@ +pub mod compact; mod contains; mod count; mod get; diff --git a/src/database/map/compact.rs b/src/database/map/compact.rs new file mode 100644 index 00000000..c0381eb4 --- /dev/null +++ b/src/database/map/compact.rs @@ -0,0 +1,62 @@ +use conduwuit::{implement, Err, Result}; +use rocksdb::{BottommostLevelCompaction, CompactOptions}; + +use crate::keyval::KeyBuf; + +#[derive(Clone, Debug, Default)] +pub struct Options { + /// Key range to start and stop compaction. + pub range: (Option, Option), + + /// (None, None) - all levels to all necessary levels + /// (None, Some(1)) - compact all levels into level 1 + /// (Some(1), None) - compact level 1 into level 1 + /// (Some(_), Some(_) - currently unsupported + pub level: (Option, Option), + + /// run compaction until complete. if false only one pass is made, and the + /// results of that pass are not further recompacted. 
+ pub exhaustive: bool, + + /// waits for other compactions to complete, then runs this compaction + /// exclusively before allowing automatic compactions to resume. + pub exclusive: bool, +} + +#[implement(super::Map)] +#[tracing::instrument( + name = "compact", + level = "info" + skip(self), + fields(%self), +)] +pub fn compact_blocking(&self, opts: Options) -> Result { + let mut co = CompactOptions::default(); + co.set_exclusive_manual_compaction(opts.exclusive); + co.set_bottommost_level_compaction(match opts.exhaustive { + | true => BottommostLevelCompaction::Force, + | false => BottommostLevelCompaction::ForceOptimized, + }); + + match opts.level { + | (None, None) => { + co.set_change_level(true); + co.set_target_level(-1); + }, + | (None, Some(level)) => { + co.set_change_level(true); + co.set_target_level(level.try_into()?); + }, + | (Some(level), None) => { + co.set_change_level(false); + co.set_target_level(level.try_into()?); + }, + | (Some(_), Some(_)) => return Err!("compacting between specific levels not supported"), + }; + + self.db + .db + .compact_range_cf_opt(&self.cf(), opts.range.0, opts.range.1, &co); + + Ok(()) +} diff --git a/src/database/mod.rs b/src/database/mod.rs index 6e3f8c96..8ae8dcf5 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -30,12 +30,12 @@ pub use self::{ deserialized::Deserialized, handle::Handle, keyval::{serialize_key, serialize_val, KeyVal, Slice}, - map::Map, + map::{compact, Map}, ser::{serialize, serialize_to, serialize_to_vec, Interfix, Json, Separator, SEP}, }; pub(crate) use self::{ engine::{context::Context, Engine}, - util::{or_else, result}, + util::or_else, }; use crate::maps::{Maps, MapsKey, MapsVal}; From 9ab381e4ebf8b2953c3cd697185c79e14c0ae309 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 18 Jan 2025 07:12:20 +0000 Subject: [PATCH 055/328] generate fmt::Display for Config Signed-off-by: Jason Volk --- src/admin/server/commands.rs | 4 +- src/core/config/mod.rs | 401 ----------------------------------- src/macros/config.rs | 93 +++++--- 3 files changed, 63 insertions(+), 435 deletions(-) diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index 8d3358a8..6469a0e9 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -22,8 +22,8 @@ pub(super) async fn uptime(&self) -> Result { pub(super) async fn show_config(&self) -> Result { // Construct and send the response Ok(RoomMessageEventContent::text_markdown(format!( - "```\n{}\n```", - self.services.globals.config + "{}", + self.services.server.config ))) } diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 5cfed0b9..d6983540 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -3,7 +3,6 @@ pub mod proxy; use std::{ collections::{BTreeMap, BTreeSet, HashSet}, - fmt, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, path::PathBuf, }; @@ -15,7 +14,6 @@ use either::{ }; use figment::providers::{Env, Format, Toml}; pub use figment::{value::Value as FigmentValue, Figment}; -use itertools::Itertools; use regex::RegexSet; use ruma::{ api::client::discovery::discover_support::ContactRole, OwnedRoomOrAliasId, OwnedServerName, @@ -1859,405 +1857,6 @@ impl Config { pub fn check(&self) -> Result<(), Error> { check(self) } } -impl fmt::Display for Config { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - writeln!(f, "Active config values:\n").expect("wrote line to formatter stream"); - let mut line = |key: &str, val: &str| { - writeln!(f, "{key}: {val}").expect("wrote line to formatter stream"); 
- }; - - line("Server name", self.server_name.host()); - line("Database path", &self.database_path.to_string_lossy()); - line( - "Database backup path", - self.database_backup_path - .as_ref() - .map_or("", |path| path.to_str().unwrap_or("")), - ); - line("Database backups to keep", &self.database_backups_to_keep.to_string()); - line("Database cache capacity (MB)", &self.db_cache_capacity_mb.to_string()); - line("Cache capacity modifier", &self.cache_capacity_modifier.to_string()); - line("PDU cache capacity", &self.pdu_cache_capacity.to_string()); - line("Auth chain cache capacity", &self.auth_chain_cache_capacity.to_string()); - line("Short eventid cache capacity", &self.shorteventid_cache_capacity.to_string()); - line("Eventid short cache capacity", &self.eventidshort_cache_capacity.to_string()); - line("Short statekey cache capacity", &self.shortstatekey_cache_capacity.to_string()); - line("Statekey short cache capacity", &self.statekeyshort_cache_capacity.to_string()); - line( - "Server visibility cache capacity", - &self.server_visibility_cache_capacity.to_string(), - ); - line( - "User visibility cache capacity", - &self.user_visibility_cache_capacity.to_string(), - ); - line("Stateinfo cache capacity", &self.stateinfo_cache_capacity.to_string()); - line( - "Roomid space hierarchy cache capacity", - &self.roomid_spacehierarchy_cache_capacity.to_string(), - ); - line("DNS cache entry limit", &self.dns_cache_entries.to_string()); - line("DNS minimum TTL", &self.dns_min_ttl.to_string()); - line("DNS minimum NXDOMAIN TTL", &self.dns_min_ttl_nxdomain.to_string()); - line("DNS attempts", &self.dns_attempts.to_string()); - line("DNS timeout", &self.dns_timeout.to_string()); - line("DNS fallback to TCP", &self.dns_tcp_fallback.to_string()); - line("DNS query over TCP only", &self.query_over_tcp_only.to_string()); - line("Query all nameservers", &self.query_all_nameservers.to_string()); - line("Maximum request size (bytes)", &self.max_request_size.to_string()); - line("Sender retry backoff limit", &self.sender_retry_backoff_limit.to_string()); - line("Request connect timeout", &self.request_conn_timeout.to_string()); - line("Request timeout", &self.request_timeout.to_string()); - line("Request total timeout", &self.request_total_timeout.to_string()); - line("Idle connections per host", &self.request_idle_per_host.to_string()); - line("Request pool idle timeout", &self.request_idle_timeout.to_string()); - line("Well_known connect timeout", &self.well_known_conn_timeout.to_string()); - line("Well_known timeout", &self.well_known_timeout.to_string()); - line("Federation timeout", &self.federation_timeout.to_string()); - line("Federation pool idle per host", &self.federation_idle_per_host.to_string()); - line("Federation pool idle timeout", &self.federation_idle_timeout.to_string()); - line("Sender timeout", &self.sender_timeout.to_string()); - line("Sender pool idle timeout", &self.sender_idle_timeout.to_string()); - line("Appservice timeout", &self.appservice_timeout.to_string()); - line("Appservice pool idle timeout", &self.appservice_idle_timeout.to_string()); - line("Pusher pool idle timeout", &self.pusher_idle_timeout.to_string()); - line("Allow registration", &self.allow_registration.to_string()); - line( - "Registration token", - if self.registration_token.is_none() - && self.registration_token_file.is_none() - && self.allow_registration - { - "not set (⚠️ open registration!)" - } else if self.registration_token.is_none() && self.registration_token_file.is_none() - { - "not set" - } else 
{ - "set" - }, - ); - line( - "Registration token file path", - self.registration_token_file - .as_ref() - .map_or("", |path| path.to_str().unwrap_or_default()), - ); - line( - "Allow guest registration (inherently false if allow registration is false)", - &self.allow_guest_registration.to_string(), - ); - line( - "Log guest registrations in admin room", - &self.log_guest_registrations.to_string(), - ); - line( - "Allow guests to auto join rooms", - &self.allow_guests_auto_join_rooms.to_string(), - ); - line("New user display name suffix", &self.new_user_displayname_suffix); - line("Allow encryption", &self.allow_encryption.to_string()); - line("Allow federation", &self.allow_federation.to_string()); - line("Federation loopback", &self.federation_loopback.to_string()); - line( - "Require authentication for profile requests", - &self.require_auth_for_profile_requests.to_string(), - ); - line( - "Allow incoming federated presence requests (updates)", - &self.allow_incoming_presence.to_string(), - ); - line( - "Allow outgoing federated presence requests (updates)", - &self.allow_outgoing_presence.to_string(), - ); - line( - "Allow local presence requests (updates)", - &self.allow_local_presence.to_string(), - ); - line( - "Allow incoming remote read receipts", - &self.allow_incoming_read_receipts.to_string(), - ); - line( - "Allow outgoing remote read receipts", - &self.allow_outgoing_read_receipts.to_string(), - ); - line( - "Block non-admin room invites (local and remote, admins can still send and receive \ - invites)", - &self.block_non_admin_invites.to_string(), - ); - line("Enable admin escape commands", &self.admin_escape_commands.to_string()); - line( - "Activate admin console after startup", - &self.admin_console_automatic.to_string(), - ); - line("Execute admin commands after startup", &self.admin_execute.join(", ")); - line( - "Continue startup even if some commands fail", - &self.admin_execute_errors_ignore.to_string(), - ); - line("Filter for admin command log capture", &self.admin_log_capture); - line("Admin room tag", &self.admin_room_tag); - line("Allow outgoing federated typing", &self.allow_outgoing_typing.to_string()); - line("Allow incoming federated typing", &self.allow_incoming_typing.to_string()); - line( - "Incoming federated typing timeout", - &self.typing_federation_timeout_s.to_string(), - ); - line("Client typing timeout minimum", &self.typing_client_timeout_min_s.to_string()); - line("Client typing timeout maxmimum", &self.typing_client_timeout_max_s.to_string()); - line("Allow device name federation", &self.allow_device_name_federation.to_string()); - line( - "Allow incoming profile lookup federation requests", - &self - .allow_inbound_profile_lookup_federation_requests - .to_string(), - ); - line( - "Auto deactivate banned room join attempts", - &self.auto_deactivate_banned_room_attempts.to_string(), - ); - line("Notification push path", &self.notification_push_path); - line("Allow room creation", &self.allow_room_creation.to_string()); - line( - "Allow public room directory over federation", - &self.allow_public_room_directory_over_federation.to_string(), - ); - line( - "Allow public room directory without authentication", - &self.allow_public_room_directory_without_auth.to_string(), - ); - line( - "Lockdown public room directory (only allow admins to publish)", - &self.lockdown_public_room_directory.to_string(), - ); - line( - "Trusted key servers", - &self - .trusted_servers - .iter() - .map(|server| server.host()) - .join(", "), - ); - line("OpenID Token 
TTL", &self.openid_token_ttl.to_string()); - line( - "TURN username", - if self.turn_username.is_empty() { - "not set" - } else { - &self.turn_username - }, - ); - line("TURN password", { - if self.turn_password.is_empty() { - "not set" - } else { - "set" - } - }); - line("TURN secret", { - if self.turn_secret.is_empty() && self.turn_secret_file.is_none() { - "not set" - } else { - "set" - } - }); - line("TURN secret file path", { - self.turn_secret_file - .as_ref() - .map_or("", |path| path.to_str().unwrap_or_default()) - }); - line("Turn TTL", &self.turn_ttl.to_string()); - line("Turn URIs", { - let mut lst = Vec::with_capacity(self.turn_uris.len()); - for item in self.turn_uris.iter().cloned().enumerate() { - let (_, uri): (usize, String) = item; - lst.push(uri); - } - &lst.join(", ") - }); - line("Auto Join Rooms", { - let mut lst = Vec::with_capacity(self.auto_join_rooms.len()); - for room in &self.auto_join_rooms { - lst.push(room); - } - &lst.into_iter().join(", ") - }); - line("Zstd HTTP Compression", &self.zstd_compression.to_string()); - line("Gzip HTTP Compression", &self.gzip_compression.to_string()); - line("Brotli HTTP Compression", &self.brotli_compression.to_string()); - line("RocksDB database LOG level", &self.rocksdb_log_level); - line("RocksDB database LOG to stderr", &self.rocksdb_log_stderr.to_string()); - line("RocksDB database LOG time-to-roll", &self.rocksdb_log_time_to_roll.to_string()); - line("RocksDB Max LOG Files", &self.rocksdb_max_log_files.to_string()); - line( - "RocksDB database max LOG file size", - &self.rocksdb_max_log_file_size.to_string(), - ); - line( - "RocksDB database optimize for spinning disks", - &self.rocksdb_optimize_for_spinning_disks.to_string(), - ); - line("RocksDB Direct-IO", &self.rocksdb_direct_io.to_string()); - line("RocksDB Parallelism Threads", &self.rocksdb_parallelism_threads.to_string()); - line("RocksDB Compression Algorithm", &self.rocksdb_compression_algo); - line("RocksDB Compression Level", &self.rocksdb_compression_level.to_string()); - line( - "RocksDB Bottommost Compression Level", - &self.rocksdb_bottommost_compression_level.to_string(), - ); - line( - "RocksDB Bottommost Level Compression", - &self.rocksdb_bottommost_compression.to_string(), - ); - line("RocksDB Recovery Mode", &self.rocksdb_recovery_mode.to_string()); - line("RocksDB Repair Mode", &self.rocksdb_repair.to_string()); - line("RocksDB Read-only Mode", &self.rocksdb_read_only.to_string()); - line("RocksDB Secondary Mode", &self.rocksdb_secondary.to_string()); - line( - "RocksDB Compaction Idle Priority", - &self.rocksdb_compaction_prio_idle.to_string(), - ); - line( - "RocksDB Compaction Idle IOPriority", - &self.rocksdb_compaction_ioprio_idle.to_string(), - ); - line("RocksDB Compaction enabled", &self.rocksdb_compaction.to_string()); - line("RocksDB Statistics level", &self.rocksdb_stats_level.to_string()); - line("Media integrity checks on startup", &self.media_startup_check.to_string()); - line("Media compatibility filesystem links", &self.media_compat_file_link.to_string()); - line("Prune missing media from database", &self.prune_missing_media.to_string()); - line("Allow legacy (unauthenticated) media", &self.allow_legacy_media.to_string()); - line("Freeze legacy (unauthenticated) media", &self.freeze_legacy_media.to_string()); - line("Prevent Media Downloads From", { - let mut lst = Vec::with_capacity(self.prevent_media_downloads_from.len()); - for domain in &self.prevent_media_downloads_from { - lst.push(domain.host()); - } - &lst.join(", ") - }); - 
line("Forbidden Remote Server Names (\"Global\" ACLs)", { - let mut lst = Vec::with_capacity(self.forbidden_remote_server_names.len()); - for domain in &self.forbidden_remote_server_names { - lst.push(domain.host()); - } - &lst.join(", ") - }); - line("Forbidden Remote Room Directory Server Names", { - let mut lst = - Vec::with_capacity(self.forbidden_remote_room_directory_server_names.len()); - for domain in &self.forbidden_remote_room_directory_server_names { - lst.push(domain.host()); - } - &lst.join(", ") - }); - line("Outbound Request IP Range (CIDR) Denylist", { - let mut lst = Vec::with_capacity(self.ip_range_denylist.len()); - for item in self.ip_range_denylist.iter().cloned().enumerate() { - let (_, ip): (usize, String) = item; - lst.push(ip); - } - &lst.join(", ") - }); - line("Forbidden usernames", { - &self.forbidden_usernames.patterns().iter().join(", ") - }); - line("Forbidden room aliases", { - &self.forbidden_alias_names.patterns().iter().join(", ") - }); - line( - "URL preview bound interface", - self.url_preview_bound_interface - .as_ref() - .map(Either::as_ref) - .map(|either| either.map_left(ToString::to_string)) - .map(Either::either_into::) - .unwrap_or_default() - .as_str(), - ); - line( - "URL preview domain contains allowlist", - &self.url_preview_domain_contains_allowlist.join(", "), - ); - line( - "URL preview domain explicit allowlist", - &self.url_preview_domain_explicit_allowlist.join(", "), - ); - line( - "URL preview domain explicit denylist", - &self.url_preview_domain_explicit_denylist.join(", "), - ); - line( - "URL preview URL contains allowlist", - &self.url_preview_url_contains_allowlist.join(", "), - ); - line("URL preview maximum spider size", &self.url_preview_max_spider_size.to_string()); - line("URL preview check root domain", &self.url_preview_check_root_domain.to_string()); - line( - "Allow check for updates / announcements check", - &self.allow_check_for_updates.to_string(), - ); - line("Enable netburst on startup", &self.startup_netburst.to_string()); - #[cfg(feature = "sentry_telemetry")] - line("Sentry.io reporting and tracing", &self.sentry.to_string()); - #[cfg(feature = "sentry_telemetry")] - line("Sentry.io send server_name in logs", &self.sentry_send_server_name.to_string()); - #[cfg(feature = "sentry_telemetry")] - line("Sentry.io tracing sample rate", &self.sentry_traces_sample_rate.to_string()); - line("Sentry.io attach stacktrace", &self.sentry_attach_stacktrace.to_string()); - line("Sentry.io send panics", &self.sentry_send_panic.to_string()); - line("Sentry.io send errors", &self.sentry_send_error.to_string()); - line("Sentry.io tracing filter", &self.sentry_filter); - line( - "Well-known server name", - self.well_known - .server - .as_ref() - .map_or("", |server| server.as_str()), - ); - line( - "Well-known client URL", - self.well_known - .client - .as_ref() - .map_or("", |url| url.as_str()), - ); - line( - "Well-known support email", - self.well_known - .support_email - .as_ref() - .map_or("", |str| str.as_ref()), - ); - line( - "Well-known support Matrix ID", - self.well_known - .support_mxid - .as_ref() - .map_or("", |mxid| mxid.as_str()), - ); - line( - "Well-known support role", - self.well_known - .support_role - .as_ref() - .map_or("", |role| role.as_str()), - ); - line( - "Well-known support page/URL", - self.well_known - .support_page - .as_ref() - .map_or("", |url| url.as_str()), - ); - line("Enable the tokio-console", &self.tokio_console.to_string()); - line("Admin room notices", &self.admin_room_notices.to_string()); 
- - Ok(()) - } -} - fn true_fn() -> bool { true } fn default_address() -> ListeningAddr { diff --git a/src/macros/config.rs b/src/macros/config.rs index 452abd20..90d6ef15 100644 --- a/src/macros/config.rs +++ b/src/macros/config.rs @@ -1,8 +1,8 @@ use std::{collections::HashSet, fmt::Write as _, fs::OpenOptions, io::Write as _}; use proc_macro::TokenStream; -use proc_macro2::Span; -use quote::ToTokens; +use proc_macro2::{Span, TokenStream as TokenStream2}; +use quote::{quote, ToTokens}; use syn::{ parse::Parser, punctuated::Punctuated, spanned::Spanned, Error, Expr, ExprLit, Field, Fields, FieldsNamed, ItemStruct, Lit, Meta, MetaList, MetaNameValue, Type, TypePath, @@ -19,18 +19,24 @@ const HIDDEN: &[&str] = &["default"]; #[allow(clippy::needless_pass_by_value)] pub(super) fn example_generator(input: ItemStruct, args: &[Meta]) -> Result { - if is_cargo_build() && !is_cargo_test() { - generate_example(&input, args)?; - } + let write = is_cargo_build() && !is_cargo_test(); + let additional = generate_example(&input, args, write)?; - Ok(input.to_token_stream().into()) + Ok([input.to_token_stream(), additional] + .into_iter() + .collect::() + .into()) } #[allow(clippy::needless_pass_by_value)] #[allow(unused_variables)] -fn generate_example(input: &ItemStruct, args: &[Meta]) -> Result<()> { +fn generate_example(input: &ItemStruct, args: &[Meta], write: bool) -> Result { let settings = get_simple_settings(args); + let section = settings.get("section").ok_or_else(|| { + Error::new(args[0].span(), "missing required 'section' attribute argument") + })?; + let filename = settings.get("filename").ok_or_else(|| { Error::new(args[0].span(), "missing required 'filename' attribute argument") })?; @@ -45,31 +51,33 @@ fn generate_example(input: &ItemStruct, args: &[Meta]) -> Result<()> { .split(' ') .collect(); - let section = settings.get("section").ok_or_else(|| { - Error::new(args[0].span(), "missing required 'section' attribute argument") - })?; - - let mut file = OpenOptions::new() + let fopts = OpenOptions::new() .write(true) .create(section == "global") .truncate(section == "global") .append(section != "global") - .open(filename) - .map_err(|e| { - Error::new( - Span::call_site(), - format!("Failed to open config file for generation: {e}"), - ) - })?; + .clone(); - if let Some(header) = settings.get("header") { - file.write_all(header.as_bytes()) + let mut file = write + .then(|| { + fopts.open(filename).map_err(|e| { + let msg = format!("Failed to open file for config generation: {e}"); + Error::new(Span::call_site(), msg) + }) + }) + .transpose()?; + + if let Some(file) = file.as_mut() { + if let Some(header) = settings.get("header") { + file.write_all(header.as_bytes()) + .expect("written to config file"); + } + + file.write_fmt(format_args!("\n[{section}]\n")) .expect("written to config file"); } - file.write_fmt(format_args!("\n[{section}]\n")) - .expect("written to config file"); - + let mut summary: Vec = Vec::new(); if let Fields::Named(FieldsNamed { named, .. 
}) = &input.fields { for field in named { let Some(ident) = &field.ident else { @@ -105,20 +113,41 @@ fn generate_example(input: &ItemStruct, args: &[Meta]) -> Result<()> { default }; - file.write_fmt(format_args!("\n{doc}")) - .expect("written to config file"); + if let Some(file) = file.as_mut() { + file.write_fmt(format_args!("\n{doc}")) + .expect("written to config file"); - file.write_fmt(format_args!("#{ident} ={default}\n")) + file.write_fmt(format_args!("#{ident} ={default}\n")) + .expect("written to config file"); + } + + let name = ident.to_string(); + summary.push(quote! { + writeln!(out, "| {} | {:?} |", #name, self.#ident)?; + }); + } + } + + if let Some(file) = file.as_mut() { + if let Some(footer) = settings.get("footer") { + file.write_all(footer.as_bytes()) .expect("written to config file"); } } - if let Some(footer) = settings.get("footer") { - file.write_all(footer.as_bytes()) - .expect("written to config file"); - } + let struct_name = &input.ident; + let display = quote! { + impl std::fmt::Display for #struct_name { + fn fmt(&self, out: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!(out, "| name | value |")?; + writeln!(out, "| :--- | :--- |")?; + #( #summary )* + Ok(()) + } + } + }; - Ok(()) + Ok(display) } fn get_default(field: &Field) -> Option { From 1f31e74024bcdc23efcbafc32c0d9572df83fb82 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 22 Jan 2025 03:50:51 +0000 Subject: [PATCH 056/328] add del to raw suite Signed-off-by: Jason Volk --- src/admin/query/raw.rs | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/src/admin/query/raw.rs b/src/admin/query/raw.rs index ac5e8976..5a6006ec 100644 --- a/src/admin/query/raw.rs +++ b/src/admin/query/raw.rs @@ -33,6 +33,15 @@ pub(crate) enum RawCommand { key: String, }, + /// - Raw database delete (for string keys) + RawDel { + /// Map name + map: String, + + /// Key + key: String, + }, + /// - Raw database keys iteration RawKeys { /// Map name @@ -534,6 +543,18 @@ pub(super) async fn raw_iter_from( ))) } +#[admin_command] +pub(super) async fn raw_del(&self, map: String, key: String) -> Result { + let map = self.services.db.get(&map)?; + let timer = Instant::now(); + map.remove(&key); + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Operation completed in {query_time:?}" + ))) +} + #[admin_command] pub(super) async fn raw_get(&self, map: String, key: String) -> Result { let map = self.services.db.get(&map)?; From 0c96891008713b1e121f1896fdba59f94570cc29 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 22 Jan 2025 00:52:48 +0000 Subject: [PATCH 057/328] add CBOR support to database schema Signed-off-by: Jason Volk --- Cargo.lock | 32 ++++++++++++++++++++++++++++++++ Cargo.toml | 8 ++++++++ src/database/Cargo.toml | 2 ++ src/database/de.rs | 4 ++++ src/database/mod.rs | 2 +- src/database/ser.rs | 14 +++++++++++++- 6 files changed, 60 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8de3abf4..d9758e6f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -761,6 +761,8 @@ dependencies = [ "const-str", "futures", "log", + "minicbor", + "minicbor-serde", "rust-rocksdb-uwu", "serde", "serde_json", @@ -2329,6 +2331,36 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" +[[package]] +name = "minicbor" +version = "0.25.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c0452a60c1863c1f50b5f77cd295e8d2786849f35883f0b9e18e7e6e1b5691b0" +dependencies = [ + "minicbor-derive", +] + +[[package]] +name = "minicbor-derive" +version = "0.15.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd2209fff77f705b00c737016a48e73733d7fbccb8b007194db148f03561fb70" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "minicbor-serde" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "becf18ac384ecf6f53b2db3b1549eebff664c67ecf259ae99be5912193291686" +dependencies = [ + "minicbor", + "serde", +] + [[package]] name = "minimad" version = "0.13.1" diff --git a/Cargo.toml b/Cargo.toml index f9e3b6db..042587fc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -506,6 +506,14 @@ version = "0.2" [workspace.dependencies.num-traits] version = "0.2" +[workspace.dependencies.minicbor] +version = "0.25.1" +features = ["std"] + +[workspace.dependencies.minicbor-serde] +version = "0.3.2" +features = ["std"] + # # Patches # diff --git a/src/database/Cargo.toml b/src/database/Cargo.toml index 09eedaf4..557c9a3e 100644 --- a/src/database/Cargo.toml +++ b/src/database/Cargo.toml @@ -40,6 +40,8 @@ conduwuit-core.workspace = true const-str.workspace = true futures.workspace = true log.workspace = true +minicbor.workspace = true +minicbor-serde.workspace = true rust-rocksdb.workspace = true serde.workspace = true serde_json.workspace = true diff --git a/src/database/de.rs b/src/database/de.rs index 48bc9f64..4fdc2251 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -248,6 +248,10 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { { match name { | "$serde_json::private::RawValue" => visitor.visit_map(self), + | "Cbor" => visitor + .visit_newtype_struct(&mut minicbor_serde::Deserializer::new(self.record_trail())) + .map_err(|e| Self::Error::SerdeDe(e.to_string().into())), + | _ => visitor.visit_newtype_struct(self), } } diff --git a/src/database/mod.rs b/src/database/mod.rs index 8ae8dcf5..42b7f5e3 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -31,7 +31,7 @@ pub use self::{ handle::Handle, keyval::{serialize_key, serialize_val, KeyVal, Slice}, map::{compact, Map}, - ser::{serialize, serialize_to, serialize_to_vec, Interfix, Json, Separator, SEP}, + ser::{serialize, serialize_to, serialize_to_vec, Cbor, Interfix, Json, Separator, SEP}, }; pub(crate) use self::{ engine::{context::Context, Engine}, diff --git a/src/database/ser.rs b/src/database/ser.rs index e6de5f7f..372b7522 100644 --- a/src/database/ser.rs +++ b/src/database/ser.rs @@ -1,7 +1,7 @@ use std::io::Write; use conduwuit::{debug::type_name, err, result::DebugInspect, utils::exchange, Error, Result}; -use serde::{ser, Serialize}; +use serde::{ser, Deserialize, Serialize}; use crate::util::unhandled; @@ -55,6 +55,10 @@ pub(crate) struct Serializer<'a, W: Write> { #[derive(Debug, Serialize)] pub struct Json(pub T); +/// Newtype for CBOR serialization. +#[derive(Debug, Deserialize, Serialize)] +pub struct Cbor(pub T); + /// Directive to force separator serialization specifically for prefix keying /// use. This is a quirk of the database schema and prefix iterations. 
#[derive(Debug, Serialize)] @@ -189,6 +193,14 @@ impl ser::Serializer for &mut Serializer<'_, W> { match name { | "Json" => serde_json::to_writer(&mut self.out, value).map_err(Into::into), + | "Cbor" => { + use minicbor::encode::write::Writer; + use minicbor_serde::Serializer; + + value + .serialize(&mut Serializer::new(&mut Writer::new(&mut self.out))) + .map_err(|e| Self::Error::SerdeSer(e.to_string().into())) + }, | _ => unhandled!("Unrecognized serialization Newtype {name:?}"), } } From 49023aa295da8b4d975389a1494611696d4cc63d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 22 Jan 2025 02:16:51 +0000 Subject: [PATCH 058/328] use database for resolver caches Signed-off-by: Jason Volk --- src/admin/query/resolver.rs | 59 +++++--------- src/database/maps.rs | 8 ++ src/service/resolver/actual.rs | 6 +- src/service/resolver/cache.rs | 135 ++++++++++++++++----------------- src/service/resolver/dns.rs | 24 +++--- src/service/resolver/fed.rs | 3 +- src/service/resolver/mod.rs | 39 +--------- src/service/sending/send.rs | 2 +- 8 files changed, 114 insertions(+), 162 deletions(-) diff --git a/src/admin/query/resolver.rs b/src/admin/query/resolver.rs index b53661fc..0b6da6fd 100644 --- a/src/admin/query/resolver.rs +++ b/src/admin/query/resolver.rs @@ -1,7 +1,6 @@ -use std::fmt::Write; - use clap::Subcommand; use conduwuit::{utils::time, Result}; +use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, OwnedServerName}; use crate::{admin_command, admin_command_dispatch}; @@ -31,29 +30,19 @@ async fn destinations_cache( writeln!(self, "| Server Name | Destination | Hostname | Expires |").await?; writeln!(self, "| ----------- | ----------- | -------- | ------- |").await?; - let mut out = String::new(); - { - let map = self - .services - .resolver - .cache - .destinations - .read() - .expect("locked"); + let mut destinations = self.services.resolver.cache.destinations().boxed(); - for (name, &CachedDest { ref dest, ref host, expire }) in map.iter() { - if let Some(server_name) = server_name.as_ref() { - if name != server_name { - continue; - } + while let Some((name, CachedDest { dest, host, expire })) = destinations.next().await { + if let Some(server_name) = server_name.as_ref() { + if name != server_name { + continue; } - - let expire = time::format(expire, "%+"); - writeln!(out, "| {name} | {dest} | {host} | {expire} |")?; } - } - self.write_str(out.as_str()).await?; + let expire = time::format(expire, "%+"); + self.write_str(&format!("| {name} | {dest} | {host} | {expire} |\n")) + .await?; + } Ok(RoomMessageEventContent::notice_plain("")) } @@ -65,29 +54,19 @@ async fn overrides_cache(&self, server_name: Option) -> Result Result { - let (result, cached) = if let Some(result) = self.get_cached_destination(server_name) { + let (result, cached) = if let Ok(result) = self.cache.get_destination(server_name).await { (result, true) } else { self.validate_dest(server_name)?; @@ -232,7 +232,7 @@ impl super::Service { #[tracing::instrument(skip_all, name = "well-known")] async fn request_well_known(&self, dest: &str) -> Result> { - if !self.has_cached_override(dest) { + if !self.cache.has_override(dest).await { self.query_and_cache_override(dest, dest, 8448).await?; } @@ -315,7 +315,7 @@ impl super::Service { debug_info!("{overname:?} overriden by {hostname:?}"); } - self.set_cached_override(overname, CachedOverride { + self.cache.set_override(overname, CachedOverride { ips: override_ip.into_iter().take(MAX_IPS).collect(), port, expire: CachedOverride::default_expire(), 
diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index e309a129..11e6c9bd 100644 --- a/src/service/resolver/cache.rs +++ b/src/service/resolver/cache.rs @@ -1,108 +1,103 @@ -use std::{ - collections::HashMap, - net::IpAddr, - sync::{Arc, RwLock}, - time::SystemTime, -}; +use std::{net::IpAddr, sync::Arc, time::SystemTime}; use arrayvec::ArrayVec; use conduwuit::{ - trace, - utils::{math::Expected, rand}, + at, implement, + utils::{math::Expected, rand, stream::TryIgnore}, + Result, }; -use ruma::{OwnedServerName, ServerName}; +use database::{Cbor, Deserialized, Map}; +use futures::{Stream, StreamExt}; +use ruma::ServerName; +use serde::{Deserialize, Serialize}; use super::fed::FedDest; pub struct Cache { - pub destinations: RwLock, // actual_destination, host - pub overrides: RwLock, + destinations: Arc, + overrides: Arc, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Deserialize, Serialize)] pub struct CachedDest { pub dest: FedDest, pub host: String, pub expire: SystemTime, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Deserialize, Serialize)] pub struct CachedOverride { pub ips: IpAddrs, pub port: u16, pub expire: SystemTime, } -pub type WellKnownMap = HashMap; -pub type TlsNameMap = HashMap; - pub type IpAddrs = ArrayVec; pub(crate) const MAX_IPS: usize = 3; impl Cache { - pub(super) fn new() -> Arc { + pub(super) fn new(args: &crate::Args<'_>) -> Arc { Arc::new(Self { - destinations: RwLock::new(WellKnownMap::new()), - overrides: RwLock::new(TlsNameMap::new()), + destinations: args.db["servername_destination"].clone(), + overrides: args.db["servername_override"].clone(), }) } } -impl super::Service { - pub fn set_cached_destination( - &self, - name: OwnedServerName, - dest: CachedDest, - ) -> Option { - trace!(?name, ?dest, "set cached destination"); - self.cache - .destinations - .write() - .expect("locked for writing") - .insert(name, dest) - } +#[implement(Cache)] +pub fn set_destination(&self, name: &ServerName, dest: CachedDest) { + self.destinations.raw_put(name, Cbor(dest)); +} - #[must_use] - pub fn get_cached_destination(&self, name: &ServerName) -> Option { - self.cache - .destinations - .read() - .expect("locked for reading") - .get(name) - .cloned() - } +#[implement(Cache)] +pub fn set_override(&self, name: &str, over: CachedOverride) { + self.overrides.raw_put(name, Cbor(over)); +} - pub fn set_cached_override( - &self, - name: &str, - over: CachedOverride, - ) -> Option { - trace!(?name, ?over, "set cached override"); - self.cache - .overrides - .write() - .expect("locked for writing") - .insert(name.into(), over) - } +#[implement(Cache)] +pub async fn get_destination(&self, name: &ServerName) -> Result { + self.destinations + .get(name) + .await + .deserialized::>() + .map(at!(0)) +} - #[must_use] - pub fn get_cached_override(&self, name: &str) -> Option { - self.cache - .overrides - .read() - .expect("locked for reading") - .get(name) - .cloned() - } +#[implement(Cache)] +pub async fn get_override(&self, name: &str) -> Result { + self.overrides + .get(name) + .await + .deserialized::>() + .map(at!(0)) +} - #[must_use] - pub fn has_cached_override(&self, name: &str) -> bool { - self.cache - .overrides - .read() - .expect("locked for reading") - .contains_key(name) - } +#[implement(Cache)] +#[must_use] +pub async fn has_destination(&self, destination: &str) -> bool { + self.destinations.exists(destination).await.is_ok() +} + +#[implement(Cache)] +#[must_use] +pub async fn has_override(&self, destination: &str) -> bool { + 
self.overrides.exists(destination).await.is_ok() +} + +#[implement(Cache)] +pub fn destinations(&self) -> impl Stream + Send + '_ { + self.destinations + .stream() + .ignore_err() + .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1 .0)) +} + +#[implement(Cache)] +pub fn overrides(&self) -> impl Stream + Send + '_ { + self.overrides + .stream() + .ignore_err() + .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1 .0)) } impl CachedDest { diff --git a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs index 5c9018ab..ad7768bc 100644 --- a/src/service/resolver/dns.rs +++ b/src/service/resolver/dns.rs @@ -88,18 +88,20 @@ impl Resolve for Resolver { impl Resolve for Hooked { fn resolve(&self, name: Name) -> Resolving { - let cached: Option = self - .cache - .overrides - .read() - .expect("locked for reading") - .get(name.as_str()) - .cloned(); + hooked_resolve(self.cache.clone(), self.server.clone(), self.resolver.clone(), name) + .boxed() + } +} - cached.map_or_else( - || resolve_to_reqwest(self.server.clone(), self.resolver.clone(), name).boxed(), - |cached| cached_to_reqwest(cached).boxed(), - ) +async fn hooked_resolve( + cache: Arc, + server: Arc, + resolver: Arc, + name: Name, +) -> Result> { + match cache.get_override(name.as_str()).await { + | Ok(cached) => cached_to_reqwest(cached).await, + | Err(_) => resolve_to_reqwest(server, resolver, name).boxed().await, } } diff --git a/src/service/resolver/fed.rs b/src/service/resolver/fed.rs index 76fc6894..bfe100e7 100644 --- a/src/service/resolver/fed.rs +++ b/src/service/resolver/fed.rs @@ -6,8 +6,9 @@ use std::{ use arrayvec::ArrayString; use conduwuit::utils::math::Expected; +use serde::{Deserialize, Serialize}; -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] pub enum FedDest { Literal(SocketAddr), Named(String, PortString), diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs index 6a6289b6..3163b0d0 100644 --- a/src/service/resolver/mod.rs +++ b/src/service/resolver/mod.rs @@ -4,9 +4,9 @@ mod dns; pub mod fed; mod tests; -use std::{fmt::Write, sync::Arc}; +use std::sync::Arc; -use conduwuit::{utils, utils::math::Expected, Result, Server}; +use conduwuit::{Result, Server}; use self::{cache::Cache, dns::Resolver}; use crate::{client, Dep}; @@ -25,7 +25,7 @@ struct Services { impl crate::Service for Service { #[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)] fn build(args: crate::Args<'_>) -> Result> { - let cache = Cache::new(); + let cache = Cache::new(&args); Ok(Arc::new(Self { cache: cache.clone(), resolver: Resolver::build(args.server, cache)?, @@ -36,38 +36,5 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result { - use utils::bytes::pretty; - - let (oc_count, oc_bytes) = self.cache.overrides.read()?.iter().fold( - (0_usize, 0_usize), - |(count, bytes), (key, val)| { - (count.expected_add(1), bytes.expected_add(key.len()).expected_add(val.size())) - }, - ); - - let (dc_count, dc_bytes) = self.cache.destinations.read()?.iter().fold( - (0_usize, 0_usize), - |(count, bytes), (key, val)| { - (count.expected_add(1), bytes.expected_add(key.len()).expected_add(val.size())) - }, - ); - - writeln!(out, "resolver_overrides_cache: {oc_count} ({})", pretty(oc_bytes))?; - writeln!(out, "resolver_destinations_cache: {dc_count} ({})", pretty(dc_bytes))?; - - Ok(()) - } - - fn clear_cache(&self) { - self.cache.overrides.write().expect("write locked").clear(); - self.cache - 
.destinations - .write() - .expect("write locked") - .clear(); - self.resolver.resolver.clear_cache(); - } - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs index e2981068..831a1dd8 100644 --- a/src/service/sending/send.rs +++ b/src/service/sending/send.rs @@ -125,7 +125,7 @@ where let result = T::IncomingResponse::try_from_http_response(response); if result.is_ok() && !actual.cached { - resolver.set_cached_destination(dest.to_owned(), CachedDest { + resolver.cache.set_destination(dest, CachedDest { dest: actual.dest.clone(), host: actual.host.clone(), expire: CachedDest::default_expire(), From 7c0c029a4a90ded986d76910c162377f73360b64 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 22 Jan 2025 06:40:07 +0000 Subject: [PATCH 059/328] add try_lock to MutexMap; allow TryFrom constructions Signed-off-by: Jason Volk --- Cargo.lock | 26 ++++++++-------- Cargo.toml | 2 +- src/admin/debug/commands.rs | 2 +- src/core/utils/mutex_map.rs | 60 ++++++++++++++++++++++++++++++++++--- 4 files changed, 71 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d9758e6f..7985a411 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3188,7 +3188,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "assign", "js_int", @@ -3210,7 +3210,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "js_int", "ruma-common", @@ -3222,7 +3222,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "as_variant", "assign", @@ -3245,7 +3245,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "as_variant", "base64 0.22.1", @@ -3276,7 +3276,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "as_variant", "indexmap 2.7.0", @@ -3301,7 +3301,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "bytes", "http", @@ -3319,7 +3319,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3328,7 +3328,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "js_int", "ruma-common", @@ -3338,7 +3338,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3353,7 +3353,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "js_int", "ruma-common", @@ -3365,7 +3365,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "headers", "http", @@ -3378,7 +3378,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3394,7 +3394,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 042587fc..b8c145ca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -332,7 +332,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "c4f55b39900b33b2d443dd12a6a2dab50961fdfb" 
+rev = "427877d5bc14988ed877e500bbb27f8bc08b84e8" features = [ "compat", "rand", diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index a77587b0..49078dde 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -725,7 +725,7 @@ pub(super) async fn force_set_room_state_from_server( .save_state(room_id.clone().as_ref(), new_room_state) .await?; - let state_lock = self.services.rooms.state.mutex.lock(&room_id).await; + let state_lock = self.services.rooms.state.mutex.lock(&*room_id).await; self.services .rooms .state diff --git a/src/core/utils/mutex_map.rs b/src/core/utils/mutex_map.rs index 9b9821fe..03a4adf1 100644 --- a/src/core/utils/mutex_map.rs +++ b/src/core/utils/mutex_map.rs @@ -1,7 +1,13 @@ -use std::{fmt::Debug, hash::Hash, sync::Arc}; +use std::{ + fmt::Debug, + hash::Hash, + sync::{Arc, TryLockError::WouldBlock}, +}; use tokio::sync::OwnedMutexGuard as Omg; +use crate::{err, Result}; + /// Map of Mutexes pub struct MutexMap { map: Map, @@ -30,16 +36,17 @@ where } #[tracing::instrument(level = "trace", skip(self))] - pub async fn lock(&self, k: &K) -> Guard + pub async fn lock<'a, K>(&'a self, k: &'a K) -> Guard where K: Debug + Send + ?Sized + Sync, - Key: for<'a> From<&'a K>, + Key: TryFrom<&'a K>, + >::Error: Debug, { let val = self .map .lock() .expect("locked") - .entry(k.into()) + .entry(k.try_into().expect("failed to construct key")) .or_default() .clone(); @@ -49,6 +56,51 @@ where } } + #[tracing::instrument(level = "trace", skip(self))] + pub fn try_lock<'a, K>(&self, k: &'a K) -> Result> + where + K: Debug + Send + ?Sized + Sync, + Key: TryFrom<&'a K>, + >::Error: Debug, + { + let val = self + .map + .lock() + .expect("locked") + .entry(k.try_into().expect("failed to construct key")) + .or_default() + .clone(); + + Ok(Guard:: { + map: Arc::clone(&self.map), + val: val.try_lock_owned().map_err(|_| err!("would yield"))?, + }) + } + + #[tracing::instrument(level = "trace", skip(self))] + pub fn try_try_lock<'a, K>(&self, k: &'a K) -> Result> + where + K: Debug + Send + ?Sized + Sync, + Key: TryFrom<&'a K>, + >::Error: Debug, + { + let val = self + .map + .try_lock() + .map_err(|e| match e { + | WouldBlock => err!("would block"), + | _ => panic!("{e:?}"), + })? 
+ .entry(k.try_into().expect("failed to construct key")) + .or_default() + .clone(); + + Ok(Guard:: { + map: Arc::clone(&self.map), + val: val.try_lock_owned().map_err(|_| err!("would yield"))?, + }) + } + #[must_use] pub fn contains(&self, k: &Key) -> bool { self.map.lock().expect("locked").contains_key(k) } From f75d9fa79e1ef1bfbd7454cfa470acf1910d6a99 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 22 Jan 2025 06:57:18 +0000 Subject: [PATCH 060/328] deduplicate name resolutions Signed-off-by: Jason Volk --- src/service/resolver/actual.rs | 55 +++++++++++++++++++++++----------- src/service/resolver/mod.rs | 8 ++++- 2 files changed, 44 insertions(+), 19 deletions(-) diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 5676d7b1..afe5a1e5 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -4,7 +4,7 @@ use std::{ }; use conduwuit::{debug, debug_error, debug_info, debug_warn, err, error, trace, Err, Result}; -use futures::FutureExt; +use futures::{FutureExt, TryFutureExt}; use hickory_resolver::error::ResolveError; use ipaddress::IPAddress; use ruma::ServerName; @@ -29,18 +29,31 @@ impl ActualDest { impl super::Service { #[tracing::instrument(skip_all, level = "debug", name = "resolve")] pub(crate) async fn get_actual_dest(&self, server_name: &ServerName) -> Result { - let (result, cached) = if let Ok(result) = self.cache.get_destination(server_name).await { - (result, true) - } else { - self.validate_dest(server_name)?; - (self.resolve_actual_dest(server_name, true).boxed().await?, false) - }; - - let CachedDest { dest, host, .. } = result; + let (CachedDest { dest, host, .. }, cached) = + self.lookup_actual_dest(server_name).await?; Ok(ActualDest { dest, host, cached }) } + pub(crate) async fn lookup_actual_dest( + &self, + server_name: &ServerName, + ) -> Result<(CachedDest, bool)> { + if let Ok(result) = self.cache.get_destination(server_name).await { + return Ok((result, true)); + } + + let _dedup = self.resolving.lock(server_name.as_str()); + if let Ok(result) = self.cache.get_destination(server_name).await { + return Ok((result, true)); + } + + self.resolve_actual_dest(server_name, true) + .map_ok(|result| (result, false)) + .boxed() + .await + } + /// Returns: `actual_destination`, host header /// Implemented according to the specification at /// Numbers in comments below refer to bullet points in linked section of @@ -51,7 +64,7 @@ impl super::Service { dest: &ServerName, cache: bool, ) -> Result { - trace!("Finding actual destination for {dest}"); + self.validate_dest(dest)?; let mut host = dest.as_str().to_owned(); let actual_dest = match get_ip_with_port(dest.as_str()) { | Some(host_port) => Self::actual_dest_1(host_port)?, @@ -106,6 +119,7 @@ impl super::Service { cache, ) .await?; + Ok(FedDest::Named( host.to_owned(), port.try_into().unwrap_or_else(|_| FedDest::default_port()), @@ -156,6 +170,7 @@ impl super::Service { cache, ) .await?; + Ok(FedDest::Named( host.to_owned(), port.try_into().unwrap_or_else(|_| FedDest::default_port()), @@ -177,17 +192,18 @@ impl super::Service { cache, ) .await?; + if let Some(port) = force_port { - Ok(FedDest::Named( + return Ok(FedDest::Named( delegated, format!(":{port}") .as_str() .try_into() .unwrap_or_else(|_| FedDest::default_port()), - )) - } else { - Ok(add_port_to_hostname(&delegated)) + )); } + + Ok(add_port_to_hostname(&delegated)) } async fn actual_dest_3_4(&self, cache: bool, delegated: String) -> Result { @@ -212,21 +228,24 @@ impl super::Service { cache, ) .await?; 
+ if let Some(port) = force_port { let port = format!(":{port}"); - Ok(FedDest::Named( + + return Ok(FedDest::Named( host.to_owned(), PortString::from(port.as_str()).unwrap_or_else(|_| FedDest::default_port()), - )) - } else { - Ok(add_port_to_hostname(host)) + )); } + + Ok(add_port_to_hostname(host)) } async fn actual_dest_5(&self, dest: &ServerName, cache: bool) -> Result { debug!("5: No SRV record found"); self.conditional_query_and_cache_override(dest.as_str(), dest.as_str(), 8448, cache) .await?; + Ok(add_port_to_hostname(dest.as_str())) } diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs index 3163b0d0..090e562d 100644 --- a/src/service/resolver/mod.rs +++ b/src/service/resolver/mod.rs @@ -6,7 +6,8 @@ mod tests; use std::sync::Arc; -use conduwuit::{Result, Server}; +use arrayvec::ArrayString; +use conduwuit::{utils::MutexMap, Result, Server}; use self::{cache::Cache, dns::Resolver}; use crate::{client, Dep}; @@ -14,6 +15,7 @@ use crate::{client, Dep}; pub struct Service { pub cache: Arc, pub resolver: Arc, + resolving: Resolving, services: Services, } @@ -22,6 +24,9 @@ struct Services { client: Dep, } +type Resolving = MutexMap; +type NameBuf = ArrayString<256>; + impl crate::Service for Service { #[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)] fn build(args: crate::Args<'_>) -> Result> { @@ -29,6 +34,7 @@ impl crate::Service for Service { Ok(Arc::new(Self { cache: cache.clone(), resolver: Resolver::build(args.server, cache)?, + resolving: MutexMap::new(), services: Services { server: args.server.clone(), client: args.depend::("client"), From 607e338ac2bdb03b9e08cfe207bc7253aa8a8a2e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 22 Jan 2025 07:56:24 +0000 Subject: [PATCH 061/328] cache result of resolution at completion of resolution Signed-off-by: Jason Volk --- src/service/resolver/actual.rs | 8 ++++---- src/service/resolver/cache.rs | 4 ++-- src/service/sending/send.rs | 29 ++++------------------------- 3 files changed, 10 insertions(+), 31 deletions(-) diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index afe5a1e5..1a36936d 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -18,7 +18,6 @@ use super::{ pub(crate) struct ActualDest { pub(crate) dest: FedDest, pub(crate) host: String, - pub(crate) cached: bool, } impl ActualDest { @@ -29,10 +28,10 @@ impl ActualDest { impl super::Service { #[tracing::instrument(skip_all, level = "debug", name = "resolve")] pub(crate) async fn get_actual_dest(&self, server_name: &ServerName) -> Result { - let (CachedDest { dest, host, .. }, cached) = + let (CachedDest { dest, host, .. 
}, _cached) = self.lookup_actual_dest(server_name).await?; - Ok(ActualDest { dest, host, cached }) + Ok(ActualDest { dest, host }) } pub(crate) async fn lookup_actual_dest( @@ -49,6 +48,7 @@ impl super::Service { } self.resolve_actual_dest(server_name, true) + .inspect_ok(|result| self.cache.set_destination(server_name, result)) .map_ok(|result| (result, false)) .boxed() .await @@ -334,7 +334,7 @@ impl super::Service { debug_info!("{overname:?} overriden by {hostname:?}"); } - self.cache.set_override(overname, CachedOverride { + self.cache.set_override(overname, &CachedOverride { ips: override_ip.into_iter().take(MAX_IPS).collect(), port, expire: CachedOverride::default_expire(), diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index 11e6c9bd..657718b3 100644 --- a/src/service/resolver/cache.rs +++ b/src/service/resolver/cache.rs @@ -45,12 +45,12 @@ impl Cache { } #[implement(Cache)] -pub fn set_destination(&self, name: &ServerName, dest: CachedDest) { +pub fn set_destination(&self, name: &ServerName, dest: &CachedDest) { self.destinations.raw_put(name, Cbor(dest)); } #[implement(Cache)] -pub fn set_override(&self, name: &str, over: CachedOverride) { +pub fn set_override(&self, name: &str, over: &CachedOverride) { self.overrides.raw_put(name, Cbor(over)); } diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs index 831a1dd8..c8a64f3c 100644 --- a/src/service/sending/send.rs +++ b/src/service/sending/send.rs @@ -18,10 +18,7 @@ use ruma::{ CanonicalJsonObject, CanonicalJsonValue, ServerName, ServerSigningKeyId, }; -use crate::{ - resolver, - resolver::{actual::ActualDest, cache::CachedDest}, -}; +use crate::resolver::actual::ActualDest; impl super::Service { #[tracing::instrument( @@ -73,16 +70,7 @@ impl super::Service { debug!(?method, ?url, "Sending request"); match client.execute(request).await { - | Ok(response) => - handle_response::( - &self.services.resolver, - dest, - actual, - &method, - &url, - response, - ) - .await, + | Ok(response) => handle_response::(dest, actual, &method, &url, response).await, | Err(error) => Err(handle_error(actual, &method, &url, error).expect_err("always returns error")), } @@ -111,7 +99,6 @@ impl super::Service { } async fn handle_response( - resolver: &resolver::Service, dest: &ServerName, actual: &ActualDest, method: &Method, @@ -122,17 +109,9 @@ where T: OutgoingRequest + Send, { let response = into_http_response(dest, actual, method, url, response).await?; - let result = T::IncomingResponse::try_from_http_response(response); - if result.is_ok() && !actual.cached { - resolver.cache.set_destination(dest, CachedDest { - dest: actual.dest.clone(), - host: actual.host.clone(), - expire: CachedDest::default_expire(), - }); - } - - result.map_err(|e| err!(BadServerResponse("Server returned bad 200 response: {e:?}"))) + T::IncomingResponse::try_from_http_response(response) + .map_err(|e| err!(BadServerResponse("Server returned bad 200 response: {e:?}"))) } async fn into_http_response( From da9f1ae5d7daf68d8e8568f07d38cbf8c065634a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 22 Jan 2025 08:51:15 +0000 Subject: [PATCH 062/328] expire resolver cache entries Signed-off-by: Jason Volk --- src/core/utils/rand.rs | 2 +- src/service/resolver/cache.rs | 44 ++++++++++++++++++----------------- 2 files changed, 24 insertions(+), 22 deletions(-) diff --git a/src/core/utils/rand.rs b/src/core/utils/rand.rs index 9e6fc7a8..1d289c6e 100644 --- a/src/core/utils/rand.rs +++ b/src/core/utils/rand.rs @@ -33,7 +33,7 @@ pub fn 
string_array() -> ArrayString { #[inline] #[must_use] -pub fn timepoint_secs(range: Range) -> SystemTime { +pub fn time_from_now_secs(range: Range) -> SystemTime { SystemTime::now() .checked_add(secs(range)) .expect("range does not overflow SystemTime") diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index 657718b3..e64878d4 100644 --- a/src/service/resolver/cache.rs +++ b/src/service/resolver/cache.rs @@ -2,7 +2,7 @@ use std::{net::IpAddr, sync::Arc, time::SystemTime}; use arrayvec::ArrayVec; use conduwuit::{ - at, implement, + at, err, implement, utils::{math::Expected, rand, stream::TryIgnore}, Result, }; @@ -54,6 +54,18 @@ pub fn set_override(&self, name: &str, over: &CachedOverride) { self.overrides.raw_put(name, Cbor(over)); } +#[implement(Cache)] +#[must_use] +pub async fn has_destination(&self, destination: &ServerName) -> bool { + self.get_destination(destination).await.is_ok() +} + +#[implement(Cache)] +#[must_use] +pub async fn has_override(&self, destination: &str) -> bool { + self.get_override(destination).await.is_ok() +} + #[implement(Cache)] pub async fn get_destination(&self, name: &ServerName) -> Result { self.destinations @@ -61,6 +73,9 @@ pub async fn get_destination(&self, name: &ServerName) -> Result { .await .deserialized::>() .map(at!(0)) + .into_iter() + .find(CachedDest::valid) + .ok_or(err!(Request(NotFound("Expired from cache")))) } #[implement(Cache)] @@ -70,18 +85,9 @@ pub async fn get_override(&self, name: &str) -> Result { .await .deserialized::>() .map(at!(0)) -} - -#[implement(Cache)] -#[must_use] -pub async fn has_destination(&self, destination: &str) -> bool { - self.destinations.exists(destination).await.is_ok() -} - -#[implement(Cache)] -#[must_use] -pub async fn has_override(&self, destination: &str) -> bool { - self.overrides.exists(destination).await.is_ok() + .into_iter() + .find(CachedOverride::valid) + .ok_or(err!(Request(NotFound("Expired from cache")))) } #[implement(Cache)] @@ -103,13 +109,11 @@ pub fn overrides(&self) -> impl Stream + S impl CachedDest { #[inline] #[must_use] - pub fn valid(&self) -> bool { true } - - //pub fn valid(&self) -> bool { self.expire > SystemTime::now() } + pub fn valid(&self) -> bool { self.expire > SystemTime::now() } #[must_use] pub(crate) fn default_expire() -> SystemTime { - rand::timepoint_secs(60 * 60 * 18..60 * 60 * 36) + rand::time_from_now_secs(60 * 60 * 18..60 * 60 * 36) } #[inline] @@ -125,13 +129,11 @@ impl CachedDest { impl CachedOverride { #[inline] #[must_use] - pub fn valid(&self) -> bool { true } - - //pub fn valid(&self) -> bool { self.expire > SystemTime::now() } + pub fn valid(&self) -> bool { self.expire > SystemTime::now() } #[must_use] pub(crate) fn default_expire() -> SystemTime { - rand::timepoint_secs(60 * 60 * 6..60 * 60 * 12) + rand::time_from_now_secs(60 * 60 * 6..60 * 60 * 12) } #[inline] From 265802d54608eb10295560f54f26bd106e4930e9 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 22 Jan 2025 21:38:15 +0000 Subject: [PATCH 063/328] additional prof/stats jemalloc related interface tweaks Signed-off-by: Jason Volk --- src/core/alloc/je.rs | 61 +++++++++++++++++++++++++++++++++++++++----- 1 file changed, 55 insertions(+), 6 deletions(-) diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 119ff45e..81fbd3ea 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -26,10 +26,11 @@ metadata_thp:always\ ,background_thread:true\ ,max_background_threads:-1\ ,lg_extent_max_active_fit:4\ -,oversize_threshold:33554432\ -,tcache_max:1048576\ 
+,oversize_threshold:16777216\ +,tcache_max:2097152\ ,dirty_decay_ms:16000\ ,muzzy_decay_ms:144000\ +,prof_active:false\ \0"; #[global_allocator] @@ -120,7 +121,7 @@ unsafe extern "C" fn malloc_stats_cb(opaque: *mut c_void, msg: *const c_char) { } macro_rules! mallctl { - ($name:literal) => {{ + ($name:expr) => {{ thread_local! { static KEY: OnceCell = OnceCell::default(); }; @@ -135,6 +136,13 @@ macro_rules! mallctl { pub mod this_thread { use super::{is_nonzero, key, math, Debug, Key, OnceCell, Result}; + thread_local! { + static ALLOCATED_BYTES: OnceCell<&'static u64> = const { OnceCell::new() }; + static DEALLOCATED_BYTES: OnceCell<&'static u64> = const { OnceCell::new() }; + } + + pub fn idle() -> Result { super::notify(&mallctl!("thread.idle")) } + pub fn trim() -> Result { notify(mallctl!("arena.0.purge")) } pub fn decay() -> Result { notify(mallctl!("arena.0.decay")) } @@ -153,7 +161,7 @@ pub mod this_thread { pub fn get_dirty_decay() -> Result { get(mallctl!("arena.0.dirty_decay_ms")) } - pub fn enable_cache(enable: bool) -> Result { + pub fn cache_enable(enable: bool) -> Result { super::set::(&mallctl!("thread.tcache.enabled"), enable.into()).map(is_nonzero!()) } @@ -169,9 +177,29 @@ pub mod this_thread { super::get::(&mallctl!("thread.arena")).and_then(math::try_into) } - pub fn allocated() -> Result { super::get(&mallctl!("thread.allocated")) } + pub fn prof_enable(enable: bool) -> Result { + super::set::(&mallctl!("thread.prof.active"), enable.into()).map(is_nonzero!()) + } - pub fn deallocated() -> Result { super::get(&mallctl!("thread.deallocated")) } + pub fn is_prof_enabled() -> Result { + super::get::(&mallctl!("thread.prof.active")).map(is_nonzero!()) + } + + pub fn reset_peak() -> Result { super::notify(&mallctl!("thread.peak.reset")) } + + pub fn peak() -> Result { super::get(&mallctl!("thread.peak.read")) } + + #[inline] + #[must_use] + pub fn allocated() -> u64 { + *ALLOCATED_BYTES.with(|once| init_tls_cell(once, "thread.allocatedp")) + } + + #[inline] + #[must_use] + pub fn deallocated() -> u64 { + *DEALLOCATED_BYTES.with(|once| init_tls_cell(once, "thread.deallocatedp")) + } fn notify(key: Key) -> Result { super::notify_by_arena(Some(arena_id()?), key) } @@ -188,6 +216,27 @@ pub mod this_thread { { super::get_by_arena(Some(arena_id()?), key) } + + fn init_tls_cell(cell: &OnceCell<&'static u64>, name: &str) -> &'static u64 { + cell.get_or_init(|| { + let ptr: *const u64 = super::get(&mallctl!(name)).expect("failed to obtain pointer"); + + // SAFETY: ptr points directly to the internal state of jemalloc for this thread + unsafe { ptr.as_ref() }.expect("pointer must not be null") + }) + } +} + +pub fn stats_reset() -> Result { notify(&mallctl!("stats.mutexes.reset")) } + +pub fn prof_reset() -> Result { notify(&mallctl!("prof.reset")) } + +pub fn prof_enable(enable: bool) -> Result { + set::(&mallctl!("prof.active"), enable.into()).map(is_nonzero!()) +} + +pub fn is_prof_enabled() -> Result { + get::(&mallctl!("prof.active")).map(is_nonzero!()) } pub fn trim>>(arena: I) -> Result { From a5520e8b1bc1c4ddb9090dc9b93ef76899e58d9a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 22 Jan 2025 23:07:13 +0000 Subject: [PATCH 064/328] fix SRV override loss on cache expiration Signed-off-by: Jason Volk --- src/admin/query/resolver.rs | 10 +++--- src/service/resolver/actual.rs | 62 ++++++++++++++++++---------------- src/service/resolver/cache.rs | 9 ++--- src/service/resolver/dns.rs | 22 ++++++++++-- 4 files changed, 63 insertions(+), 40 deletions(-) diff --git 
a/src/admin/query/resolver.rs b/src/admin/query/resolver.rs index 0b6da6fd..08b5d171 100644 --- a/src/admin/query/resolver.rs +++ b/src/admin/query/resolver.rs @@ -51,12 +51,14 @@ async fn destinations_cache( async fn overrides_cache(&self, server_name: Option) -> Result { use service::resolver::cache::CachedOverride; - writeln!(self, "| Server Name | IP | Port | Expires |").await?; - writeln!(self, "| ----------- | --- | ----:| ------- |").await?; + writeln!(self, "| Server Name | IP | Port | Expires | Overriding |").await?; + writeln!(self, "| ----------- | --- | ----:| ------- | ---------- |").await?; let mut overrides = self.services.resolver.cache.overrides().boxed(); - while let Some((name, CachedOverride { ips, port, expire })) = overrides.next().await { + while let Some((name, CachedOverride { ips, port, expire, overriding })) = + overrides.next().await + { if let Some(server_name) = server_name.as_ref() { if name != server_name { continue; @@ -64,7 +66,7 @@ async fn overrides_cache(&self, server_name: Option) -> Result Result { debug!("2: Hostname with included port"); let (host, port) = dest.as_str().split_at(pos); - self.conditional_query_and_cache_override( - host, - host, - port.parse::().unwrap_or(8448), - cache, - ) - .await?; + self.conditional_query_and_cache(host, port.parse::().unwrap_or(8448), cache) + .await?; Ok(FedDest::Named( host.to_owned(), @@ -163,13 +158,8 @@ impl super::Service { ) -> Result { debug!("3.2: Hostname with port in .well-known file"); let (host, port) = delegated.split_at(pos); - self.conditional_query_and_cache_override( - host, - host, - port.parse::().unwrap_or(8448), - cache, - ) - .await?; + self.conditional_query_and_cache(host, port.parse::().unwrap_or(8448), cache) + .await?; Ok(FedDest::Named( host.to_owned(), @@ -208,7 +198,7 @@ impl super::Service { async fn actual_dest_3_4(&self, cache: bool, delegated: String) -> Result { debug!("3.4: No SRV records, just use the hostname from .well-known"); - self.conditional_query_and_cache_override(&delegated, &delegated, 8448, cache) + self.conditional_query_and_cache(&delegated, 8448, cache) .await?; Ok(add_port_to_hostname(&delegated)) } @@ -243,7 +233,7 @@ impl super::Service { async fn actual_dest_5(&self, dest: &ServerName, cache: bool) -> Result { debug!("5: No SRV record found"); - self.conditional_query_and_cache_override(dest.as_str(), dest.as_str(), 8448, cache) + self.conditional_query_and_cache(dest.as_str(), 8448, cache) .await?; Ok(add_port_to_hostname(dest.as_str())) @@ -251,9 +241,7 @@ impl super::Service { #[tracing::instrument(skip_all, name = "well-known")] async fn request_well_known(&self, dest: &str) -> Result> { - if !self.cache.has_override(dest).await { - self.query_and_cache_override(dest, dest, 8448).await?; - } + self.conditional_query_and_cache(dest, 8448, true).await?; self.services.server.check_running()?; trace!("Requesting well known for {dest}"); @@ -301,6 +289,17 @@ impl super::Service { Ok(Some(m_server.to_owned())) } + #[inline] + async fn conditional_query_and_cache( + &self, + hostname: &str, + port: u16, + cache: bool, + ) -> Result { + self.conditional_query_and_cache_override(hostname, hostname, port, cache) + .await + } + #[inline] async fn conditional_query_and_cache_override( &self, @@ -308,13 +307,17 @@ impl super::Service { hostname: &str, port: u16, cache: bool, - ) -> Result<()> { - if cache { - self.query_and_cache_override(overname, hostname, port) - .await - } else { - Ok(()) + ) -> Result { + if !cache { + return Ok(()); } + + if 
self.cache.has_override(overname).await { + return Ok(()); + } + + self.query_and_cache_override(overname, hostname, port) + .await } #[tracing::instrument(skip(self, overname, port), name = "ip")] @@ -323,21 +326,20 @@ impl super::Service { overname: &'_ str, hostname: &'_ str, port: u16, - ) -> Result<()> { + ) -> Result { self.services.server.check_running()?; debug!("querying IP for {overname:?} ({hostname:?}:{port})"); match self.resolver.resolver.lookup_ip(hostname.to_owned()).await { | Err(e) => Self::handle_resolve_error(&e, hostname), | Ok(override_ip) => { - if hostname != overname { - debug_info!("{overname:?} overriden by {hostname:?}"); - } - self.cache.set_override(overname, &CachedOverride { ips: override_ip.into_iter().take(MAX_IPS).collect(), port, expire: CachedOverride::default_expire(), + overriding: (hostname != overname) + .then_some(hostname.into()) + .inspect(|_| debug_info!("{overname:?} overriden by {hostname:?}")), }); Ok(()) diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index e64878d4..22a92865 100644 --- a/src/service/resolver/cache.rs +++ b/src/service/resolver/cache.rs @@ -30,6 +30,7 @@ pub struct CachedOverride { pub ips: IpAddrs, pub port: u16, pub expire: SystemTime, + pub overriding: Option, } pub type IpAddrs = ArrayVec; @@ -63,7 +64,10 @@ pub async fn has_destination(&self, destination: &ServerName) -> bool { #[implement(Cache)] #[must_use] pub async fn has_override(&self, destination: &str) -> bool { - self.get_override(destination).await.is_ok() + self.get_override(destination) + .await + .iter() + .any(CachedOverride::valid) } #[implement(Cache)] @@ -85,9 +89,6 @@ pub async fn get_override(&self, name: &str) -> Result { .await .deserialized::>() .map(at!(0)) - .into_iter() - .find(CachedOverride::valid) - .ok_or(err!(Request(NotFound("Expired from cache")))) } #[implement(Cache)] diff --git a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs index ad7768bc..ca6106e2 100644 --- a/src/service/resolver/dns.rs +++ b/src/service/resolver/dns.rs @@ -93,6 +93,11 @@ impl Resolve for Hooked { } } +#[tracing::instrument( + level = "debug", + skip_all, + fields(name = ?name.as_str()) +)] async fn hooked_resolve( cache: Arc, server: Arc, @@ -100,8 +105,21 @@ async fn hooked_resolve( name: Name, ) -> Result> { match cache.get_override(name.as_str()).await { - | Ok(cached) => cached_to_reqwest(cached).await, - | Err(_) => resolve_to_reqwest(server, resolver, name).boxed().await, + | Ok(cached) if cached.valid() => cached_to_reqwest(cached).await, + | Ok(CachedOverride { overriding, .. 
}) if overriding.is_some() => + resolve_to_reqwest( + server, + resolver, + overriding + .as_deref() + .map(str::parse) + .expect("overriding is set for this record") + .expect("overriding is a valid internet name"), + ) + .boxed() + .await, + + | _ => resolve_to_reqwest(server, resolver, name).boxed().await, } } From 52adae7553e896bb07aacce4224a7fe8ff1bc992 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 23 Jan 2025 20:05:20 +0000 Subject: [PATCH 065/328] add sequence method to db engine; improve engine interface/tracing Signed-off-by: Jason Volk --- src/database/engine.rs | 87 +++++++++++++++++++++++++++++------------- 1 file changed, 60 insertions(+), 27 deletions(-) diff --git a/src/database/engine.rs b/src/database/engine.rs index 8be9eecc..76b2889b 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -41,12 +41,49 @@ pub struct Engine { pub(crate) type Db = DBWithThreadMode; impl Engine { - pub(crate) fn cf(&self, name: &str) -> Arc> { - self.db - .cf_handle(name) - .expect("column must be described prior to database open") + #[tracing::instrument( + level = "info", + skip_all, + fields( + sequence = ?self.current_sequence(), + ), + )] + pub fn wait_compactions_blocking(&self) -> Result { + let mut opts = WaitForCompactOptions::default(); + opts.set_abort_on_pause(true); + opts.set_flush(false); + opts.set_timeout(0); + + self.db.wait_for_compact(&opts).map_err(map_err) } + #[tracing::instrument( + level = "info", + skip_all, + fields( + sequence = ?self.current_sequence(), + ), + )] + pub fn sort(&self) -> Result { + let flushoptions = rocksdb::FlushOptions::default(); + result(DBCommon::flush_opt(&self.db, &flushoptions)) + } + + #[tracing::instrument( + level = "debug", + skip_all, + fields( + sequence = ?self.current_sequence(), + ), + )] + pub fn update(&self) -> Result { self.db.try_catch_up_with_primary().map_err(map_err) } + + #[tracing::instrument(level = "info", skip_all)] + pub fn sync(&self) -> Result { result(DBCommon::flush_wal(&self.db, true)) } + + #[tracing::instrument(level = "debug", skip_all)] + pub fn flush(&self) -> Result { result(DBCommon::flush_wal(&self.db, false)) } + #[inline] pub(crate) fn cork(&self) { self.corks.fetch_add(1, Ordering::Relaxed); } @@ -56,28 +93,6 @@ impl Engine { #[inline] pub fn corked(&self) -> bool { self.corks.load(Ordering::Relaxed) > 0 } - #[tracing::instrument(skip(self))] - pub fn sync(&self) -> Result { result(DBCommon::flush_wal(&self.db, true)) } - - #[tracing::instrument(skip(self), level = "debug")] - pub fn flush(&self) -> Result { result(DBCommon::flush_wal(&self.db, false)) } - - #[tracing::instrument(skip(self), level = "info")] - pub fn sort(&self) -> Result { - let flushoptions = rocksdb::FlushOptions::default(); - result(DBCommon::flush_opt(&self.db, &flushoptions)) - } - - #[tracing::instrument(skip(self), level = "info")] - pub fn wait_compactions(&self) -> Result { - let mut opts = WaitForCompactOptions::default(); - opts.set_abort_on_pause(true); - opts.set_flush(false); - opts.set_timeout(0); - - self.db.wait_for_compact(&opts).map_err(map_err) - } - /// Query for database property by null-terminated name which is expected to /// have a result with an integer representation. This is intended for /// low-overhead programmatic use. 
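// Illustrative sketch only, not part of this patch: logging the WAL sequence
// around a maintenance call, mirroring the `fields(sequence = ...)` span
// annotations added above. It relies on items from this file (`Engine`,
// `current_sequence()`, `flush()`) and the module's existing imports; the
// helper name is hypothetical.
fn flush_with_sequence_log(engine: &Engine) -> Result {
	let before = engine.current_sequence();
	engine.flush()?;
	let after = engine.current_sequence();
	tracing::debug!(before, after, "flushed WAL");
	Ok(())
}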
@@ -96,6 +111,24 @@ impl Engine { .and_then(|val| val.map_or_else(|| Err!("Property {name:?} not found."), Ok)) } + pub(crate) fn cf(&self, name: &str) -> Arc> { + self.db + .cf_handle(name) + .expect("column must be described prior to database open") + } + + #[inline] + #[must_use] + #[tracing::instrument(name = "sequence", level = "debug", skip_all, fields(sequence))] + pub fn current_sequence(&self) -> u64 { + let sequence = self.db.latest_sequence_number(); + + #[cfg(debug_assertions)] + tracing::Span::current().record("sequence", sequence); + + sequence + } + #[inline] #[must_use] pub fn is_read_only(&self) -> bool { self.secondary || self.read_only } @@ -114,7 +147,7 @@ impl Drop for Engine { self.db.cancel_all_background_work(BLOCKING); info!( - sequence = %self.db.latest_sequence_number(), + sequence = %self.current_sequence(), "Closing database..." ); } From 6e7c73336c49bd43cdb143212f36fe82f749209a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 24 Jan 2025 06:12:52 +0000 Subject: [PATCH 066/328] move room version config check out of services.globals make available_room_versions() non-member associated Signed-off-by: Jason Volk --- src/api/client/capabilities.rs | 5 +++-- src/core/config/check.rs | 12 +++++++++++- src/core/info/room_version.rs | 10 +++++----- src/service/globals/mod.rs | 8 -------- 4 files changed, 19 insertions(+), 16 deletions(-) diff --git a/src/api/client/capabilities.rs b/src/api/client/capabilities.rs index 87cdb43d..7188aa23 100644 --- a/src/api/client/capabilities.rs +++ b/src/api/client/capabilities.rs @@ -1,6 +1,7 @@ use std::collections::BTreeMap; use axum::extract::State; +use conduwuit::{Result, Server}; use ruma::{ api::client::discovery::get_capabilities::{ self, Capabilities, GetLoginTokenCapability, RoomVersionStability, @@ -10,7 +11,7 @@ use ruma::{ }; use serde_json::json; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/v3/capabilities` /// @@ -21,7 +22,7 @@ pub(crate) async fn get_capabilities_route( _body: Ruma, ) -> Result { let available: BTreeMap = - services.server.available_room_versions().collect(); + Server::available_room_versions().collect(); let mut capabilities = Capabilities::default(); capabilities.room_versions = RoomVersionsCapability { diff --git a/src/core/config/check.rs b/src/core/config/check.rs index 06ae5ebb..d7be54b1 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -4,7 +4,7 @@ use either::Either; use figment::Figment; use super::DEPRECATED_KEYS; -use crate::{debug, debug_info, debug_warn, error, warn, Config, Err, Result}; +use crate::{debug, debug_info, debug_warn, error, warn, Config, Err, Result, Server}; #[allow(clippy::cognitive_complexity)] pub fn check(config: &Config) -> Result<()> { @@ -233,6 +233,16 @@ pub fn check(config: &Config) -> Result<()> { } } + if !Server::available_room_versions() + .any(|(version, _)| version == config.default_room_version) + { + return Err!(Config( + "default_room_version", + "Room version {:?} is not available", + config.default_room_version + )); + } + Ok(()) } diff --git a/src/core/info/room_version.rs b/src/core/info/room_version.rs index 40f0cf0a..b33a8562 100644 --- a/src/core/info/room_version.rs +++ b/src/core/info/room_version.rs @@ -20,6 +20,8 @@ pub const STABLE_ROOM_VERSIONS: &[RoomVersionId] = &[ pub const UNSTABLE_ROOM_VERSIONS: &[RoomVersionId] = &[RoomVersionId::V2, RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; +type RoomVersion = (RoomVersionId, RoomVersionStability); + impl crate::Server { #[inline] 
pub fn supported_room_version(&self, version: &RoomVersionId) -> bool { @@ -28,15 +30,13 @@ impl crate::Server { #[inline] pub fn supported_room_versions(&self) -> impl Iterator + '_ { - self.available_room_versions() + Self::available_room_versions() .filter(|(_, stability)| self.supported_stability(stability)) .map(at!(0)) } #[inline] - pub fn available_room_versions( - &self, - ) -> impl Iterator { + pub fn available_room_versions() -> impl Iterator { available_room_versions() } @@ -46,7 +46,7 @@ impl crate::Server { } } -pub fn available_room_versions() -> impl Iterator { +pub fn available_room_versions() -> impl Iterator { let unstable_room_versions = UNSTABLE_ROOM_VERSIONS .iter() .cloned() diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index f6ff2b09..fe84578a 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -72,14 +72,6 @@ impl crate::Service for Service { registration_token, }; - if !args - .server - .supported_room_version(&config.default_room_version) - { - error!(config=?s.config.default_room_version, fallback=?conduwuit::config::default_default_room_version(), "Room version in config isn't supported, falling back to default version"); - s.config.default_room_version = conduwuit::config::default_default_room_version(); - }; - Ok(Arc::new(s)) } From 1351d07735719525da6af3485afcc6039de67b8c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 24 Jan 2025 06:58:26 +0000 Subject: [PATCH 067/328] improve path argument to Config::load and constructions Signed-off-by: Jason Volk --- src/core/config/mod.rs | 17 ++++++++++------- src/main/server.rs | 15 +++++++++++---- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index d6983540..beaabe5d 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -4,7 +4,7 @@ pub mod proxy; use std::{ collections::{BTreeMap, BTreeSet, HashSet}, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, - path::PathBuf, + path::{Path, PathBuf}, }; use conduwuit_macros::config_example_generator; @@ -1797,14 +1797,17 @@ const DEPRECATED_KEYS: &[&str; 9] = &[ impl Config { /// Pre-initialize config - pub fn load(paths: Option<&[PathBuf]>) -> Result { - let paths_files = paths.into_iter().flatten().map(Toml::file); - + pub fn load<'a, I>(paths: I) -> Result + where + I: Iterator, + { let envs = [Env::var("CONDUIT_CONFIG"), Env::var("CONDUWUIT_CONFIG")]; - let envs_files = envs.into_iter().flatten().map(Toml::file); - let config = envs_files - .chain(paths_files) + let config = envs + .into_iter() + .flatten() + .map(Toml::file) + .chain(paths.map(Toml::file)) .fold(Figment::new(), |config, file| config.merge(file.nested())) .merge(Env::prefixed("CONDUIT_").global().split("__")) .merge(Env::prefixed("CONDUWUIT_").global().split("__")); diff --git a/src/main/server.rs b/src/main/server.rs index 359a029c..74859f2b 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::{path::PathBuf, sync::Arc}; use conduwuit::{ config::Config, @@ -35,9 +35,16 @@ impl Server { ) -> Result, Error> { let _runtime_guard = runtime.map(runtime::Handle::enter); - let raw_config = Config::load(args.config.as_deref())?; - let raw_config = crate::clap::update(raw_config, args)?; - let config = Config::new(&raw_config)?; + let config_paths = args + .config + .as_deref() + .into_iter() + .flat_map(<[_]>::iter) + .map(PathBuf::as_path); + + let config = Config::load(config_paths) + .and_then(|raw| crate::clap::update(raw, args)) + 
.and_then(|raw| Config::new(&raw))?; #[cfg(feature = "sentry_telemetry")] let sentry_guard = crate::sentry::init(&config); From 7c6b8b132aea086fc95c5a6def4af14d1c35d0f8 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 24 Jan 2025 06:15:10 +0000 Subject: [PATCH 068/328] add config reloading indirector Signed-off-by: Jason Volk --- src/core/config/manager.rs | 128 +++++++++++++++++++++++++++++++++++++ src/core/config/mod.rs | 3 +- 2 files changed, 130 insertions(+), 1 deletion(-) create mode 100644 src/core/config/manager.rs diff --git a/src/core/config/manager.rs b/src/core/config/manager.rs new file mode 100644 index 00000000..0c95ca15 --- /dev/null +++ b/src/core/config/manager.rs @@ -0,0 +1,128 @@ +use std::{ + cell::{Cell, RefCell}, + ops::Deref, + ptr, + ptr::null_mut, + sync::{ + atomic::{AtomicPtr, Ordering}, + Arc, + }, +}; + +use super::Config; +use crate::{implement, Result}; + +/// The configuration manager is an indirection to reload the configuration for +/// the server while it is running. In order to not burden or clutter the many +/// callsites which query for configuration items, this object implements Deref +/// for the actively loaded configuration. +pub struct Manager { + active: AtomicPtr, +} + +thread_local! { + static INDEX: Cell = 0.into(); + static HANDLE: RefCell = const { + RefCell::new([const { None }; HISTORY]) + }; +} + +type Handle = Option>; +type Handles = [Handle; HISTORY]; + +const HISTORY: usize = 8; + +impl Manager { + pub(crate) fn new(config: Config) -> Self { + let config = Arc::new(config); + Self { + active: AtomicPtr::new(Arc::into_raw(config).cast_mut()), + } + } +} + +impl Drop for Manager { + fn drop(&mut self) { + let config = self.active.swap(null_mut(), Ordering::AcqRel); + + // SAFETY: The active pointer was set using an Arc::into_raw(). We're obliged to + // reconstitute that into Arc otherwise it will leak. + unsafe { Arc::from_raw(config) }; + } +} + +impl Deref for Manager { + type Target = Arc; + + fn deref(&self) -> &Self::Target { HANDLE.with_borrow_mut(|handle| self.load(handle)) } +} + +/// Update the active configuration, returning prior configuration. +#[implement(Manager)] +#[tracing::instrument(skip_all)] +pub fn update(&self, config: Config) -> Result> { + let config = Arc::new(config); + let new = Arc::into_raw(config); + let old = self.active.swap(new.cast_mut(), Ordering::AcqRel); + + // SAFETY: The old active pointer was set using an Arc::into_raw(). We're + // obliged to reconstitute that into Arc otherwise it will leak. + Ok(unsafe { Arc::from_raw(old) }) +} + +#[implement(Manager)] +fn load(&self, handle: &mut [Option>]) -> &'static Arc { + let config = self.active.load(Ordering::Acquire); + + // Branch taken after config reload or first access by this thread. + if handle[INDEX.get()] + .as_ref() + .is_none_or(|handle| !ptr::eq(config, Arc::as_ptr(handle))) + { + INDEX.set(INDEX.get().wrapping_add(1).wrapping_rem(HISTORY)); + return load_miss(handle, INDEX.get(), config); + } + + let config: &Arc = handle[INDEX.get()] + .as_ref() + .expect("handle was already cached for this thread"); + + // SAFETY: The caller should not hold multiple references at a time directly + // into Config, as a subsequent reference might invalidate the thread's cache + // causing another reference to dangle. + // + // This is a highly unusual pattern as most config values are copied by value or + // used immediately without running overlap with another value. 
Even if it does + // actually occur somewhere, the window of danger is limited to the config being + // reloaded while the reference is held and another access is made by the same + // thread into a different config value. This is mitigated by creating a buffer + // of old configs rather than discarding at the earliest opportunity; the odds + // of this scenario are thus astronomical. + unsafe { std::mem::transmute(config) } +} + +#[tracing::instrument( + name = "miss", + level = "trace", + skip_all, + fields(%index, ?config) +)] +#[allow(clippy::transmute_ptr_to_ptr)] +fn load_miss( + handle: &mut [Option>], + index: usize, + config: *const Config, +) -> &'static Arc { + // SAFETY: The active pointer was set prior and always remains valid. We're + // reconstituting the Arc here but as a new reference, so the count is + // incremented. This instance will be cached in the thread-local. + let config = unsafe { + Arc::increment_strong_count(config); + Arc::from_raw(config) + }; + + // SAFETY: See the note on the transmute above. The caller should not hold more + // than one reference at a time directly into Config, as the second access + // might invalidate the thread's cache, dangling the reference to the first. + unsafe { std::mem::transmute(handle[index].insert(config)) } +} diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index beaabe5d..e459f50b 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1,4 +1,5 @@ pub mod check; +pub mod manager; pub mod proxy; use std::{ @@ -22,8 +23,8 @@ use ruma::{ use serde::{de::IgnoredAny, Deserialize}; use url::Url; -pub use self::check::check; use self::proxy::ProxyConfig; +pub use self::{check::check, manager::Manager}; use crate::{err, error::Error, utils::sys, Result}; /// All the config options for conduwuit. 
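A brief usage sketch for the configuration manager introduced above; it is illustrative only and not part of this series. It assumes only the Manager API as added here (construction via new(), update(), and Deref to Arc<Config>) together with the existing max_request_size config field; the helper names read_limit and apply_reload are hypothetical.

use std::sync::Arc;

use conduwuit_core::{
	config::{Config, Manager},
	Result,
};

// Readers treat the Manager as if it were the Config itself thanks to Deref;
// values should be copied out per access rather than borrowed across other
// config accesses, per the safety notes in manager.rs.
fn read_limit(config: &Manager) -> usize { config.max_request_size }

// A reload swaps in a freshly parsed Config; the previously active Arc is
// returned so the caller may inspect it or simply drop it.
fn apply_reload(config: &Manager, fresh: Config) -> Result<Arc<Config>> {
	config.update(fresh)
}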
From 5be07ebc0f4bbedb3f0d93d35d290720d042fd0d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 24 Jan 2025 07:02:56 +0000 Subject: [PATCH 069/328] eliminate references to services.globals.config Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 18 +++---- src/admin/federation/commands.rs | 2 +- src/admin/user/commands.rs | 8 +-- src/api/client/account.rs | 18 +++---- src/api/client/directory.rs | 6 +-- src/api/client/media.rs | 2 +- src/api/client/media_legacy.rs | 2 +- src/api/client/membership.rs | 12 ++--- src/api/client/openid.rs | 2 +- src/api/client/report.rs | 2 +- src/api/client/room/create.rs | 8 +-- src/api/client/typing.rs | 12 ++--- src/api/client/voip.rs | 2 +- src/api/router/auth.rs | 6 +-- src/api/router/request.rs | 2 +- src/api/server/invite.rs | 4 +- src/api/server/make_join.rs | 4 +- src/api/server/make_knock.rs | 4 +- src/api/server/publicrooms.rs | 2 +- src/api/server/query.rs | 2 +- src/api/server/send.rs | 4 +- src/api/server/send_join.rs | 8 +-- src/api/server/send_knock.rs | 4 +- src/service/admin/mod.rs | 2 +- src/service/globals/mod.rs | 84 ++++++++++++++++---------------- src/service/rooms/alias/mod.rs | 10 ++-- 26 files changed, 116 insertions(+), 114 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 49078dde..af7bd79f 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -170,7 +170,7 @@ pub(super) async fn get_remote_pdu_list( server: Box, force: bool, ) -> Result { - if !self.services.globals.config.allow_federation { + if !self.services.server.config.allow_federation { return Ok(RoomMessageEventContent::text_plain( "Federation is disabled on this homeserver.", )); @@ -235,7 +235,7 @@ pub(super) async fn get_remote_pdu( event_id: Box, server: Box, ) -> Result { - if !self.services.globals.config.allow_federation { + if !self.services.server.config.allow_federation { return Ok(RoomMessageEventContent::text_plain( "Federation is disabled on this homeserver.", )); @@ -419,7 +419,7 @@ pub(super) async fn change_log_level( let handles = &["console"]; if reset { - let old_filter_layer = match EnvFilter::try_new(&self.services.globals.config.log) { + let old_filter_layer = match EnvFilter::try_new(&self.services.server.config.log) { | Ok(s) => s, | Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( @@ -438,7 +438,7 @@ pub(super) async fn change_log_level( | Ok(()) => { return Ok(RoomMessageEventContent::text_plain(format!( "Successfully changed log level back to config value {}", - self.services.globals.config.log + self.services.server.config.log ))); }, | Err(e) => { @@ -554,7 +554,7 @@ pub(super) async fn first_pdu_in_room( .services .rooms .state_cache - .server_in_room(&self.services.globals.config.server_name, &room_id) + .server_in_room(&self.services.server.config.server_name, &room_id) .await { return Ok(RoomMessageEventContent::text_plain( @@ -583,7 +583,7 @@ pub(super) async fn latest_pdu_in_room( .services .rooms .state_cache - .server_in_room(&self.services.globals.config.server_name, &room_id) + .server_in_room(&self.services.server.config.server_name, &room_id) .await { return Ok(RoomMessageEventContent::text_plain( @@ -613,7 +613,7 @@ pub(super) async fn force_set_room_state_from_server( .services .rooms .state_cache - .server_in_room(&self.services.globals.config.server_name, &room_id) + .server_in_room(&self.services.server.config.server_name, &room_id) .await { return Ok(RoomMessageEventContent::text_plain( @@ -818,13 +818,13 @@ pub(super) async fn 
resolve_true_destination( server_name: Box, no_cache: bool, ) -> Result { - if !self.services.globals.config.allow_federation { + if !self.services.server.config.allow_federation { return Ok(RoomMessageEventContent::text_plain( "Federation is disabled on this homeserver.", )); } - if server_name == self.services.globals.config.server_name { + if server_name == self.services.server.config.server_name { return Ok(RoomMessageEventContent::text_plain( "Not allowed to send federation requests to ourselves. Please use `get-pdu` for \ fetching local PDUs.", diff --git a/src/admin/federation/commands.rs b/src/admin/federation/commands.rs index 75635b1b..be91ef0a 100644 --- a/src/admin/federation/commands.rs +++ b/src/admin/federation/commands.rs @@ -92,7 +92,7 @@ pub(super) async fn remote_user_in_rooms( &self, user_id: Box, ) -> Result { - if user_id.server_name() == self.services.globals.config.server_name { + if user_id.server_name() == self.services.server.config.server_name { return Ok(RoomMessageEventContent::text_plain( "User belongs to our server, please use `list-joined-rooms` user admin command \ instead.", diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 57aedd9c..64767a36 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -83,12 +83,12 @@ pub(super) async fn create_user( // content is set to the user's display name with a space before it if !self .services - .globals + .server .config .new_user_displayname_suffix .is_empty() { - write!(displayname, " {}", self.services.globals.config.new_user_displayname_suffix) + write!(displayname, " {}", self.services.server.config.new_user_displayname_suffix) .expect("should be able to write to string buffer"); } @@ -114,8 +114,8 @@ pub(super) async fn create_user( ) .await?; - if !self.services.globals.config.auto_join_rooms.is_empty() { - for room in &self.services.globals.config.auto_join_rooms { + if !self.services.server.config.auto_join_rooms.is_empty() { + for room in &self.services.server.config.auto_join_rooms { let Ok(room_id) = self.services.rooms.alias.resolve(room).await else { error!(%user_id, "Failed to resolve room alias to room ID when attempting to auto join {room}, skipping"); continue; diff --git a/src/api/client/account.rs b/src/api/client/account.rs index e6748124..cb25b276 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -299,7 +299,7 @@ pub(crate) async fn register_route( if !services.globals.new_user_displayname_suffix().is_empty() && body.appservice_info.is_none() { - write!(displayname, " {}", services.globals.config.new_user_displayname_suffix) + write!(displayname, " {}", services.server.config.new_user_displayname_suffix) .expect("should be able to write to string buffer"); } @@ -365,7 +365,7 @@ pub(crate) async fn register_route( \"{device_display_name}\"" ); - if services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin .send_message(RoomMessageEventContent::notice_plain(format!( @@ -378,7 +378,7 @@ pub(crate) async fn register_route( } else { info!("New user \"{user_id}\" registered on this server."); - if services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin .send_message(RoomMessageEventContent::notice_plain(format!( @@ -395,7 +395,7 @@ pub(crate) async fn register_route( info!("New guest user \"{user_id}\" registered on this server."); if !device_display_name.is_empty() { - if services.globals.config.admin_room_notices { + if 
services.server.config.admin_room_notices { services .admin .send_message(RoomMessageEventContent::notice_plain(format!( @@ -407,7 +407,7 @@ pub(crate) async fn register_route( } } else { #[allow(clippy::collapsible_else_if)] - if services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin .send_message(RoomMessageEventContent::notice_plain(format!( @@ -438,10 +438,10 @@ pub(crate) async fn register_route( } if body.appservice_info.is_none() - && !services.globals.config.auto_join_rooms.is_empty() + && !services.server.config.auto_join_rooms.is_empty() && (services.globals.allow_guests_auto_join_rooms() || !is_guest) { - for room in &services.globals.config.auto_join_rooms { + for room in &services.server.config.auto_join_rooms { let Ok(room_id) = services.rooms.alias.resolve(room).await else { error!( "Failed to resolve room alias to room ID when attempting to auto join \ @@ -570,7 +570,7 @@ pub(crate) async fn change_password_route( info!("User {sender_user} changed their password."); - if services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin .send_message(RoomMessageEventContent::notice_plain(format!( @@ -673,7 +673,7 @@ pub(crate) async fn deactivate_route( info!("User {sender_user} deactivated their account."); - if services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin .send_message(RoomMessageEventContent::notice_plain(format!( diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index c8faaa46..9166eed9 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -152,7 +152,7 @@ pub(crate) async fn set_room_visibility_route( match &body.visibility { | room::Visibility::Public => { - if services.globals.config.lockdown_public_room_directory + if services.server.config.lockdown_public_room_directory && !services.users.is_admin(sender_user).await && body.appservice_info.is_none() { @@ -162,7 +162,7 @@ pub(crate) async fn set_room_visibility_route( body.room_id ); - if services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin .send_text(&format!( @@ -181,7 +181,7 @@ pub(crate) async fn set_room_visibility_route( services.rooms.directory.set_public(&body.room_id); - if services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin .send_text(&format!( diff --git a/src/api/client/media.rs b/src/api/client/media.rs index e58ba626..afbc218a 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -31,7 +31,7 @@ pub(crate) async fn get_media_config_route( _body: Ruma, ) -> Result { Ok(get_media_config::v1::Response { - upload_size: ruma_from_usize(services.globals.config.max_request_size), + upload_size: ruma_from_usize(services.server.config.max_request_size), }) } diff --git a/src/api/client/media_legacy.rs b/src/api/client/media_legacy.rs index 6f54a683..29cf3069 100644 --- a/src/api/client/media_legacy.rs +++ b/src/api/client/media_legacy.rs @@ -27,7 +27,7 @@ pub(crate) async fn get_media_config_legacy_route( _body: Ruma, ) -> Result { Ok(get_media_config::v3::Response { - upload_size: ruma_from_usize(services.globals.config.max_request_size), + upload_size: ruma_from_usize(services.server.config.max_request_size), }) } diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index d94fc3c7..2e23dab9 100644 --- a/src/api/client/membership.rs +++ 
b/src/api/client/membership.rs @@ -71,7 +71,7 @@ async fn banned_room_check( if let Some(room_id) = room_id { if services.rooms.metadata.is_banned(room_id).await || services - .globals + .server .config .forbidden_remote_server_names .contains(&room_id.server_name().unwrap().to_owned()) @@ -81,12 +81,12 @@ async fn banned_room_check( attempted to join a banned room or banned room server name: {room_id}" ); - if services.globals.config.auto_deactivate_banned_room_attempts { + if services.server.config.auto_deactivate_banned_room_attempts { warn!( "Automatically deactivating user {user_id} due to attempted banned room join" ); - if services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin .send_message(RoomMessageEventContent::text_plain(format!( @@ -112,7 +112,7 @@ async fn banned_room_check( } } else if let Some(server_name) = server_name { if services - .globals + .server .config .forbidden_remote_server_names .contains(&server_name.to_owned()) @@ -122,12 +122,12 @@ async fn banned_room_check( name {server_name} that is globally forbidden. Rejecting.", ); - if services.globals.config.auto_deactivate_banned_room_attempts { + if services.server.config.auto_deactivate_banned_room_attempts { warn!( "Automatically deactivating user {user_id} due to attempted banned room join" ); - if services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin .send_message(RoomMessageEventContent::text_plain(format!( diff --git a/src/api/client/openid.rs b/src/api/client/openid.rs index 4b2ff727..3547d284 100644 --- a/src/api/client/openid.rs +++ b/src/api/client/openid.rs @@ -37,7 +37,7 @@ pub(crate) async fn create_openid_token_route( Ok(account::request_openid_token::v3::Response { access_token, token_type: TokenType::Bearer, - matrix_server_name: services.globals.config.server_name.clone(), + matrix_server_name: services.server.config.server_name.clone(), expires_in: Duration::from_secs(expires_in), }) } diff --git a/src/api/client/report.rs b/src/api/client/report.rs index fe23b7bd..2b25b518 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -50,7 +50,7 @@ pub(crate) async fn report_room_route( if !services .rooms .state_cache - .server_in_room(&services.globals.config.server_name, &body.room_id) + .server_in_room(&services.server.config.server_name, &body.room_id) .await { return Err!(Request(NotFound( diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index a78242ca..1b6e8667 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -71,7 +71,7 @@ pub(crate) async fn create_room_route( let room_id: OwnedRoomId = if let Some(custom_room_id) = &body.room_id { custom_room_id_check(&services, custom_room_id)? 
} else { - RoomId::new(&services.globals.config.server_name) + RoomId::new(&services.server.config.server_name) }; // check if room ID doesn't already exist instead of erroring on auth check @@ -83,7 +83,7 @@ pub(crate) async fn create_room_route( } if body.visibility == room::Visibility::Public - && services.globals.config.lockdown_public_room_directory + && services.server.config.lockdown_public_room_directory && !services.users.is_admin(sender_user).await && body.appservice_info.is_none() { @@ -93,7 +93,7 @@ pub(crate) async fn create_room_route( &room_id ); - if services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin .send_text(&format!( @@ -450,7 +450,7 @@ pub(crate) async fn create_room_route( if body.visibility == room::Visibility::Public { services.rooms.directory.set_public(&room_id); - if services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin .send_text(&format!( diff --git a/src/api/client/typing.rs b/src/api/client/typing.rs index 6eabe96a..b311295b 100644 --- a/src/api/client/typing.rs +++ b/src/api/client/typing.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduwuit::Err; +use conduwuit::{utils::math::Tried, Err}; use ruma::api::client::typing::create_typing_event; use crate::{utils, Result, Ruma}; @@ -31,17 +31,15 @@ pub(crate) async fn create_typing_event_route( let duration = utils::clamp( duration.as_millis().try_into().unwrap_or(u64::MAX), services - .globals + .server .config .typing_client_timeout_min_s - .checked_mul(1000) - .unwrap(), + .try_mul(1000)?, services - .globals + .server .config .typing_client_timeout_max_s - .checked_mul(1000) - .unwrap(), + .try_mul(1000)?, ); services .rooms diff --git a/src/api/client/voip.rs b/src/api/client/voip.rs index ec804570..c08b1fdf 100644 --- a/src/api/client/voip.rs +++ b/src/api/client/voip.rs @@ -38,7 +38,7 @@ pub(crate) async fn turn_server_route( let user = body.sender_user.unwrap_or_else(|| { UserId::parse_with_server_name( utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(), - &services.globals.config.server_name, + &services.server.config.server_name, ) .unwrap() }); diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index dd25e091..ecea305b 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -71,7 +71,7 @@ pub(super) async fn auth( match metadata { | &get_public_rooms::v3::Request::METADATA => { if !services - .globals + .server .config .allow_public_room_directory_without_auth { @@ -94,7 +94,7 @@ pub(super) async fn auth( | &get_display_name::v3::Request::METADATA | &get_avatar_url::v3::Request::METADATA | &get_timezone_key::unstable::Request::METADATA => { - if services.globals.config.require_auth_for_profile_requests { + if services.server.config.require_auth_for_profile_requests { match token { | Token::Appservice(_) | Token::User(_) => { // we should have validated the token above @@ -127,7 +127,7 @@ pub(super) async fn auth( }), | (AuthScheme::AccessToken, Token::None) => match metadata { | &get_turn_server_info::v3::Request::METADATA => { - if services.globals.config.turn_allow_guests { + if services.server.config.turn_allow_guests { Ok(Auth { origin: None, sender_user: None, diff --git a/src/api/router/request.rs b/src/api/router/request.rs index 627abd30..615a8bff 100644 --- a/src/api/router/request.rs +++ b/src/api/router/request.rs @@ -32,7 +32,7 @@ pub(super) async fn from( let query = serde_html_form::from_str(query) .map_err(|e| err!(Request(Unknown("Failed 
to read query parameters: {e}"))))?; - let max_body_size = services.globals.config.max_request_size; + let max_body_size = services.server.config.max_request_size; let body = axum::body::to_bytes(body, max_body_size) .await diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index 1fea268b..27a4485c 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -37,7 +37,7 @@ pub(crate) async fn create_invite_route( if let Some(server) = body.room_id.server_name() { if services - .globals + .server .config .forbidden_remote_server_names .contains(&server.to_owned()) @@ -47,7 +47,7 @@ pub(crate) async fn create_invite_route( } if services - .globals + .server .config .forbidden_remote_server_names .contains(body.origin()) diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index 3900c418..b753346c 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -42,7 +42,7 @@ pub(crate) async fn create_join_event_template_route( .await?; if services - .globals + .server .config .forbidden_remote_server_names .contains(body.origin()) @@ -59,7 +59,7 @@ pub(crate) async fn create_join_event_template_route( if let Some(server) = body.room_id.server_name() { if services - .globals + .server .config .forbidden_remote_server_names .contains(&server.to_owned()) diff --git a/src/api/server/make_knock.rs b/src/api/server/make_knock.rs index 90b9b629..423e202d 100644 --- a/src/api/server/make_knock.rs +++ b/src/api/server/make_knock.rs @@ -34,7 +34,7 @@ pub(crate) async fn create_knock_event_template_route( .await?; if services - .globals + .server .config .forbidden_remote_server_names .contains(body.origin()) @@ -51,7 +51,7 @@ pub(crate) async fn create_knock_event_template_route( if let Some(server) = body.room_id.server_name() { if services - .globals + .server .config .forbidden_remote_server_names .contains(&server.to_owned()) diff --git a/src/api/server/publicrooms.rs b/src/api/server/publicrooms.rs index 77cde15f..2c09385b 100644 --- a/src/api/server/publicrooms.rs +++ b/src/api/server/publicrooms.rs @@ -20,7 +20,7 @@ pub(crate) async fn get_public_rooms_filtered_route( body: Ruma, ) -> Result { if !services - .globals + .server .config .allow_public_room_directory_over_federation { diff --git a/src/api/server/query.rs b/src/api/server/query.rs index 0e5f7e56..69f62e94 100644 --- a/src/api/server/query.rs +++ b/src/api/server/query.rs @@ -63,7 +63,7 @@ pub(crate) async fn get_profile_information_route( body: Ruma, ) -> Result { if !services - .globals + .server .config .allow_inbound_profile_lookup_federation_requests { diff --git a/src/api/server/send.rs b/src/api/server/send.rs index 56a17c22..eec9bd11 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -309,7 +309,7 @@ async fn handle_edu_typing( origin: &ServerName, typing: TypingContent, ) { - if !services.globals.config.allow_incoming_typing { + if !services.server.config.allow_incoming_typing { return; } @@ -344,7 +344,7 @@ async fn handle_edu_typing( if typing.typing { let timeout = utils::millis_since_unix_epoch().saturating_add( services - .globals + .server .config .typing_federation_timeout_s .saturating_mul(1000), diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index 97a65bf8..e62089b4 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -268,7 +268,7 @@ pub(crate) async fn create_join_event_v1_route( body: Ruma, ) -> Result { if services - .globals + .server .config .forbidden_remote_server_names 
.contains(body.origin()) @@ -284,7 +284,7 @@ pub(crate) async fn create_join_event_v1_route( if let Some(server) = body.room_id.server_name() { if services - .globals + .server .config .forbidden_remote_server_names .contains(&server.to_owned()) @@ -316,7 +316,7 @@ pub(crate) async fn create_join_event_v2_route( body: Ruma, ) -> Result { if services - .globals + .server .config .forbidden_remote_server_names .contains(body.origin()) @@ -326,7 +326,7 @@ pub(crate) async fn create_join_event_v2_route( if let Some(server) = body.room_id.server_name() { if services - .globals + .server .config .forbidden_remote_server_names .contains(&server.to_owned()) diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index 95478081..b07620af 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -22,7 +22,7 @@ pub(crate) async fn create_knock_event_v1_route( body: Ruma, ) -> Result { if services - .globals + .server .config .forbidden_remote_server_names .contains(body.origin()) @@ -38,7 +38,7 @@ pub(crate) async fn create_knock_event_v1_route( if let Some(server) = body.room_id.server_name() { if services - .globals + .server .config .forbidden_remote_server_names .contains(&server.to_owned()) diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 399055aa..bc410631 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -338,7 +338,7 @@ impl Service { } // Check if server-side command-escape is disabled by configuration - if is_public_escape && !self.services.globals.config.admin_escape_commands { + if is_public_escape && !self.services.server.config.admin_escape_commands { return false; } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index fe84578a..ef34054f 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -7,7 +7,7 @@ use std::{ time::Instant, }; -use conduwuit::{error, utils::bytes::pretty, Config, Result}; +use conduwuit::{error, utils::bytes::pretty, Result, Server}; use data::Data; use regex::RegexSet; use ruma::{OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, ServerName, UserId}; @@ -16,8 +16,8 @@ use crate::service; pub struct Service { pub db: Data, + server: Arc, - pub config: Config, pub bad_event_ratelimiter: Arc>>, pub server_user: OwnedUserId, pub admin_alias: OwnedRoomAliasId, @@ -57,9 +57,9 @@ impl crate::Service for Service { }, ); - let mut s = Self { + Ok(Arc::new(Self { db, - config: config.clone(), + server: args.server.clone(), bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), admin_alias: OwnedRoomAliasId::try_from(format!("#admins:{}", &config.server_name)) .expect("#admins:server_name is valid alias name"), @@ -70,9 +70,7 @@ impl crate::Service for Service { .expect("@conduit:server_name is valid"), turn_secret, registration_token, - }; - - Ok(Arc::new(s)) + })) } fn memory_usage(&self, out: &mut dyn Write) -> Result { @@ -109,93 +107,97 @@ impl Service { pub fn current_count(&self) -> Result { Ok(self.db.current_count()) } #[inline] - pub fn server_name(&self) -> &ServerName { self.config.server_name.as_ref() } + pub fn server_name(&self) -> &ServerName { self.server.config.server_name.as_ref() } - pub fn allow_registration(&self) -> bool { self.config.allow_registration } + pub fn allow_registration(&self) -> bool { self.server.config.allow_registration } - pub fn allow_guest_registration(&self) -> bool { self.config.allow_guest_registration } + pub fn allow_guest_registration(&self) -> bool { 
self.server.config.allow_guest_registration } pub fn allow_guests_auto_join_rooms(&self) -> bool { - self.config.allow_guests_auto_join_rooms + self.server.config.allow_guests_auto_join_rooms } - pub fn log_guest_registrations(&self) -> bool { self.config.log_guest_registrations } + pub fn log_guest_registrations(&self) -> bool { self.server.config.log_guest_registrations } - pub fn allow_encryption(&self) -> bool { self.config.allow_encryption } + pub fn allow_encryption(&self) -> bool { self.server.config.allow_encryption } - pub fn allow_federation(&self) -> bool { self.config.allow_federation } + pub fn allow_federation(&self) -> bool { self.server.config.allow_federation } pub fn allow_public_room_directory_over_federation(&self) -> bool { - self.config.allow_public_room_directory_over_federation + self.server + .config + .allow_public_room_directory_over_federation } pub fn allow_device_name_federation(&self) -> bool { - self.config.allow_device_name_federation + self.server.config.allow_device_name_federation } - pub fn allow_room_creation(&self) -> bool { self.config.allow_room_creation } + pub fn allow_room_creation(&self) -> bool { self.server.config.allow_room_creation } pub fn new_user_displayname_suffix(&self) -> &String { - &self.config.new_user_displayname_suffix + &self.server.config.new_user_displayname_suffix } - pub fn allow_check_for_updates(&self) -> bool { self.config.allow_check_for_updates } + pub fn allow_check_for_updates(&self) -> bool { self.server.config.allow_check_for_updates } - pub fn trusted_servers(&self) -> &[OwnedServerName] { &self.config.trusted_servers } + pub fn trusted_servers(&self) -> &[OwnedServerName] { &self.server.config.trusted_servers } - pub fn turn_password(&self) -> &String { &self.config.turn_password } + pub fn turn_password(&self) -> &String { &self.server.config.turn_password } - pub fn turn_ttl(&self) -> u64 { self.config.turn_ttl } + pub fn turn_ttl(&self) -> u64 { self.server.config.turn_ttl } - pub fn turn_uris(&self) -> &[String] { &self.config.turn_uris } + pub fn turn_uris(&self) -> &[String] { &self.server.config.turn_uris } - pub fn turn_username(&self) -> &String { &self.config.turn_username } + pub fn turn_username(&self) -> &String { &self.server.config.turn_username } - pub fn notification_push_path(&self) -> &String { &self.config.notification_push_path } + pub fn notification_push_path(&self) -> &String { &self.server.config.notification_push_path } - pub fn emergency_password(&self) -> &Option { &self.config.emergency_password } + pub fn emergency_password(&self) -> &Option { &self.server.config.emergency_password } pub fn url_preview_domain_contains_allowlist(&self) -> &Vec { - &self.config.url_preview_domain_contains_allowlist + &self.server.config.url_preview_domain_contains_allowlist } pub fn url_preview_domain_explicit_allowlist(&self) -> &Vec { - &self.config.url_preview_domain_explicit_allowlist + &self.server.config.url_preview_domain_explicit_allowlist } pub fn url_preview_domain_explicit_denylist(&self) -> &Vec { - &self.config.url_preview_domain_explicit_denylist + &self.server.config.url_preview_domain_explicit_denylist } pub fn url_preview_url_contains_allowlist(&self) -> &Vec { - &self.config.url_preview_url_contains_allowlist + &self.server.config.url_preview_url_contains_allowlist } - pub fn url_preview_max_spider_size(&self) -> usize { self.config.url_preview_max_spider_size } + pub fn url_preview_max_spider_size(&self) -> usize { + self.server.config.url_preview_max_spider_size + } pub fn 
url_preview_check_root_domain(&self) -> bool { - self.config.url_preview_check_root_domain + self.server.config.url_preview_check_root_domain } - pub fn forbidden_alias_names(&self) -> &RegexSet { &self.config.forbidden_alias_names } + pub fn forbidden_alias_names(&self) -> &RegexSet { &self.server.config.forbidden_alias_names } - pub fn forbidden_usernames(&self) -> &RegexSet { &self.config.forbidden_usernames } + pub fn forbidden_usernames(&self) -> &RegexSet { &self.server.config.forbidden_usernames } - pub fn allow_local_presence(&self) -> bool { self.config.allow_local_presence } + pub fn allow_local_presence(&self) -> bool { self.server.config.allow_local_presence } - pub fn allow_incoming_presence(&self) -> bool { self.config.allow_incoming_presence } + pub fn allow_incoming_presence(&self) -> bool { self.server.config.allow_incoming_presence } - pub fn allow_outgoing_presence(&self) -> bool { self.config.allow_outgoing_presence } + pub fn allow_outgoing_presence(&self) -> bool { self.server.config.allow_outgoing_presence } pub fn allow_incoming_read_receipts(&self) -> bool { - self.config.allow_incoming_read_receipts + self.server.config.allow_incoming_read_receipts } pub fn allow_outgoing_read_receipts(&self) -> bool { - self.config.allow_outgoing_read_receipts + self.server.config.allow_outgoing_read_receipts } - pub fn block_non_admin_invites(&self) -> bool { self.config.block_non_admin_invites } + pub fn block_non_admin_invites(&self) -> bool { self.server.config.block_non_admin_invites } /// checks if `user_id` is local to us via server_name comparison #[inline] @@ -205,7 +207,7 @@ impl Service { #[inline] pub fn server_is_ours(&self, server_name: &ServerName) -> bool { - server_name == self.config.server_name + server_name == self.server.config.server_name } #[inline] diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 0acbb116..91797d01 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -5,7 +5,7 @@ use std::sync::Arc; use conduwuit::{ err, utils::{stream::TryIgnore, ReadyExt}, - Err, Result, + Err, Result, Server, }; use database::{Deserialized, Ignore, Interfix, Map}; use futures::{Stream, StreamExt, TryFutureExt}; @@ -31,6 +31,7 @@ struct Data { } struct Services { + server: Arc, admin: Dep, appservice: Dep, globals: Dep, @@ -47,6 +48,7 @@ impl crate::Service for Service { aliasid_alias: args.db["aliasid_alias"].clone(), }, services: Services { + server: args.server.clone(), admin: args.depend::("admin"), appservice: args.depend::("appservice"), globals: args.depend::("globals"), @@ -146,9 +148,9 @@ impl Service { let server_name = room_alias.server_name(); let server_is_ours = self.services.globals.server_is_ours(server_name); let servers_contains_ours = || { - servers.as_ref().is_some_and(|servers| { - servers.contains(&self.services.globals.config.server_name) - }) + servers + .as_ref() + .is_some_and(|servers| servers.contains(&self.services.server.config.server_name)) }; if !server_is_ours && !servers_contains_ours() { From b5c167de121e17696c2542b34a6b7904dade8c21 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 24 Jan 2025 11:29:36 +0000 Subject: [PATCH 070/328] call decay prior to purge for trim-memory Signed-off-by: Jason Volk --- src/core/alloc/je.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 81fbd3ea..6bdf8b33 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -141,12 +141,14 @@ pub mod this_thread 
{ static DEALLOCATED_BYTES: OnceCell<&'static u64> = const { OnceCell::new() }; } - pub fn idle() -> Result { super::notify(&mallctl!("thread.idle")) } + pub fn trim() -> Result { decay().and_then(|()| purge()) } - pub fn trim() -> Result { notify(mallctl!("arena.0.purge")) } + pub fn purge() -> Result { notify(mallctl!("arena.0.purge")) } pub fn decay() -> Result { notify(mallctl!("arena.0.decay")) } + pub fn idle() -> Result { super::notify(&mallctl!("thread.idle")) } + pub fn flush() -> Result { super::notify(&mallctl!("thread.tcache.flush")) } pub fn set_muzzy_decay(decay_ms: isize) -> Result { @@ -239,7 +241,11 @@ pub fn is_prof_enabled() -> Result { get::(&mallctl!("prof.active")).map(is_nonzero!()) } -pub fn trim>>(arena: I) -> Result { +pub fn trim> + Copy>(arena: I) -> Result { + decay(arena).and_then(|()| purge(arena)) +} + +pub fn purge>>(arena: I) -> Result { notify_by_arena(arena.into(), mallctl!("arena.4096.purge")) } From 184a3b0f0cccfbd0a6f4d95f65504b4d1d9bb21f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 23 Jan 2025 19:07:13 +0000 Subject: [PATCH 071/328] reduce some tracing span levels; bump ruma Signed-off-by: Jason Volk --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- src/api/client/media_legacy.rs | 10 +++++----- src/api/server/media.rs | 14 ++++++++++++-- src/service/resolver/actual.rs | 24 ++++++++++++------------ src/service/rooms/timeline/mod.rs | 4 ++-- 6 files changed, 45 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7985a411..cd914dfd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3188,7 +3188,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "assign", "js_int", @@ -3210,7 +3210,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "js_int", "ruma-common", @@ -3222,7 +3222,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "as_variant", "assign", @@ -3245,7 +3245,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "as_variant", "base64 0.22.1", @@ -3276,7 +3276,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "as_variant", "indexmap 2.7.0", @@ -3301,7 +3301,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "bytes", "http", @@ -3319,7 +3319,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3328,7 +3328,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "js_int", "ruma-common", @@ -3338,7 +3338,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3353,7 +3353,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "js_int", "ruma-common", @@ -3365,7 +3365,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "headers", "http", @@ -3378,7 +3378,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3394,7 +3394,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "futures-util", "js_int", diff --git 
a/Cargo.toml b/Cargo.toml index b8c145ca..d52ce974 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -332,7 +332,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "427877d5bc14988ed877e500bbb27f8bc08b84e8" +rev = "b560338b2a50dbf61ecfe80808b9b095ad4cec00" features = [ "compat", "rand", diff --git a/src/api/client/media_legacy.rs b/src/api/client/media_legacy.rs index 29cf3069..4fa0b52e 100644 --- a/src/api/client/media_legacy.rs +++ b/src/api/client/media_legacy.rs @@ -50,7 +50,7 @@ pub(crate) async fn get_media_config_legacy_legacy_route( /// # `GET /_matrix/media/v3/preview_url` /// /// Returns URL preview. -#[tracing::instrument(skip_all, fields(%client), name = "url_preview_legacy")] +#[tracing::instrument(skip_all, fields(%client), name = "url_preview_legacy", level = "debug")] pub(crate) async fn get_media_preview_legacy_route( State(services): State, InsecureClientIp(client): InsecureClientIp, @@ -131,7 +131,7 @@ pub(crate) async fn create_content_legacy_route( /// - Only redirects if `allow_redirect` is true /// - Uses client-provided `timeout_ms` if available, else defaults to 20 /// seconds -#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy")] +#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy", level = "debug")] pub(crate) async fn get_content_legacy_route( State(services): State, InsecureClientIp(client): InsecureClientIp, @@ -197,7 +197,7 @@ pub(crate) async fn get_content_legacy_route( /// - Only redirects if `allow_redirect` is true /// - Uses client-provided `timeout_ms` if available, else defaults to 20 /// seconds -#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy")] +#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy", level = "debug")] pub(crate) async fn get_content_legacy_legacy_route( State(services): State, InsecureClientIp(client): InsecureClientIp, @@ -216,7 +216,7 @@ pub(crate) async fn get_content_legacy_legacy_route( /// - Only redirects if `allow_redirect` is true /// - Uses client-provided `timeout_ms` if available, else defaults to 20 /// seconds -#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy")] +#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy", level = "debug")] pub(crate) async fn get_content_as_filename_legacy_route( State(services): State, InsecureClientIp(client): InsecureClientIp, @@ -303,7 +303,7 @@ pub(crate) async fn get_content_as_filename_legacy_legacy_route( /// - Only redirects if `allow_redirect` is true /// - Uses client-provided `timeout_ms` if available, else defaults to 20 /// seconds -#[tracing::instrument(skip_all, fields(%client), name = "media_thumbnail_get_legacy")] +#[tracing::instrument(skip_all, fields(%client), name = "media_thumbnail_get_legacy", level = "debug")] pub(crate) async fn get_content_thumbnail_legacy_route( State(services): State, InsecureClientIp(client): InsecureClientIp, diff --git a/src/api/server/media.rs b/src/api/server/media.rs index 03ec7b51..e56f5b9d 100644 --- a/src/api/server/media.rs +++ b/src/api/server/media.rs @@ -14,7 +14,12 @@ use crate::Ruma; /// # `GET /_matrix/federation/v1/media/download/{mediaId}` /// /// Load media from our server. 
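For context on the `level = "debug"` attributes added in this patch: `tracing::instrument` spans default to the INFO level, so demoting these per-request media and resolver spans to DEBUG keeps them out of INFO-level output while still recording them under a debug filter (for example `RUST_LOG=debug` with a typical `tracing_subscriber` EnvFilter). A self-contained illustration using the `tracing` crate's `attributes` feature; the handler itself is hypothetical, not conduwuit code:

// Sketch: this span is only recorded when the active filter enables DEBUG
// for this target; under an INFO-only filter it is skipped entirely.
#[tracing::instrument(name = "media_get", level = "debug", skip_all, fields(%media_id))]
async fn handle_media_request(media_id: &str) {
    tracing::debug!("serving media");
}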
-#[tracing::instrument(skip_all, fields(%client), name = "media_get")] +#[tracing::instrument( + name = "media_get", + level = "debug", + skip_all, + fields(%client) +)] pub(crate) async fn get_content_route( State(services): State, InsecureClientIp(client): InsecureClientIp, @@ -51,7 +56,12 @@ pub(crate) async fn get_content_route( /// # `GET /_matrix/federation/v1/media/thumbnail/{mediaId}` /// /// Load media thumbnail from our server. -#[tracing::instrument(skip_all, fields(%client), name = "media_thumbnail_get")] +#[tracing::instrument( + name = "media_thumbnail_get", + level = "debug", + skip_all, + fields(%client) +)] pub(crate) async fn get_content_thumbnail_route( State(services): State, InsecureClientIp(client): InsecureClientIp, diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index c5451c58..33374240 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -58,7 +58,7 @@ impl super::Service { /// Implemented according to the specification at /// Numbers in comments below refer to bullet points in linked section of /// specification - #[tracing::instrument(skip(self, cache), name = "actual")] + #[tracing::instrument(name = "actual", level = "debug", skip(self, cache))] pub async fn resolve_actual_dest( &self, dest: &ServerName, @@ -239,7 +239,7 @@ impl super::Service { Ok(add_port_to_hostname(dest.as_str())) } - #[tracing::instrument(skip_all, name = "well-known")] + #[tracing::instrument(name = "well-known", level = "debug", skip(self, dest))] async fn request_well_known(&self, dest: &str) -> Result> { self.conditional_query_and_cache(dest, 8448, true).await?; @@ -303,7 +303,7 @@ impl super::Service { #[inline] async fn conditional_query_and_cache_override( &self, - overname: &str, + untername: &str, hostname: &str, port: u16, cache: bool, @@ -312,34 +312,34 @@ impl super::Service { return Ok(()); } - if self.cache.has_override(overname).await { + if self.cache.has_override(untername).await { return Ok(()); } - self.query_and_cache_override(overname, hostname, port) + self.query_and_cache_override(untername, hostname, port) .await } - #[tracing::instrument(skip(self, overname, port), name = "ip")] + #[tracing::instrument(name = "ip", level = "debug", skip(self))] async fn query_and_cache_override( &self, - overname: &'_ str, + untername: &'_ str, hostname: &'_ str, port: u16, ) -> Result { self.services.server.check_running()?; - debug!("querying IP for {overname:?} ({hostname:?}:{port})"); + debug!("querying IP for {untername:?} ({hostname:?}:{port})"); match self.resolver.resolver.lookup_ip(hostname.to_owned()).await { | Err(e) => Self::handle_resolve_error(&e, hostname), | Ok(override_ip) => { - self.cache.set_override(overname, &CachedOverride { + self.cache.set_override(untername, &CachedOverride { ips: override_ip.into_iter().take(MAX_IPS).collect(), port, expire: CachedOverride::default_expire(), - overriding: (hostname != overname) + overriding: (hostname != untername) .then_some(hostname.into()) - .inspect(|_| debug_info!("{overname:?} overriden by {hostname:?}")), + .inspect(|_| debug_info!("{untername:?} overriden by {hostname:?}")), }); Ok(()) @@ -347,7 +347,7 @@ impl super::Service { } } - #[tracing::instrument(skip_all, name = "srv")] + #[tracing::instrument(name = "srv", level = "debug", skip(self))] async fn query_srv_record(&self, hostname: &'_ str) -> Result> { let hostnames = [format!("_matrix-fed._tcp.{hostname}."), format!("_matrix._tcp.{hostname}.")]; diff --git a/src/service/rooms/timeline/mod.rs 
b/src/service/rooms/timeline/mod.rs index bd60e40e..362bfab5 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1017,7 +1017,7 @@ impl Service { } /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] + #[tracing::instrument(name = "redact", level = "debug", skip(self))] pub async fn redact_pdu( &self, event_id: &EventId, @@ -1053,7 +1053,7 @@ impl Service { self.replace_pdu(&pdu_id, &obj, &pdu).await } - #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(name = "backfill", level = "debug", skip(self))] pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> { if self .services From b1b6dc0479538a207b3cf62ac90b58abb38ae103 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 24 Jan 2025 07:04:29 +0000 Subject: [PATCH 072/328] reloadable configuration Signed-off-by: Jason Volk --- src/admin/server/commands.rs | 22 +++++++++++++++++++--- src/admin/server/mod.rs | 7 +++++++ src/core/server.rs | 6 +++--- 3 files changed, 29 insertions(+), 6 deletions(-) diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index 6469a0e9..3ea27883 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -1,6 +1,6 @@ -use std::{fmt::Write, sync::Arc}; +use std::{fmt::Write, path::PathBuf, sync::Arc}; -use conduwuit::{info, utils::time, warn, Err, Result}; +use conduwuit::{info, utils::time, warn, Config, Err, Result}; use ruma::events::room::message::RoomMessageEventContent; use crate::admin_command; @@ -23,10 +23,26 @@ pub(super) async fn show_config(&self) -> Result { // Construct and send the response Ok(RoomMessageEventContent::text_markdown(format!( "{}", - self.services.server.config + *self.services.server.config ))) } +#[admin_command] +pub(super) async fn reload_config( + &self, + path: Option, +) -> Result { + let path = path.as_deref().into_iter(); + let config = Config::load(path).and_then(|raw| Config::new(&raw))?; + if config.server_name != self.services.server.config.server_name { + return Err!("You can't change the server name."); + } + + let _old = self.services.server.config.update(config)?; + + Ok(RoomMessageEventContent::text_plain("Successfully reconfigured.")) +} + #[admin_command] pub(super) async fn list_features( &self, diff --git a/src/admin/server/mod.rs b/src/admin/server/mod.rs index 69ad7953..3f3d6c5e 100644 --- a/src/admin/server/mod.rs +++ b/src/admin/server/mod.rs @@ -1,5 +1,7 @@ mod commands; +use std::path::PathBuf; + use clap::Subcommand; use conduwuit::Result; @@ -14,6 +16,11 @@ pub(super) enum ServerCommand { /// - Show configuration values ShowConfig, + /// - Reload configuration values + ReloadConfig { + path: Option, + }, + /// - List the features built into the server ListFeatures { #[arg(short, long)] diff --git a/src/core/server.rs b/src/core/server.rs index 948eea36..6838c9c9 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -8,12 +8,12 @@ use std::{ use tokio::{runtime, sync::broadcast}; -use crate::{config::Config, err, log::Log, metrics::Metrics, Err, Result}; +use crate::{config, config::Config, err, log::Log, metrics::Metrics, Err, Result}; /// Server runtime state; public portion pub struct Server { /// Server-wide configuration instance - pub config: Config, + pub config: config::Manager, /// Timestamp server was started; used for uptime. 
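The `config::Manager` type that replaces the plain `Config` field is not included in this patch; as a rough sketch of the hot-swap pattern its call sites imply (the admin command dereferences it for display and calls `update(new)`, keeping the returned old value), with every name below assumed rather than taken from conduwuit:

// Hypothetical reload-able config handle: readers clone an Arc snapshot,
// update() swaps in a new config and hands back the previous one.
use std::sync::{Arc, RwLock};

pub struct Manager<C> {
    current: RwLock<Arc<C>>,
}

impl<C> Manager<C> {
    pub fn new(initial: C) -> Self {
        Self { current: RwLock::new(Arc::new(initial)) }
    }

    /// Swap in `next` and return the previous snapshot.
    pub fn update(&self, next: C) -> Arc<C> {
        let mut guard = self.current.write().expect("config lock poisoned");
        std::mem::replace(&mut *guard, Arc::new(next))
    }

    /// Current snapshot for readers.
    pub fn get(&self) -> Arc<C> {
        self.current.read().expect("config lock poisoned").clone()
    }
}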
pub started: SystemTime, @@ -46,7 +46,7 @@ impl Server { #[must_use] pub fn new(config: Config, runtime: Option, log: Log) -> Self { Self { - config, + config: config::Manager::new(config), started: SystemTime::now(), stopping: AtomicBool::new(false), reloading: AtomicBool::new(false), From d59f68a51aa35e7e5491da79d667ad4dd497be5e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 24 Jan 2025 22:49:10 +0000 Subject: [PATCH 073/328] add sensitive-field directives to config display Signed-off-by: Jason Volk --- src/core/config/mod.rs | 9 +++++++++ src/macros/config.rs | 27 ++++++++++++++++++++++----- 2 files changed, 31 insertions(+), 5 deletions(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index e459f50b..c541c7e4 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -502,6 +502,8 @@ pub struct Config { /// YOU NEED TO EDIT THIS OR USE registration_token_file. /// /// example: "o&^uCtes4HPf0Vu@F20jQeeWE7" + /// + /// display: sensitive pub registration_token: Option, /// Path to a file on the system that gets read for the registration token. @@ -793,6 +795,8 @@ pub struct Config { /// Static TURN password to provide the client if not using a shared secret /// ("turn_secret"). It is recommended to use a shared secret over static /// credentials. + /// + /// display: sensitive #[serde(default)] pub turn_password: String, @@ -814,6 +818,8 @@ pub struct Config { /// /// This is more secure, but if needed you can use traditional static /// username/password credentials. + /// + /// display: sensitive #[serde(default)] pub turn_secret: String, @@ -1111,6 +1117,8 @@ pub struct Config { /// security purposes. /// /// example: "F670$2CP@Hw8mG7RY1$%!#Ic7YA" + /// + /// display: sensitive pub emergency_password: Option, /// default: "/_matrix/push/v1/notify" @@ -1560,6 +1568,7 @@ pub struct Config { /// Sentry reporting URL, if a custom one is desired. /// + /// display: sensitive /// default: "https://fe2eb4536aa04949e28eff3128d64757@o4506996327251968.ingest.us.sentry.io/4506996334657536" #[serde(default = "default_sentry_endpoint")] pub sentry_endpoint: Option, diff --git a/src/macros/config.rs b/src/macros/config.rs index 90d6ef15..50feefa8 100644 --- a/src/macros/config.rs +++ b/src/macros/config.rs @@ -15,7 +15,7 @@ use crate::{ const UNDOCUMENTED: &str = "# This item is undocumented. 
Please contribute documentation for it."; -const HIDDEN: &[&str] = &["default"]; +const HIDDEN: &[&str] = &["default", "display"]; #[allow(clippy::needless_pass_by_value)] pub(super) fn example_generator(input: ItemStruct, args: &[Meta]) -> Result { @@ -121,10 +121,27 @@ fn generate_example(input: &ItemStruct, args: &[Meta], write: bool) -> Result Date: Fri, 24 Jan 2025 23:45:35 +0000 Subject: [PATCH 074/328] fix missing iteration-optimized read options on several stream types Signed-off-by: Jason Volk --- src/database/map.rs | 3 ++- src/database/map/options.rs | 29 +++++++++++++++++++---------- src/database/map/rev_stream.rs | 4 ++-- src/database/map/rev_stream_from.rs | 2 +- src/database/map/stream.rs | 4 ++-- src/database/map/stream_from.rs | 4 ++-- 6 files changed, 28 insertions(+), 18 deletions(-) diff --git a/src/database/map.rs b/src/database/map.rs index 33cae594..97e90659 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -34,7 +34,8 @@ use conduwuit::Result; use rocksdb::{AsColumnFamilyRef, ColumnFamily, ReadOptions, WriteOptions}; pub(crate) use self::options::{ - cache_read_options_default, iter_options_default, read_options_default, write_options_default, + cache_iter_options_default, cache_read_options_default, iter_options_default, + read_options_default, write_options_default, }; use crate::{watchers::Watchers, Engine}; diff --git a/src/database/map/options.rs b/src/database/map/options.rs index 90dc0261..f726036d 100644 --- a/src/database/map/options.rs +++ b/src/database/map/options.rs @@ -2,24 +2,33 @@ use rocksdb::{ReadOptions, ReadTier, WriteOptions}; #[inline] pub(crate) fn iter_options_default() -> ReadOptions { - let mut read_options = read_options_default(); - read_options.set_background_purge_on_iterator_cleanup(true); - //read_options.set_pin_data(true); - read_options + let mut options = read_options_default(); + options.set_background_purge_on_iterator_cleanup(true); + //options.set_pin_data(true); + options +} + +#[inline] +pub(crate) fn cache_iter_options_default() -> ReadOptions { + let mut options = cache_read_options_default(); + options.set_background_purge_on_iterator_cleanup(true); + //options.set_pin_data(true); + options } #[inline] pub(crate) fn cache_read_options_default() -> ReadOptions { - let mut read_options = read_options_default(); - read_options.set_read_tier(ReadTier::BlockCache); - read_options + let mut options = read_options_default(); + options.set_read_tier(ReadTier::BlockCache); + options.fill_cache(false); + options } #[inline] pub(crate) fn read_options_default() -> ReadOptions { - let mut read_options = ReadOptions::default(); - read_options.set_total_order_seek(true); - read_options + let mut options = ReadOptions::default(); + options.set_total_order_seek(true); + options } #[inline] diff --git a/src/database/map/rev_stream.rs b/src/database/map/rev_stream.rs index 1d5d3d10..56b20b9b 100644 --- a/src/database/map/rev_stream.rs +++ b/src/database/map/rev_stream.rs @@ -31,7 +31,7 @@ where pub fn rev_raw_stream(self: &Arc) -> impl Stream>> + Send { use crate::pool::Seek; - let opts = super::read_options_default(); + let opts = super::iter_options_default(); let state = stream::State::new(self, opts); if is_cached(self) { let state = state.init_rev(None); @@ -66,7 +66,7 @@ pub fn rev_raw_stream(self: &Arc) -> impl Stream> fields(%map), )] pub(super) fn is_cached(map: &Arc) -> bool { - let opts = super::cache_read_options_default(); + let opts = super::cache_iter_options_default(); let state = stream::State::new(map, 
opts).init_rev(None); !state.is_incomplete() diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs index 1b66e8cc..83832bdd 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -118,7 +118,7 @@ pub(super) fn is_cached
(map: &Arc, from: &P) -> bool where P: AsRef<[u8]> + ?Sized, { - let cache_opts = super::cache_read_options_default(); + let cache_opts = super::cache_iter_options_default(); let cache_status = stream::State::new(map, cache_opts) .init_rev(from.as_ref().into()) .status(); diff --git a/src/database/map/stream.rs b/src/database/map/stream.rs index fa3b0ad7..f1b5fdc3 100644 --- a/src/database/map/stream.rs +++ b/src/database/map/stream.rs @@ -30,7 +30,7 @@ where pub fn raw_stream(self: &Arc) -> impl Stream>> + Send { use crate::pool::Seek; - let opts = super::read_options_default(); + let opts = super::iter_options_default(); let state = stream::State::new(self, opts); if is_cached(self) { let state = state.init_fwd(None); @@ -65,7 +65,7 @@ pub fn raw_stream(self: &Arc) -> impl Stream>> + fields(%map), )] pub(super) fn is_cached(map: &Arc) -> bool { - let opts = super::cache_read_options_default(); + let opts = super::cache_iter_options_default(); let state = stream::State::new(map, opts).init_fwd(None); !state.is_incomplete() diff --git a/src/database/map/stream_from.rs b/src/database/map/stream_from.rs index 4296b6f6..562ab6b1 100644 --- a/src/database/map/stream_from.rs +++ b/src/database/map/stream_from.rs @@ -77,7 +77,7 @@ where { use crate::pool::Seek; - let opts = super::read_options_default(); + let opts = super::iter_options_default(); let state = stream::State::new(self, opts); if is_cached(self, from) { let state = state.init_fwd(from.as_ref().into()); @@ -115,7 +115,7 @@ pub(super) fn is_cached
(map: &Arc, from: &P) -> bool where P: AsRef<[u8]> + ?Sized, { - let opts = super::cache_read_options_default(); + let opts = super::cache_iter_options_default(); let state = stream::State::new(map, opts).init_fwd(from.as_ref().into()); !state.is_incomplete() From 72daf7ea6816ebf46c43b02e137d58bed5bee883 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 25 Jan 2025 02:01:20 -0500 Subject: [PATCH 075/328] bump rocksdb to v9.10.0, reverts upstream rocksdb regression causing deadlocks on corrupt WAL files Signed-off-by: June Clementine Strawberry --- Cargo.lock | 8 +++---- deps/rust-rocksdb/Cargo.toml | 2 +- flake.lock | 44 ++++++++++++++++++------------------ flake.nix | 2 +- 4 files changed, 28 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cd914dfd..cb1458db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3408,8 +3408,8 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" -version = "0.31.0+9.9.3" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2d31cf323df7c6d95396ef0213e28936c2218bd6#2d31cf323df7c6d95396ef0213e28936c2218bd6" +version = "0.32.0+9.10.0" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=24826ed7e9cf9ed9ba7ba2f1ec63091006025c9e#24826ed7e9cf9ed9ba7ba2f1ec63091006025c9e" dependencies = [ "bindgen", "bzip2-sys", @@ -3425,8 +3425,8 @@ dependencies = [ [[package]] name = "rust-rocksdb" -version = "0.35.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2d31cf323df7c6d95396ef0213e28936c2218bd6#2d31cf323df7c6d95396ef0213e28936c2218bd6" +version = "0.36.0" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=24826ed7e9cf9ed9ba7ba2f1ec63091006025c9e#24826ed7e9cf9ed9ba7ba2f1ec63091006025c9e" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index 96554aed..40c0541e 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -27,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "2d31cf323df7c6d95396ef0213e28936c2218bd6" +rev = "24826ed7e9cf9ed9ba7ba2f1ec63091006025c9e" #branch = "master" default-features = false diff --git a/flake.lock b/flake.lock index 210e8e08..d245ccdd 100644 --- a/flake.lock +++ b/flake.lock @@ -32,11 +32,11 @@ "nixpkgs": "nixpkgs_4" }, "locked": { - "lastModified": 1733424942, - "narHash": "sha256-5t7Sl6EkOaoP4FvzLmH7HFDbdl9SizmLh53RjDQCbWQ=", + "lastModified": 1737621947, + "narHash": "sha256-8HFvG7fvIFbgtaYAY2628Tb89fA55nPm2jSiNs0/Cws=", "owner": "cachix", "repo": "cachix", - "rev": "8b6b0e4694b9aa78b2ea4c93bff6e1a222dc7e4a", + "rev": "f65a3cd5e339c223471e64c051434616e18cc4f5", "type": "github" }, "original": { @@ -117,11 +117,11 @@ }, "crane_2": { "locked": { - "lastModified": 1736566337, - "narHash": "sha256-SC0eDcZPqISVt6R0UfGPyQLrI0+BppjjtQ3wcSlk0oI=", + "lastModified": 1737689766, + "narHash": "sha256-ivVXYaYlShxYoKfSo5+y5930qMKKJ8CLcAoIBPQfJ6s=", "owner": "ipetkov", "repo": "crane", - "rev": "9172acc1ee6c7e1cbafc3044ff850c568c75a5a3", + "rev": "6fe74265bbb6d016d663b1091f015e2976c4a527", "type": "github" }, "original": { @@ -170,11 +170,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1736836313, - "narHash": "sha256-zdZ7/T6yG0/hzoVOiNpDiR/sW3zR6oSMrfIFJK2BrrE=", + "lastModified": 1737700483, + "narHash": "sha256-1778bR4GDDc51/iZQvcshGLZ4JU87zCzqei8Hn7vU1A=", "owner": "nix-community", 
"repo": "fenix", - "rev": "056c9393c821a4df356df6ce7f14c722dc8717ec", + "rev": "bab2a2840bc2d5ae7c6a133602185edbe4ca7daa", "type": "github" }, "original": { @@ -364,11 +364,11 @@ "liburing": { "flake": false, "locked": { - "lastModified": 1736719310, - "narHash": "sha256-Turvx60THwzTiUHb49WV3upUgsPuktr7tVy2Lwu2xJg=", + "lastModified": 1737600516, + "narHash": "sha256-EKyLQ3pbcjoU5jH5atge59F4fzuhTsb6yalUj6Ve2t8=", "owner": "axboe", "repo": "liburing", - "rev": "3124a4619e4daf26b06d48ccf0186a947070c415", + "rev": "6c509e2b0c881a13b83b259a221bf15fc9b3f681", "type": "github" }, "original": { @@ -550,11 +550,11 @@ }, "nixpkgs_5": { "locked": { - "lastModified": 1736817698, - "narHash": "sha256-1m+JP9RUsbeLVv/tF1DX3Ew9Vl/fatXnlh/g5k3jcSk=", + "lastModified": 1737717945, + "narHash": "sha256-ET91TMkab3PmOZnqiJQYOtSGvSTvGeHoegAv4zcTefM=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "2b1fca3296ddd1602d2c4f104a4050e006f4b0cb", + "rev": "ecd26a469ac56357fd333946a99086e992452b6a", "type": "github" }, "original": { @@ -567,16 +567,16 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1734469478, - "narHash": "sha256-IcQ4N8xADYal79K+ONmNq4RLlIwdgUqgrVzgNgiIaG8=", + "lastModified": 1737761947, + "narHash": "sha256-FqpAOeFGuA+luV36jaf5aVz3UB183n6wUrTbFxCwjjQ=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "8b4808e7de2fbb5d119d8d72cdca76d8ab84bc47", + "rev": "d078ca31e802696b26d972bda7bed86ee1382156", "type": "github" }, "original": { "owner": "girlbossceo", - "ref": "v9.9.3", + "ref": "v9.10.0", "repo": "rocksdb", "type": "github" } @@ -599,11 +599,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1736690231, - "narHash": "sha256-g9gyxX+F6CrkT5gRIMKPnCPom0o9ZDzYnzzeNF86D6Q=", + "lastModified": 1737634189, + "narHash": "sha256-AG5G9KDsl0Ngby9EfWvlemma7WWG0KCADTIccPJuzUE=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "8364ef299790cb6ec22b9e09e873c97dbe9f2cb5", + "rev": "84d44d0a574630aa8500ed62b6c01ccd3fae2473", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 920d3d14..1d38f80f 100644 --- a/flake.nix +++ b/flake.nix @@ -9,7 +9,7 @@ flake-utils.url = "github:numtide/flake-utils?ref=main"; nix-filter.url = "github:numtide/nix-filter?ref=main"; nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable"; - rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.9.3"; flake = false; }; + rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.10.0"; flake = false; }; liburing = { url = "github:axboe/liburing?ref=master"; flake = false; }; }; From 6a7fe3ab7c0ddd489250f1c2922c7808d67bff43 Mon Sep 17 00:00:00 2001 From: morguldir Date: Tue, 21 Jan 2025 12:11:49 +0100 Subject: [PATCH 076/328] limit wal archive size to 1gb Signed-off-by: morguldir --- src/database/engine/db_opts.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database/engine/db_opts.rs b/src/database/engine/db_opts.rs index 26f53825..01847257 100644 --- a/src/database/engine/db_opts.rs +++ b/src/database/engine/db_opts.rs @@ -55,7 +55,7 @@ pub(crate) fn db_options(config: &Config, env: &Env, row_cache: &Cache) -> Resul // Files opts.set_table_cache_num_shard_bits(7); - opts.set_wal_size_limit_mb(1024 * 1024 * 1024); + opts.set_wal_size_limit_mb(1024); opts.set_max_total_wal_size(1024 * 1024 * 512); opts.set_writable_file_max_buffer_size(1024 * 1024 * 2); From eed3291625d6b6f454ff24e41888d17ed492e1b7 Mon Sep 17 00:00:00 2001 From: morguldir Date: Tue, 21 Jan 2025 13:32:10 +0100 Subject: [PATCH 077/328] ci: set variable after ssh has been configured, 
mainly for draft pull requests --- .github/workflows/ci.yml | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 007adace..ce662101 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -306,6 +306,8 @@ jobs: echo "Checking connection" ssh -q website "echo test" + echo "SSH_WEBSITE=1" >> "$GITHUB_ENV" + - uses: nixbuild/nix-quick-install-action@master - name: Restore and cache Nix store @@ -489,7 +491,7 @@ jobs: - name: Upload static-x86_64-linux-musl-all-features-x86_64-haswell-optimised to webserver if: ${{ matrix.target == 'x86_64-linux-musl' }} run: | - if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + if [ ! -z $SSH_WEBSITE ]; then chmod +x static-x86_64-linux-musl-x86_64-haswell-optimised scp static-x86_64-linux-musl-x86_64-haswell-optimised website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/static-x86_64-linux-musl-x86_64-haswell-optimised fi @@ -497,7 +499,7 @@ jobs: - name: Upload static-${{ matrix.target }}-all-features to webserver if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | - if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + if [ ! -z $SSH_WEBSITE ]; then chmod +x static-${{ matrix.target }} scp static-${{ matrix.target }} website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/static-${{ matrix.target }} fi @@ -505,14 +507,14 @@ jobs: - name: Upload static deb x86_64-linux-musl-all-features-x86_64-haswell-optimised to webserver if: ${{ matrix.target == 'x86_64-linux-musl' }} run: | - if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + if [ ! -z $SSH_WEBSITE ]; then scp x86_64-linux-musl-x86_64-haswell-optimised.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/x86_64-linux-musl-x86_64-haswell-optimised.deb fi - name: Upload static deb ${{ matrix.target }}-all-features to webserver if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | - if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + if [ ! -z $SSH_WEBSITE ]; then scp ${{ matrix.target }}.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/${{ matrix.target }}.deb fi @@ -534,14 +536,14 @@ jobs: - name: Upload static-${{ matrix.target }}-debug-all-features to webserver if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | - if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + if [ ! -z $SSH_WEBSITE ]; then scp static-${{ matrix.target }}-debug website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/static-${{ matrix.target }}-debug fi - name: Upload static deb ${{ matrix.target }}-debug-all-features to webserver if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | - if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + if [ ! 
-z $SSH_WEBSITE]; then scp ${{ matrix.target }}-debug.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/${{ matrix.target }}-debug.deb fi @@ -583,21 +585,21 @@ jobs: - name: Upload OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz to webserver if: ${{ matrix.target == 'x86_64-linux-musl' }} run: | - if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + if [ ! -z $SSH_WEBSITE ]; then scp oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz fi - name: Upload OCI image ${{ matrix.target }}-all-features to webserver if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | - if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + if [ ! -z $SSH_WEBSITE ]; then scp oci-image-${{ matrix.target }}.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/oci-image-${{ matrix.target }}.tar.gz fi - name: Upload OCI image ${{ matrix.target }}-debug-all-features to webserver if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | - if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + if [ ! -z $SSH_WEBSITE ]; then scp oci-image-${{ matrix.target }}-debug.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/oci-image-${{ matrix.target }}-debug.tar.gz fi @@ -693,7 +695,7 @@ jobs: - name: Upload macOS x86_64 binary to webserver if: ${{ matrix.os == 'macos-13' }} run: | - if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + if [ ! -z $SSH_WEBSITE ]; then chmod +x conduwuit-macos-x86_64 scp conduwuit-macos-x86_64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/conduwuit-macos-x86_64 fi @@ -701,7 +703,7 @@ jobs: - name: Upload macOS arm64 binary to webserver if: ${{ matrix.os == 'macos-latest' }} run: | - if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + if [ ! -z $SSH_WEBSITE ]; then chmod +x conduwuit-macos-arm64 scp conduwuit-macos-arm64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/conduwuit-macos-arm64 fi From cd5d4f48bec719a938f50cb17b667668105a1141 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 25 Jan 2025 03:08:35 -0500 Subject: [PATCH 078/328] add mau.dev mirror of conduwuit Signed-off-by: June Clementine Strawberry --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 796f96f8..74b6bddf 100644 --- a/README.md +++ b/README.md @@ -109,7 +109,8 @@ Both, but I prefer conduwuit. 
- GitLab: - git.girlcock.ceo: - git.gay: -- Codeberg: +- mau.dev: +- Codeberg: - sourcehut: From 2abf15b9e9587e6b625c0f40bf29bef75368630e Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 25 Jan 2025 13:14:53 -0500 Subject: [PATCH 079/328] revert back to v9.9.3 due to upstream build issue with macos Signed-off-by: June Clementine Strawberry --- Cargo.lock | 12 ++++++------ deps/rust-rocksdb/Cargo.toml | 2 +- flake.lock | 20 ++++++++++---------- flake.nix | 2 +- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cb1458db..5848cc46 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1250,7 +1250,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -2183,7 +2183,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -3016,7 +3016,7 @@ dependencies = [ "once_cell", "socket2", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -3409,7 +3409,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.32.0+9.10.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=24826ed7e9cf9ed9ba7ba2f1ec63091006025c9e#24826ed7e9cf9ed9ba7ba2f1ec63091006025c9e" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1f032427d3a0e7b0f13c04b4e34712bd8610291b#1f032427d3a0e7b0f13c04b4e34712bd8610291b" dependencies = [ "bindgen", "bzip2-sys", @@ -3426,7 +3426,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.36.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=24826ed7e9cf9ed9ba7ba2f1ec63091006025c9e#24826ed7e9cf9ed9ba7ba2f1ec63091006025c9e" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1f032427d3a0e7b0f13c04b4e34712bd8610291b#1f032427d3a0e7b0f13c04b4e34712bd8610291b" dependencies = [ "libc", "rust-librocksdb-sys", @@ -3477,7 +3477,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index 40c0541e..ba8259a3 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -27,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "24826ed7e9cf9ed9ba7ba2f1ec63091006025c9e" +rev = "1f032427d3a0e7b0f13c04b4e34712bd8610291b" #branch = "master" default-features = false diff --git a/flake.lock b/flake.lock index d245ccdd..5af6ec43 100644 --- a/flake.lock +++ b/flake.lock @@ -170,11 +170,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1737700483, - "narHash": "sha256-1778bR4GDDc51/iZQvcshGLZ4JU87zCzqei8Hn7vU1A=", + "lastModified": 1737786656, + "narHash": "sha256-ubCW9Jy7ZUOF354bWxTgLDpVnTvIpNr6qR4H/j7I0oo=", "owner": "nix-community", "repo": "fenix", - "rev": "bab2a2840bc2d5ae7c6a133602185edbe4ca7daa", + "rev": "2f721f527886f801403f389a9cabafda8f1e3b7f", "type": "github" }, "original": { @@ -567,16 +567,16 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1737761947, - "narHash": "sha256-FqpAOeFGuA+luV36jaf5aVz3UB183n6wUrTbFxCwjjQ=", + "lastModified": 
1737828695, + "narHash": "sha256-8Ev6zzhNPU798JNvU27a7gj5X+6SDG3jBweUkQ59DbA=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "d078ca31e802696b26d972bda7bed86ee1382156", + "rev": "a4d9230dcc9d03be428b9a728133f8f646c0065c", "type": "github" }, "original": { "owner": "girlbossceo", - "ref": "v9.10.0", + "ref": "v9.9.3", "repo": "rocksdb", "type": "github" } @@ -599,11 +599,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1737634189, - "narHash": "sha256-AG5G9KDsl0Ngby9EfWvlemma7WWG0KCADTIccPJuzUE=", + "lastModified": 1737728869, + "narHash": "sha256-U4pl3Hi0lT6GP4ecN3q9wdD2sdaKMbmD/5NJ1NdJ9AM=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "84d44d0a574630aa8500ed62b6c01ccd3fae2473", + "rev": "6e4c29f7ce18cea7d3d31237a4661ab932eab636", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 1d38f80f..920d3d14 100644 --- a/flake.nix +++ b/flake.nix @@ -9,7 +9,7 @@ flake-utils.url = "github:numtide/flake-utils?ref=main"; nix-filter.url = "github:numtide/nix-filter?ref=main"; nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable"; - rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.10.0"; flake = false; }; + rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.9.3"; flake = false; }; liburing = { url = "github:axboe/liburing?ref=master"; flake = false; }; }; From 9514064c1c709dc7c437b1478b224bb0d711ec05 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 25 Jan 2025 13:40:31 -0500 Subject: [PATCH 080/328] use --locked for macOS builds Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ce662101..de6dbc77 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -669,7 +669,7 @@ jobs: - name: Build macOS x86_64 binary if: ${{ matrix.os == 'macos-13' }} run: | - CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short ${{ github.sha }})" cargo build --release + CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short ${{ github.sha }})" cargo build --release --locked --features=perf_measurements,sentry_telemetry,direct_tls cp -v -f target/release/conduwuit conduwuit-macos-x86_64 otool -L conduwuit-macos-x86_64 @@ -677,12 +677,13 @@ jobs: - name: Run x86_64 macOS release binary if: ${{ matrix.os == 'macos-13' }} run: | + ./conduwuit-macos-x86_64 --help ./conduwuit-macos-x86_64 --version - name: Build macOS arm64 binary if: ${{ matrix.os == 'macos-latest' }} run: | - CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short ${{ github.sha }})" cargo build --release + CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short ${{ github.sha }})" cargo build --release --locked --features=perf_measurements,sentry_telemetry,direct_tls cp -v -f target/release/conduwuit conduwuit-macos-arm64 otool -L conduwuit-macos-arm64 @@ -690,6 +691,7 @@ jobs: - name: Run arm64 macOS release binary if: ${{ matrix.os == 'macos-latest' }} run: | + ./conduwuit-macos-arm64 --help ./conduwuit-macos-arm64 --version - name: Upload macOS x86_64 binary to webserver From 1d26eec82d8d75f3d67cd973482bc7aa604e6381 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 11 Jan 2025 14:12:08 +0000 Subject: [PATCH 081/328] publish README to docker hub --- .github/workflows/docker-hub-description.yml | 36 ++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 .github/workflows/docker-hub-description.yml diff --git a/.github/workflows/docker-hub-description.yml b/.github/workflows/docker-hub-description.yml new 
file mode 100644 index 00000000..5ff5f666 --- /dev/null +++ b/.github/workflows/docker-hub-description.yml @@ -0,0 +1,36 @@ +name: Update Docker Hub Description + +on: + push: + branches: + - main + paths: + - README.md + - .github/workflows/docker-hub-description.yml + +jobs: + dockerHubDescription: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + - name: Setting variables + uses: actions/github-script@v7 + id: var + with: + script: | + const githubRepo = '${{ github.repository }}'.toLowerCase() + const repoId = githubRepo.split('/')[1] + + core.setOutput('github_repository', githubRepo) + const dockerRepo = '${{ vars.DOCKER_USERNAME }}'.toLowerCase() + '/' + repoId + core.setOutput('docker_repo', dockerRepo) + - name: Docker Hub Description + uses: peter-evans/dockerhub-description@v4 + with: + username: ${{ vars.DOCKER_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + repository: ${{ steps.var.outputs.docker_repo }} + short-description: ${{ github.event.repository.description }} + enable-url-completion: true From d86061084cf2d544b99e16890914001b116ab2ca Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 26 Jan 2025 01:05:41 +0000 Subject: [PATCH 082/328] Publish haswell images to image registries (#674) * push haswell images to docker repos * Add OCI labels to image * fixup! Add OCI labels to image * fixup! push haswell images to docker repos * fixup! Add OCI labels to image * fixup! Add OCI labels to image * fixup! Add OCI labels to image --- .github/workflows/ci.yml | 174 ++++++++++++++++++++------------- nix/pkgs/oci-image/default.nix | 6 ++ 2 files changed, 114 insertions(+), 66 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index de6dbc77..345713aa 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -566,6 +566,14 @@ jobs: cp -v -f result oci-image-${{ matrix.target }}-debug.tar.gz + - name: Upload OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised to GitHub + if: ${{ matrix.target == 'x86_64-linux-musl' }} + uses: actions/upload-artifact@v4 + with: + name: oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised + path: oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz + if-no-files-found: error + compression-level: 0 - name: Upload OCI image ${{ matrix.target }}-all-features to GitHub uses: actions/upload-artifact@v4 with: @@ -745,18 +753,11 @@ jobs: contents: read if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && github.event.pull_request.user.login != 'renovate[bot]' env: - DOCKER_ARM64: docker.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-arm64v8 - DOCKER_AMD64: docker.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-amd64 - DOCKER_TAG: docker.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }} - DOCKER_BRANCH: docker.io/${{ needs.variables.outputs.github_repository }}:${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' 
&& format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }} - GHCR_ARM64: ghcr.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-arm64v8 - GHCR_AMD64: ghcr.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-amd64 - GHCR_TAG: ghcr.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }} - GHCR_BRANCH: ghcr.io/${{ needs.variables.outputs.github_repository }}:${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }} - GLCR_ARM64: registry.gitlab.com/conduwuit/conduwuit:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-arm64v8 - GLCR_AMD64: registry.gitlab.com/conduwuit/conduwuit:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-amd64 - GLCR_TAG: registry.gitlab.com/conduwuit/conduwuit:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }} - GLCR_BRANCH: registry.gitlab.com/conduwuit/conduwuit:${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }} + DOCKER_HUB_REPO: docker.io/${{ needs.variables.outputs.github_repository }} + GHCR_REPO: ghcr.io/${{ needs.variables.outputs.github_repository }} + GLCR_REPO: registry.gitlab.com/conduwuit/conduwuit + UNIQUE_TAG: ${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }} + BRANCH_TAG: ${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }} @@ -790,143 +791,184 @@ jobs: - name: Move OCI images into position run: | + mv -v oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised/*.tar.gz oci-image-amd64-haswell-optimised.tar.gz mv -v oci-image-x86_64-linux-musl/*.tar.gz oci-image-amd64.tar.gz mv -v oci-image-aarch64-linux-musl/*.tar.gz oci-image-arm64v8.tar.gz mv -v oci-image-x86_64-linux-musl-debug/*.tar.gz oci-image-amd64-debug.tar.gz mv -v oci-image-aarch64-linux-musl-debug/*.tar.gz oci-image-arm64v8-debug.tar.gz + - name: Load and push amd64 haswell image + run: | + docker load -i oci-image-amd64.tar.gz + if [ ! 
-z $DOCKERHUB_TOKEN ]; then + docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell + docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell + fi + if [ $GHCR_ENABLED = "true" ]; then + docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-haswell + docker push ${GHCR_REPO}:${UNIQUE_TAG}-haswell + fi + if [ ! -z $GITLAB_TOKEN ]; then + docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-haswell + docker push ${GLCR_REPO}:${UNIQUE_TAG}-haswell + fi + - name: Load and push amd64 image run: | docker load -i oci-image-amd64.tar.gz if [ ! -z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_AMD64} - docker push ${DOCKER_AMD64} + docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64 + docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64 fi if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_AMD64} - docker push ${GHCR_AMD64} + docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-amd64 + docker push ${GHCR_REPO}:${UNIQUE_TAG}-amd64 fi if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_AMD64} - docker push ${GLCR_AMD64} + docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-amd64 + docker push ${GLCR_REPO}:${UNIQUE_TAG}-amd64 fi - name: Load and push arm64 image run: | docker load -i oci-image-arm64v8.tar.gz if [ ! -z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_ARM64} - docker push ${DOCKER_ARM64} + docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 + docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 fi if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_ARM64} - docker push ${GHCR_ARM64} + docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 + docker push ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 fi if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_ARM64} - docker push ${GLCR_ARM64} + docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 + docker push ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 fi - name: Load and push amd64 debug image run: | docker load -i oci-image-amd64-debug.tar.gz if [ ! -z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_AMD64}-debug - docker push ${DOCKER_AMD64}-debug + docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug + docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug fi if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_AMD64}-debug - docker push ${GHCR_AMD64}-debug + docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug + docker push ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug fi if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_AMD64}-debug - docker push ${GLCR_AMD64}-debug + docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug + docker push ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug fi - name: Load and push arm64 debug image run: | docker load -i oci-image-arm64v8-debug.tar.gz if [ ! 
-z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_ARM64}-debug - docker push ${DOCKER_ARM64}-debug + docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug + docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug fi if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_ARM64}-debug - docker push ${GHCR_ARM64}-debug + docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug + docker push ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug fi if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_ARM64}-debug - docker push ${GLCR_ARM64}-debug + docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug + docker push ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug + fi + + - name: Create Docker haswell manifests + run: | + # Dockerhub Container Registry + if [ ! -z $DOCKERHUB_TOKEN ]; then + docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell + fi + # GitHub Container Registry + if [ $GHCR_ENABLED = "true" ]; then + docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell + fi + # GitLab Container Registry + if [ ! -z $GITLAB_TOKEN ]; then + docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell fi - name: Create Docker combined manifests run: | # Dockerhub Container Registry if [ ! -z $DOCKERHUB_TOKEN ]; then - docker manifest create ${DOCKER_TAG} --amend ${DOCKER_ARM64} --amend ${DOCKER_AMD64} - docker manifest create ${DOCKER_BRANCH} --amend ${DOCKER_ARM64} --amend ${DOCKER_AMD64} + docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG} --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64 + docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG} --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64 fi # GitHub Container Registry if [ $GHCR_ENABLED = "true" ]; then - docker manifest create ${GHCR_TAG} --amend ${GHCR_ARM64} --amend ${GHCR_AMD64} - docker manifest create ${GHCR_BRANCH} --amend ${GHCR_ARM64} --amend ${GHCR_AMD64} + docker manifest create ${GHCR_REPO}:${UNIQUE_TAG} --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64 + docker manifest create ${GHCR_REPO}:${BRANCH_TAG} --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64 fi # GitLab Container Registry if [ ! -z $GITLAB_TOKEN ]; then - docker manifest create ${GLCR_TAG} --amend ${GLCR_ARM64} --amend ${GLCR_AMD64} - docker manifest create ${GLCR_BRANCH} --amend ${GLCR_ARM64} --amend ${GLCR_AMD64} + docker manifest create ${GLCR_REPO}:${UNIQUE_TAG} --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64 + docker manifest create ${GLCR_REPO}:${BRANCH_TAG} --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64 fi - name: Create Docker combined debug manifests run: | # Dockerhub Container Registry if [ ! 
-z $DOCKERHUB_TOKEN ]; then - docker manifest create ${DOCKER_TAG}-debug --amend ${DOCKER_ARM64}-debug --amend ${DOCKER_AMD64}-debug - docker manifest create ${DOCKER_BRANCH}-debug --amend ${DOCKER_ARM64}-debug --amend ${DOCKER_AMD64}-debug + docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug + docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug fi # GitHub Container Registry if [ $GHCR_ENABLED = "true" ]; then - docker manifest create ${GHCR_TAG}-debug --amend ${GHCR_ARM64}-debug --amend ${GHCR_AMD64}-debug - docker manifest create ${GHCR_BRANCH}-debug --amend ${GHCR_ARM64}-debug --amend ${GHCR_AMD64}-debug + docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug + docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug fi # GitLab Container Registry if [ ! -z $GITLAB_TOKEN ]; then - docker manifest create ${GLCR_TAG}-debug --amend ${GLCR_ARM64}-debug --amend ${GLCR_AMD64}-debug - docker manifest create ${GLCR_BRANCH}-debug --amend ${GLCR_ARM64}-debug --amend ${GLCR_AMD64}-debug + docker manifest create ${GLCR_REPO}:${UNIQUE_TAG}-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug + docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug fi - name: Push manifests to Docker registries run: | if [ ! -z $DOCKERHUB_TOKEN ]; then - docker manifest push ${DOCKER_TAG} - docker manifest push ${DOCKER_BRANCH} - docker manifest push ${DOCKER_TAG}-debug - docker manifest push ${DOCKER_BRANCH}-debug + docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG} + docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG} + docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug + docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG}-debug + docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell + docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell fi if [ $GHCR_ENABLED = "true" ]; then - docker manifest push ${GHCR_TAG} - docker manifest push ${GHCR_BRANCH} - docker manifest push ${GHCR_TAG}-debug - docker manifest push ${GHCR_BRANCH}-debug + docker manifest push ${GHCR_REPO}:${UNIQUE_TAG} + docker manifest push ${GHCR_REPO}:${BRANCH_TAG} + docker manifest push ${GHCR_REPO}:${UNIQUE_TAG}-debug + docker manifest push ${GHCR_REPO}:${BRANCH_TAG}-debug + docker manifest push ${GHCR_REPO}:${UNIQUE_TAG}-haswell + docker manifest push ${GHCR_REPO}:${BRANCH_TAG}-haswell fi if [ ! -z $GITLAB_TOKEN ]; then - docker manifest push ${GLCR_TAG} - docker manifest push ${GLCR_BRANCH} - docker manifest push ${GLCR_TAG}-debug - docker manifest push ${GLCR_BRANCH}-debug + docker manifest push ${GLCR_REPO}:${UNIQUE_TAG} + docker manifest push ${GLCR_REPO}:${BRANCH_TAG} + docker manifest push ${GLCR_REPO}:${UNIQUE_TAG}-debug + docker manifest push ${GLCR_REPO}:${BRANCH_TAG}-debug + docker manifest push ${GLCR_REPO}:${UNIQUE_TAG}-haswell + docker manifest push ${GLCR_REPO}:${BRANCH_TAG}-haswell fi - name: Add Image Links to Job Summary run: | if [ ! 
-z $DOCKERHUB_TOKEN ]; then - echo "- \`docker pull ${DOCKER_TAG}\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${DOCKER_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY fi if [ $GHCR_ENABLED = "true" ]; then - echo "- \`docker pull ${GHCR_TAG}\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${GHCR_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY fi if [ ! -z $GITLAB_TOKEN ]; then - echo "- \`docker pull ${GLCR_TAG}\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${GLCR_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY fi diff --git a/nix/pkgs/oci-image/default.nix b/nix/pkgs/oci-image/default.nix index 152e00d1..d378d017 100644 --- a/nix/pkgs/oci-image/default.nix +++ b/nix/pkgs/oci-image/default.nix @@ -28,5 +28,11 @@ dockerTools.buildLayeredImage { Env = [ "RUST_BACKTRACE=full" ]; + Labels = { + "org.opencontainers.image.title" = main.pname; + "org.opencontainers.image.version" = main.version; + "org.opencontainers.image.revision" = inputs.self.rev or inputs.self.dirtyRev or ""; + # "org.opencontainers.image.created" = builtins.formatTime "%Y-%m-%dT%H:%M:%SZ" inputs.self.lastModified; + }; }; } From 9dcf289c7a1f57bfb512f406ddcfb22895e30846 Mon Sep 17 00:00:00 2001 From: bumpsoo Date: Sun, 5 Jan 2025 11:37:40 +0900 Subject: [PATCH 083/328] (doc): Update docker-compose.yml and conduwuit-example.toml The server cannot start without a registration token when registration is configured Signed-off-by: bumpsoo --- conduwuit-example.toml | 7 +++++-- docs/deploying/docker-compose.for-traefik.yml | 2 ++ docs/deploying/docker-compose.with-caddy.yml | 2 ++ docs/deploying/docker-compose.yml | 2 ++ src/core/config/mod.rs | 6 +++++- 5 files changed, 16 insertions(+), 3 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 79efbd14..3ecc1628 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -389,13 +389,16 @@ # #allow_registration = false -# This item is undocumented. Please contribute documentation for it. +# Enabling this setting opens registration to anyone without restrictions. +# This makes your server vulnerable to abuse # #yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = false # A static registration token that new users will have to provide when # creating an account. If unset and `allow_registration` is true, -# registration is open without any condition. +# you must set +# `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` +# to true to allow open registration without any conditions. # # YOU NEED TO EDIT THIS OR USE registration_token_file. 
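# As an illustration only (not part of the upstream example file), a typical
# safe pairing when registration is enabled is:
#
#allow_registration = true
#registration_token = "replace-with-a-long-random-secret"
#
# Open registration without a token additionally requires the long
# `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse`
# option described above to be set to true; the token value shown here is a
# placeholder, not a recommended default.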
# diff --git a/docs/deploying/docker-compose.for-traefik.yml b/docs/deploying/docker-compose.for-traefik.yml index b4316426..366f6999 100644 --- a/docs/deploying/docker-compose.for-traefik.yml +++ b/docs/deploying/docker-compose.for-traefik.yml @@ -17,6 +17,8 @@ services: CONDUWUIT_PORT: 6167 # should match the loadbalancer traefik label CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB CONDUWUIT_ALLOW_REGISTRATION: 'true' + CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed. + #CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true' CONDUWUIT_ALLOW_FEDERATION: 'true' CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]' diff --git a/docs/deploying/docker-compose.with-caddy.yml b/docs/deploying/docker-compose.with-caddy.yml index c080293f..431cf2d4 100644 --- a/docs/deploying/docker-compose.with-caddy.yml +++ b/docs/deploying/docker-compose.with-caddy.yml @@ -33,6 +33,8 @@ services: CONDUWUIT_PORT: 6167 CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB CONDUWUIT_ALLOW_REGISTRATION: 'true' + CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed. + #CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true' CONDUWUIT_ALLOW_FEDERATION: 'true' CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]' diff --git a/docs/deploying/docker-compose.yml b/docs/deploying/docker-compose.yml index 3b7d84ed..ca33b5f5 100644 --- a/docs/deploying/docker-compose.yml +++ b/docs/deploying/docker-compose.yml @@ -17,6 +17,8 @@ services: CONDUWUIT_PORT: 6167 CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB CONDUWUIT_ALLOW_REGISTRATION: 'true' + CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed. + #CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true' CONDUWUIT_ALLOW_FEDERATION: 'true' CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]' diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index c541c7e4..133f0887 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -492,12 +492,16 @@ pub struct Config { #[serde(default)] pub allow_registration: bool, + /// Enabling this setting opens registration to anyone without restrictions. + /// This makes your server vulnerable to abuse #[serde(default)] pub yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse: bool, /// A static registration token that new users will have to provide when /// creating an account. If unset and `allow_registration` is true, - /// registration is open without any condition. + /// you must set + /// `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` + /// to true to allow open registration without any conditions. /// /// YOU NEED TO EDIT THIS OR USE registration_token_file. 
/// From 5b5ccba64e3d36a9235f4e0d449f40d859046dad Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 25 Jan 2025 21:14:38 -0500 Subject: [PATCH 084/328] make conduwuit lowercase in the user-agent again Signed-off-by: June Clementine Strawberry --- src/core/info/version.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/info/version.rs b/src/core/info/version.rs index fb71d4e1..37580210 100644 --- a/src/core/info/version.rs +++ b/src/core/info/version.rs @@ -7,7 +7,7 @@ use std::sync::OnceLock; -static BRANDING: &str = "Conduwuit"; +static BRANDING: &str = "conduwuit"; static SEMANTIC: &str = env!("CARGO_PKG_VERSION"); static VERSION: OnceLock = OnceLock::new(); From c323894497e263514e92bfe0dee8397085305bc0 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 25 Jan 2025 21:20:29 -0500 Subject: [PATCH 085/328] use test in postrm deb script before deleting Signed-off-by: June Clementine Strawberry --- debian/postrm | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/debian/postrm b/debian/postrm index f516f2a2..3c0b1c09 100644 --- a/debian/postrm +++ b/debian/postrm @@ -10,21 +10,33 @@ CONDUWUIT_DATABASE_PATH_SYMLINK=/var/lib/matrix-conduit case $1 in purge) # Remove debconf changes from the db - db_purge + #db_purge # Per https://www.debian.org/doc/debian-policy/ch-files.html#behavior # "configuration files must be preserved when the package is removed, and # only deleted when the package is purged." + + # + if [ -d "$CONDUWUIT_CONFIG_PATH" ]; then - rm -v -r "$CONDUWUIT_CONFIG_PATH" + if test -L "$CONDUWUIT_CONFIG_PATH"; then + echo "Deleting conduwuit configuration files" + rm -v -r "$CONDUWUIT_CONFIG_PATH" + fi fi if [ -d "$CONDUWUIT_DATABASE_PATH" ]; then - rm -v -r "$CONDUWUIT_DATABASE_PATH" + if test -L "$CONDUWUIT_DATABASE_PATH"; then + echo "Deleting conduwuit database directory" + rm -r "$CONDUWUIT_DATABASE_PATH" + fi fi if [ -d "$CONDUWUIT_DATABASE_PATH_SYMLINK" ]; then - rm -v -r "$CONDUWUIT_DATABASE_PATH_SYMLINK" + if test -L "$CONDUWUIT_DATABASE_SYMLINK"; then + echo "Removing matrix-conduit symlink" + rm -r "$CONDUWUIT_DATABASE_PATH_SYMLINK" + fi fi ;; esac From 4b331fe50e568241f6703d92b005149da9dc4a52 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 25 Jan 2025 22:42:16 -0500 Subject: [PATCH 086/328] update README.md, crate metadata, and OCI image metadata Signed-off-by: June Clementine Strawberry --- Cargo.toml | 9 +++-- README.md | 72 ++++++++++++++++++++++++---------- nix/pkgs/oci-image/default.nix | 13 ++++-- 3 files changed, 66 insertions(+), 28 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index d52ce974..c4af4a7c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,14 +7,15 @@ default-members = ["src/*"] [workspace.package] authors = [ - "strawberry ", - "timokoesters ", + "June Clementine Strawberry ", + "strawberry ", # woof + "Jason Volk ", ] categories = ["network-programming"] -description = "a very cool fork of Conduit, a Matrix homeserver written in Rust" +description = "a very cool Matrix chat homeserver written in Rust" edition = "2021" homepage = "https://conduwuit.puppyirl.gay/" -keywords = ["chat", "matrix", "server", "uwu"] +keywords = ["chat", "matrix", "networking", "server", "uwu"] license = "Apache-2.0" # See also `rust-toolchain.toml` readme = "README.md" diff --git a/README.md b/README.md index 74b6bddf..13a1c67f 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ -### a very cool, featureful fork of [Conduit](https://conduit.rs/) 
+### a very cool [Matrix](https://matrix.org/) chat homeserver written in Rust @@ -15,16 +15,15 @@ information and how to deploy/setup conduwuit. #### What is Matrix? -[Matrix](https://matrix.org) is an open network for secure and decentralized -communication. Users from every Matrix homeserver can chat with users from all -other Matrix servers. You can even use bridges (also called Matrix Appservices) -to communicate with users outside of Matrix, like a community on Discord. +[Matrix](https://matrix.org) is an open, federated, and extensible network for +decentralised communication. Users from any Matrix homeserver can chat with users from all +other homeservers over federation. Matrix is designed to be extensible and built on top of. +You can even use bridges such as Matrix Appservices to communicate with users outside of Matrix, like a community on Discord. #### What is the goal? -A high-performance and efficient Matrix homeserver that's easy to set up and -just works. You can install it on a mini-computer like the Raspberry Pi to -host Matrix for your family, friends or company. +A high-performance, efficient, low-cost, and featureful Matrix homeserver that's +easy to set up and just works with minimal configuration needed. #### Can I try it out? @@ -37,17 +36,22 @@ homeserver". This means there are rules, so please read the rules: [https://transfem.dev/homeserver_rules.txt](https://transfem.dev/homeserver_rules.txt) transfem.dev is also listed at -[servers.joinmatrix.org](https://servers.joinmatrix.org/) +[servers.joinmatrix.org](https://servers.joinmatrix.org/), which is a list of +popular public Matrix homeservers, including some others that run conduwuit. #### What is the current status? -conduwuit is technically a hard fork of Conduit, which is in Beta. The Beta status -initially was inherited from Conduit, however overtime this Beta status is rapidly -becoming less and less relevant as our codebase significantly diverges more and more. +conduwuit is technically a hard fork of [Conduit](https://conduit.rs/), which is in beta. +The beta status initially was inherited from Conduit, however the huge amount of +codebase divergance, changes, fixes, and improvements have effectively made this +beta status not entirely applicable to us anymore. -conduwuit is quite stable and very usable as a daily driver and for a low-medium -sized homeserver. There is still a lot of more work to be done, but it is in a far -better place than the project was in early 2024. +conduwuit is very stable based on our rapidly growing userbase, has lots of features that users +expect, and very usable as a daily driver for small, medium, and upper-end medium sized homeservers. + +A lot of critical stability and performance issues have been fixed, and a lot of +necessary groundwork has finished; making this project way better than it was +back in the start at ~early 2024. #### How is conduwuit funded? Is conduwuit sustainable? @@ -72,16 +76,37 @@ Conduit like before. If you are truly finding yourself wanting to migrate back to Conduit, we would appreciate all your feedback and if we can assist with any issues or concerns. +#### Can I migrate from Synapse or Dendrite? + +Currently there is no known way to seamlessly migrate all user data from the old +homeserver to conduwuit. However it is perfectly acceptable to replace the old +homeserver software with conduwuit using the same server name and there will not +be any issues with federation. 
+ +There is an interest in developing a built-in seamless user data migration +method into conduwuit, however there is no concrete ETA or timeline for this. + + #### Contact -If you run into any question, feel free to +[`#conduwuit:puppygock.gay`](https://matrix.to/#/#conduwuit:puppygock.gay) +is the official project Matrix room. You can get support here, ask questions or +concerns, get assistance setting up conduwuit, etc. -- Ask us in `#conduwuit:puppygock.gay` on Matrix -- [Open an issue on GitHub](https://github.com/girlbossceo/conduwuit/issues/new) +This room should stay relevant and focused on conduwuit. An offtopic general +chatter room can be found there as well. + +Please keep the issue trackers focused on bug reports and enhancement requests. +General support is extremely difficult to be offered over an issue tracker, and +simple questions should be asked directly in an interactive platform like our +Matrix room above as they can turn into a relevant discussion and/or may not be +simple to answer. If you're not sure, just ask in the Matrix room. + +If you have a bug or feature to request: [Open an issue on GitHub](https://github.com/girlbossceo/conduwuit/issues/new) #### Donate @@ -89,9 +114,11 @@ conduwuit development is purely made possible by myself and contributors. I do not get paid to work on this, and I work on it in my free time. Donations are heavily appreciated! 💜🥺 -- Liberapay: -- Ko-fi (note they take a fee): -- GitHub Sponsors: +- Liberapay (preferred): +- GitHub Sponsors (preferred): +- Ko-fi: + +I do not and will not accept cryptocurrency donations, including things related. #### Logo @@ -105,6 +132,9 @@ Both, but I prefer conduwuit. #### Mirrors of conduwuit +If GitHub is unavailable in your country, or has poor connectivity, conduwuit's +source code is mirrored onto the following additional platforms I maintain: + - GitHub: - GitLab: - git.girlcock.ceo: diff --git a/nix/pkgs/oci-image/default.nix b/nix/pkgs/oci-image/default.nix index d378d017..5520c920 100644 --- a/nix/pkgs/oci-image/default.nix +++ b/nix/pkgs/oci-image/default.nix @@ -29,10 +29,17 @@ dockerTools.buildLayeredImage { "RUST_BACKTRACE=full" ]; Labels = { - "org.opencontainers.image.title" = main.pname; - "org.opencontainers.image.version" = main.version; + "org.opencontainers.image.authors" = "June Clementine Strawberry and Jason Volk + "; + "org.opencontainers.image.created" ="@${toString inputs.self.lastModified}"; + "org.opencontainers.image.description" = "a very cool Matrix chat homeserver written in Rust"; + "org.opencontainers.image.documentation" = "https://conduwuit.puppyirl.gay/"; + "org.opencontainers.image.licenses" = "Apache-2.0"; "org.opencontainers.image.revision" = inputs.self.rev or inputs.self.dirtyRev or ""; - # "org.opencontainers.image.created" = builtins.formatTime "%Y-%m-%dT%H:%M:%SZ" inputs.self.lastModified; + "org.opencontainers.image.title" = main.pname; + "org.opencontainers.image.url" = "https://conduwuit.puppyirl.gay/"; + "org.opencontainers.image.vendor" = "girlbossceo"; + "org.opencontainers.image.version" = main.version; }; }; } From 3b0195e6b387364d8919ce90e2f461e82d2f51d1 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 25 Jan 2025 21:04:52 -0500 Subject: [PATCH 087/328] misc various github actions ci fixes Signed-off-by: June Clementine Strawberry --- .gitea/PULL_REQUEST_TEMPLATE.md | 8 - .gitea/workflows/ci.yml | 264 ------------------- .github/workflows/ci.yml | 80 +++--- .github/workflows/docker-hub-description.yml | 5 + 
.github/workflows/documentation.yml | 3 + 5 files changed, 56 insertions(+), 304 deletions(-) delete mode 100644 .gitea/PULL_REQUEST_TEMPLATE.md delete mode 100644 .gitea/workflows/ci.yml diff --git a/.gitea/PULL_REQUEST_TEMPLATE.md b/.gitea/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 4210554b..00000000 --- a/.gitea/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,8 +0,0 @@ - - - ------------------------------------------------------------------------------ - -- [ ] I ran `cargo fmt`, `cargo clippy`, and `cargo test` -- [ ] I agree to release my code and all other changes of this MR under the Apache-2.0 license - diff --git a/.gitea/workflows/ci.yml b/.gitea/workflows/ci.yml deleted file mode 100644 index ef436734..00000000 --- a/.gitea/workflows/ci.yml +++ /dev/null @@ -1,264 +0,0 @@ -name: CI and Artifacts - -on: - pull_request: - push: - # documentation workflow deals with this or is not relevant for this workflow - paths-ignore: - - '*.md' - - 'conduwuit-example.toml' - - 'book.toml' - - '.gitlab-ci.yml' - - '.gitignore' - - 'renovate.json' - - 'docs/**' - - 'debian/**' - - 'docker/**' - branches: - - main - tags: - - '*' - # Allows you to run this workflow manually from the Actions tab - #workflow_dispatch: - -#concurrency: -# group: ${{ gitea.head_ref || gitea.ref_name }} -# cancel-in-progress: true - -env: - # Required to make some things output color - TERM: ansi - # Publishing to my nix binary cache - ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }} - # conduwuit.cachix.org - CACHIX_AUTH_TOKEN: ${{ secrets.CACHIX_AUTH_TOKEN }} - # Just in case incremental is still being set to true, speeds up CI - CARGO_INCREMENTAL: 0 - # Custom nix binary cache if fork is being used - ATTIC_ENDPOINT: ${{ vars.ATTIC_ENDPOINT }} - ATTIC_PUBLIC_KEY: ${{ vars.ATTIC_PUBLIC_KEY }} - # Get error output from nix that we can actually use - NIX_CONFIG: show-trace = true - -#permissions: -# packages: write -# contents: read - -jobs: - tests: - name: Test - runs-on: ubuntu-latest - steps: - - name: Sync repository - uses: https://github.com/actions/checkout@v4 - - - name: Tag comparison check - if: startsWith(gitea.ref, 'refs/tags/v') - run: | - # Tag mismatch with latest repo tag check to prevent potential downgrades - LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`) - - if [ $LATEST_TAG != ${{ gitea.ref_name }} ]; then - echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' - echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' >> $GITHUB_STEP_SUMMARY - exit 1 - fi - - - name: Install Nix - uses: https://github.com/DeterminateSystems/nix-installer-action@main - with: - diagnostic-endpoint: "" - extra-conf: | - experimental-features = nix-command flakes - accept-flake-config = true - - - name: Enable Cachix binary cache - run: | - nix profile install nixpkgs#cachix - cachix use crane - cachix use nix-community - - - name: Configure Magic Nix Cache - uses: https://github.com/DeterminateSystems/magic-nix-cache-action@main - with: - diagnostic-endpoint: "" - upstream-cache: "https://attic.kennel.juneis.dog/conduwuit" - - - name: Apply Nix binary cache configuration - run: | - sudo tee -a /etc/nix/nix.conf > /dev/null < /dev/null < "$HOME/.direnvrc" - nix profile install --impure --inputs-from . 
nixpkgs#direnv nixpkgs#nix-direnv - direnv allow - nix develop .#all-features --command true - - - name: Cache CI dependencies - run: | - bin/nix-build-and-cache ci - - - name: Run CI tests - run: | - direnv exec . engage > >(tee -a test_output.log) - - - name: Sync Complement repository - uses: https://github.com/actions/checkout@v4 - with: - repository: 'matrix-org/complement' - path: complement_src - - - name: Run Complement tests - run: | - direnv exec . bin/complement 'complement_src' 'complement_test_logs.jsonl' 'complement_test_results.jsonl' - cp -v -f result complement_oci_image.tar.gz - - - name: Upload Complement OCI image - uses: https://github.com/actions/upload-artifact@v4 - with: - name: complement_oci_image.tar.gz - path: complement_oci_image.tar.gz - if-no-files-found: error - - - name: Upload Complement logs - uses: https://github.com/actions/upload-artifact@v4 - with: - name: complement_test_logs.jsonl - path: complement_test_logs.jsonl - if-no-files-found: error - - - name: Upload Complement results - uses: https://github.com/actions/upload-artifact@v4 - with: - name: complement_test_results.jsonl - path: complement_test_results.jsonl - if-no-files-found: error - - - name: Diff Complement results with checked-in repo results - run: | - diff -u --color=always tests/test_results/complement/test_results.jsonl complement_test_results.jsonl > >(tee -a complement_test_output.log) - echo '# Complement diff results' >> $GITHUB_STEP_SUMMARY - echo '```diff' >> $GITHUB_STEP_SUMMARY - tail -n 100 complement_test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - - - name: Update Job Summary - if: success() || failure() - run: | - if [ ${{ job.status }} == 'success' ]; then - echo '# ✅ completed suwuccessfully' >> $GITHUB_STEP_SUMMARY - else - echo '```' >> $GITHUB_STEP_SUMMARY - tail -n 40 test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - fi - - build: - name: Build - runs-on: ubuntu-latest - needs: tests - strategy: - matrix: - include: - - target: aarch64-unknown-linux-musl - - target: x86_64-unknown-linux-musl - steps: - - name: Sync repository - uses: https://github.com/actions/checkout@v4 - - - name: Install Nix - uses: https://github.com/DeterminateSystems/nix-installer-action@main - with: - diagnostic-endpoint: "" - extra-conf: | - experimental-features = nix-command flakes - accept-flake-config = true - - - name: Install and enable Cachix binary cache - run: | - nix profile install nixpkgs#cachix - cachix use crane - cachix use nix-community - - - name: Configure Magic Nix Cache - uses: https://github.com/DeterminateSystems/magic-nix-cache-action@main - with: - diagnostic-endpoint: "" - upstream-cache: "https://attic.kennel.juneis.dog/conduwuit" - - - name: Apply Nix binary cache configuration - run: | - sudo tee -a /etc/nix/nix.conf > /dev/null < /dev/null < "$HOME/.direnvrc" - nix profile install --impure --inputs-from . 
nixpkgs#direnv nixpkgs#nix-direnv - direnv allow - nix develop .#all-features --command true - - - name: Build static ${{ matrix.target }} - run: | - CARGO_DEB_TARGET_TUPLE=$(echo ${{ matrix.target }} | grep -o -E '^([^-]*-){3}[^-]*') - SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) - - bin/nix-build-and-cache just .#static-${{ matrix.target }} - mkdir -v -p target/release/ - mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/ - cp -v -f result/bin/conduit target/release/conduwuit - cp -v -f result/bin/conduit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit - # -p conduit is the main crate name - direnv exec . cargo deb --verbose --no-build --no-strip -p conduit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/${{ matrix.target }}.deb - mv -v target/release/conduwuit static-${{ matrix.target }} - mv -v target/release/${{ matrix.target }}.deb ${{ matrix.target }}.deb - - - name: Upload static-${{ matrix.target }} - uses: https://github.com/actions/upload-artifact@v4 - with: - name: static-${{ matrix.target }} - path: static-${{ matrix.target }} - if-no-files-found: error - - - name: Upload deb ${{ matrix.target }} - uses: https://github.com/actions/upload-artifact@v4 - with: - name: deb-${{ matrix.target }} - path: ${{ matrix.target }}.deb - if-no-files-found: error - compression-level: 0 - - - name: Build OCI image ${{ matrix.target }} - run: | - bin/nix-build-and-cache just .#oci-image-${{ matrix.target }} - cp -v -f result oci-image-${{ matrix.target }}.tar.gz - - - name: Upload OCI image ${{ matrix.target }} - uses: https://github.com/actions/upload-artifact@v4 - with: - name: oci-image-${{ matrix.target }} - path: oci-image-${{ matrix.target }}.tar.gz - if-no-files-found: error - compression-level: 0 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 345713aa..b0b0bd53 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -22,8 +22,8 @@ concurrency: env: # sccache only on main repo - SCCACHE_GHA_ENABLED: "${{ (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'true' || 'false' }}" - RUSTC_WRAPPER: "${{ (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'sccache' || '' }}" + SCCACHE_GHA_ENABLED: "${{ !startsWith(github.ref, 'refs/tags/') && (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'true' || 'false' }}" + RUSTC_WRAPPER: "${{ !startsWith(github.ref, 'refs/tags/') && (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'sccache' || '' }}" SCCACHE_BUCKET: "${{ (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'sccache' || '' }}" SCCACHE_S3_USE_SSL: ${{ vars.SCCACHE_S3_USE_SSL }} SCCACHE_REGION: ${{ vars.SCCACHE_REGION }} @@ -51,8 +51,8 @@ env: extra-experimental-features = nix-command flakes accept-flake-config = true WEB_UPLOAD_SSH_USERNAME: ${{ secrets.WEB_UPLOAD_SSH_USERNAME }} - GH_SHA: 
${{ github.sha }} GH_REF_NAME: ${{ github.ref_name }} + WEBSERVER_DIR_NAME: ${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }} permissions: {} @@ -85,11 +85,13 @@ jobs: END echo "Checking connection" - ssh -q website "echo test" + ssh -q website "echo test" || ssh -q website "echo test" echo "Creating commit rev directory on web server" - ssh -q website "rm -rf /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/$GITHUB_SHA/" - ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/$GITHUB_SHA/" + ssh -q website "rm -rf /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" || ssh -q website "rm -rf /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" + ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" || ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" + + echo "SSH_WEBSITE=1" >> "$GITHUB_ENV" - name: Install liburing run: | @@ -124,6 +126,9 @@ jobs: - uses: nixbuild/nix-quick-install-action@master - name: Restore and cache Nix store + # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting + # releases and tags + if: ${{ !startsWith(github.ref, 'refs/tags/') }} uses: nix-community/cache-nix-action@v5.1.0 with: # restore and save a cache using this key @@ -184,11 +189,16 @@ jobs: # use sccache for Rust - name: Run sccache-cache - if: (env.SCCACHE_GHA_ENABLED == 'true') + # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting + # releases and tags + if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} uses: mozilla-actions/sccache-action@main # use rust-cache - uses: Swatinem/rust-cache@v2 + # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting + # releases and tags + if: ${{ !startsWith(github.ref, 'refs/tags/') }} with: cache-all-crates: "true" cache-on-failure: "true" @@ -304,13 +314,16 @@ jobs: END echo "Checking connection" - ssh -q website "echo test" + ssh -q website "echo test" || ssh -q website "echo test" echo "SSH_WEBSITE=1" >> "$GITHUB_ENV" - uses: nixbuild/nix-quick-install-action@master - name: Restore and cache Nix store + # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting + # releases and tags + if: ${{ !startsWith(github.ref, 'refs/tags/') }} uses: nix-community/cache-nix-action@v5.1.0 with: # restore and save a cache using this key @@ -364,11 +377,16 @@ jobs: # use sccache for Rust - name: Run sccache-cache - if: (env.SCCACHE_GHA_ENABLED == 'true') + # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting + # releases and tags + if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} uses: mozilla-actions/sccache-action@main # use rust-cache - uses: Swatinem/rust-cache@v2 + # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting + # releases and tags + if: ${{ !startsWith(github.ref, 'refs/tags/') }} with: cache-all-crates: "true" cache-on-failure: "true" @@ -493,29 +511,27 @@ jobs: run: | if [ ! 
-z $SSH_WEBSITE ]; then chmod +x static-x86_64-linux-musl-x86_64-haswell-optimised - scp static-x86_64-linux-musl-x86_64-haswell-optimised website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/static-x86_64-linux-musl-x86_64-haswell-optimised + scp static-x86_64-linux-musl-x86_64-haswell-optimised website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-x86_64-linux-musl-x86_64-haswell-optimised fi - name: Upload static-${{ matrix.target }}-all-features to webserver - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | if [ ! -z $SSH_WEBSITE ]; then chmod +x static-${{ matrix.target }} - scp static-${{ matrix.target }} website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/static-${{ matrix.target }} + scp static-${{ matrix.target }} website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-${{ matrix.target }} fi - name: Upload static deb x86_64-linux-musl-all-features-x86_64-haswell-optimised to webserver if: ${{ matrix.target == 'x86_64-linux-musl' }} run: | if [ ! -z $SSH_WEBSITE ]; then - scp x86_64-linux-musl-x86_64-haswell-optimised.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/x86_64-linux-musl-x86_64-haswell-optimised.deb + scp x86_64-linux-musl-x86_64-haswell-optimised.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/x86_64-linux-musl-x86_64-haswell-optimised.deb fi - name: Upload static deb ${{ matrix.target }}-all-features to webserver - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | if [ ! -z $SSH_WEBSITE ]; then - scp ${{ matrix.target }}.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/${{ matrix.target }}.deb + scp ${{ matrix.target }}.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/${{ matrix.target }}.deb fi - name: Upload static-${{ matrix.target }}-debug-all-features to GitHub @@ -534,17 +550,15 @@ jobs: compression-level: 0 - name: Upload static-${{ matrix.target }}-debug-all-features to webserver - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | if [ ! -z $SSH_WEBSITE ]; then - scp static-${{ matrix.target }}-debug website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/static-${{ matrix.target }}-debug + scp static-${{ matrix.target }}-debug website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-${{ matrix.target }}-debug fi - name: Upload static deb ${{ matrix.target }}-debug-all-features to webserver - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | - if [ ! -z $SSH_WEBSITE]; then - scp ${{ matrix.target }}-debug.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/${{ matrix.target }}-debug.deb + if [ ! 
-z $SSH_WEBSITE ]; then + scp ${{ matrix.target }}-debug.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/${{ matrix.target }}-debug.deb fi - name: Build OCI image ${{ matrix.target }}-all-features @@ -594,21 +608,19 @@ jobs: if: ${{ matrix.target == 'x86_64-linux-musl' }} run: | if [ ! -z $SSH_WEBSITE ]; then - scp oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz + scp oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz fi - name: Upload OCI image ${{ matrix.target }}-all-features to webserver - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | if [ ! -z $SSH_WEBSITE ]; then - scp oci-image-${{ matrix.target }}.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/oci-image-${{ matrix.target }}.tar.gz + scp oci-image-${{ matrix.target }}.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-${{ matrix.target }}.tar.gz fi - name: Upload OCI image ${{ matrix.target }}-debug-all-features to webserver - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | if [ ! -z $SSH_WEBSITE ]; then - scp oci-image-${{ matrix.target }}-debug.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/oci-image-${{ matrix.target }}-debug.tar.gz + scp oci-image-${{ matrix.target }}-debug.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-${{ matrix.target }}-debug.tar.gz fi build_mac_binaries: @@ -647,7 +659,9 @@ jobs: END echo "Checking connection" - ssh -q website "echo test" + ssh -q website "echo test" || ssh -q website "echo test" + + echo "SSH_WEBSITE=1" >> "$GITHUB_ENV" - name: Tag comparison check if: ${{ startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') }} @@ -663,7 +677,9 @@ jobs: # use sccache for Rust - name: Run sccache-cache - if: (env.SCCACHE_GHA_ENABLED == 'true') + # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting + # releases and tags + if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} uses: mozilla-actions/sccache-action@main # use rust-cache @@ -707,7 +723,7 @@ jobs: run: | if [ ! -z $SSH_WEBSITE ]; then chmod +x conduwuit-macos-x86_64 - scp conduwuit-macos-x86_64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/conduwuit-macos-x86_64 + scp conduwuit-macos-x86_64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/conduwuit-macos-x86_64 fi - name: Upload macOS arm64 binary to webserver @@ -715,7 +731,7 @@ jobs: run: | if [ ! 
-z $SSH_WEBSITE ]; then chmod +x conduwuit-macos-arm64 - scp conduwuit-macos-arm64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/conduwuit-macos-arm64 + scp conduwuit-macos-arm64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/conduwuit-macos-arm64 fi - name: Upload macOS x86_64 binary @@ -881,15 +897,15 @@ jobs: run: | # Dockerhub Container Registry if [ ! -z $DOCKERHUB_TOKEN ]; then - docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell + docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell fi # GitHub Container Registry if [ $GHCR_ENABLED = "true" ]; then - docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell + docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GHCR_REPO}:${BRANCH_TAG}-haswell fi # GitLab Container Registry if [ ! -z $GITLAB_TOKEN ]; then - docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell + docker manifest create ${GLCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GLCR_REPO}:${BRANCH_TAG}-haswell fi - name: Create Docker combined manifests diff --git a/.github/workflows/docker-hub-description.yml b/.github/workflows/docker-hub-description.yml index 5ff5f666..96b2d38b 100644 --- a/.github/workflows/docker-hub-description.yml +++ b/.github/workflows/docker-hub-description.yml @@ -8,13 +8,17 @@ on: - README.md - .github/workflows/docker-hub-description.yml + workflow_dispatch: + jobs: dockerHubDescription: runs-on: ubuntu-latest + if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && github.event.pull_request.user.login != 'renovate[bot]' steps: - uses: actions/checkout@v4 with: persist-credentials: false + - name: Setting variables uses: actions/github-script@v7 id: var @@ -26,6 +30,7 @@ jobs: core.setOutput('github_repository', githubRepo) const dockerRepo = '${{ vars.DOCKER_USERNAME }}'.toLowerCase() + '/' + repoId core.setOutput('docker_repo', dockerRepo) + - name: Docker Hub Description uses: peter-evans/dockerhub-description@v4 with: diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index b0ccdb47..0eefe0a4 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -73,6 +73,9 @@ jobs: - uses: nixbuild/nix-quick-install-action@master - name: Restore and cache Nix store + # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting + # releases and tags + if: ${{ !startsWith(github.ref, 'refs/tags/') }} uses: nix-community/cache-nix-action@v5.1.0 with: # restore and save a cache using this key From 29a19ba437c6b387f3f250a2d91e2edd6d751a18 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 25 Jan 2025 02:06:20 +0000 Subject: [PATCH 088/328] add write_to_cache to descriptor Signed-off-by: Jason Volk --- src/database/engine/cf_opts.rs | 6 +++++- src/database/engine/descriptor.rs | 2 ++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index 1230081c..ab11b9e1 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -77,20 +77,24 @@ fn descriptor_cf_options( fn set_table_options(opts: &mut Options, desc: &Descriptor, cache: Option<&Cache>) -> Result { let mut table = table_options(desc, cache.is_some()); 
+ if let Some(cache) = cache { table.set_block_cache(cache); } else { table.disable_cache(); } + let prepopulate = if desc.write_to_cache { "kFlushOnly" } else { "kDisable" }; + let string = format!( "{{block_based_table_factory={{num_file_reads_for_auto_readahead={0};\ max_auto_readahead_size={1};initial_auto_readahead_size={2};\ - enable_index_compression={3}}}}}", + enable_index_compression={3};prepopulate_block_cache={4}}}}}", desc.auto_readahead_thresh, desc.auto_readahead_max, desc.auto_readahead_init, desc.compressed_index, + prepopulate, ); opts.set_options_from_string(&string).map_err(map_err)?; diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index 6ce8b5ad..c4dc2901 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -40,6 +40,7 @@ pub(crate) struct Descriptor { pub(crate) bottommost_level: Option, pub(crate) block_index_hashing: Option, pub(crate) cache_shards: u32, + pub(crate) write_to_cache: bool, pub(crate) auto_readahead_thresh: u32, pub(crate) auto_readahead_init: usize, pub(crate) auto_readahead_max: usize, @@ -71,6 +72,7 @@ pub(crate) static BASE: Descriptor = Descriptor { bottommost_level: Some(SENTINEL_COMPRESSION_LEVEL), block_index_hashing: None, cache_shards: 64, + write_to_cache: false, auto_readahead_thresh: 0, auto_readahead_init: 1024 * 16, auto_readahead_max: 1024 * 1024 * 2, From 186c459584f3a25f00d873c5f73c820161226791 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 25 Jan 2025 02:06:44 +0000 Subject: [PATCH 089/328] use scalar for file shape; increase shape for small-type columns Signed-off-by: Jason Volk --- src/database/engine/cf_opts.rs | 2 +- src/database/engine/descriptor.rs | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index ab11b9e1..382bc169 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -33,7 +33,7 @@ fn descriptor_cf_options( opts.set_write_buffer_size(desc.write_size); opts.set_target_file_size_base(desc.file_size); - opts.set_target_file_size_multiplier(desc.file_shape[0]); + opts.set_target_file_size_multiplier(desc.file_shape); opts.set_level_zero_file_num_compaction_trigger(desc.level0_width); opts.set_level_compaction_dynamic_level_bytes(false); diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index c4dc2901..c735f402 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -27,7 +27,7 @@ pub(crate) struct Descriptor { pub(crate) level_size: u64, pub(crate) level_shape: [i32; 7], pub(crate) file_size: u64, - pub(crate) file_shape: [i32; 1], + pub(crate) file_shape: i32, pub(crate) level0_width: i32, pub(crate) merge_width: (i32, i32), pub(crate) ttl: u64, @@ -59,7 +59,7 @@ pub(crate) static BASE: Descriptor = Descriptor { level_size: 1024 * 1024 * 8, level_shape: [1, 1, 1, 3, 7, 15, 31], file_size: 1024 * 1024, - file_shape: [2], + file_shape: 2, level0_width: 2, merge_width: (2, 16), ttl: 60 * 60 * 24 * 21, @@ -106,6 +106,7 @@ pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { write_size: 1024 * 1024 * 16, level_size: 1024 * 512, file_size: 1024 * 128, + file_shape: 3, index_size: 512, block_size: 512, cache_shards: 64, @@ -121,6 +122,7 @@ pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { write_size: 1024 * 1024 * 16, level_size: 1024 * 1024, file_size: 1024 * 512, + file_shape: 3, block_size: 512, cache_shards: 64, block_index_hashing: Some(false), From 
9ad4f20da4547f66720e363c92fac13a5c3af343 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 25 Jan 2025 08:59:48 +0000 Subject: [PATCH 090/328] propagate underflow as error result, not index bounds panic Signed-off-by: Jason Volk --- src/database/de.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/database/de.rs b/src/database/de.rs index 4fdc2251..7cc8f00a 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -298,9 +298,11 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { fn deserialize_i64>(self, visitor: V) -> Result { const BYTES: usize = size_of::(); - let end = self.pos.saturating_add(BYTES); + let end = self.pos.saturating_add(BYTES).min(self.buf.len()); let bytes: ArrayVec = self.buf[self.pos..end].try_into()?; - let bytes = bytes.into_inner().expect("array size matches i64"); + let bytes = bytes + .into_inner() + .map_err(|_| Self::Error::SerdeDe("i64 buffer underflow".into()))?; self.inc_pos(BYTES); visitor.visit_i64(i64::from_be_bytes(bytes)) @@ -328,9 +330,11 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { fn deserialize_u64>(self, visitor: V) -> Result { const BYTES: usize = size_of::(); - let end = self.pos.saturating_add(BYTES); + let end = self.pos.saturating_add(BYTES).min(self.buf.len()); let bytes: ArrayVec = self.buf[self.pos..end].try_into()?; - let bytes = bytes.into_inner().expect("array size matches u64"); + let bytes = bytes + .into_inner() + .map_err(|_| Self::Error::SerdeDe("u64 buffer underflow".into()))?; self.inc_pos(BYTES); visitor.visit_u64(u64::from_be_bytes(bytes)) From 68856645ee4f5a6a437b01c4c94bdc233d99f140 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 25 Jan 2025 07:18:33 +0000 Subject: [PATCH 091/328] refactor lazy-loading Signed-off-by: Jason Volk --- src/api/client/context.rs | 109 ++++--- src/api/client/message.rs | 151 ++++----- src/api/client/sync/v3.rs | 422 ++++++++++---------------- src/service/rooms/lazy_loading/mod.rs | 198 +++++++----- 4 files changed, 418 insertions(+), 462 deletions(-) diff --git a/src/api/client/context.rs b/src/api/client/context.rs index b957561c..388bcf4d 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -1,6 +1,6 @@ use axum::extract::State; use conduwuit::{ - at, err, ref_at, + at, deref_at, err, ref_at, utils::{ future::TryExtExt, stream::{BroadbandExt, ReadyExt, TryIgnore, WidebandExt}, @@ -8,15 +8,15 @@ use conduwuit::{ }, Err, PduEvent, Result, }; -use futures::{join, try_join, FutureExt, StreamExt, TryFutureExt}; -use ruma::{ - api::client::{context::get_context, filter::LazyLoadOptions}, - events::StateEventType, - OwnedEventId, UserId, +use futures::{ + future::{join, join3, try_join3, OptionFuture}, + FutureExt, StreamExt, TryFutureExt, }; +use ruma::{api::client::context::get_context, events::StateEventType, OwnedEventId, UserId}; +use service::rooms::{lazy_loading, lazy_loading::Options}; use crate::{ - client::message::{event_filter, ignored_filter, update_lazy, visibility_filter, LazySet}, + client::message::{event_filter, ignored_filter, lazy_loading_witness, visibility_filter}, Ruma, }; @@ -33,10 +33,10 @@ pub(crate) async fn get_context_route( State(services): State, body: Ruma, ) -> Result { - let filter = &body.filter; let sender = body.sender(); - let (sender_user, _) = sender; + let (sender_user, sender_device) = sender; let room_id = &body.room_id; + let filter = &body.filter; // Use limit or else 10, with maximum 100 let limit: usize = body @@ -45,18 +45,6 @@ 
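// A minimal sketch of the bounds-checked read pattern adopted in the de.rs
// hunks above: clamp the slice end to the buffer length and surface a short
// buffer as an error result instead of an index-bounds panic. The free
// function name and the String error type are invented for this sketch; the
// real code stays inside the Deserializer and uses ArrayVec.
fn read_be_u64(buf: &[u8], pos: usize) -> Result<u64, String> {
    const BYTES: usize = std::mem::size_of::<u64>();
    let end = pos.saturating_add(BYTES).min(buf.len());
    let bytes: [u8; BYTES] = buf
        .get(pos..end)
        .unwrap_or(&[])
        .try_into()
        .map_err(|_| "u64 buffer underflow".to_owned())?;
    Ok(u64::from_be_bytes(bytes))
}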
pub(crate) async fn get_context_route( .unwrap_or(LIMIT_DEFAULT) .min(LIMIT_MAX); - // some clients, at least element, seem to require knowledge of redundant - // members for "inline" profiles on the timeline to work properly - let lazy_load_enabled = matches!(filter.lazy_load_options, LazyLoadOptions::Enabled { .. }); - - let lazy_load_redundant = if let LazyLoadOptions::Enabled { include_redundant_members } = - filter.lazy_load_options - { - include_redundant_members - } else { - false - }; - let base_id = services .rooms .timeline @@ -75,7 +63,7 @@ pub(crate) async fn get_context_route( .user_can_see_event(sender_user, &body.room_id, &body.event_id) .map(Ok); - let (base_id, base_pdu, visible) = try_join!(base_id, base_pdu, visible)?; + let (base_id, base_pdu, visible) = try_join3(base_id, base_pdu, visible).await?; if base_pdu.room_id != body.room_id || base_pdu.event_id != body.event_id { return Err!(Request(NotFound("Base event not found."))); @@ -112,12 +100,32 @@ pub(crate) async fn get_context_route( .collect(); let (base_event, events_before, events_after): (_, Vec<_>, Vec<_>) = - join!(base_event, events_before, events_after); + join3(base_event, events_before, events_after).await; + + let lazy_loading_context = lazy_loading::Context { + user_id: sender_user, + device_id: sender_device, + room_id, + token: Some(base_count.into_unsigned()), + options: Some(&filter.lazy_load_options), + }; + + let lazy_loading_witnessed: OptionFuture<_> = filter + .lazy_load_options + .is_enabled() + .then_some( + base_event + .iter() + .chain(events_before.iter()) + .chain(events_after.iter()), + ) + .map(|witnessed| lazy_loading_witness(&services, &lazy_loading_context, witnessed)) + .into(); let state_at = events_after .last() .map(ref_at!(1)) - .map_or(body.event_id.as_ref(), |e| e.event_id.as_ref()); + .map_or(body.event_id.as_ref(), |pdu| pdu.event_id.as_ref()); let state_ids = services .rooms @@ -126,41 +134,32 @@ pub(crate) async fn get_context_route( .or_else(|_| services.rooms.state.get_room_shortstatehash(room_id)) .and_then(|shortstatehash| services.rooms.state_accessor.state_full_ids(shortstatehash)) .map_err(|e| err!(Database("State not found: {e}"))) - .await?; + .boxed(); - let lazy = base_event - .iter() - .chain(events_before.iter()) - .chain(events_after.iter()) - .stream() - .fold(LazySet::new(), |lazy, item| { - update_lazy(&services, room_id, sender, lazy, item, lazy_load_redundant) - }) - .await; + let (lazy_loading_witnessed, state_ids) = join(lazy_loading_witnessed, state_ids).await; - let lazy = &lazy; - let state: Vec<_> = state_ids - .iter() - .stream() - .broad_filter_map(|(shortstatekey, event_id)| { - services - .rooms - .short - .get_statekey_from_short(*shortstatekey) - .map_ok(move |(event_type, state_key)| (event_type, state_key, event_id)) - .ok() - }) - .ready_filter_map(|(event_type, state_key, event_id)| { - if !lazy_load_enabled || event_type != StateEventType::RoomMember { - return Some(event_id); + let state_ids = state_ids?; + let lazy_loading_witnessed = lazy_loading_witnessed.unwrap_or_default(); + let shortstatekeys = state_ids.iter().stream().map(deref_at!(0)); + + let state: Vec<_> = services + .rooms + .short + .multi_get_statekey_from_short(shortstatekeys) + .zip(state_ids.iter().stream().map(at!(1))) + .ready_filter_map(|item| Some((item.0.ok()?, item.1))) + .ready_filter_map(|((event_type, state_key), event_id)| { + if filter.lazy_load_options.is_enabled() + && event_type == StateEventType::RoomMember + && state_key + .as_str() + .try_into() + 
.is_ok_and(|user_id: &UserId| !lazy_loading_witnessed.contains(user_id)) + { + return None; } - state_key - .as_str() - .try_into() - .ok() - .filter(|&user_id: &&UserId| lazy.contains(user_id)) - .map(|_| event_id) + Some(event_id) }) .broad_filter_map(|event_id: &OwnedEventId| { services.rooms.timeline.get_pdu(event_id).ok() diff --git a/src/api/client/message.rs b/src/api/client/message.rs index ec9a14d5..a508b5da 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,5 +1,3 @@ -use std::collections::HashSet; - use axum::extract::State; use conduwuit::{ at, is_equal_to, @@ -10,7 +8,7 @@ use conduwuit::{ }, Event, PduCount, Result, }; -use futures::{FutureExt, StreamExt}; +use futures::{future::OptionFuture, pin_mut, FutureExt, StreamExt}; use ruma::{ api::{ client::{filter::RoomEventFilter, message::get_message_events}, @@ -18,14 +16,19 @@ use ruma::{ }, events::{AnyStateEvent, StateEventType, TimelineEventType, TimelineEventType::*}, serde::Raw, - DeviceId, OwnedUserId, RoomId, UserId, + RoomId, UserId, +}; +use service::{ + rooms::{ + lazy_loading, + lazy_loading::{Options, Witness}, + timeline::PdusIterItem, + }, + Services, }; -use service::{rooms::timeline::PdusIterItem, Services}; use crate::Ruma; -pub(crate) type LazySet = HashSet; - /// list of safe and common non-state events to ignore if the user is ignored const IGNORED_MESSAGE_TYPES: &[TimelineEventType; 17] = &[ Audio, @@ -84,13 +87,6 @@ pub(crate) async fn get_message_events_route( .unwrap_or(LIMIT_DEFAULT) .min(LIMIT_MAX); - services.rooms.lazy_loading.lazy_load_confirm_delivery( - sender_user, - sender_device, - room_id, - from, - ); - if matches!(body.dir, Direction::Backward) { services .rooms @@ -127,35 +123,34 @@ pub(crate) async fn get_message_events_route( .collect() .await; - let lazy = events - .iter() - .stream() - .fold(LazySet::new(), |lazy, item| { - update_lazy(&services, room_id, sender, lazy, item, false) - }) - .await; + let lazy_loading_context = lazy_loading::Context { + user_id: sender_user, + device_id: sender_device, + room_id, + token: Some(from.into_unsigned()), + options: Some(&filter.lazy_load_options), + }; - let state = lazy - .iter() - .stream() - .broad_filter_map(|user_id| get_member_event(&services, room_id, user_id)) + let witness: OptionFuture<_> = filter + .lazy_load_options + .is_enabled() + .then(|| lazy_loading_witness(&services, &lazy_loading_context, events.iter())) + .into(); + + let state = witness + .map(Option::into_iter) + .map(|option| option.flat_map(Witness::into_iter)) + .map(IterStream::stream) + .into_stream() + .flatten() + .broad_filter_map(|user_id| async move { + get_member_event(&services, room_id, &user_id).await + }) .collect() .await; let next_token = events.last().map(at!(0)); - if !cfg!(feature = "element_hacks") { - if let Some(next_token) = next_token { - services.rooms.lazy_loading.lazy_load_mark_sent( - sender_user, - sender_device, - room_id, - lazy, - next_token, - ); - } - } - let chunk = events .into_iter() .map(at!(1)) @@ -170,6 +165,52 @@ pub(crate) async fn get_message_events_route( }) } +pub(crate) async fn lazy_loading_witness<'a, I>( + services: &Services, + lazy_loading_context: &lazy_loading::Context<'_>, + events: I, +) -> Witness +where + I: Iterator + Clone + Send, +{ + let oldest = events + .clone() + .map(|(count, _)| count) + .copied() + .min() + .unwrap_or_else(PduCount::max); + + let newest = events + .clone() + .map(|(count, _)| count) + .copied() + .max() + .unwrap_or_else(PduCount::max); + + let receipts = 
services + .rooms + .read_receipt + .readreceipts_since(lazy_loading_context.room_id, oldest.into_unsigned()); + + pin_mut!(receipts); + let witness: Witness = events + .stream() + .map(|(_, pdu)| pdu.sender.clone()) + .chain( + receipts + .ready_take_while(|(_, c, _)| *c <= newest.into_unsigned()) + .map(|(user_id, ..)| user_id.to_owned()), + ) + .collect() + .await; + + services + .rooms + .lazy_loading + .witness_retain(witness, lazy_loading_context) + .await +} + async fn get_member_event( services: &Services, room_id: &RoomId, @@ -184,42 +225,6 @@ async fn get_member_event( .ok() } -pub(crate) async fn update_lazy( - services: &Services, - room_id: &RoomId, - sender: (&UserId, &DeviceId), - mut lazy: LazySet, - item: &PdusIterItem, - force: bool, -) -> LazySet { - let (_, event) = &item; - let (sender_user, sender_device) = sender; - - /* TODO: Remove the "element_hacks" check when these are resolved: - * https://github.com/vector-im/element-android/issues/3417 - * https://github.com/vector-im/element-web/issues/21034 - */ - if force || cfg!(features = "element_hacks") { - lazy.insert(event.sender().into()); - return lazy; - } - - if lazy.contains(event.sender()) { - return lazy; - } - - if !services - .rooms - .lazy_loading - .lazy_load_was_sent_before(sender_user, sender_device, room_id, event.sender()) - .await - { - lazy.insert(event.sender().into()); - } - - lazy -} - pub(crate) async fn ignored_filter( services: &Services, item: PdusIterItem, diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index d6b9f15c..7cca9616 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -6,9 +6,9 @@ use std::{ use axum::extract::State; use conduwuit::{ - at, err, error, extract_variant, is_equal_to, is_false, + at, err, error, extract_variant, is_equal_to, pdu::EventHash, - result::{FlatOk, LogDebugErr}, + result::FlatOk, utils::{ self, future::OptionExt, @@ -19,16 +19,20 @@ use conduwuit::{ Error, PduCount, PduEvent, Result, }; use conduwuit_service::{ - rooms::short::{ShortStateHash, ShortStateKey}, + rooms::{ + lazy_loading, + lazy_loading::{Options, Witness}, + short::ShortStateHash, + }, Services, }; use futures::{ - future::{join, join3, join4, join5, try_join, try_join3, OptionFuture}, + future::{join, join3, join4, join5, try_join, try_join4, OptionFuture}, FutureExt, StreamExt, TryFutureExt, }; use ruma::{ api::client::{ - filter::{FilterDefinition, LazyLoadOptions}, + filter::FilterDefinition, sync::sync_events::{ self, v3::{ @@ -152,9 +156,14 @@ pub(crate) async fn build_sync_events( let (sender_user, sender_device) = body.sender(); let next_batch = services.globals.current_count()?; - let next_batch_string = next_batch.to_string(); + let since = body + .body + .since + .as_ref() + .and_then(|string| string.parse().ok()) + .unwrap_or(0); - // Load filter + let full_state = body.body.full_state; let filter = match body.body.filter.as_ref() { | None => FilterDefinition::default(), | Some(Filter::FilterDefinition(ref filter)) => filter.clone(), @@ -165,24 +174,6 @@ pub(crate) async fn build_sync_events( .unwrap_or_default(), }; - // some clients, at least element, seem to require knowledge of redundant - // members for "inline" profiles on the timeline to work properly - let (lazy_load_enabled, lazy_load_send_redundant) = match filter.room.state.lazy_load_options - { - | LazyLoadOptions::Enabled { include_redundant_members } => - (true, include_redundant_members), - | LazyLoadOptions::Disabled => (false, cfg!(feature = "element_hacks")), - }; - - let 
full_state = body.body.full_state; - - let since = body - .body - .since - .as_ref() - .and_then(|string| string.parse().ok()) - .unwrap_or(0); - let joined_rooms = services .rooms .state_cache @@ -196,9 +187,8 @@ pub(crate) async fn build_sync_events( room_id.clone(), since, next_batch, - lazy_load_enabled, - lazy_load_send_redundant, full_state, + &filter, ) .map_ok(move |(joined_room, dlu, jeu)| (room_id, joined_room, dlu, jeu)) .ok() @@ -227,9 +217,9 @@ pub(crate) async fn build_sync_events( since, room_id.clone(), sender_user, - &next_batch_string, + next_batch, full_state, - lazy_load_enabled, + &filter, ) .map_ok(move |left_room| (room_id, left_room)) .ok() @@ -358,7 +348,7 @@ pub(crate) async fn build_sync_events( device_one_time_keys_count, // Fallback keys are not yet supported device_unused_fallback_key_types: None, - next_batch: next_batch_string, + next_batch: next_batch.to_string(), presence: Presence { events: presence_updates .unwrap_or_default() @@ -449,7 +439,6 @@ async fn process_presence_updates( fields( room_id = %room_id, full = %full_state, - ll = %lazy_load_enabled, ), )] #[allow(clippy::too_many_arguments)] @@ -458,9 +447,9 @@ async fn handle_left_room( since: u64, ref room_id: OwnedRoomId, sender_user: &UserId, - next_batch_string: &str, + next_batch: u64, full_state: bool, - lazy_load_enabled: bool, + filter: &FilterDefinition, ) -> Result> { let left_count = services .rooms @@ -503,7 +492,7 @@ async fn handle_left_room( account_data: RoomAccountData { events: Vec::new() }, timeline: Timeline { limited: false, - prev_batch: Some(next_batch_string.to_owned()), + prev_batch: Some(next_batch.to_string()), events: Vec::new(), }, state: RoomState { @@ -567,28 +556,32 @@ async fn handle_left_room( .get_statekey_from_short(shortstatekey) .await?; - // TODO: Delete "element_hacks" when this is resolved: https://github.com/vector-im/element-web/issues/22565 - if !lazy_load_enabled - || event_type != StateEventType::RoomMember - || full_state - || (cfg!(feature = "element_hacks") && *sender_user == state_key) + if filter.room.state.lazy_load_options.is_enabled() + && event_type == StateEventType::RoomMember + && !full_state + && state_key + .as_str() + .try_into() + .is_ok_and(|user_id: &UserId| sender_user != user_id) { - let Ok(pdu) = services.rooms.timeline.get_pdu(&event_id).await else { - error!("Pdu in state not found: {event_id}"); - continue; - }; - - left_state_events.push(pdu.to_sync_state_event()); + continue; } + + let Ok(pdu) = services.rooms.timeline.get_pdu(&event_id).await else { + error!("Pdu in state not found: {event_id}"); + continue; + }; + + left_state_events.push(pdu.to_sync_state_event()); } } Ok(Some(LeftRoom { account_data: RoomAccountData { events: Vec::new() }, timeline: Timeline { - limited: true, /* TODO: support left timeline events so we dont need to set this to - * true */ - prev_batch: Some(next_batch_string.to_owned()), + // TODO: support left timeline events so we dont need to set limited to true + limited: true, + prev_batch: Some(next_batch.to_string()), events: Vec::new(), // and so we dont need to set this to empty vec }, state: RoomState { events: left_state_events }, @@ -611,9 +604,8 @@ async fn load_joined_room( ref room_id: OwnedRoomId, since: u64, next_batch: u64, - lazy_load_enabled: bool, - lazy_load_send_redundant: bool, full_state: bool, + filter: &FilterDefinition, ) -> Result<(JoinedRoom, HashSet, HashSet)> { let sincecount = PduCount::Normal(since); let next_batchcount = PduCount::Normal(next_batch); @@ -640,17 +632,26 @@ 
async fn load_joined_room( 10_usize, ); - let (current_shortstatehash, since_shortstatehash, timeline) = - try_join3(current_shortstatehash, since_shortstatehash, timeline).await?; + let receipt_events = services + .rooms + .read_receipt + .readreceipts_since(room_id, since) + .filter_map(|(read_user, _, edu)| async move { + services + .users + .user_is_ignored(read_user, sender_user) + .await + .or_some((read_user.to_owned(), edu)) + }) + .collect::>>() + .map(Ok); + + let (current_shortstatehash, since_shortstatehash, timeline, receipt_events) = + try_join4(current_shortstatehash, since_shortstatehash, timeline, receipt_events) + .boxed() + .await?; let (timeline_pdus, limited) = timeline; - let timeline_users = - timeline_pdus - .iter() - .fold(HashSet::new(), |mut timeline_users, (_, event)| { - timeline_users.insert(event.sender.as_str().to_owned()); - timeline_users - }); let last_notification_read: OptionFuture<_> = timeline_pdus .is_empty() @@ -662,21 +663,68 @@ async fn load_joined_room( }) .into(); - let send_notification_counts = last_notification_read - .is_none_or(|&count| count > since) - .await; - - services.rooms.lazy_loading.lazy_load_confirm_delivery( - sender_user, - sender_device, - room_id, - sincecount, - ); - let no_state_changes = timeline_pdus.is_empty() && (since_shortstatehash.is_none() || since_shortstatehash.is_some_and(is_equal_to!(current_shortstatehash))); + let since_sender_member: OptionFuture<_> = since_shortstatehash + .map(|short| { + services + .rooms + .state_accessor + .state_get_content(short, &StateEventType::RoomMember, sender_user.as_str()) + .ok() + }) + .into(); + + let joined_since_last_sync = + since_sender_member + .await + .flatten() + .is_none_or(|content: RoomMemberEventContent| { + content.membership != MembershipState::Join + }); + + let lazy_loading_enabled = filter.room.state.lazy_load_options.is_enabled() + || filter.room.timeline.lazy_load_options.is_enabled(); + + let generate_witness = + lazy_loading_enabled && (since_shortstatehash.is_none() || joined_since_last_sync); + + let lazy_reset = lazy_loading_enabled && since_shortstatehash.is_none(); + + let lazy_loading_context = &lazy_loading::Context { + user_id: sender_user, + device_id: sender_device, + room_id, + token: None, + options: Some(&filter.room.state.lazy_load_options), + }; + + // Reset lazy loading because this is an initial sync + let lazy_load_reset: OptionFuture<_> = lazy_reset + .then(|| services.rooms.lazy_loading.reset(lazy_loading_context)) + .into(); + + lazy_load_reset.await; + let witness: Option = generate_witness.then(|| { + timeline_pdus + .iter() + .map(|(_, pdu)| pdu.sender.clone()) + .chain(receipt_events.keys().cloned()) + .collect() + }); + + let witness: OptionFuture<_> = witness + .map(|witness| { + services + .rooms + .lazy_loading + .witness_retain(witness, lazy_loading_context) + }) + .into(); + + let witness = witness.await; let mut device_list_updates = HashSet::::new(); let mut left_encrypted_users = HashSet::::new(); let StateChanges { @@ -691,19 +739,17 @@ async fn load_joined_room( calculate_state_changes( services, sender_user, - sender_device, room_id, - next_batchcount, - lazy_load_enabled, - lazy_load_send_redundant, full_state, + filter, &mut device_list_updates, &mut left_encrypted_users, since_shortstatehash, current_shortstatehash, - &timeline_pdus, - &timeline_users, + joined_since_last_sync, + witness.as_ref(), ) + .boxed() .await? 
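The same witness machinery drives /sync here, keyed off two booleans computed above: the device's lazy-load history is wiped only on an initial sync, and a witness is built on an initial sync or when the user re-joined since the last one; otherwise the incremental path reuses what was already sent. A compact restatement of that decision (illustrative signature, not the real code):

    /// Returns (reset_lazy_state, generate_witness) for one joined room.
    fn lazy_load_plan(
        lazy_loading_enabled: bool,
        initial_sync: bool,          // i.e. since_shortstatehash.is_none()
        joined_since_last_sync: bool,
    ) -> (bool, bool) {
        let reset = lazy_loading_enabled && initial_sync;
        let generate_witness =
            lazy_loading_enabled && (initial_sync || joined_since_last_sync);

        (reset, generate_witness)
    }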
}; @@ -728,19 +774,6 @@ async fn load_joined_room( .map(|(_, pdu)| pdu.to_sync_room_event()) .collect(); - let receipt_events = services - .rooms - .read_receipt - .readreceipts_since(room_id, since) - .filter_map(|(read_user, _, edu)| async move { - services - .users - .user_is_ignored(read_user, sender_user) - .await - .or_some((read_user.to_owned(), edu)) - }) - .collect::>>(); - let typing_events = services .rooms .typing @@ -760,6 +793,10 @@ async fn load_joined_room( }) .unwrap_or(Vec::new()); + let send_notification_counts = last_notification_read + .is_none_or(|&count| count > since) + .await; + let notification_count: OptionFuture<_> = send_notification_counts .then(|| { services @@ -782,14 +819,14 @@ async fn load_joined_room( }) .into(); - let events = join4(room_events, account_data_events, receipt_events, typing_events); + let events = join3(room_events, account_data_events, typing_events); let unread_notifications = join(notification_count, highlight_count); let (unread_notifications, events, device_updates) = join3(unread_notifications, events, device_updates) .boxed() .await; - let (room_events, account_data_events, receipt_events, typing_events) = events; + let (room_events, account_data_events, typing_events) = events; let (notification_count, highlight_count) = unread_notifications; device_list_updates.extend(device_updates); @@ -866,7 +903,6 @@ async fn load_joined_room( skip_all, fields( full = %full_state, - ll = ?(lazy_load_enabled, lazy_load_send_redundant), cs = %current_shortstatehash, ss = ?since_shortstatehash, ) @@ -875,64 +911,38 @@ async fn load_joined_room( async fn calculate_state_changes( services: &Services, sender_user: &UserId, - sender_device: &DeviceId, room_id: &RoomId, - next_batchcount: PduCount, - lazy_load_enabled: bool, - lazy_load_send_redundant: bool, full_state: bool, + filter: &FilterDefinition, device_list_updates: &mut HashSet, left_encrypted_users: &mut HashSet, since_shortstatehash: Option, current_shortstatehash: ShortStateHash, - timeline_pdus: &Vec<(PduCount, PduEvent)>, - timeline_users: &HashSet, + joined_since_last_sync: bool, + witness: Option<&Witness>, ) -> Result { - let since_sender_member: OptionFuture<_> = since_shortstatehash - .map(|short| { - services - .rooms - .state_accessor - .state_get_content(short, &StateEventType::RoomMember, sender_user.as_str()) - .ok() - }) - .into(); - - let joined_since_last_sync = - since_sender_member - .await - .flatten() - .is_none_or(|content: RoomMemberEventContent| { - content.membership != MembershipState::Join - }); - if since_shortstatehash.is_none() || joined_since_last_sync { calculate_state_initial( services, sender_user, - sender_device, room_id, - next_batchcount, - lazy_load_enabled, full_state, + filter, current_shortstatehash, - timeline_users, + witness, ) .await } else { calculate_state_incremental( services, sender_user, - sender_device, room_id, - next_batchcount, - lazy_load_send_redundant, full_state, + filter, device_list_updates, left_encrypted_users, since_shortstatehash, current_shortstatehash, - timeline_pdus, joined_since_last_sync, ) .await @@ -944,87 +954,54 @@ async fn calculate_state_changes( async fn calculate_state_initial( services: &Services, sender_user: &UserId, - sender_device: &DeviceId, room_id: &RoomId, - next_batchcount: PduCount, - lazy_load_enabled: bool, full_state: bool, + filter: &FilterDefinition, current_shortstatehash: ShortStateHash, - timeline_users: &HashSet, + witness: Option<&Witness>, ) -> Result { - // Probably since = 0, we will do 
an initial sync - let state = services + let state_events = services .rooms .state_accessor .state_full_ids(current_shortstatehash) - .await? - .into_iter() - .stream() - .broad_filter_map(|(shortstatekey, event_id): (ShortStateKey, OwnedEventId)| { - services - .rooms - .short - .get_statekey_from_short(shortstatekey) - .map_ok(move |(event_type, state_key)| ((event_type, state_key), event_id)) - .ok() - }) - .fold((Vec::new(), HashSet::new()), |a, item| async move { - let (mut state_events, mut lazy_loaded) = a; - let ((event_type, state_key), event_id) = item; + .await?; - if event_type != StateEventType::RoomMember { - let Ok(pdu) = services.rooms.timeline.get_pdu(&event_id).await else { - error!("Pdu in state not found: {event_id}"); - return (state_events, lazy_loaded); - }; + let shortstatekeys = state_events.keys().copied().stream(); - state_events.push(pdu); - return (state_events, lazy_loaded); + let state_events = services + .rooms + .short + .multi_get_statekey_from_short(shortstatekeys) + .zip(state_events.values().cloned().stream()) + .ready_filter_map(|item| Some((item.0.ok()?, item.1))) + .ready_filter_map(|((event_type, state_key), event_id)| { + let lazy_load_enabled = filter.room.state.lazy_load_options.is_enabled() + || filter.room.timeline.lazy_load_options.is_enabled(); + + if lazy_load_enabled + && event_type == StateEventType::RoomMember + && !full_state + && state_key.as_str().try_into().is_ok_and(|user_id: &UserId| { + sender_user != user_id + && witness.is_some_and(|witness| !witness.contains(user_id)) + }) { + return None; } - // TODO: Delete "element_hacks" when this is resolved: https://github.com/vector-im/element-web/issues/22565 - if !lazy_load_enabled - || full_state - || timeline_users.contains(&state_key) - || (cfg!(feature = "element_hacks") && *sender_user == state_key) - { - let Ok(pdu) = services.rooms.timeline.get_pdu(&event_id).await else { - error!("Pdu in state not found: {event_id}"); - return (state_events, lazy_loaded); - }; - - // This check is in case a bad user ID made it into the database - if let Ok(uid) = OwnedUserId::parse(&state_key) { - lazy_loaded.insert(uid); - } - - state_events.push(pdu); - } - - (state_events, lazy_loaded) + Some(event_id) }) + .broad_filter_map(|event_id: OwnedEventId| async move { + services.rooms.timeline.get_pdu(&event_id).await.ok() + }) + .collect() .map(Ok); let counts = calculate_counts(services, room_id, sender_user); - let ((joined_member_count, invited_member_count, heroes), (state_events, lazy_loaded)) = - try_join(counts, state).boxed().await?; - - // Reset lazy loading because this is an initial sync - services - .rooms - .lazy_loading - .lazy_load_reset(sender_user, sender_device, room_id) - .await; + let ((joined_member_count, invited_member_count, heroes), state_events) = + try_join(counts, state_events).boxed().await?; // The state_events above should contain all timeline_users, let's mark them as // lazy loaded. 
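calculate_state_initial now fetches the state keys in one batched stream and drops exactly the member events a lazily-loading client does not need: anything that is neither the requester's own membership nor named by the witness, unless full_state was requested or lazy loading is off. The predicate restated over plain strings (illustrative only; the real code filters a stream of short state keys):

    use std::collections::HashSet;

    /// Keep this m.room.member event in the initial-sync state block?
    fn keep_member_event(
        lazy_load_enabled: bool,
        full_state: bool,
        sender_user: &str,
        member_user: &str,
        witness: Option<&HashSet<String>>,
    ) -> bool {
        if !lazy_load_enabled || full_state {
            return true;
        }

        // The requester's own membership is always sent; with no witness
        // available nothing can be filtered, so the event is kept too.
        member_user == sender_user || witness.is_none_or(|w| w.contains(member_user))
    }

Non-member state events are never filtered; the check above only applies when the event type is RoomMember.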
- services.rooms.lazy_loading.lazy_load_mark_sent( - sender_user, - sender_device, - room_id, - lazy_loaded, - next_batchcount, - ); Ok(StateChanges { heroes, @@ -1040,16 +1017,13 @@ async fn calculate_state_initial( async fn calculate_state_incremental( services: &Services, sender_user: &UserId, - sender_device: &DeviceId, room_id: &RoomId, - next_batchcount: PduCount, - lazy_load_send_redundant: bool, full_state: bool, + _filter: &FilterDefinition, device_list_updates: &mut HashSet, left_encrypted_users: &mut HashSet, since_shortstatehash: Option, current_shortstatehash: ShortStateHash, - timeline_pdus: &Vec<(PduCount, PduEvent)>, joined_since_last_sync: bool, ) -> Result { // Incremental /sync @@ -1162,76 +1136,12 @@ async fn calculate_state_incremental( (None, None, None) }; - let mut state_events = delta_state_events; - - // Mark all member events we're returning as lazy-loaded - let mut lazy_loaded = state_events - .iter() - .filter(|pdu| pdu.kind == RoomMember) - .filter_map(|pdu| { - pdu.state_key - .clone() - .map(TryInto::try_into) - .map(LogDebugErr::log_debug_err) - .flat_ok() - }) - .fold(HashSet::new(), |mut lazy_loaded, user_id| { - lazy_loaded.insert(user_id); - lazy_loaded - }); - - // Fetch contextual member state events for events from the timeline, and - // mark them as lazy-loaded as well. - for (_, event) in timeline_pdus { - if lazy_loaded.contains(&event.sender) { - continue; - } - - let sent_before: OptionFuture<_> = (!lazy_load_send_redundant) - .then(|| { - services.rooms.lazy_loading.lazy_load_was_sent_before( - sender_user, - sender_device, - room_id, - &event.sender, - ) - }) - .into(); - - let member_event: OptionFuture<_> = sent_before - .await - .is_none_or(is_false!()) - .then(|| { - services.rooms.state_accessor.room_state_get( - room_id, - &StateEventType::RoomMember, - event.sender.as_str(), - ) - }) - .into(); - - let Some(Ok(member_event)) = member_event.await else { - continue; - }; - - lazy_loaded.insert(event.sender.clone()); - state_events.push(member_event); - } - - services.rooms.lazy_loading.lazy_load_mark_sent( - sender_user, - sender_device, - room_id, - lazy_loaded, - next_batchcount, - ); - Ok(StateChanges { heroes, joined_member_count, invited_member_count, joined_since_last_sync, - state_events, + state_events: delta_state_events, }) } diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index c3c27b9e..67274ff1 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -1,109 +1,65 @@ -use std::{ - collections::{HashMap, HashSet}, - fmt::Write, - sync::{Arc, Mutex}, -}; +//! 
Lazy Loading + +use std::{collections::HashSet, sync::Arc}; use conduwuit::{ implement, - utils::{stream::TryIgnore, ReadyExt}, - PduCount, Result, + utils::{stream::TryIgnore, IterStream, ReadyExt}, + Result, }; -use database::{Interfix, Map}; -use ruma::{DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, RoomId, UserId}; +use database::{Database, Deserialized, Handle, Interfix, Map}; +use futures::{pin_mut, Stream, StreamExt}; +use ruma::{api::client::filter::LazyLoadOptions, DeviceId, OwnedUserId, RoomId, UserId}; pub struct Service { - lazy_load_waiting: Mutex, db: Data, } struct Data { lazyloadedids: Arc, + db: Arc, } -type LazyLoadWaiting = HashMap; -type LazyLoadWaitingKey = (OwnedUserId, OwnedDeviceId, OwnedRoomId, PduCount); -type LazyLoadWaitingVal = HashSet; +pub trait Options: Send + Sync { + fn is_enabled(&self) -> bool; + fn include_redundant_members(&self) -> bool; +} + +#[derive(Clone, Debug)] +pub struct Context<'a> { + pub user_id: &'a UserId, + pub device_id: &'a DeviceId, + pub room_id: &'a RoomId, + pub token: Option, + pub options: Option<&'a LazyLoadOptions>, +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum Status { + Unseen, + Seen(u64), +} + +pub type Witness = HashSet; +type Key<'a> = (&'a UserId, &'a DeviceId, &'a RoomId, &'a UserId); impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { - lazy_load_waiting: LazyLoadWaiting::new().into(), db: Data { lazyloadedids: args.db["lazyloadedids"].clone(), + db: args.db.clone(), }, })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result<()> { - let lazy_load_waiting = self.lazy_load_waiting.lock().expect("locked").len(); - writeln!(out, "lazy_load_waiting: {lazy_load_waiting}")?; - - Ok(()) - } - - fn clear_cache(&self) { self.lazy_load_waiting.lock().expect("locked").clear(); } - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } #[implement(Service)] #[tracing::instrument(skip(self), level = "debug")] -#[inline] -pub async fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, -) -> bool { - let key = (user_id, device_id, room_id, ll_user); - self.db.lazyloadedids.qry(&key).await.is_ok() -} - -#[implement(Service)] -#[tracing::instrument(skip(self), level = "debug")] -pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet, - count: PduCount, -) { - let key = (user_id.to_owned(), device_id.to_owned(), room_id.to_owned(), count); - - self.lazy_load_waiting - .lock() - .expect("locked") - .insert(key, lazy_load); -} - -#[implement(Service)] -#[tracing::instrument(skip(self), level = "debug")] -pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: PduCount, -) { - let key = (user_id.to_owned(), device_id.to_owned(), room_id.to_owned(), since); - - let Some(user_ids) = self.lazy_load_waiting.lock().expect("locked").remove(&key) else { - return; - }; - - for ll_id in &user_ids { - let key = (user_id, device_id, room_id, ll_id); - self.db.lazyloadedids.put_raw(key, []); - } -} - -#[implement(Service)] -#[tracing::instrument(skip(self), level = "debug")] -pub async fn lazy_load_reset(&self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId) { - let prefix = (user_id, device_id, room_id, Interfix); +pub async fn reset(&self, ctx: &Context<'_>) { + let prefix = (ctx.user_id, ctx.device_id, ctx.room_id, Interfix); self.db .lazyloadedids 
.keys_prefix_raw(&prefix) @@ -111,3 +67,89 @@ pub async fn lazy_load_reset(&self, user_id: &UserId, device_id: &DeviceId, room .ready_for_each(|key| self.db.lazyloadedids.remove(key)) .await; } + +#[implement(Service)] +#[tracing::instrument(name = "retain", level = "debug", skip_all)] +pub async fn witness_retain(&self, senders: Witness, ctx: &Context<'_>) -> Witness { + debug_assert!( + ctx.options.is_none_or(Options::is_enabled), + "lazy loading should be enabled by your options" + ); + + let include_redundant = cfg!(feature = "element_hacks") + || ctx.options.is_some_and(Options::include_redundant_members); + + let witness = self + .witness(ctx, senders.iter().map(AsRef::as_ref)) + .zip(senders.iter().stream()); + + pin_mut!(witness); + let _cork = self.db.db.cork(); + let mut senders = Witness::with_capacity(senders.len()); + while let Some((status, sender)) = witness.next().await { + if include_redundant || status == Status::Unseen { + senders.insert(sender.into()); + continue; + } + + if let Status::Seen(seen) = status { + if seen == 0 || ctx.token == Some(seen) { + senders.insert(sender.into()); + continue; + } + } + } + + senders +} + +#[implement(Service)] +fn witness<'a, I>( + &'a self, + ctx: &'a Context<'a>, + senders: I, +) -> impl Stream + Send + 'a +where + I: Iterator + Send + Clone + 'a, +{ + let make_key = + |sender: &'a UserId| -> Key<'a> { (ctx.user_id, ctx.device_id, ctx.room_id, sender) }; + + self.db + .lazyloadedids + .qry_batch(senders.clone().stream().map(make_key)) + .map(into_status) + .zip(senders.stream()) + .map(move |(status, sender)| { + if matches!(status, Status::Unseen) { + self.db + .lazyloadedids + .put_aput::<8, _, _>(make_key(sender), 0_u64); + } else if matches!(status, Status::Seen(0)) { + self.db + .lazyloadedids + .put_aput::<8, _, _>(make_key(sender), ctx.token.unwrap_or(0_u64)); + } + + status + }) +} + +fn into_status(result: Result>) -> Status { + match result.and_then(|handle| handle.deserialized()) { + | Ok(seen) => Status::Seen(seen), + | Err(_) => Status::Unseen, + } +} + +impl Options for LazyLoadOptions { + fn include_redundant_members(&self) -> bool { + if let Self::Enabled { include_redundant_members } = self { + *include_redundant_members + } else { + false + } + } + + fn is_enabled(&self) -> bool { !self.is_disabled() } +} From 4b3c54bbfa8340c4bab09a221f47afb0c6d04346 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 26 Jan 2025 13:11:47 -0500 Subject: [PATCH 092/328] check if DOCKER_USERNAME is empty as well in dockerhub desc publish Signed-off-by: June Clementine Strawberry --- .github/workflows/docker-hub-description.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-hub-description.yml b/.github/workflows/docker-hub-description.yml index 96b2d38b..b4f142db 100644 --- a/.github/workflows/docker-hub-description.yml +++ b/.github/workflows/docker-hub-description.yml @@ -13,7 +13,7 @@ on: jobs: dockerHubDescription: runs-on: ubuntu-latest - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && github.event.pull_request.user.login != 'renovate[bot]' + if: ${{ (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && github.event.pull_request.user.login != 'renovate[bot]' && (vars.DOCKER_USERNAME != '') }} steps: - uses: actions/checkout@v4 with: From ee63f720c97e6047ac712ce828505e7e82923336 Mon Sep 17 00:00:00 2001 From: Jade Ellis 
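witness_retain above is the de-duplication step: each candidate's stored status decides whether their membership is sent again, and the 0 value written by witness() appears to act as a placeholder for members recorded before a pagination token was known. The per-member decision, restated (Status mirrors the enum defined in this module; the function name is illustrative):

    #[derive(Clone, Copy)]
    enum Status {
        Unseen,
        Seen(u64),
    }

    fn should_send_member(status: Status, include_redundant: bool, token: Option<u64>) -> bool {
        // element_hacks or include_redundant_members: resend everything.
        if include_redundant {
            return true;
        }

        match status {
            | Status::Unseen => true,
            | Status::Seen(seen) => seen == 0 || token == Some(seen),
        }
    }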
Date: Sun, 26 Jan 2025 20:39:08 +0000 Subject: [PATCH 093/328] revert incorrect tags --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b0b0bd53..cde31232 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -897,15 +897,15 @@ jobs: run: | # Dockerhub Container Registry if [ ! -z $DOCKERHUB_TOKEN ]; then - docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell + docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell fi # GitHub Container Registry if [ $GHCR_ENABLED = "true" ]; then - docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GHCR_REPO}:${BRANCH_TAG}-haswell + docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell fi # GitLab Container Registry if [ ! -z $GITLAB_TOKEN ]; then - docker manifest create ${GLCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GLCR_REPO}:${BRANCH_TAG}-haswell + docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell fi - name: Create Docker combined manifests From 24e6086f12d6f3421b45c13b02aac96c3a188205 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 26 Jan 2025 20:44:40 +0000 Subject: [PATCH 094/328] load correct image file --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cde31232..42f4e12e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -815,7 +815,7 @@ jobs: - name: Load and push amd64 haswell image run: | - docker load -i oci-image-amd64.tar.gz + docker load -i oci-image-amd64-haswell-optimised.tar.gz if [ ! -z $DOCKERHUB_TOKEN ]; then docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell From 1c585ab1b6b55e40dadca2d1339ea5f8e4a244bb Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 26 Jan 2025 22:16:32 +0000 Subject: [PATCH 095/328] create manifests for unique docker tags --- .github/workflows/ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 42f4e12e..8a870700 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -897,14 +897,17 @@ jobs: run: | # Dockerhub Container Registry if [ ! -z $DOCKERHUB_TOKEN ]; then + docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell fi # GitHub Container Registry if [ $GHCR_ENABLED = "true" ]; then + docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell fi # GitLab Container Registry if [ ! 
-z $GITLAB_TOKEN ]; then + docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell fi From db7d23e7804f9ed707358a0da9d8f3f4fa588bd1 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 26 Jan 2025 23:52:51 +0000 Subject: [PATCH 096/328] fix creating manifest on wrong repo --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8a870700..de599f45 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -902,7 +902,7 @@ jobs: fi # GitHub Container Registry if [ $GHCR_ENABLED = "true" ]; then - docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell + docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell fi # GitLab Container Registry From 71a3855af61b0071832c23085f76a8711e32b49c Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 26 Jan 2025 21:30:15 -0500 Subject: [PATCH 097/328] fix couple more docker manifest typos Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index de599f45..35d60aa1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -902,12 +902,12 @@ jobs: fi # GitHub Container Registry if [ $GHCR_ENABLED = "true" ]; then - docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell + docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell fi # GitLab Container Registry if [ ! 
-z $GITLAB_TOKEN ]; then - docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell + docker manifest create ${GLCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell fi From 3e0ff2dc840cd9b3b823cf5d8d9a6739ee531896 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 25 Jan 2025 23:41:39 +0000 Subject: [PATCH 098/328] simplify references to server_name Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 12 ++++++------ src/admin/federation/commands.rs | 2 +- src/admin/server/commands.rs | 2 +- src/api/client/openid.rs | 2 +- src/api/client/report.rs | 2 +- src/api/client/room/create.rs | 2 +- src/api/client/voip.rs | 2 +- src/core/server.rs | 6 ++++++ src/service/globals/mod.rs | 8 ++++---- src/service/migrations.rs | 10 ++++------ src/service/resolver/actual.rs | 3 +-- src/service/rooms/alias/mod.rs | 2 +- src/service/sending/sender.rs | 2 +- 13 files changed, 29 insertions(+), 26 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index af7bd79f..855941fd 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -554,7 +554,7 @@ pub(super) async fn first_pdu_in_room( .services .rooms .state_cache - .server_in_room(&self.services.server.config.server_name, &room_id) + .server_in_room(&self.services.server.name, &room_id) .await { return Ok(RoomMessageEventContent::text_plain( @@ -583,7 +583,7 @@ pub(super) async fn latest_pdu_in_room( .services .rooms .state_cache - .server_in_room(&self.services.server.config.server_name, &room_id) + .server_in_room(&self.services.server.name, &room_id) .await { return Ok(RoomMessageEventContent::text_plain( @@ -613,7 +613,7 @@ pub(super) async fn force_set_room_state_from_server( .services .rooms .state_cache - .server_in_room(&self.services.server.config.server_name, &room_id) + .server_in_room(&self.services.server.name, &room_id) .await { return Ok(RoomMessageEventContent::text_plain( @@ -757,7 +757,7 @@ pub(super) async fn get_signing_keys( query: bool, ) -> Result { let server_name = - server_name.unwrap_or_else(|| self.services.server.config.server_name.clone().into()); + server_name.unwrap_or_else(|| self.services.server.name.clone().into()); if let Some(notary) = notary { let signing_keys = self @@ -794,7 +794,7 @@ pub(super) async fn get_verify_keys( server_name: Option>, ) -> Result { let server_name = - server_name.unwrap_or_else(|| self.services.server.config.server_name.clone().into()); + server_name.unwrap_or_else(|| self.services.server.name.clone().into()); let keys = self .services @@ -824,7 +824,7 @@ pub(super) async fn resolve_true_destination( )); } - if server_name == self.services.server.config.server_name { + if server_name == self.services.server.name { return Ok(RoomMessageEventContent::text_plain( "Not allowed to send federation requests to ourselves. 
Please use `get-pdu` for \ fetching local PDUs.", diff --git a/src/admin/federation/commands.rs b/src/admin/federation/commands.rs index be91ef0a..13bc8da4 100644 --- a/src/admin/federation/commands.rs +++ b/src/admin/federation/commands.rs @@ -92,7 +92,7 @@ pub(super) async fn remote_user_in_rooms( &self, user_id: Box, ) -> Result { - if user_id.server_name() == self.services.server.config.server_name { + if user_id.server_name() == self.services.server.name { return Ok(RoomMessageEventContent::text_plain( "User belongs to our server, please use `list-joined-rooms` user admin command \ instead.", diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index 3ea27883..47509bad 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -34,7 +34,7 @@ pub(super) async fn reload_config( ) -> Result { let path = path.as_deref().into_iter(); let config = Config::load(path).and_then(|raw| Config::new(&raw))?; - if config.server_name != self.services.server.config.server_name { + if config.server_name != self.services.server.name { return Err!("You can't change the server name."); } diff --git a/src/api/client/openid.rs b/src/api/client/openid.rs index 3547d284..671d0c6d 100644 --- a/src/api/client/openid.rs +++ b/src/api/client/openid.rs @@ -37,7 +37,7 @@ pub(crate) async fn create_openid_token_route( Ok(account::request_openid_token::v3::Response { access_token, token_type: TokenType::Bearer, - matrix_server_name: services.server.config.server_name.clone(), + matrix_server_name: services.server.name.clone(), expires_in: Duration::from_secs(expires_in), }) } diff --git a/src/api/client/report.rs b/src/api/client/report.rs index 2b25b518..57de3f12 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -50,7 +50,7 @@ pub(crate) async fn report_room_route( if !services .rooms .state_cache - .server_in_room(&services.server.config.server_name, &body.room_id) + .server_in_room(&services.server.name, &body.room_id) .await { return Err!(Request(NotFound( diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index 1b6e8667..a401b63d 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -71,7 +71,7 @@ pub(crate) async fn create_room_route( let room_id: OwnedRoomId = if let Some(custom_room_id) = &body.room_id { custom_room_id_check(&services, custom_room_id)? } else { - RoomId::new(&services.server.config.server_name) + RoomId::new(&services.server.name) }; // check if room ID doesn't already exist instead of erroring on auth check diff --git a/src/api/client/voip.rs b/src/api/client/voip.rs index c08b1fdf..70ad4913 100644 --- a/src/api/client/voip.rs +++ b/src/api/client/voip.rs @@ -38,7 +38,7 @@ pub(crate) async fn turn_server_route( let user = body.sender_user.unwrap_or_else(|| { UserId::parse_with_server_name( utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(), - &services.server.config.server_name, + &services.server.name, ) .unwrap() }); diff --git a/src/core/server.rs b/src/core/server.rs index 6838c9c9..05a4aae7 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -6,12 +6,17 @@ use std::{ time::SystemTime, }; +use ruma::OwnedServerName; use tokio::{runtime, sync::broadcast}; use crate::{config, config::Config, err, log::Log, metrics::Metrics, Err, Result}; /// Server runtime state; public portion pub struct Server { + /// Configured name of server. This is the same as the one in the config + /// but developers can (and should) reference this string instead. 
+ pub name: OwnedServerName, + /// Server-wide configuration instance pub config: config::Manager, @@ -46,6 +51,7 @@ impl Server { #[must_use] pub fn new(config: Config, runtime: Option, log: Log) -> Self { Self { + name: config.server_name.clone(), config: config::Manager::new(config), started: SystemTime::now(), stopping: AtomicBool::new(false), diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index ef34054f..485d5020 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -61,11 +61,11 @@ impl crate::Service for Service { db, server: args.server.clone(), bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), - admin_alias: OwnedRoomAliasId::try_from(format!("#admins:{}", &config.server_name)) + admin_alias: OwnedRoomAliasId::try_from(format!("#admins:{}", &args.server.name)) .expect("#admins:server_name is valid alias name"), server_user: UserId::parse_with_server_name( String::from("conduit"), - &config.server_name, + &args.server.name, ) .expect("@conduit:server_name is valid"), turn_secret, @@ -107,7 +107,7 @@ impl Service { pub fn current_count(&self) -> Result { Ok(self.db.current_count()) } #[inline] - pub fn server_name(&self) -> &ServerName { self.server.config.server_name.as_ref() } + pub fn server_name(&self) -> &ServerName { self.server.name.as_ref() } pub fn allow_registration(&self) -> bool { self.server.config.allow_registration } @@ -207,7 +207,7 @@ impl Service { #[inline] pub fn server_is_ours(&self, server_name: &ServerName) -> bool { - server_name == self.server.config.server_name + server_name == self.server_name() } #[inline] diff --git a/src/service/migrations.rs b/src/service/migrations.rs index c42c0324..27b4ab5a 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -218,8 +218,6 @@ async fn migrate(services: &Services) -> Result<()> { } async fn db_lt_12(services: &Services) -> Result<()> { - let config = &services.server.config; - for username in &services .users .list_local_users() @@ -227,7 +225,8 @@ async fn db_lt_12(services: &Services) -> Result<()> { .collect::>() .await { - let user = match UserId::parse_with_server_name(username.as_str(), &config.server_name) { + let user = match UserId::parse_with_server_name(username.as_str(), &services.server.name) + { | Ok(u) => u, | Err(e) => { warn!("Invalid username {username}: {e}"); @@ -297,8 +296,6 @@ async fn db_lt_12(services: &Services) -> Result<()> { } async fn db_lt_13(services: &Services) -> Result<()> { - let config = &services.server.config; - for username in &services .users .list_local_users() @@ -306,7 +303,8 @@ async fn db_lt_13(services: &Services) -> Result<()> { .collect::>() .await { - let user = match UserId::parse_with_server_name(username.as_str(), &config.server_name) { + let user = match UserId::parse_with_server_name(username.as_str(), &services.server.name) + { | Ok(u) => u, | Err(e) => { warn!("Invalid username {username}: {e}"); diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 33374240..66854764 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -401,8 +401,7 @@ impl super::Service { } fn validate_dest(&self, dest: &ServerName) -> Result<()> { - let config = &self.services.server.config; - if dest == config.server_name && !config.federation_loopback { + if dest == self.services.server.name && !self.services.server.config.federation_loopback { return Err!("Won't send federation request to ourselves"); } diff --git a/src/service/rooms/alias/mod.rs 
b/src/service/rooms/alias/mod.rs index 91797d01..17ed5e13 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -150,7 +150,7 @@ impl Service { let servers_contains_ours = || { servers .as_ref() - .is_some_and(|servers| servers.contains(&self.services.server.config.server_name)) + .is_some_and(|servers| servers.contains(&self.services.server.name)) }; if !server_is_ours && !servers_contains_ours() { diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 5fd4cf91..122e75c5 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -850,7 +850,7 @@ impl Service { let txn_id = &*general_purpose::URL_SAFE_NO_PAD.encode(txn_hash); let request = send_transaction_message::v1::Request { - origin: self.server.config.server_name.clone(), + origin: self.server.name.clone(), pdus: pdu_jsons, edus: edu_jsons, origin_server_ts: MilliSecondsSinceUnixEpoch::now(), From 4a2d0d35bcae8f189774214eb850d78dd53332eb Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 00:24:11 +0000 Subject: [PATCH 099/328] split federation request from sending service Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 6 +- .../send.rs => federation/execute.rs} | 163 ++++++++++-------- src/service/federation/mod.rs | 33 ++++ src/service/mod.rs | 1 + src/service/sending/mod.rs | 24 ++- src/service/sending/sender.rs | 2 +- src/service/services.rs | 4 +- 7 files changed, 146 insertions(+), 87 deletions(-) rename src/service/{sending/send.rs => federation/execute.rs} (65%) create mode 100644 src/service/federation/mod.rs diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 855941fd..cdd69c0f 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -756,8 +756,7 @@ pub(super) async fn get_signing_keys( notary: Option>, query: bool, ) -> Result { - let server_name = - server_name.unwrap_or_else(|| self.services.server.name.clone().into()); + let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into()); if let Some(notary) = notary { let signing_keys = self @@ -793,8 +792,7 @@ pub(super) async fn get_verify_keys( &self, server_name: Option>, ) -> Result { - let server_name = - server_name.unwrap_or_else(|| self.services.server.name.clone().into()); + let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into()); let keys = self .services diff --git a/src/service/sending/send.rs b/src/service/federation/execute.rs similarity index 65% rename from src/service/sending/send.rs rename to src/service/federation/execute.rs index c8a64f3c..27d98968 100644 --- a/src/service/sending/send.rs +++ b/src/service/federation/execute.rs @@ -1,4 +1,4 @@ -use std::mem; +use std::{fmt::Debug, mem}; use bytes::Bytes; use conduwuit::{ @@ -20,82 +20,109 @@ use ruma::{ use crate::resolver::actual::ActualDest; -impl super::Service { - #[tracing::instrument( +/// Sends a request to a federation server +#[implement(super::Service)] +#[tracing::instrument(skip_all, name = "request", level = "debug")] +pub async fn execute(&self, dest: &ServerName, request: T) -> Result +where + T: OutgoingRequest + Debug + Send, +{ + let client = &self.services.client.federation; + self.execute_on(client, dest, request).await +} + +/// Like execute() but with a very large timeout +#[implement(super::Service)] +#[tracing::instrument(skip_all, name = "synapse", level = "debug")] +pub async fn execute_synapse( + &self, + dest: &ServerName, + request: T, +) -> Result +where + T: 
OutgoingRequest + Debug + Send, +{ + let client = &self.services.client.synapse; + self.execute_on(client, dest, request).await +} + +#[implement(super::Service)] +#[tracing::instrument( level = "debug" skip(self, client, request), )] - pub async fn send( - &self, - client: &Client, - dest: &ServerName, - request: T, - ) -> Result - where - T: OutgoingRequest + Send, +pub async fn execute_on( + &self, + client: &Client, + dest: &ServerName, + request: T, +) -> Result +where + T: OutgoingRequest + Send, +{ + if !self.services.server.config.allow_federation { + return Err!(Config("allow_federation", "Federation is disabled.")); + } + + if self + .services + .server + .config + .forbidden_remote_server_names + .contains(dest) { - if !self.server.config.allow_federation { - return Err!(Config("allow_federation", "Federation is disabled.")); - } - - if self - .server - .config - .forbidden_remote_server_names - .contains(dest) - { - return Err!(Request(Forbidden(debug_warn!( - "Federation with {dest} is not allowed." - )))); - } - - let actual = self.services.resolver.get_actual_dest(dest).await?; - let request = into_http_request::(&actual, request)?; - let request = self.prepare(dest, request)?; - self.execute::(dest, &actual, request, client).await + return Err!(Request(Forbidden(debug_warn!("Federation with {dest} is not allowed.")))); } - async fn execute( - &self, - dest: &ServerName, - actual: &ActualDest, - request: Request, - client: &Client, - ) -> Result - where - T: OutgoingRequest + Send, - { - let url = request.url().clone(); - let method = request.method().clone(); + let actual = self.services.resolver.get_actual_dest(dest).await?; + let request = into_http_request::(&actual, request)?; + let request = self.prepare(dest, request)?; + self.perform::(dest, &actual, request, client).await +} - debug!(?method, ?url, "Sending request"); - match client.execute(request).await { - | Ok(response) => handle_response::(dest, actual, &method, &url, response).await, - | Err(error) => - Err(handle_error(actual, &method, &url, error).expect_err("always returns error")), +#[implement(super::Service)] +async fn perform( + &self, + dest: &ServerName, + actual: &ActualDest, + request: Request, + client: &Client, +) -> Result +where + T: OutgoingRequest + Send, +{ + let url = request.url().clone(); + let method = request.method().clone(); + + debug!(?method, ?url, "Sending request"); + match client.execute(request).await { + | Ok(response) => handle_response::(dest, actual, &method, &url, response).await, + | Err(error) => + Err(handle_error(actual, &method, &url, error).expect_err("always returns error")), + } +} + +#[implement(super::Service)] +fn prepare(&self, dest: &ServerName, mut request: http::Request>) -> Result { + self.sign_request(&mut request, dest); + + let request = Request::try_from(request)?; + self.validate_url(request.url())?; + self.services.server.check_running()?; + + Ok(request) +} + +#[implement(super::Service)] +fn validate_url(&self, url: &Url) -> Result<()> { + if let Some(url_host) = url.host_str() { + if let Ok(ip) = IPAddress::parse(url_host) { + trace!("Checking request URL IP {ip:?}"); + self.services.resolver.validate_ip(&ip)?; } } - fn prepare(&self, dest: &ServerName, mut request: http::Request>) -> Result { - self.sign_request(&mut request, dest); - - let request = Request::try_from(request)?; - self.validate_url(request.url())?; - self.server.check_running()?; - - Ok(request) - } - - fn validate_url(&self, url: &Url) -> Result<()> { - if let Some(url_host) = 
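execute() and execute_synapse() differ only in which client they pass to execute_on(): the "synapse" client is the long-timeout one kept for slow federation endpoints, per the doc comment above. The actual client construction lives in the client service and is not part of this patch; the intent is roughly the following, where the timeout values are made up for illustration:

    use std::time::Duration;

    // Hypothetical sketch: two reqwest clients sharing defaults, one with
    // a much larger request timeout for slow remote servers.
    fn build_federation_clients() -> reqwest::Result<(reqwest::Client, reqwest::Client)> {
        let federation = reqwest::Client::builder()
            .timeout(Duration::from_secs(30))
            .build()?;

        let synapse = reqwest::Client::builder()
            .timeout(Duration::from_secs(300))
            .build()?;

        Ok((federation, synapse))
    }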
url.host_str() { - if let Ok(ip) = IPAddress::parse(url_host) { - trace!("Checking request URL IP {ip:?}"); - self.services.resolver.validate_ip(&ip)?; - } - } - - Ok(()) - } + Ok(()) } async fn handle_response( @@ -195,7 +222,7 @@ fn sign_request(&self, http_request: &mut http::Request>, dest: &ServerN type Value = CanonicalJsonValue; type Object = CanonicalJsonObject; - let origin = self.services.globals.server_name(); + let origin = &self.services.server.name; let body = http_request.body(); let uri = http_request .uri() diff --git a/src/service/federation/mod.rs b/src/service/federation/mod.rs new file mode 100644 index 00000000..dacdb20e --- /dev/null +++ b/src/service/federation/mod.rs @@ -0,0 +1,33 @@ +mod execute; + +use std::sync::Arc; + +use conduwuit::{Result, Server}; + +use crate::{client, resolver, server_keys, Dep}; + +pub struct Service { + services: Services, +} + +struct Services { + server: Arc, + client: Dep, + resolver: Dep, + server_keys: Dep, +} + +impl crate::Service for Service { + fn build(args: crate::Args<'_>) -> Result> { + Ok(Arc::new(Self { + services: Services { + server: args.server.clone(), + client: args.depend::("client"), + resolver: args.depend::("resolver"), + server_keys: args.depend::("server_keys"), + }, + })) + } + + fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } +} diff --git a/src/service/mod.rs b/src/service/mod.rs index 789994d3..2102921f 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -10,6 +10,7 @@ pub mod admin; pub mod appservice; pub mod client; pub mod emergency; +pub mod federation; pub mod globals; pub mod key_backups; pub mod media; diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index e52bfb25..80bca112 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -1,7 +1,6 @@ mod appservice; mod data; mod dest; -mod send; mod sender; use std::{ @@ -30,8 +29,8 @@ pub use self::{ sender::{EDU_LIMIT, PDU_LIMIT}, }; use crate::{ - account_data, client, globals, presence, pusher, resolver, rooms, rooms::timeline::RawPduId, - server_keys, users, Dep, + account_data, client, federation, globals, presence, pusher, rooms, + rooms::timeline::RawPduId, users, Dep, }; pub struct Service { @@ -44,7 +43,6 @@ pub struct Service { struct Services { client: Dep, globals: Dep, - resolver: Dep, state: Dep, state_cache: Dep, user: Dep, @@ -55,7 +53,7 @@ struct Services { account_data: Dep, appservice: Dep, pusher: Dep, - server_keys: Dep, + federation: Dep, } #[derive(Clone, Debug, PartialEq, Eq)] @@ -83,7 +81,6 @@ impl crate::Service for Service { services: Services { client: args.depend::("client"), globals: args.depend::("globals"), - resolver: args.depend::("resolver"), state: args.depend::("rooms::state"), state_cache: args.depend::("rooms::state_cache"), user: args.depend::("rooms::user"), @@ -94,7 +91,7 @@ impl crate::Service for Service { account_data: args.depend::("account_data"), appservice: args.depend::("appservice"), pusher: args.depend::("pusher"), - server_keys: args.depend::("server_keys"), + federation: args.depend::("federation"), }, channels: (0..num_senders).map(|_| loole::unbounded()).collect(), })) @@ -277,7 +274,7 @@ impl Service { } /// Sends a request to a federation server - #[tracing::instrument(skip_all, name = "request", level = "debug")] + #[inline] pub async fn send_federation_request( &self, dest: &ServerName, @@ -286,12 +283,11 @@ impl Service { where T: OutgoingRequest + Debug + Send, { - let client = &self.services.client.federation; - 
self.send(client, dest, request).await + self.services.federation.execute(dest, request).await } /// Like send_federation_request() but with a very large timeout - #[tracing::instrument(skip_all, name = "synapse", level = "debug")] + #[inline] pub async fn send_synapse_request( &self, dest: &ServerName, @@ -300,8 +296,10 @@ impl Service { where T: OutgoingRequest + Debug + Send, { - let client = &self.services.client.synapse; - self.send(client, dest, request).await + self.services + .federation + .execute_synapse(dest, request) + .await } /// Sends a request to an appservice diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 122e75c5..c91e1d31 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -858,7 +858,7 @@ impl Service { }; let client = &self.services.client.sender; - self.send(client, &server, request) + self.services.federation.execute_on(client, &server, request) .await .inspect(|response| { response diff --git a/src/service/services.rs b/src/service/services.rs index 1aa87f58..cb5cc12f 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -10,7 +10,7 @@ use database::Database; use tokio::sync::Mutex; use crate::{ - account_data, admin, appservice, client, emergency, globals, key_backups, + account_data, admin, appservice, client, emergency, federation, globals, key_backups, manager::Manager, media, presence, pusher, resolver, rooms, sending, server_keys, service, service::{Args, Map, Service}, @@ -30,6 +30,7 @@ pub struct Services { pub pusher: Arc, pub resolver: Arc, pub rooms: rooms::Service, + pub federation: Arc, pub sending: Arc, pub server_keys: Arc, pub sync: Arc, @@ -95,6 +96,7 @@ impl Services { typing: build!(rooms::typing::Service), user: build!(rooms::user::Service), }, + federation: build!(federation::Service), sending: build!(sending::Service), server_keys: build!(server_keys::Service), sync: build!(sync::Service), From d0b4a619af08030a28f3a445fb8031bafc3cf90a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 03:30:34 +0000 Subject: [PATCH 100/328] furnish batch interface with trait Signed-off-by: Jason Volk --- src/database/map.rs | 3 ++ src/database/map/get.rs | 52 +--------------------- src/database/map/get_batch.rs | 45 ++++++++----------- src/database/map/qry.rs | 54 +++++++++++++++++++++++ src/database/map/qry_batch.rs | 63 +++++++++++++++++++++++++++ src/database/mod.rs | 2 +- src/service/rooms/lazy_loading/mod.rs | 10 +++-- src/service/rooms/short/mod.rs | 19 ++++---- 8 files changed, 155 insertions(+), 93 deletions(-) create mode 100644 src/database/map/qry.rs create mode 100644 src/database/map/qry_batch.rs diff --git a/src/database/map.rs b/src/database/map.rs index 97e90659..5176c529 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -9,6 +9,8 @@ mod keys_from; mod keys_prefix; mod open; mod options; +mod qry; +mod qry_batch; mod remove; mod rev_keys; mod rev_keys_from; @@ -37,6 +39,7 @@ pub(crate) use self::options::{ cache_iter_options_default, cache_read_options_default, iter_options_default, read_options_default, write_options_default, }; +pub use self::{get_batch::Get, qry_batch::Qry}; use crate::{watchers::Watchers, Engine}; pub struct Map { diff --git a/src/database/map/get.rs b/src/database/map/get.rs index 73182042..d6c65be2 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -1,65 +1,15 @@ -use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc}; +use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use arrayvec::ArrayVec; 
use conduwuit::{err, implement, utils::result::MapExpect, Err, Result}; use futures::{future::ready, Future, FutureExt, TryFutureExt}; use rocksdb::{DBPinnableSlice, ReadOptions}; -use serde::Serialize; use tokio::task; use crate::{ - keyval::KeyBuf, - ser, util::{is_incomplete, map_err, or_else}, Handle, }; -/// Fetch a value from the database into cache, returning a reference-handle -/// asynchronously. The key is serialized into an allocated buffer to perform -/// the query. -#[implement(super::Map)] -#[inline] -pub fn qry(self: &Arc, key: &K) -> impl Future>> + Send -where - K: Serialize + ?Sized + Debug, -{ - let mut buf = KeyBuf::new(); - self.bqry(key, &mut buf) -} - -/// Fetch a value from the database into cache, returning a reference-handle -/// asynchronously. The key is serialized into a fixed-sized buffer to perform -/// the query. The maximum size is supplied as const generic parameter. -#[implement(super::Map)] -#[inline] -pub fn aqry( - self: &Arc, - key: &K, -) -> impl Future>> + Send -where - K: Serialize + ?Sized + Debug, -{ - let mut buf = ArrayVec::::new(); - self.bqry(key, &mut buf) -} - -/// Fetch a value from the database into cache, returning a reference-handle -/// asynchronously. The key is serialized into a user-supplied Writer. -#[implement(super::Map)] -#[tracing::instrument(skip(self, buf), level = "trace")] -pub fn bqry( - self: &Arc, - key: &K, - buf: &mut B, -) -> impl Future>> + Send -where - K: Serialize + ?Sized + Debug, - B: Write + AsRef<[u8]>, -{ - let key = ser::serialize(buf, key).expect("failed to serialize query key"); - self.get(key) -} - /// Fetch a value from the database into cache, returning a reference-handle /// asynchronously. The key is referenced directly to perform the query. #[implement(super::Map)] diff --git a/src/database/map/get_batch.rs b/src/database/map/get_batch.rs index ee9269e3..ab9c1dc8 100644 --- a/src/database/map/get_batch.rs +++ b/src/database/map/get_batch.rs @@ -1,4 +1,4 @@ -use std::{convert::AsRef, fmt::Debug, sync::Arc}; +use std::{convert::AsRef, sync::Arc}; use conduwuit::{ implement, @@ -10,43 +10,34 @@ use conduwuit::{ }; use futures::{Stream, StreamExt, TryStreamExt}; use rocksdb::{DBPinnableSlice, ReadOptions}; -use serde::Serialize; use super::get::{cached_handle_from, handle_from}; -use crate::{keyval::KeyBuf, ser, Handle}; +use crate::Handle; -#[implement(super::Map)] -#[tracing::instrument(skip(self, keys), level = "trace")] -pub fn qry_batch<'a, S, K>( - self: &'a Arc, - keys: S, -) -> impl Stream>> + Send + 'a +pub trait Get<'a, K, S> where + Self: Sized, S: Stream + Send + 'a, - K: Serialize + Debug + 'a, + K: AsRef<[u8]> + Send + Sync + 'a, { - use crate::pool::Get; + fn get(self, map: &'a Arc) -> impl Stream>> + Send + 'a; +} - keys.ready_chunks(automatic_amplification()) - .widen_then(automatic_width(), |chunk| { - let keys = chunk - .iter() - .map(ser::serialize_to::) - .map(|result| result.expect("failed to serialize query key")) - .map(Into::into) - .collect(); - - self.db - .pool - .execute_get(Get { map: self.clone(), key: keys, res: None }) - }) - .map_ok(|results| results.into_iter().stream()) - .try_flatten() +impl<'a, K, S> Get<'a, K, S> for S +where + Self: Sized, + S: Stream + Send + 'a, + K: AsRef<[u8]> + Send + Sync + 'a, +{ + #[inline] + fn get(self, map: &'a Arc) -> impl Stream>> + Send + 'a { + map.get_batch(self) + } } #[implement(super::Map)] #[tracing::instrument(skip(self, keys), level = "trace")] -pub fn get_batch<'a, S, K>( +pub(crate) fn get_batch<'a, S, K>( self: &'a Arc, 
keys: S, ) -> impl Stream>> + Send + 'a diff --git a/src/database/map/qry.rs b/src/database/map/qry.rs new file mode 100644 index 00000000..401eba43 --- /dev/null +++ b/src/database/map/qry.rs @@ -0,0 +1,54 @@ +use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc}; + +use arrayvec::ArrayVec; +use conduwuit::{implement, Result}; +use futures::Future; +use serde::Serialize; + +use crate::{keyval::KeyBuf, ser, Handle}; + +/// Fetch a value from the database into cache, returning a reference-handle +/// asynchronously. The key is serialized into an allocated buffer to perform +/// the query. +#[implement(super::Map)] +#[inline] +pub fn qry(self: &Arc, key: &K) -> impl Future>> + Send +where + K: Serialize + ?Sized + Debug, +{ + let mut buf = KeyBuf::new(); + self.bqry(key, &mut buf) +} + +/// Fetch a value from the database into cache, returning a reference-handle +/// asynchronously. The key is serialized into a fixed-sized buffer to perform +/// the query. The maximum size is supplied as const generic parameter. +#[implement(super::Map)] +#[inline] +pub fn aqry( + self: &Arc, + key: &K, +) -> impl Future>> + Send +where + K: Serialize + ?Sized + Debug, +{ + let mut buf = ArrayVec::::new(); + self.bqry(key, &mut buf) +} + +/// Fetch a value from the database into cache, returning a reference-handle +/// asynchronously. The key is serialized into a user-supplied Writer. +#[implement(super::Map)] +#[tracing::instrument(skip(self, buf), level = "trace")] +pub fn bqry( + self: &Arc, + key: &K, + buf: &mut B, +) -> impl Future>> + Send +where + K: Serialize + ?Sized + Debug, + B: Write + AsRef<[u8]>, +{ + let key = ser::serialize(buf, key).expect("failed to serialize query key"); + self.get(key) +} diff --git a/src/database/map/qry_batch.rs b/src/database/map/qry_batch.rs new file mode 100644 index 00000000..31817c48 --- /dev/null +++ b/src/database/map/qry_batch.rs @@ -0,0 +1,63 @@ +use std::{fmt::Debug, sync::Arc}; + +use conduwuit::{ + implement, + utils::{ + stream::{automatic_amplification, automatic_width, WidebandExt}, + IterStream, + }, + Result, +}; +use futures::{Stream, StreamExt, TryStreamExt}; +use serde::Serialize; + +use crate::{keyval::KeyBuf, ser, Handle}; + +pub trait Qry<'a, K, S> +where + S: Stream + Send + 'a, + K: Serialize + Debug, +{ + fn qry(self, map: &'a Arc) -> impl Stream>> + Send + 'a; +} + +impl<'a, K, S> Qry<'a, K, S> for S +where + Self: 'a, + S: Stream + Send + 'a, + K: Serialize + Debug + 'a, +{ + #[inline] + fn qry(self, map: &'a Arc) -> impl Stream>> + Send + 'a { + map.qry_batch(self) + } +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self, keys), level = "trace")] +pub(crate) fn qry_batch<'a, S, K>( + self: &'a Arc, + keys: S, +) -> impl Stream>> + Send + 'a +where + S: Stream + Send + 'a, + K: Serialize + Debug + 'a, +{ + use crate::pool::Get; + + keys.ready_chunks(automatic_amplification()) + .widen_then(automatic_width(), |chunk| { + let keys = chunk + .iter() + .map(ser::serialize_to::) + .map(|result| result.expect("failed to serialize query key")) + .map(Into::into) + .collect(); + + self.db + .pool + .execute_get(Get { map: self.clone(), key: keys, res: None }) + }) + .map_ok(|results| results.into_iter().stream()) + .try_flatten() +} diff --git a/src/database/mod.rs b/src/database/mod.rs index 42b7f5e3..4f8e2ad9 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -30,7 +30,7 @@ pub use self::{ deserialized::Deserialized, handle::Handle, keyval::{serialize_key, serialize_val, KeyVal, Slice}, - map::{compact, Map}, + 
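The new Qry and Get traits turn batched lookups into stream adaptors: a stream of serializable keys (Qry) or raw byte keys (Get) is piped straight into a map, which is how the lazy-loading and short-ID call sites read after this change. A toy, in-memory model of the same extension-trait shape; none of the types below are the real database API, it only shows why a blanket impl over the key stream makes the call sites read naturally:

    use std::collections::HashMap;

    use futures::{executor::block_on, stream, Stream, StreamExt};

    struct ToyMap(HashMap<String, String>);

    trait Qry<'a, S>
    where
        S: Stream<Item = String> + 'a,
    {
        fn qry(self, map: &'a ToyMap) -> impl Stream<Item = Option<String>> + 'a;
    }

    impl<'a, S> Qry<'a, S> for S
    where
        S: Stream<Item = String> + 'a,
    {
        fn qry(self, map: &'a ToyMap) -> impl Stream<Item = Option<String>> + 'a {
            // The real implementation chunks the keys and hands them to the
            // frontend pool; a per-key lookup is enough to show the shape.
            self.map(move |key| map.0.get(&key).cloned())
        }
    }

    fn main() {
        let map = ToyMap(HashMap::from([("a".to_string(), "1".to_string())]));

        let found = block_on(
            stream::iter(["a".to_string(), "b".to_string()])
                .qry(&map)
                .collect::<Vec<_>>(),
        );

        assert_eq!(found, vec![Some("1".to_string()), None]);
    }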
map::{compact, Get, Map, Qry}, ser::{serialize, serialize_to, serialize_to_vec, Cbor, Interfix, Json, Separator, SEP}, }; pub(crate) use self::{ diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index 67274ff1..a6e00271 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -7,7 +7,7 @@ use conduwuit::{ utils::{stream::TryIgnore, IterStream, ReadyExt}, Result, }; -use database::{Database, Deserialized, Handle, Interfix, Map}; +use database::{Database, Deserialized, Handle, Interfix, Map, Qry}; use futures::{pin_mut, Stream, StreamExt}; use ruma::{api::client::filter::LazyLoadOptions, DeviceId, OwnedUserId, RoomId, UserId}; @@ -115,9 +115,11 @@ where let make_key = |sender: &'a UserId| -> Key<'a> { (ctx.user_id, ctx.device_id, ctx.room_id, sender) }; - self.db - .lazyloadedids - .qry_batch(senders.clone().stream().map(make_key)) + senders + .clone() + .stream() + .map(make_key) + .qry(&self.db.lazyloadedids) .map(into_status) .zip(senders.stream()) .map(move |(status, sender)| { diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 4a591592..dd586d02 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -2,7 +2,7 @@ use std::{borrow::Borrow, fmt::Debug, mem::size_of_val, sync::Arc}; pub use conduwuit::pdu::{ShortEventId, ShortId, ShortRoomId}; use conduwuit::{err, implement, utils, utils::IterStream, Result}; -use database::{Deserialized, Map}; +use database::{Deserialized, Get, Map, Qry}; use futures::{Stream, StreamExt}; use ruma::{events::StateEventType, EventId, RoomId}; use serde::Deserialize; @@ -67,9 +67,10 @@ pub fn multi_get_or_create_shorteventid<'a, I>( where I: Iterator + Clone + Debug + Send + 'a, { - self.db - .eventid_shorteventid - .get_batch(event_ids.clone().stream()) + event_ids + .clone() + .stream() + .get(&self.db.eventid_shorteventid) .zip(event_ids.into_iter().stream()) .map(|(result, event_id)| match result { | Ok(ref short) => utils::u64_from_u8(short), @@ -171,9 +172,8 @@ where Id: for<'de> Deserialize<'de> + Sized + ToOwned + 'a, ::Owned: Borrow, { - self.db - .shorteventid_eventid - .qry_batch(shorteventid) + shorteventid + .qry(&self.db.shorteventid_eventid) .map(Deserialized::deserialized) } @@ -204,9 +204,8 @@ pub fn multi_get_statekey_from_short<'a, S>( where S: Stream + Send + 'a, { - self.db - .shortstatekey_statekey - .qry_batch(shortstatekey) + shortstatekey + .qry(&self.db.shortstatekey_statekey) .map(Deserialized::deserialized) } From 6db8df5e232fad8c1e229194e6336b9f268b13c1 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 04:26:40 +0000 Subject: [PATCH 101/328] skip redundant acl check when sender is origin Signed-off-by: Jason Volk --- src/service/rooms/event_handler/handle_incoming_pdu.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index 4e6f0b0c..94d4bcc7 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -79,7 +79,9 @@ pub async fn handle_incoming_pdu<'a>( .try_into() .map_err(|e| err!(Request(InvalidParam("PDU does not have a valid sender key: {e}"))))?; - self.acl_check(sender.server_name(), room_id).await?; + if sender.server_name() != origin { + self.acl_check(sender.server_name(), room_id).await?; + } // Fetch create event let create_event = self From 
13335042b715b8279cd9adbaa6425a11eb7d3e64 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 04:47:07 +0000 Subject: [PATCH 102/328] enable the paranoid-checks options in debug mode Signed-off-by: Jason Volk --- src/database/engine/cf_opts.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index 382bc169..83bce08c 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -72,6 +72,13 @@ fn descriptor_cf_options( opts.set_options_from_string("{{arena_block_size=2097152;}}") .map_err(map_err)?; + #[cfg(debug_assertions)] + opts.set_options_from_string( + "{{paranoid_checks=true;paranoid_file_checks=true;force_consistency_checks=true;\ + verify_sst_unique_id_in_manifest=true;}}", + ) + .map_err(map_err)?; + Ok(opts) } From 98f95705478dbe60a56206dd2ae9057f602040ea Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 07:05:00 +0000 Subject: [PATCH 103/328] add option to disable rocksdb checksums reference runtime state for default option initialization Signed-off-by: Jason Volk --- conduwuit-example.toml | 7 ++++ src/core/config/mod.rs | 9 ++++++ src/database/engine.rs | 1 + src/database/engine/open.rs | 1 + src/database/map.rs | 6 ++-- src/database/map/keys.rs | 2 +- src/database/map/keys_from.rs | 2 +- src/database/map/options.rs | 50 +++++++++++++++++------------ src/database/map/rev_keys.rs | 2 +- src/database/map/rev_keys_from.rs | 2 +- src/database/map/rev_stream.rs | 4 +-- src/database/map/rev_stream_from.rs | 4 +-- src/database/map/stream.rs | 4 +-- src/database/map/stream_from.rs | 4 +-- 14 files changed, 62 insertions(+), 36 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 3ecc1628..51d948e8 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -897,6 +897,13 @@ # #rocksdb_paranoid_file_checks = false +# Enables or disables checksum verification in rocksdb at runtime. +# Checksums are usually hardware accelerated with low overhead; they are +# enabled in rocksdb by default. Older or slower platforms may see gains +# from disabling. +# +#rocksdb_checksums = true + # Database repair mode (for RocksDB SST corruption). # # Use this option when the server reports corruption while running or diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 133f0887..94788fa4 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1049,6 +1049,15 @@ pub struct Config { #[serde(default)] pub rocksdb_paranoid_file_checks: bool, + /// Enables or disables checksum verification in rocksdb at runtime. + /// Checksums are usually hardware accelerated with low overhead; they are + /// enabled in rocksdb by default. Older or slower platforms may see gains + /// from disabling. + /// + /// default: true + #[serde(default = "true_fn")] + pub rocksdb_checksums: bool, + /// Database repair mode (for RocksDB SST corruption). 
/// /// Use this option when the server reports corruption while running or diff --git a/src/database/engine.rs b/src/database/engine.rs index 76b2889b..be3d62cf 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -32,6 +32,7 @@ use crate::{ pub struct Engine { pub(super) read_only: bool, pub(super) secondary: bool, + pub(crate) checksums: bool, corks: AtomicU32, pub(crate) db: Db, pub(crate) pool: Arc, diff --git a/src/database/engine/open.rs b/src/database/engine/open.rs index 6a801878..ad724765 100644 --- a/src/database/engine/open.rs +++ b/src/database/engine/open.rs @@ -58,6 +58,7 @@ pub(crate) async fn open(ctx: Arc, desc: &[Descriptor]) -> Result) -> impl Stream>> + Send { use crate::pool::Seek; - let opts = super::iter_options_default(); + let opts = super::iter_options_default(&self.db); let state = stream::State::new(self, opts); if is_cached(self) { let state = state.init_fwd(None); diff --git a/src/database/map/keys_from.rs b/src/database/map/keys_from.rs index b83775ac..76c76325 100644 --- a/src/database/map/keys_from.rs +++ b/src/database/map/keys_from.rs @@ -53,7 +53,7 @@ where { use crate::pool::Seek; - let opts = super::iter_options_default(); + let opts = super::iter_options_default(&self.db); let state = stream::State::new(self, opts); if is_cached(self, from) { return stream::Keys::<'_>::from(state.init_fwd(from.as_ref().into())).boxed(); diff --git a/src/database/map/options.rs b/src/database/map/options.rs index f726036d..9e2ad898 100644 --- a/src/database/map/options.rs +++ b/src/database/map/options.rs @@ -1,35 +1,43 @@ +use std::sync::Arc; + use rocksdb::{ReadOptions, ReadTier, WriteOptions}; -#[inline] -pub(crate) fn iter_options_default() -> ReadOptions { - let mut options = read_options_default(); - options.set_background_purge_on_iterator_cleanup(true); - //options.set_pin_data(true); - options -} +use crate::Engine; #[inline] -pub(crate) fn cache_iter_options_default() -> ReadOptions { - let mut options = cache_read_options_default(); - options.set_background_purge_on_iterator_cleanup(true); - //options.set_pin_data(true); - options -} - -#[inline] -pub(crate) fn cache_read_options_default() -> ReadOptions { - let mut options = read_options_default(); +pub(crate) fn cache_iter_options_default(db: &Arc) -> ReadOptions { + let mut options = iter_options_default(db); options.set_read_tier(ReadTier::BlockCache); options.fill_cache(false); options } #[inline] -pub(crate) fn read_options_default() -> ReadOptions { - let mut options = ReadOptions::default(); - options.set_total_order_seek(true); +pub(crate) fn iter_options_default(db: &Arc) -> ReadOptions { + let mut options = read_options_default(db); + options.set_background_purge_on_iterator_cleanup(true); options } #[inline] -pub(crate) fn write_options_default() -> WriteOptions { WriteOptions::default() } +pub(crate) fn cache_read_options_default(db: &Arc) -> ReadOptions { + let mut options = read_options_default(db); + options.set_read_tier(ReadTier::BlockCache); + options.fill_cache(false); + options +} + +#[inline] +pub(crate) fn read_options_default(db: &Arc) -> ReadOptions { + let mut options = ReadOptions::default(); + options.set_total_order_seek(true); + + if !db.checksums { + options.set_verify_checksums(false); + } + + options +} + +#[inline] +pub(crate) fn write_options_default(_db: &Arc) -> WriteOptions { WriteOptions::default() } diff --git a/src/database/map/rev_keys.rs b/src/database/map/rev_keys.rs index a559d04b..21558a17 100644 --- a/src/database/map/rev_keys.rs +++ 
b/src/database/map/rev_keys.rs @@ -22,7 +22,7 @@ where pub fn rev_raw_keys(self: &Arc) -> impl Stream>> + Send { use crate::pool::Seek; - let opts = super::iter_options_default(); + let opts = super::iter_options_default(&self.db); let state = stream::State::new(self, opts); if is_cached(self) { let state = state.init_rev(None); diff --git a/src/database/map/rev_keys_from.rs b/src/database/map/rev_keys_from.rs index 5b159195..65072337 100644 --- a/src/database/map/rev_keys_from.rs +++ b/src/database/map/rev_keys_from.rs @@ -61,7 +61,7 @@ where { use crate::pool::Seek; - let opts = super::iter_options_default(); + let opts = super::iter_options_default(&self.db); let state = stream::State::new(self, opts); if is_cached(self, from) { return stream::KeysRev::<'_>::from(state.init_rev(from.as_ref().into())).boxed(); diff --git a/src/database/map/rev_stream.rs b/src/database/map/rev_stream.rs index 56b20b9b..f55053be 100644 --- a/src/database/map/rev_stream.rs +++ b/src/database/map/rev_stream.rs @@ -31,7 +31,7 @@ where pub fn rev_raw_stream(self: &Arc) -> impl Stream>> + Send { use crate::pool::Seek; - let opts = super::iter_options_default(); + let opts = super::iter_options_default(&self.db); let state = stream::State::new(self, opts); if is_cached(self) { let state = state.init_rev(None); @@ -66,7 +66,7 @@ pub fn rev_raw_stream(self: &Arc) -> impl Stream> fields(%map), )] pub(super) fn is_cached(map: &Arc) -> bool { - let opts = super::cache_iter_options_default(); + let opts = super::cache_iter_options_default(&map.db); let state = stream::State::new(map, opts).init_rev(None); !state.is_incomplete() diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs index 83832bdd..ddc98607 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -80,7 +80,7 @@ where { use crate::pool::Seek; - let opts = super::iter_options_default(); + let opts = super::iter_options_default(&self.db); let state = stream::State::new(self, opts); if is_cached(self, from) { let state = state.init_rev(from.as_ref().into()); @@ -118,7 +118,7 @@ pub(super) fn is_cached
(map: &Arc, from: &P) -> bool where P: AsRef<[u8]> + ?Sized, { - let cache_opts = super::cache_iter_options_default(); + let cache_opts = super::cache_iter_options_default(&map.db); let cache_status = stream::State::new(map, cache_opts) .init_rev(from.as_ref().into()) .status(); diff --git a/src/database/map/stream.rs b/src/database/map/stream.rs index f1b5fdc3..bfc8ba04 100644 --- a/src/database/map/stream.rs +++ b/src/database/map/stream.rs @@ -30,7 +30,7 @@ where pub fn raw_stream(self: &Arc) -> impl Stream>> + Send { use crate::pool::Seek; - let opts = super::iter_options_default(); + let opts = super::iter_options_default(&self.db); let state = stream::State::new(self, opts); if is_cached(self) { let state = state.init_fwd(None); @@ -65,7 +65,7 @@ pub fn raw_stream(self: &Arc) -> impl Stream>> + fields(%map), )] pub(super) fn is_cached(map: &Arc) -> bool { - let opts = super::cache_iter_options_default(); + let opts = super::cache_iter_options_default(&map.db); let state = stream::State::new(map, opts).init_fwd(None); !state.is_incomplete() diff --git a/src/database/map/stream_from.rs b/src/database/map/stream_from.rs index 562ab6b1..74140a65 100644 --- a/src/database/map/stream_from.rs +++ b/src/database/map/stream_from.rs @@ -77,7 +77,7 @@ where { use crate::pool::Seek; - let opts = super::iter_options_default(); + let opts = super::iter_options_default(&self.db); let state = stream::State::new(self, opts); if is_cached(self, from) { let state = state.init_fwd(from.as_ref().into()); @@ -115,7 +115,7 @@ pub(super) fn is_cached
(map: &Arc, from: &P) -> bool where P: AsRef<[u8]> + ?Sized, { - let opts = super::cache_iter_options_default(); + let opts = super::cache_iter_options_default(&map.db); let state = stream::State::new(map, opts).init_fwd(from.as_ref().into()); !state.is_incomplete() From 2b730a30ad8f6fc0b188f79f7c45746ba46eff52 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 09:59:59 +0000 Subject: [PATCH 104/328] add broad_flat_map Signed-off-by: Jason Volk --- src/core/utils/stream/broadband.rs | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/src/core/utils/stream/broadband.rs b/src/core/utils/stream/broadband.rs index 6d1ff6fe..282008e7 100644 --- a/src/core/utils/stream/broadband.rs +++ b/src/core/utils/stream/broadband.rs @@ -35,6 +35,13 @@ where Fut: Future> + Send, U: Send; + fn broadn_flat_map(self, n: N, f: F) -> impl Stream + Send + where + N: Into>, + F: Fn(Item) -> Fut + Send, + Fut: Stream + Send + Unpin, + U: Send; + fn broadn_then(self, n: N, f: F) -> impl Stream + Send where N: Into>, @@ -70,6 +77,16 @@ where self.broadn_filter_map(None, f) } + #[inline] + fn broad_flat_map(self, f: F) -> impl Stream + Send + where + F: Fn(Item) -> Fut + Send, + Fut: Stream + Send + Unpin, + U: Send, + { + self.broadn_flat_map(None, f) + } + #[inline] fn broad_then(self, f: F) -> impl Stream + Send where @@ -122,6 +139,17 @@ where .ready_filter_map(identity) } + #[inline] + fn broadn_flat_map(self, n: N, f: F) -> impl Stream + Send + where + N: Into>, + F: Fn(Item) -> Fut + Send, + Fut: Stream + Send + Unpin, + U: Send, + { + self.flat_map_unordered(n.into().unwrap_or_else(automatic_width), f) + } + #[inline] fn broadn_then(self, n: N, f: F) -> impl Stream + Send where From 677316631a029fdc23fb48092a1af14284e26448 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 06:15:01 +0000 Subject: [PATCH 105/328] pipeline prologue of handle_incoming_pdu simplify room_version/first_pdu_in_room argument passing Signed-off-by: Jason Volk --- .../fetch_and_handle_outliers.rs | 13 ++-- src/service/rooms/event_handler/fetch_prev.rs | 17 ++--- .../rooms/event_handler/fetch_state.rs | 5 +- .../event_handler/handle_incoming_pdu.rs | 72 +++++++++++-------- .../rooms/event_handler/handle_outlier_pdu.rs | 1 - .../rooms/event_handler/handle_prev_pdu.rs | 8 +-- .../event_handler/upgrade_outlier_pdu.rs | 2 +- 7 files changed, 62 insertions(+), 56 deletions(-) diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs index 84d0edd0..540ebb64 100644 --- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs +++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs @@ -10,10 +10,11 @@ use conduwuit::{ }; use futures::TryFutureExt; use ruma::{ - api::federation::event::get_event, CanonicalJsonValue, OwnedEventId, RoomId, RoomVersionId, - ServerName, + api::federation::event::get_event, CanonicalJsonValue, OwnedEventId, RoomId, ServerName, }; +use super::get_room_version_id; + /// Find the event and auth it. Once the event is validated (steps 1 - 8) /// it is appended to the outliers Tree. 
/// @@ -30,7 +31,6 @@ pub(super) async fn fetch_and_handle_outliers<'a>( events: &'a [OwnedEventId], create_event: &'a PduEvent, room_id: &'a RoomId, - room_version_id: &'a RoomVersionId, ) -> Vec<(Arc, Option>)> { let back_off = |id| match self .services @@ -113,8 +113,13 @@ pub(super) async fn fetch_and_handle_outliers<'a>( { | Ok(res) => { debug!("Got {next_id} over federation"); + let Ok(room_version_id) = get_room_version_id(create_event) else { + back_off((*next_id).to_owned()); + continue; + }; + let Ok((calculated_event_id, value)) = - pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) + pdu::gen_event_id_canonical_json(&res.pdu, &room_version_id) else { back_off((*next_id).to_owned()); continue; diff --git a/src/service/rooms/event_handler/fetch_prev.rs b/src/service/rooms/event_handler/fetch_prev.rs index 5966aeba..aea70739 100644 --- a/src/service/rooms/event_handler/fetch_prev.rs +++ b/src/service/rooms/event_handler/fetch_prev.rs @@ -8,8 +8,7 @@ use futures::{future, FutureExt}; use ruma::{ int, state_res::{self}, - uint, CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, - ServerName, + uint, CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, ServerName, UInt, }; use super::check_room_id; @@ -26,7 +25,7 @@ pub(super) async fn fetch_prev( origin: &ServerName, create_event: &PduEvent, room_id: &RoomId, - room_version_id: &RoomVersionId, + first_ts_in_room: UInt, initial_set: Vec, ) -> Result<( Vec, @@ -36,21 +35,13 @@ pub(super) async fn fetch_prev( let mut eventid_info = HashMap::new(); let mut todo_outlier_stack: VecDeque = initial_set.into(); - let first_pdu_in_room = self.services.timeline.first_pdu_in_room(room_id).await?; - let mut amount = 0; while let Some(prev_event_id) = todo_outlier_stack.pop_front() { self.services.server.check_running()?; if let Some((pdu, mut json_opt)) = self - .fetch_and_handle_outliers( - origin, - &[prev_event_id.clone()], - create_event, - room_id, - room_version_id, - ) + .fetch_and_handle_outliers(origin, &[prev_event_id.clone()], create_event, room_id) .boxed() .await .pop() @@ -74,7 +65,7 @@ pub(super) async fn fetch_prev( } if let Some(json) = json_opt { - if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { + if pdu.origin_server_ts > first_ts_in_room { amount = amount.saturating_add(1); for prev_prev in &pdu.prev_events { if !graph.contains_key(prev_prev) { diff --git a/src/service/rooms/event_handler/fetch_state.rs b/src/service/rooms/event_handler/fetch_state.rs index 0892655e..4f2580db 100644 --- a/src/service/rooms/event_handler/fetch_state.rs +++ b/src/service/rooms/event_handler/fetch_state.rs @@ -4,7 +4,7 @@ use conduwuit::{debug, debug_warn, implement, Err, Error, PduEvent, Result}; use futures::FutureExt; use ruma::{ api::federation::event::get_room_state_ids, events::StateEventType, EventId, OwnedEventId, - RoomId, RoomVersionId, ServerName, + RoomId, ServerName, }; use crate::rooms::short::ShortStateKey; @@ -23,7 +23,6 @@ pub(super) async fn fetch_state( origin: &ServerName, create_event: &PduEvent, room_id: &RoomId, - room_version_id: &RoomVersionId, event_id: &EventId, ) -> Result>> { let res = self @@ -38,7 +37,7 @@ pub(super) async fn fetch_state( debug!("Fetching state events"); let state_vec = self - .fetch_and_handle_outliers(origin, &res.pdu_ids, create_event, room_id, room_version_id) + .fetch_and_handle_outliers(origin, &res.pdu_ids, create_event, room_id) .boxed() .await; diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs 
b/src/service/rooms/event_handler/handle_incoming_pdu.rs index 94d4bcc7..7db71961 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -1,14 +1,15 @@ use std::{ collections::{hash_map, BTreeMap}, - sync::Arc, time::Instant, }; use conduwuit::{debug, err, implement, warn, Err, Result}; -use futures::{FutureExt, TryFutureExt}; +use futures::{ + future::{try_join5, OptionFuture}, + FutureExt, +}; use ruma::{events::StateEventType, CanonicalJsonValue, EventId, RoomId, ServerName, UserId}; -use super::{check_room_id, get_room_version_id}; use crate::rooms::timeline::RawPduId; /// When receiving an event one needs to: @@ -59,19 +60,13 @@ pub async fn handle_incoming_pdu<'a>( } // 1.1 Check the server is in the room - if !self.services.metadata.exists(room_id).await { - return Err!(Request(NotFound("Room is unknown to this server"))); - } + let meta_exists = self.services.metadata.exists(room_id).map(Ok); // 1.2 Check if the room is disabled - if self.services.metadata.is_disabled(room_id).await { - return Err!(Request(Forbidden( - "Federation of this room is currently disabled on this server." - ))); - } + let is_disabled = self.services.metadata.is_disabled(room_id).map(Ok); // 1.3.1 Check room ACL on origin field/server - self.acl_check(origin, room_id).await?; + let origin_acl_check = self.acl_check(origin, room_id); // 1.3.2 Check room ACL on sender's server name let sender: &UserId = value @@ -79,36 +74,53 @@ pub async fn handle_incoming_pdu<'a>( .try_into() .map_err(|e| err!(Request(InvalidParam("PDU does not have a valid sender key: {e}"))))?; - if sender.server_name() != origin { - self.acl_check(sender.server_name(), room_id).await?; - } + let sender_acl_check: OptionFuture<_> = sender + .server_name() + .ne(origin) + .then(|| self.acl_check(sender.server_name(), room_id)) + .into(); // Fetch create event - let create_event = self - .services - .state_accessor - .room_state_get(room_id, &StateEventType::RoomCreate, "") - .map_ok(Arc::new) - .await?; + let create_event = + self.services + .state_accessor + .room_state_get(room_id, &StateEventType::RoomCreate, ""); - // Procure the room version - let room_version_id = get_room_version_id(&create_event)?; + let (meta_exists, is_disabled, (), (), create_event) = try_join5( + meta_exists, + is_disabled, + origin_acl_check, + sender_acl_check.map(|o| o.unwrap_or(Ok(()))), + create_event, + ) + .await?; - let first_pdu_in_room = self.services.timeline.first_pdu_in_room(room_id).await?; + if !meta_exists { + return Err!(Request(NotFound("Room is unknown to this server"))); + } + + if is_disabled { + return Err!(Request(Forbidden("Federation of this room is disabled by this server."))); + } let (incoming_pdu, val) = self .handle_outlier_pdu(origin, &create_event, event_id, room_id, value, false) - .boxed() .await?; - check_room_id(room_id, &incoming_pdu)?; - // 8. if not timeline event: stop if !is_timeline_event { return Ok(None); } + // Skip old events - if incoming_pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { + let first_ts_in_room = self + .services + .timeline + .first_pdu_in_room(room_id) + .await? 
+ .origin_server_ts; + + if incoming_pdu.origin_server_ts < first_ts_in_room { return Ok(None); } @@ -119,7 +131,7 @@ pub async fn handle_incoming_pdu<'a>( origin, &create_event, room_id, - &room_version_id, + first_ts_in_room, incoming_pdu.prev_events.clone(), ) .await?; @@ -134,7 +146,7 @@ pub async fn handle_incoming_pdu<'a>( room_id, &mut eventid_info, &create_event, - &first_pdu_in_room, + first_ts_in_room, &prev_id, ) .await diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index 3ad73295..a35aabe0 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -84,7 +84,6 @@ pub(super) async fn handle_outlier_pdu<'a>( &incoming_pdu.auth_events, create_event, room_id, - &room_version_id, )) .await; } diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index 2bec4eba..32ab505f 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -7,7 +7,7 @@ use std::{ use conduwuit::{ debug, implement, utils::continue_exponential_backoff_secs, Err, PduEvent, Result, }; -use ruma::{CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName}; +use ruma::{CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName, UInt}; #[implement(super::Service)] #[allow(clippy::type_complexity)] @@ -27,8 +27,8 @@ pub(super) async fn handle_prev_pdu<'a>( OwnedEventId, (Arc, BTreeMap), >, - create_event: &Arc, - first_pdu_in_room: &PduEvent, + create_event: &PduEvent, + first_ts_in_room: UInt, prev_id: &EventId, ) -> Result { // Check for disabled again because it might have changed @@ -62,7 +62,7 @@ pub(super) async fn handle_prev_pdu<'a>( if let Some((pdu, json)) = eventid_info.remove(prev_id) { // Skip old events - if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { + if pdu.origin_server_ts < first_ts_in_room { return Ok(()); } diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index 8adf4246..f0c8f0c5 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -63,7 +63,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( if state_at_incoming_event.is_none() { state_at_incoming_event = self - .fetch_state(origin, create_event, room_id, &room_version_id, &incoming_pdu.event_id) + .fetch_state(origin, create_event, room_id, &incoming_pdu.event_id) .await?; } From 94d786ac12890306be5d0577bc3fcc8f6b856558 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 10:43:53 +0000 Subject: [PATCH 106/328] process rooms and edus concurrently Signed-off-by: Jason Volk --- Cargo.lock | 1 + src/api/Cargo.toml | 1 + src/api/server/send.rs | 241 ++++++++++-------- .../rooms/event_handler/parse_incoming_pdu.rs | 9 +- src/service/rooms/timeline/mod.rs | 2 +- 5 files changed, 142 insertions(+), 112 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5848cc46..3a435a10 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -685,6 +685,7 @@ dependencies = [ "http-body-util", "hyper", "ipaddress", + "itertools 0.13.0", "log", "rand", "reqwest", diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 1b463fbc..385e786f 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -50,6 +50,7 @@ http.workspace = true http-body-util.workspace = true hyper.workspace = true ipaddress.workspace = true 
+itertools.workspace = true log.workspace = true rand.workspace = true reqwest.workspace = true diff --git a/src/api/server/send.rs b/src/api/server/send.rs index eec9bd11..016f5194 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -3,10 +3,17 @@ use std::{collections::BTreeMap, net::IpAddr, time::Instant}; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - debug, debug_warn, err, error, result::LogErr, trace, utils::ReadyExt, warn, Err, Error, - Result, + debug, debug_warn, err, error, + result::LogErr, + trace, + utils::{ + stream::{automatic_width, BroadbandExt, TryBroadbandExt}, + IterStream, ReadyExt, + }, + warn, Err, Error, Result, }; -use futures::{FutureExt, StreamExt}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; +use itertools::Itertools; use ruma::{ api::{ client::error::ErrorKind, @@ -19,11 +26,9 @@ use ruma::{ }, }, events::receipt::{ReceiptEvent, ReceiptEventContent, ReceiptType}, - serde::Raw, to_device::DeviceIdOrAllDevices, - OwnedEventId, ServerName, + CanonicalJsonObject, OwnedEventId, OwnedRoomId, ServerName, }; -use serde_json::value::RawValue as RawJsonValue; use service::{ sending::{EDU_LIMIT, PDU_LIMIT}, Services, @@ -34,7 +39,8 @@ use crate::{ Ruma, }; -type ResolvedMap = BTreeMap>; +type ResolvedMap = BTreeMap; +type Pdu = (OwnedRoomId, OwnedEventId, CanonicalJsonObject); /// # `PUT /_matrix/federation/v1/send/{txnId}` /// @@ -73,91 +79,41 @@ pub(crate) async fn send_transaction_message_route( let txn_start_time = Instant::now(); trace!( - pdus = ?body.pdus.len(), - edus = ?body.edus.len(), + pdus = body.pdus.len(), + edus = body.edus.len(), elapsed = ?txn_start_time.elapsed(), id = ?body.transaction_id, origin =?body.origin(), "Starting txn", ); - let resolved_map = - handle_pdus(&services, &client, &body.pdus, body.origin(), &txn_start_time) - .boxed() - .await?; + let pdus = body + .pdus + .iter() + .stream() + .broad_then(|pdu| services.rooms.event_handler.parse_incoming_pdu(pdu)) + .inspect_err(|e| debug_warn!("Could not parse PDU: {e}")) + .ready_filter_map(Result::ok); - handle_edus(&services, &client, &body.edus, body.origin()) - .boxed() - .await; + let edus = body + .edus + .iter() + .map(|edu| edu.json().get()) + .map(serde_json::from_str) + .filter_map(Result::ok) + .stream(); + + let results = handle(&services, &client, body.origin(), txn_start_time, pdus, edus).await?; debug!( - pdus = ?body.pdus.len(), - edus = ?body.edus.len(), + pdus = body.pdus.len(), + edus = body.edus.len(), elapsed = ?txn_start_time.elapsed(), id = ?body.transaction_id, origin =?body.origin(), "Finished txn", ); - - Ok(send_transaction_message::v1::Response { - pdus: resolved_map - .into_iter() - .map(|(e, r)| (e, r.map_err(error::sanitized_message))) - .collect(), - }) -} - -async fn handle_pdus( - services: &Services, - _client: &IpAddr, - pdus: &[Box], - origin: &ServerName, - txn_start_time: &Instant, -) -> Result { - let mut parsed_pdus = Vec::with_capacity(pdus.len()); - for pdu in pdus { - parsed_pdus.push(match services.rooms.event_handler.parse_incoming_pdu(pdu).await { - | Ok(t) => t, - | Err(e) => { - debug_warn!("Could not parse PDU: {e}"); - continue; - }, - }); - - // We do not add the event_id field to the pdu here because of signature - // and hashes checks - } - - let mut resolved_map = BTreeMap::new(); - for (event_id, value, room_id) in parsed_pdus { - services.server.check_running()?; - let pdu_start_time = Instant::now(); - let mutex_lock = services - .rooms - .event_handler - 
.mutex_federation - .lock(&room_id) - .await; - - let result = services - .rooms - .event_handler - .handle_incoming_pdu(origin, &room_id, &event_id, value, true) - .boxed() - .await - .map(|_| ()); - - drop(mutex_lock); - debug!( - pdu_elapsed = ?pdu_start_time.elapsed(), - txn_elapsed = ?txn_start_time.elapsed(), - "Finished PDU {event_id}", - ); - - resolved_map.insert(event_id, result); - } - - for (id, result) in &resolved_map { + for (id, result) in &results { if let Err(e) = result { if matches!(e, Error::BadRequest(ErrorKind::NotFound, _)) { warn!("Incoming PDU failed {id}: {e:?}"); @@ -165,39 +121,112 @@ async fn handle_pdus( } } - Ok(resolved_map) + Ok(send_transaction_message::v1::Response { + pdus: results + .into_iter() + .map(|(e, r)| (e, r.map_err(error::sanitized_message))) + .collect(), + }) } -async fn handle_edus( +async fn handle( services: &Services, client: &IpAddr, - edus: &[Raw], origin: &ServerName, -) { - for edu in edus - .iter() - .filter_map(|edu| serde_json::from_str::(edu.json().get()).ok()) - { - match edu { - | Edu::Presence(presence) => { - handle_edu_presence(services, client, origin, presence).await; - }, - | Edu::Receipt(receipt) => - handle_edu_receipt(services, client, origin, receipt).await, - | Edu::Typing(typing) => handle_edu_typing(services, client, origin, typing).await, - | Edu::DeviceListUpdate(content) => { - handle_edu_device_list_update(services, client, origin, content).await; - }, - | Edu::DirectToDevice(content) => { - handle_edu_direct_to_device(services, client, origin, content).await; - }, - | Edu::SigningKeyUpdate(content) => { - handle_edu_signing_key_update(services, client, origin, content).await; - }, - | Edu::_Custom(ref _custom) => { - debug_warn!(?edus, "received custom/unknown EDU"); - }, - } + started: Instant, + pdus: impl Stream + Send, + edus: impl Stream + Send, +) -> Result { + // group pdus by room + let pdus = pdus + .collect() + .map(|mut pdus: Vec<_>| { + pdus.sort_by(|(room_a, ..), (room_b, ..)| room_a.cmp(room_b)); + pdus.into_iter() + .into_grouping_map_by(|(room_id, ..)| room_id.clone()) + .collect() + }) + .await; + + // we can evaluate rooms concurrently + let results: ResolvedMap = pdus + .into_iter() + .try_stream() + .broad_and_then(|(room_id, pdus)| { + handle_room(services, client, origin, started, room_id, pdus) + .map_ok(Vec::into_iter) + .map_ok(IterStream::try_stream) + }) + .try_flatten() + .try_collect() + .boxed() + .await?; + + // evaluate edus after pdus, at least for now. 
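	// Each EDU below is dispatched concurrently, with automatic_width() capping
	// how many are in flight at once. EDU outcomes never reach the transaction
	// response: failures are logged inside the individual handlers, and only the
	// PDU results collected above are returned to the caller.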
+ edus.for_each_concurrent(automatic_width(), |edu| handle_edu(services, client, origin, edu)) + .boxed() + .await; + + Ok(results) +} + +async fn handle_room( + services: &Services, + _client: &IpAddr, + origin: &ServerName, + txn_start_time: Instant, + room_id: OwnedRoomId, + pdus: Vec, +) -> Result> { + let _room_lock = services + .rooms + .event_handler + .mutex_federation + .lock(&room_id) + .await; + + let mut results = Vec::with_capacity(pdus.len()); + for (_, event_id, value) in pdus { + services.server.check_running()?; + let pdu_start_time = Instant::now(); + let result = services + .rooms + .event_handler + .handle_incoming_pdu(origin, &room_id, &event_id, value, true) + .await + .map(|_| ()); + + debug!( + pdu_elapsed = ?pdu_start_time.elapsed(), + txn_elapsed = ?txn_start_time.elapsed(), + "Finished PDU {event_id}", + ); + + results.push((event_id, result)); + } + + Ok(results) +} + +async fn handle_edu(services: &Services, client: &IpAddr, origin: &ServerName, edu: Edu) { + match edu { + | Edu::Presence(presence) => { + handle_edu_presence(services, client, origin, presence).await; + }, + | Edu::Receipt(receipt) => handle_edu_receipt(services, client, origin, receipt).await, + | Edu::Typing(typing) => handle_edu_typing(services, client, origin, typing).await, + | Edu::DeviceListUpdate(content) => { + handle_edu_device_list_update(services, client, origin, content).await; + }, + | Edu::DirectToDevice(content) => { + handle_edu_direct_to_device(services, client, origin, content).await; + }, + | Edu::SigningKeyUpdate(content) => { + handle_edu_signing_key_update(services, client, origin, content).await; + }, + | Edu::_Custom(ref _custom) => { + debug_warn!(?edu, "received custom/unknown EDU"); + }, } } diff --git a/src/service/rooms/event_handler/parse_incoming_pdu.rs b/src/service/rooms/event_handler/parse_incoming_pdu.rs index 0c11314d..9b130763 100644 --- a/src/service/rooms/event_handler/parse_incoming_pdu.rs +++ b/src/service/rooms/event_handler/parse_incoming_pdu.rs @@ -2,11 +2,10 @@ use conduwuit::{err, implement, pdu::gen_event_id_canonical_json, result::FlatOk use ruma::{CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId}; use serde_json::value::RawValue as RawJsonValue; +type Parsed = (OwnedRoomId, OwnedEventId, CanonicalJsonObject); + #[implement(super::Service)] -pub async fn parse_incoming_pdu( - &self, - pdu: &RawJsonValue, -) -> Result<(OwnedEventId, CanonicalJsonObject, OwnedRoomId)> { +pub async fn parse_incoming_pdu(&self, pdu: &RawJsonValue) -> Result { let value = serde_json::from_str::(pdu.get()).map_err(|e| { err!(BadServerResponse(debug_warn!("Error parsing incoming event {e:?}"))) })?; @@ -28,5 +27,5 @@ pub async fn parse_incoming_pdu( err!(Request(InvalidParam("Could not convert event to canonical json: {e}"))) })?; - Ok((event_id, value, room_id)) + Ok((room_id, event_id, value)) } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 362bfab5..bf585a6b 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1166,7 +1166,7 @@ impl Service { #[tracing::instrument(skip(self, pdu), level = "debug")] pub async fn backfill_pdu(&self, origin: &ServerName, pdu: Box) -> Result<()> { - let (event_id, value, room_id) = + let (room_id, event_id, value) = self.services.event_handler.parse_incoming_pdu(&pdu).await?; // Lock so we cannot backfill the same pdu twice at the same time From c516a8df3e80d38656f2251204119c720b5d96f7 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 
Jan 2025 15:12:08 +0000 Subject: [PATCH 107/328] fanout edu processing Signed-off-by: Jason Volk --- src/api/server/send.rs | 459 ++++++++++++++++++++++++----------------- 1 file changed, 265 insertions(+), 194 deletions(-) diff --git a/src/api/server/send.rs b/src/api/server/send.rs index 016f5194..f4903447 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -20,19 +20,22 @@ use ruma::{ federation::transactions::{ edu::{ DeviceListUpdateContent, DirectDeviceContent, Edu, PresenceContent, - ReceiptContent, SigningKeyUpdateContent, TypingContent, + PresenceUpdate, ReceiptContent, ReceiptData, ReceiptMap, SigningKeyUpdateContent, + TypingContent, }, send_transaction_message, }, }, events::receipt::{ReceiptEvent, ReceiptEventContent, ReceiptType}, + serde::Raw, to_device::DeviceIdOrAllDevices, - CanonicalJsonObject, OwnedEventId, OwnedRoomId, ServerName, + CanonicalJsonObject, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, ServerName, UserId, }; use service::{ sending::{EDU_LIMIT, PDU_LIMIT}, Services, }; +use utils::millis_since_unix_epoch; use crate::{ utils::{self}, @@ -152,8 +155,8 @@ async fn handle( let results: ResolvedMap = pdus .into_iter() .try_stream() - .broad_and_then(|(room_id, pdus)| { - handle_room(services, client, origin, started, room_id, pdus) + .broad_and_then(|(room_id, pdus): (_, Vec<_>)| { + handle_room(services, client, origin, started, room_id, pdus.into_iter()) .map_ok(Vec::into_iter) .map_ok(IterStream::try_stream) }) @@ -176,7 +179,7 @@ async fn handle_room( origin: &ServerName, txn_start_time: Instant, room_id: OwnedRoomId, - pdus: Vec, + pdus: impl Iterator + Send, ) -> Result> { let _room_lock = services .rooms @@ -185,48 +188,53 @@ async fn handle_room( .lock(&room_id) .await; - let mut results = Vec::with_capacity(pdus.len()); - for (_, event_id, value) in pdus { - services.server.check_running()?; - let pdu_start_time = Instant::now(); - let result = services - .rooms - .event_handler - .handle_incoming_pdu(origin, &room_id, &event_id, value, true) - .await - .map(|_| ()); + let room_id = &room_id; + pdus.try_stream() + .and_then(|(_, event_id, value)| async move { + services.server.check_running()?; + let pdu_start_time = Instant::now(); + let result = services + .rooms + .event_handler + .handle_incoming_pdu(origin, room_id, &event_id, value, true) + .await + .map(|_| ()); - debug!( - pdu_elapsed = ?pdu_start_time.elapsed(), - txn_elapsed = ?txn_start_time.elapsed(), - "Finished PDU {event_id}", - ); + debug!( + pdu_elapsed = ?pdu_start_time.elapsed(), + txn_elapsed = ?txn_start_time.elapsed(), + "Finished PDU {event_id}", + ); - results.push((event_id, result)); - } - - Ok(results) + Ok((event_id, result)) + }) + .try_collect() + .await } async fn handle_edu(services: &Services, client: &IpAddr, origin: &ServerName, edu: Edu) { match edu { - | Edu::Presence(presence) => { - handle_edu_presence(services, client, origin, presence).await; - }, - | Edu::Receipt(receipt) => handle_edu_receipt(services, client, origin, receipt).await, - | Edu::Typing(typing) => handle_edu_typing(services, client, origin, typing).await, - | Edu::DeviceListUpdate(content) => { - handle_edu_device_list_update(services, client, origin, content).await; - }, - | Edu::DirectToDevice(content) => { - handle_edu_direct_to_device(services, client, origin, content).await; - }, - | Edu::SigningKeyUpdate(content) => { - handle_edu_signing_key_update(services, client, origin, content).await; - }, - | Edu::_Custom(ref _custom) => { - debug_warn!(?edu, "received custom/unknown 
EDU"); - }, + | Edu::Presence(presence) if services.server.config.allow_incoming_presence => + handle_edu_presence(services, client, origin, presence).await, + + | Edu::Receipt(receipt) if services.server.config.allow_incoming_read_receipts => + handle_edu_receipt(services, client, origin, receipt).await, + + | Edu::Typing(typing) if services.server.config.allow_incoming_typing => + handle_edu_typing(services, client, origin, typing).await, + + | Edu::DeviceListUpdate(content) => + handle_edu_device_list_update(services, client, origin, content).await, + + | Edu::DirectToDevice(content) => + handle_edu_direct_to_device(services, client, origin, content).await, + + | Edu::SigningKeyUpdate(content) => + handle_edu_signing_key_update(services, client, origin, content).await, + + | Edu::_Custom(ref _custom) => debug_warn!(?edu, "received custom/unknown EDU"), + + | _ => trace!(?edu, "skipped"), } } @@ -236,32 +244,41 @@ async fn handle_edu_presence( origin: &ServerName, presence: PresenceContent, ) { - if !services.globals.allow_incoming_presence() { + presence + .push + .into_iter() + .stream() + .for_each_concurrent(automatic_width(), |update| { + handle_edu_presence_update(services, origin, update) + }) + .await; +} + +async fn handle_edu_presence_update( + services: &Services, + origin: &ServerName, + update: PresenceUpdate, +) { + if update.user_id.server_name() != origin { + debug_warn!( + %update.user_id, %origin, + "received presence EDU for user not belonging to origin" + ); return; } - for update in presence.push { - if update.user_id.server_name() != origin { - debug_warn!( - %update.user_id, %origin, - "received presence EDU for user not belonging to origin" - ); - continue; - } - - services - .presence - .set_presence( - &update.user_id, - &update.presence, - Some(update.currently_active), - Some(update.last_active_ago), - update.status_msg.clone(), - ) - .await - .log_err() - .ok(); - } + services + .presence + .set_presence( + &update.user_id, + &update.presence, + Some(update.currently_active), + Some(update.last_active_ago), + update.status_msg.clone(), + ) + .await + .log_err() + .ok(); } async fn handle_edu_receipt( @@ -270,66 +287,94 @@ async fn handle_edu_receipt( origin: &ServerName, receipt: ReceiptContent, ) { - if !services.globals.allow_incoming_read_receipts() { + receipt + .receipts + .into_iter() + .stream() + .for_each_concurrent(automatic_width(), |(room_id, room_updates)| { + handle_edu_receipt_room(services, origin, room_id, room_updates) + }) + .await; +} + +async fn handle_edu_receipt_room( + services: &Services, + origin: &ServerName, + room_id: OwnedRoomId, + room_updates: ReceiptMap, +) { + if services + .rooms + .event_handler + .acl_check(origin, &room_id) + .await + .is_err() + { + debug_warn!( + %origin, %room_id, + "received read receipt EDU from ACL'd server" + ); return; } - for (room_id, room_updates) in receipt.receipts { - if services - .rooms - .event_handler - .acl_check(origin, &room_id) - .await - .is_err() - { - debug_warn!( - %origin, %room_id, - "received read receipt EDU from ACL'd server" - ); - continue; - } + let room_id = &room_id; + room_updates + .read + .into_iter() + .stream() + .for_each_concurrent(automatic_width(), |(user_id, user_updates)| async move { + handle_edu_receipt_room_user(services, origin, room_id, &user_id, user_updates).await; + }) + .await; +} - for (user_id, user_updates) in room_updates.read { - if user_id.server_name() != origin { - debug_warn!( - %user_id, %origin, - "received read receipt EDU for user not 
belonging to origin" - ); - continue; - } - - if services - .rooms - .state_cache - .room_members(&room_id) - .ready_any(|member| member.server_name() == user_id.server_name()) - .await - { - for event_id in &user_updates.event_ids { - let user_receipts = - BTreeMap::from([(user_id.clone(), user_updates.data.clone())]); - let receipts = BTreeMap::from([(ReceiptType::Read, user_receipts)]); - let receipt_content = BTreeMap::from([(event_id.to_owned(), receipts)]); - let event = ReceiptEvent { - content: ReceiptEventContent(receipt_content), - room_id: room_id.clone(), - }; - - services - .rooms - .read_receipt - .readreceipt_update(&user_id, &room_id, &event) - .await; - } - } else { - debug_warn!( - %user_id, %room_id, %origin, - "received read receipt EDU from server who does not have a member in the room", - ); - continue; - } - } +async fn handle_edu_receipt_room_user( + services: &Services, + origin: &ServerName, + room_id: &RoomId, + user_id: &UserId, + user_updates: ReceiptData, +) { + if user_id.server_name() != origin { + debug_warn!( + %user_id, %origin, + "received read receipt EDU for user not belonging to origin" + ); + return; } + + if !services + .rooms + .state_cache + .server_in_room(origin, room_id) + .await + { + debug_warn!( + %user_id, %room_id, %origin, + "received read receipt EDU from server who does not have a member in the room", + ); + return; + } + + let data = &user_updates.data; + user_updates + .event_ids + .into_iter() + .stream() + .for_each_concurrent(automatic_width(), |event_id| async move { + let user_data = [(user_id.to_owned(), data.clone())]; + let receipts = [(ReceiptType::Read, BTreeMap::from(user_data))]; + let content = [(event_id.clone(), BTreeMap::from(receipts))]; + services + .rooms + .read_receipt + .readreceipt_update(user_id, room_id, &ReceiptEvent { + content: ReceiptEventContent(content.into()), + room_id: room_id.to_owned(), + }) + .await; + }) + .await; } async fn handle_edu_typing( @@ -338,10 +383,6 @@ async fn handle_edu_typing( origin: &ServerName, typing: TypingContent, ) { - if !services.server.config.allow_incoming_typing { - return; - } - if typing.user_id.server_name() != origin { debug_warn!( %typing.user_id, %origin, @@ -364,41 +405,38 @@ async fn handle_edu_typing( return; } - if services + if !services .rooms .state_cache .is_joined(&typing.user_id, &typing.room_id) .await { - if typing.typing { - let timeout = utils::millis_since_unix_epoch().saturating_add( - services - .server - .config - .typing_federation_timeout_s - .saturating_mul(1000), - ); - services - .rooms - .typing - .typing_add(&typing.user_id, &typing.room_id, timeout) - .await - .log_err() - .ok(); - } else { - services - .rooms - .typing - .typing_remove(&typing.user_id, &typing.room_id) - .await - .log_err() - .ok(); - } - } else { debug_warn!( %typing.user_id, %typing.room_id, %origin, "received typing EDU for user not in room" ); + return; + } + + if typing.typing { + let secs = services.server.config.typing_federation_timeout_s; + let timeout = millis_since_unix_epoch().saturating_add(secs.saturating_mul(1000)); + + services + .rooms + .typing + .typing_add(&typing.user_id, &typing.room_id, timeout) + .await + .log_err() + .ok(); + } else { + services + .rooms + .typing + .typing_remove(&typing.user_id, &typing.room_id) + .await + .log_err() + .ok(); } } @@ -427,7 +465,12 @@ async fn handle_edu_direct_to_device( origin: &ServerName, content: DirectDeviceContent, ) { - let DirectDeviceContent { sender, ev_type, message_id, messages } = content; + let 
DirectDeviceContent { + ref sender, + ref ev_type, + ref message_id, + messages, + } = content; if sender.server_name() != origin { debug_warn!( @@ -440,60 +483,88 @@ async fn handle_edu_direct_to_device( // Check if this is a new transaction id if services .transaction_ids - .existing_txnid(&sender, None, &message_id) + .existing_txnid(sender, None, message_id) .await .is_ok() { return; } - for (target_user_id, map) in &messages { - for (target_device_id_maybe, event) in map { - let Ok(event) = event.deserialize_as().map_err(|e| { - err!(Request(InvalidParam(error!("To-Device event is invalid: {e}")))) - }) else { - continue; - }; - - let ev_type = ev_type.to_string(); - match target_device_id_maybe { - | DeviceIdOrAllDevices::DeviceId(target_device_id) => { - services - .users - .add_to_device_event( - &sender, - target_user_id, - target_device_id, - &ev_type, - event, - ) - .await; - }, - - | DeviceIdOrAllDevices::AllDevices => { - let (sender, ev_type, event) = (&sender, &ev_type, &event); - services - .users - .all_device_ids(target_user_id) - .for_each(|target_device_id| { - services.users.add_to_device_event( - sender, - target_user_id, - target_device_id, - ev_type, - event.clone(), - ) - }) - .await; - }, - } - } - } + // process messages concurrently for different users + let ev_type = ev_type.to_string(); + messages + .into_iter() + .stream() + .for_each_concurrent(automatic_width(), |(target_user_id, map)| { + handle_edu_direct_to_device_user(services, target_user_id, sender, &ev_type, map) + }) + .await; // Save transaction id with empty data services .transaction_ids - .add_txnid(&sender, None, &message_id, &[]); + .add_txnid(sender, None, message_id, &[]); +} + +async fn handle_edu_direct_to_device_user( + services: &Services, + target_user_id: OwnedUserId, + sender: &UserId, + ev_type: &str, + map: BTreeMap>, +) { + for (target_device_id_maybe, event) in map { + let Ok(event) = event + .deserialize_as() + .map_err(|e| err!(Request(InvalidParam(error!("To-Device event is invalid: {e}"))))) + else { + continue; + }; + + handle_edu_direct_to_device_event( + services, + &target_user_id, + sender, + target_device_id_maybe, + ev_type, + event, + ) + .await; + } +} + +async fn handle_edu_direct_to_device_event( + services: &Services, + target_user_id: &UserId, + sender: &UserId, + target_device_id_maybe: DeviceIdOrAllDevices, + ev_type: &str, + event: serde_json::Value, +) { + match target_device_id_maybe { + | DeviceIdOrAllDevices::DeviceId(ref target_device_id) => { + services + .users + .add_to_device_event(sender, target_user_id, target_device_id, ev_type, event) + .await; + }, + + | DeviceIdOrAllDevices::AllDevices => { + services + .users + .all_device_ids(target_user_id) + .for_each(|target_device_id| { + services.users.add_to_device_event( + sender, + target_user_id, + target_device_id, + ev_type, + event.clone(), + ) + }) + .await; + }, + } } async fn handle_edu_signing_key_update( From b2a565b0b4e32cf998ee5877cecded31f1305240 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 15:44:52 +0000 Subject: [PATCH 108/328] propagate better error from server.check_running() --- src/core/error/response.rs | 1 + src/core/server.rs | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/core/error/response.rs b/src/core/error/response.rs index ede1a05d..75e4050d 100644 --- a/src/core/error/response.rs +++ b/src/core/error/response.rs @@ -106,6 +106,7 @@ pub(super) fn io_error_code(kind: std::io::ErrorKind) -> StatusCode { | ErrorKind::TimedOut => 
StatusCode::GATEWAY_TIMEOUT, | ErrorKind::FileTooLarge => StatusCode::PAYLOAD_TOO_LARGE, | ErrorKind::StorageFull => StatusCode::INSUFFICIENT_STORAGE, + | ErrorKind::Interrupted => StatusCode::SERVICE_UNAVAILABLE, | _ => StatusCode::INTERNAL_SERVER_ERROR, } } diff --git a/src/core/server.rs b/src/core/server.rs index 05a4aae7..0f2e61b0 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -9,7 +9,7 @@ use std::{ use ruma::OwnedServerName; use tokio::{runtime, sync::broadcast}; -use crate::{config, config::Config, err, log::Log, metrics::Metrics, Err, Result}; +use crate::{config, config::Config, log::Log, metrics::Metrics, Err, Result}; /// Server runtime state; public portion pub struct Server { @@ -127,9 +127,12 @@ impl Server { #[inline] pub fn check_running(&self) -> Result { + use std::{io, io::ErrorKind::Interrupted}; + self.running() .then_some(()) - .ok_or_else(|| err!(debug_warn!("Server is shutting down."))) + .ok_or_else(|| io::Error::new(Interrupted, "Server shutting down")) + .map_err(Into::into) } #[inline] From ffd0fd42424a234d4fbd564b66b79521595b5b5b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 21:46:46 +0000 Subject: [PATCH 109/328] pipeline pdu fetch for federation sending destination Signed-off-by: Jason Volk --- src/service/sending/sender.rs | 135 ++++++++++++++++------------------ 1 file changed, 64 insertions(+), 71 deletions(-) diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index c91e1d31..47be01f1 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -8,12 +8,12 @@ use std::{ time::{Duration, Instant}, }; -use base64::{engine::general_purpose, Engine as _}; +use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine as _}; use conduwuit::{ debug, err, error, result::LogErr, trace, - utils::{calculate_hash, continue_exponential_backoff_secs, ReadyExt}, + utils::{calculate_hash, continue_exponential_backoff_secs, stream::IterStream, ReadyExt}, warn, Error, Result, }; use futures::{ @@ -38,7 +38,9 @@ use ruma::{ push_rules::PushRulesEvent, receipt::ReceiptType, AnySyncEphemeralRoomEvent, GlobalAccountDataEventType, }, - push, uint, CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedRoomId, OwnedServerName, + push, + serde::Raw, + uint, CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, RoomVersionId, ServerName, UInt, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; @@ -633,7 +635,7 @@ impl Service { } fn send_events(&self, dest: Destination, events: Vec) -> SendingFuture<'_> { - //debug_assert!(!events.is_empty(), "sending empty transaction"); + debug_assert!(!events.is_empty(), "sending empty transaction"); match dest { | Destination::Federation(server) => self.send_events_dest_federation(server, events).boxed(), @@ -698,7 +700,7 @@ impl Service { | SendingEvent::Flush => None, })); - let txn_id = &*general_purpose::URL_SAFE_NO_PAD.encode(txn_hash); + let txn_id = &*URL_SAFE_NO_PAD.encode(txn_hash); //debug_assert!(pdu_jsons.len() + edu_jsons.len() > 0, "sending empty // transaction"); @@ -796,81 +798,72 @@ impl Service { Ok(Destination::Push(user_id, pushkey)) } - #[tracing::instrument( - name = "fed", - level = "debug", - skip(self, events), - fields( - events = %events.len(), - ), - )] async fn send_events_dest_federation( &self, server: OwnedServerName, events: Vec, ) -> SendingResult { - let mut pdu_jsons = Vec::with_capacity( - events - .iter() - .filter(|event| matches!(event, SendingEvent::Pdu(_))) - 
.count(), - ); - let mut edu_jsons = Vec::with_capacity( - events - .iter() - .filter(|event| matches!(event, SendingEvent::Edu(_))) - .count(), - ); + let pdus: Vec<_> = events + .iter() + .filter_map(|pdu| match pdu { + | SendingEvent::Pdu(pdu) => Some(pdu), + | _ => None, + }) + .stream() + .then(|pdu_id| self.services.timeline.get_pdu_json_from_id(pdu_id)) + .ready_filter_map(Result::ok) + .then(|pdu| self.convert_to_outgoing_federation_event(pdu)) + .collect() + .await; - for event in &events { - match event { - // TODO: check room version and remove event_id if needed - | SendingEvent::Pdu(pdu_id) => { - if let Ok(pdu) = self.services.timeline.get_pdu_json_from_id(pdu_id).await { - pdu_jsons.push(self.convert_to_outgoing_federation_event(pdu).await); - } - }, - | SendingEvent::Edu(edu) => - if let Ok(raw) = serde_json::from_slice(edu) { - edu_jsons.push(raw); - }, - | SendingEvent::Flush => {}, // flush only; no new content + let edus: Vec> = events + .iter() + .filter_map(|edu| match edu { + | SendingEvent::Edu(edu) => Some(edu.as_ref()), + | _ => None, + }) + .map(serde_json::from_slice) + .filter_map(Result::ok) + .collect(); + + if pdus.is_empty() && edus.is_empty() { + return Ok(Destination::Federation(server)); + } + + let preimage = pdus + .iter() + .map(|raw| raw.get().as_bytes()) + .chain(edus.iter().map(|raw| raw.json().get().as_bytes())); + + let txn_hash = calculate_hash(preimage); + let txn_id = &*URL_SAFE_NO_PAD.encode(txn_hash); + let request = send_transaction_message::v1::Request { + transaction_id: txn_id.into(), + origin: self.server.name.clone(), + origin_server_ts: MilliSecondsSinceUnixEpoch::now(), + pdus, + edus, + }; + + let result = self + .services + .federation + .execute_on(&self.services.client.sender, &server, request) + .await; + + for (event_id, result) in result.iter().flat_map(|resp| resp.pdus.iter()) { + if let Err(e) = result { + warn!( + %txn_id, %server, + "error sending PDU {event_id} to remote server: {e:?}" + ); } } - //debug_assert!(pdu_jsons.len() + edu_jsons.len() > 0, "sending empty - // transaction"); - - let txn_hash = calculate_hash(events.iter().filter_map(|e| match e { - | SendingEvent::Edu(b) => Some(&**b), - | SendingEvent::Pdu(b) => Some(b.as_ref()), - | SendingEvent::Flush => None, - })); - - let txn_id = &*general_purpose::URL_SAFE_NO_PAD.encode(txn_hash); - - let request = send_transaction_message::v1::Request { - origin: self.server.name.clone(), - pdus: pdu_jsons, - edus: edu_jsons, - origin_server_ts: MilliSecondsSinceUnixEpoch::now(), - transaction_id: txn_id.into(), - }; - - let client = &self.services.client.sender; - self.services.federation.execute_on(client, &server, request) - .await - .inspect(|response| { - response - .pdus - .iter() - .filter(|(_, res)| res.is_err()) - .for_each( - |(pdu_id, res)| warn!(%txn_id, %server, "error sending PDU {pdu_id} to remote server: {res:?}"), - ); - }) - .map_err(|e| (Destination::Federation(server.clone()), e)) - .map(|_| Destination::Federation(server)) + match result { + | Err(error) => Err((Destination::Federation(server), error)), + | Ok(_) => Ok(Destination::Federation(server)), + } } /// This does not return a full `Pdu` it is only to satisfy ruma's types. 
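The transaction ID used above is nothing more than an unpadded, URL-safe base64 encoding of a hash over the raw PDU and EDU payloads. A minimal sketch of that derivation, using sha2's Sha256 as a stand-in for conduwuit's internal calculate_hash helper (an assumption; the real digest may differ):

use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine as _};
use sha2::{Digest, Sha256};

// Hash every payload in order, then base64-encode without padding so the
// result is safe to embed in the /send/{txnId} path segment.
fn txn_id_for<'a>(payloads: impl IntoIterator<Item = &'a [u8]>) -> String {
    let mut hasher = Sha256::new(); // stand-in for calculate_hash (assumption)
    for bytes in payloads {
        hasher.update(bytes);
    }
    URL_SAFE_NO_PAD.encode(hasher.finalize())
}

fn main() {
    // Example inputs only; real callers pass the serialized PDU/EDU bytes.
    let txn_id = txn_id_for([b"pdu-json".as_slice(), b"edu-json".as_slice()]);
    println!("{txn_id}");
}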
From 99fe88c21e54f46aedd731786d73d7d9a721dc04 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 21:47:52 +0000 Subject: [PATCH 110/328] use smallvec for the edu sending event buffer Signed-off-by: Jason Volk --- Cargo.lock | 1 + src/api/client/to_device.rs | 28 ++++++++++++++----------- src/service/Cargo.toml | 1 + src/service/rooms/typing/mod.rs | 13 ++++++------ src/service/sending/data.rs | 6 +++--- src/service/sending/mod.rs | 19 +++++++++++------ src/service/sending/sender.rs | 36 +++++++++++++++++++++------------ 7 files changed, 64 insertions(+), 40 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3a435a10..e379aebb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -845,6 +845,7 @@ dependencies = [ "serde_json", "serde_yaml", "sha2", + "smallvec", "termimad", "tokio", "tracing", diff --git a/src/api/client/to_device.rs b/src/api/client/to_device.rs index 2ded04e7..1b942fba 100644 --- a/src/api/client/to_device.rs +++ b/src/api/client/to_device.rs @@ -10,6 +10,7 @@ use ruma::{ }, to_device::DeviceIdOrAllDevices, }; +use service::sending::EduBuf; use crate::Ruma; @@ -42,18 +43,21 @@ pub(crate) async fn send_event_to_device_route( messages.insert(target_user_id.clone(), map); let count = services.globals.next_count()?; - services.sending.send_edu_server( - target_user_id.server_name(), - serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice( - DirectDeviceContent { - sender: sender_user.clone(), - ev_type: body.event_type.clone(), - message_id: count.to_string().into(), - messages, - }, - )) - .expect("DirectToDevice EDU can be serialized"), - )?; + let mut buf = EduBuf::new(); + serde_json::to_writer( + &mut buf, + &federation::transactions::edu::Edu::DirectToDevice(DirectDeviceContent { + sender: sender_user.clone(), + ev_type: body.event_type.clone(), + message_id: count.to_string().into(), + messages, + }), + ) + .expect("DirectToDevice EDU can be serialized"); + + services + .sending + .send_edu_server(target_user_id.server_name(), buf)?; continue; } diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml index 21fbb417..c4f75453 100644 --- a/src/service/Cargo.toml +++ b/src/service/Cargo.toml @@ -74,6 +74,7 @@ serde_json.workspace = true serde.workspace = true serde_yaml.workspace = true sha2.workspace = true +smallvec.workspace = true termimad.workspace = true termimad.optional = true tokio.workspace = true diff --git a/src/service/rooms/typing/mod.rs b/src/service/rooms/typing/mod.rs index a6123322..c710b33a 100644 --- a/src/service/rooms/typing/mod.rs +++ b/src/service/rooms/typing/mod.rs @@ -13,7 +13,7 @@ use ruma::{ }; use tokio::sync::{broadcast, RwLock}; -use crate::{globals, sending, users, Dep}; +use crate::{globals, sending, sending::EduBuf, users, Dep}; pub struct Service { server: Arc, @@ -228,12 +228,13 @@ impl Service { return Ok(()); } - let edu = Edu::Typing(TypingContent::new(room_id.to_owned(), user_id.to_owned(), typing)); + let content = TypingContent::new(room_id.to_owned(), user_id.to_owned(), typing); + let edu = Edu::Typing(content); - self.services - .sending - .send_edu_room(room_id, serde_json::to_vec(&edu).expect("Serialized Edu::Typing")) - .await?; + let mut buf = EduBuf::new(); + serde_json::to_writer(&mut buf, &edu).expect("Serialized Edu::Typing"); + + self.services.sending.send_edu_room(room_id, buf).await?; Ok(()) } diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs index 436f633e..4dd2d5aa 100644 --- a/src/service/sending/data.rs +++ b/src/service/sending/data.rs @@ -202,7 +202,7 @@ fn 
parse_servercurrentevent(key: &[u8], value: &[u8]) -> Result<(Destination, Se if value.is_empty() { SendingEvent::Pdu(event.into()) } else { - SendingEvent::Edu(value.to_vec()) + SendingEvent::Edu(value.into()) }, ) } else if key.starts_with(b"$") { @@ -230,7 +230,7 @@ fn parse_servercurrentevent(key: &[u8], value: &[u8]) -> Result<(Destination, Se SendingEvent::Pdu(event.into()) } else { // I'm pretty sure this should never be called - SendingEvent::Edu(value.to_vec()) + SendingEvent::Edu(value.into()) }, ) } else { @@ -252,7 +252,7 @@ fn parse_servercurrentevent(key: &[u8], value: &[u8]) -> Result<(Destination, Se if value.is_empty() { SendingEvent::Pdu(event.into()) } else { - SendingEvent::Edu(value.to_vec()) + SendingEvent::Edu(value.into()) }, ) }) diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 80bca112..b146ad49 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -21,6 +21,7 @@ use ruma::{ api::{appservice::Registration, OutgoingRequest}, RoomId, ServerName, UserId, }; +use smallvec::SmallVec; use tokio::task::JoinSet; use self::data::Data; @@ -67,10 +68,16 @@ struct Msg { #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum SendingEvent { Pdu(RawPduId), // pduid - Edu(Vec), // pdu json + Edu(EduBuf), // edu json Flush, // none } +pub type EduBuf = SmallVec<[u8; EDU_BUF_CAP]>; +pub type EduVec = SmallVec<[EduBuf; EDU_VEC_CAP]>; + +const EDU_BUF_CAP: usize = 128; +const EDU_VEC_CAP: usize = 1; + #[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { @@ -177,7 +184,6 @@ impl Service { where S: Stream + Send + 'a, { - let _cork = self.db.db.cork(); let requests = servers .map(|server| { (Destination::Federation(server.into()), SendingEvent::Pdu(pdu_id.to_owned())) @@ -185,6 +191,7 @@ impl Service { .collect::>() .await; + let _cork = self.db.db.cork(); let keys = self.db.queue_requests(requests.iter().map(|(o, e)| (e, o))); for ((dest, event), queue_id) in requests.into_iter().zip(keys) { @@ -195,7 +202,7 @@ impl Service { } #[tracing::instrument(skip(self, server, serialized), level = "debug")] - pub fn send_edu_server(&self, server: &ServerName, serialized: Vec) -> Result<()> { + pub fn send_edu_server(&self, server: &ServerName, serialized: EduBuf) -> Result { let dest = Destination::Federation(server.to_owned()); let event = SendingEvent::Edu(serialized); let _cork = self.db.db.cork(); @@ -208,7 +215,7 @@ impl Service { } #[tracing::instrument(skip(self, room_id, serialized), level = "debug")] - pub async fn send_edu_room(&self, room_id: &RoomId, serialized: Vec) -> Result<()> { + pub async fn send_edu_room(&self, room_id: &RoomId, serialized: EduBuf) -> Result { let servers = self .services .state_cache @@ -219,11 +226,10 @@ impl Service { } #[tracing::instrument(skip(self, servers, serialized), level = "debug")] - pub async fn send_edu_servers<'a, S>(&self, servers: S, serialized: Vec) -> Result<()> + pub async fn send_edu_servers<'a, S>(&self, servers: S, serialized: EduBuf) -> Result where S: Stream + Send + 'a, { - let _cork = self.db.db.cork(); let requests = servers .map(|server| { ( @@ -234,6 +240,7 @@ impl Service { .collect::>() .await; + let _cork = self.db.db.cork(); let keys = self.db.queue_requests(requests.iter().map(|(o, e)| (e, o))); for ((dest, event), queue_id) in requests.into_iter().zip(keys) { diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 47be01f1..363bb994 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs 
@@ -45,7 +45,9 @@ use ruma::{ }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; -use super::{appservice, data::QueueItem, Destination, Msg, SendingEvent, Service}; +use super::{ + appservice, data::QueueItem, Destination, EduBuf, EduVec, Msg, SendingEvent, Service, +}; #[derive(Debug)] enum TransactionStatus { @@ -313,7 +315,12 @@ impl Service { if let Destination::Federation(server_name) = dest { if let Ok((select_edus, last_count)) = self.select_edus(server_name).await { debug_assert!(select_edus.len() <= EDU_LIMIT, "exceeded edus limit"); - events.extend(select_edus.into_iter().map(SendingEvent::Edu)); + let select_edus = select_edus + .into_iter() + .map(Into::into) + .map(SendingEvent::Edu); + + events.extend(select_edus); self.db.set_latest_educount(server_name, last_count); } } @@ -357,7 +364,7 @@ impl Service { level = "debug", skip_all, )] - async fn select_edus(&self, server_name: &ServerName) -> Result<(Vec>, u64)> { + async fn select_edus(&self, server_name: &ServerName) -> Result<(EduVec, u64)> { // selection window let since = self.db.get_latest_educount(server_name).await; let since_upper = self.services.globals.current_count()?; @@ -405,8 +412,8 @@ impl Service { since: (u64, u64), max_edu_count: &AtomicU64, events_len: &AtomicUsize, - ) -> Vec> { - let mut events = Vec::new(); + ) -> EduVec { + let mut events = EduVec::new(); let server_rooms = self.services.state_cache.server_rooms(server_name); pin_mut!(server_rooms); @@ -441,10 +448,11 @@ impl Service { keys: None, }); - let edu = serde_json::to_vec(&edu) + let mut buf = EduBuf::new(); + serde_json::to_writer(&mut buf, &edu) .expect("failed to serialize device list update to JSON"); - events.push(edu); + events.push(buf); if events_len.fetch_add(1, Ordering::Relaxed) >= SELECT_EDU_LIMIT - 1 { return events; } @@ -465,7 +473,7 @@ impl Service { server_name: &ServerName, since: (u64, u64), max_edu_count: &AtomicU64, - ) -> Option> { + ) -> Option { let server_rooms = self.services.state_cache.server_rooms(server_name); pin_mut!(server_rooms); @@ -487,10 +495,11 @@ impl Service { let receipt_content = Edu::Receipt(ReceiptContent { receipts }); - let receipt_content = serde_json::to_vec(&receipt_content) + let mut buf = EduBuf::new(); + serde_json::to_writer(&mut buf, &receipt_content) .expect("Failed to serialize Receipt EDU to JSON vec"); - Some(receipt_content) + Some(buf) } /// Look for read receipts in this room @@ -569,7 +578,7 @@ impl Service { server_name: &ServerName, since: (u64, u64), max_edu_count: &AtomicU64, - ) -> Option> { + ) -> Option { let presence_since = self.services.presence.presence_since(since.0); pin_mut!(presence_since); @@ -628,10 +637,11 @@ impl Service { push: presence_updates.into_values().collect(), }); - let presence_content = serde_json::to_vec(&presence_content) + let mut buf = EduBuf::new(); + serde_json::to_writer(&mut buf, &presence_content) .expect("failed to serialize Presence EDU to JSON"); - Some(presence_content) + Some(buf) } fn send_events(&self, dest: Destination, events: Vec) -> SendingFuture<'_> { From ed3cd99781f35dd7e38439ab45b78a851385ca8d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 28 Jan 2025 19:42:09 +0000 Subject: [PATCH 111/328] abstract the config reload checks Signed-off-by: Jason Volk --- src/admin/server/commands.rs | 14 ++++++++------ src/core/config/check.rs | 18 +++++++++++++++++- src/main/server.rs | 6 +++--- 3 files changed, 28 insertions(+), 10 deletions(-) diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs 
index 47509bad..5c0c2a10 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -32,13 +32,15 @@ pub(super) async fn reload_config( &self, path: Option, ) -> Result { - let path = path.as_deref().into_iter(); - let config = Config::load(path).and_then(|raw| Config::new(&raw))?; - if config.server_name != self.services.server.name { - return Err!("You can't change the server name."); - } + use conduwuit::config::check; - let _old = self.services.server.config.update(config)?; + let path = path.as_deref().into_iter(); + let new = Config::load(path).and_then(|raw| Config::new(&raw))?; + + let old = &self.services.server.config; + check::reload(old, &new)?; + + self.services.server.config.update(new)?; Ok(RoomMessageEventContent::text_plain("Successfully reconfigured.")) } diff --git a/src/core/config/check.rs b/src/core/config/check.rs index d7be54b1..988d4143 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -6,8 +6,24 @@ use figment::Figment; use super::DEPRECATED_KEYS; use crate::{debug, debug_info, debug_warn, error, warn, Config, Err, Result, Server}; +/// Performs check() with additional checks specific to reloading old config +/// with new config. +pub fn reload(old: &Config, new: &Config) -> Result { + check(new)?; + + if new.server_name != old.server_name { + return Err!(Config( + "server_name", + "You can't change the server's name from {:?}.", + old.server_name + )); + } + + Ok(()) +} + #[allow(clippy::cognitive_complexity)] -pub fn check(config: &Config) -> Result<()> { +pub fn check(config: &Config) -> Result { if cfg!(debug_assertions) { warn!("Note: conduwuit was built without optimisations (i.e. debug build)"); } diff --git a/src/main/server.rs b/src/main/server.rs index 74859f2b..7376b2fc 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -46,14 +46,14 @@ impl Server { .and_then(|raw| crate::clap::update(raw, args)) .and_then(|raw| Config::new(&raw))?; - #[cfg(feature = "sentry_telemetry")] - let sentry_guard = crate::sentry::init(&config); - let (tracing_reload_handle, tracing_flame_guard, capture) = crate::logging::init(&config)?; config.check()?; + #[cfg(feature = "sentry_telemetry")] + let sentry_guard = crate::sentry::init(&config); + #[cfg(unix)] sys::maximize_fd_limit() .expect("Unable to increase maximum soft and hard file descriptor limit"); From a567e314e96bb8efa2776770cd25a2b1190c9587 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 28 Jan 2025 20:02:29 +0000 Subject: [PATCH 112/328] simplify shutdown signal handlers Signed-off-by: Jason Volk --- src/core/server.rs | 2 +- src/router/run.rs | 27 +++++++-------------------- src/service/sync/watch.rs | 4 ++-- 3 files changed, 10 insertions(+), 23 deletions(-) diff --git a/src/core/server.rs b/src/core/server.rs index 0f2e61b0..45ba7420 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -112,7 +112,7 @@ impl Server { } #[inline] - pub async fn until_shutdown(self: Arc) { + pub async fn until_shutdown(self: &Arc) { while self.running() { self.signal.subscribe().recv().await.ok(); } diff --git a/src/router/run.rs b/src/router/run.rs index 95d12559..ea8a7666 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -9,6 +9,7 @@ use std::{ use axum_server::Handle as ServerHandle; use conduwuit::{debug, debug_error, debug_info, error, info, Error, Result, Server}; +use futures::FutureExt; use service::Services; use tokio::{ sync::broadcast::{self, Sender}, @@ -109,28 +110,14 @@ pub(crate) async fn stop(services: Arc) -> Result<()> { 
#[tracing::instrument(skip_all)] async fn signal(server: Arc, tx: Sender<()>, handle: axum_server::Handle) { - loop { - let sig: &'static str = server - .signal - .subscribe() - .recv() - .await - .expect("channel error"); - - if !server.running() { - handle_shutdown(&server, &tx, &handle, sig).await; - break; - } - } + server + .clone() + .until_shutdown() + .then(move |()| handle_shutdown(server, tx, handle)) + .await; } -async fn handle_shutdown( - server: &Arc, - tx: &Sender<()>, - handle: &axum_server::Handle, - sig: &str, -) { - debug!("Received signal {sig}"); +async fn handle_shutdown(server: Arc, tx: Sender<()>, handle: axum_server::Handle) { if let Err(e) = tx.send(()) { error!("failed sending shutdown transaction to channel: {e}"); } diff --git a/src/service/sync/watch.rs b/src/service/sync/watch.rs index 2b351c3a..0a9c5d15 100644 --- a/src/service/sync/watch.rs +++ b/src/service/sync/watch.rs @@ -97,8 +97,8 @@ pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result { ); // Server shutdown - let server_shutdown = self.services.server.clone().until_shutdown().boxed(); - futures.push(server_shutdown); + futures.push(self.services.server.until_shutdown().boxed()); + if !self.services.server.running() { return Ok(()); } From 2f449ba47db488ca1d3acb6f7228479af6bb97c2 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 28 Jan 2025 20:55:28 +0000 Subject: [PATCH 113/328] support reloading config via SIGUSR1 Signed-off-by: Jason Volk --- conduwuit-example.toml | 5 ++++ src/admin/server/commands.rs | 11 ++------ src/core/config/mod.rs | 7 +++++ src/main/signal.rs | 2 ++ src/service/config/mod.rs | 55 ++++++++++++++++++++++++++++++++++++ src/service/mod.rs | 1 + src/service/services.rs | 4 ++- 7 files changed, 75 insertions(+), 10 deletions(-) create mode 100644 src/service/config/mod.rs diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 51d948e8..8534e5c6 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1524,6 +1524,11 @@ # #listening = true +# Enables configuration reload when the server receives SIGUSR1 on +# supporting platforms. +# +#config_reload_signal = true + [global.tls] # Path to a valid TLS certificate file. diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index 5c0c2a10..910dce6e 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -1,6 +1,6 @@ use std::{fmt::Write, path::PathBuf, sync::Arc}; -use conduwuit::{info, utils::time, warn, Config, Err, Result}; +use conduwuit::{info, utils::time, warn, Err, Result}; use ruma::events::room::message::RoomMessageEventContent; use crate::admin_command; @@ -32,15 +32,8 @@ pub(super) async fn reload_config( &self, path: Option, ) -> Result { - use conduwuit::config::check; - let path = path.as_deref().into_iter(); - let new = Config::load(path).and_then(|raw| Config::new(&raw))?; - - let old = &self.services.server.config; - check::reload(old, &new)?; - - self.services.server.config.update(new)?; + self.services.config.reload(path)?; Ok(RoomMessageEventContent::text_plain("Successfully reconfigured.")) } diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 94788fa4..8e8176ab 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1742,6 +1742,13 @@ pub struct Config { #[serde(default = "true_fn")] pub listening: bool, + /// Enables configuration reload when the server receives SIGUSR1 on + /// supporting platforms. 
+ /// + /// default: true + #[serde(default = "true_fn")] + pub config_reload_signal: bool, + #[serde(flatten)] #[allow(clippy::zero_sized_map_values)] // this is a catchall, the map shouldn't be zero at runtime diff --git a/src/main/signal.rs b/src/main/signal.rs index cecb718b..dfdca1d5 100644 --- a/src/main/signal.rs +++ b/src/main/signal.rs @@ -16,6 +16,7 @@ pub(super) async fn signal(server: Arc) { let mut quit = unix::signal(SignalKind::quit()).expect("SIGQUIT handler"); let mut term = unix::signal(SignalKind::terminate()).expect("SIGTERM handler"); + let mut usr1 = unix::signal(SignalKind::user_defined1()).expect("SIGUSR1 handler"); loop { trace!("Installed signal handlers"); let sig: &'static str; @@ -23,6 +24,7 @@ pub(super) async fn signal(server: Arc) { _ = signal::ctrl_c() => { sig = "SIGINT"; }, _ = quit.recv() => { sig = "SIGQUIT"; }, _ = term.recv() => { sig = "SIGTERM"; }, + _ = usr1.recv() => { sig = "SIGUSR1"; }, } warn!("Received {sig}"); diff --git a/src/service/config/mod.rs b/src/service/config/mod.rs new file mode 100644 index 00000000..ef98f176 --- /dev/null +++ b/src/service/config/mod.rs @@ -0,0 +1,55 @@ +use std::{iter, path::Path, sync::Arc}; + +use async_trait::async_trait; +use conduwuit::{ + config::{check, Config}, + error, implement, Result, Server, +}; + +pub struct Service { + server: Arc, +} + +const SIGNAL: &str = "SIGUSR1"; + +#[async_trait] +impl crate::Service for Service { + fn build(args: crate::Args<'_>) -> Result> { + Ok(Arc::new(Self { server: args.server.clone() })) + } + + async fn worker(self: Arc) -> Result { + while self.server.running() { + if self.server.signal.subscribe().recv().await == Ok(SIGNAL) { + if let Err(e) = self.handle_reload() { + error!("Failed to reload config: {e}"); + } + } + } + + Ok(()) + } + + fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } +} + +#[implement(Service)] +fn handle_reload(&self) -> Result { + if self.server.config.config_reload_signal { + self.reload(iter::empty())?; + } + + Ok(()) +} + +#[implement(Service)] +pub fn reload<'a, I>(&self, paths: I) -> Result> +where + I: Iterator, +{ + let old = self.server.config.clone(); + let new = Config::load(paths).and_then(|raw| Config::new(&raw))?; + + check::reload(&old, &new)?; + self.server.config.update(new) +} diff --git a/src/service/mod.rs b/src/service/mod.rs index 2102921f..71bd0eb4 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -9,6 +9,7 @@ pub mod account_data; pub mod admin; pub mod appservice; pub mod client; +pub mod config; pub mod emergency; pub mod federation; pub mod globals; diff --git a/src/service/services.rs b/src/service/services.rs index cb5cc12f..fb334b96 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -10,7 +10,7 @@ use database::Database; use tokio::sync::Mutex; use crate::{ - account_data, admin, appservice, client, emergency, federation, globals, key_backups, + account_data, admin, appservice, client, config, emergency, federation, globals, key_backups, manager::Manager, media, presence, pusher, resolver, rooms, sending, server_keys, service, service::{Args, Map, Service}, @@ -21,6 +21,7 @@ pub struct Services { pub account_data: Arc, pub admin: Arc, pub appservice: Arc, + pub config: Arc, pub client: Arc, pub emergency: Arc, pub globals: Arc, @@ -68,6 +69,7 @@ impl Services { appservice: build!(appservice::Service), resolver: build!(resolver::Service), client: build!(client::Service), + config: build!(config::Service), emergency: build!(emergency::Service), globals: 
build!(globals::Service), key_backups: build!(key_backups::Service), From 2c5af902a3b61cf07a07ccfff82a41874d7b10ba Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 28 Jan 2025 21:30:12 +0000 Subject: [PATCH 114/328] support executing configurable admin commands via SIGUSR2 Signed-off-by: Jason Volk --- conduwuit-example.toml | 7 +++ src/core/config/mod.rs | 9 ++++ src/main/signal.rs | 2 + src/service/admin/{startup.rs => execute.rs} | 56 ++++++++++++++------ src/service/admin/mod.rs | 8 ++- src/service/config/mod.rs | 9 +++- 6 files changed, 72 insertions(+), 19 deletions(-) rename src/service/admin/{startup.rs => execute.rs} (57%) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 8534e5c6..4062ba99 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1362,6 +1362,13 @@ # #admin_execute_errors_ignore = false +# List of admin commands to execute on SIGUSR2. +# +# Similar to admin_execute, but these commands are executed when the +# server receives SIGUSR2 on supporting platforms. +# +#admin_signal_execute = [] + # Controls the max log level for admin command log captures (logs # generated from running admin commands). Defaults to "info" on release # builds, else "debug" on debug builds. diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 8e8176ab..415c9ba9 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1554,6 +1554,15 @@ pub struct Config { #[serde(default)] pub admin_execute_errors_ignore: bool, + /// List of admin commands to execute on SIGUSR2. + /// + /// Similar to admin_execute, but these commands are executed when the + /// server receives SIGUSR2 on supporting platforms. + /// + /// default: [] + #[serde(default)] + pub admin_signal_execute: Vec, + /// Controls the max log level for admin command log captures (logs /// generated from running admin commands). Defaults to "info" on release /// builds, else "debug" on debug builds. diff --git a/src/main/signal.rs b/src/main/signal.rs index dfdca1d5..343b95c9 100644 --- a/src/main/signal.rs +++ b/src/main/signal.rs @@ -17,6 +17,7 @@ pub(super) async fn signal(server: Arc) { let mut quit = unix::signal(SignalKind::quit()).expect("SIGQUIT handler"); let mut term = unix::signal(SignalKind::terminate()).expect("SIGTERM handler"); let mut usr1 = unix::signal(SignalKind::user_defined1()).expect("SIGUSR1 handler"); + let mut usr2 = unix::signal(SignalKind::user_defined2()).expect("SIGUSR2 handler"); loop { trace!("Installed signal handlers"); let sig: &'static str; @@ -25,6 +26,7 @@ pub(super) async fn signal(server: Arc) { _ = quit.recv() => { sig = "SIGQUIT"; }, _ = term.recv() => { sig = "SIGTERM"; }, _ = usr1.recv() => { sig = "SIGUSR1"; }, + _ = usr2.recv() => { sig = "SIGUSR2"; }, } warn!("Received {sig}"); diff --git a/src/service/admin/startup.rs b/src/service/admin/execute.rs similarity index 57% rename from src/service/admin/startup.rs rename to src/service/admin/execute.rs index 582e863d..462681da 100644 --- a/src/service/admin/startup.rs +++ b/src/service/admin/execute.rs @@ -2,6 +2,8 @@ use conduwuit::{debug, debug_info, error, implement, info, Err, Result}; use ruma::events::room::message::RoomMessageEventContent; use tokio::time::{sleep, Duration}; +pub(super) const SIGNAL: &str = "SIGUSR2"; + /// Possibly spawn the terminal console at startup if configured. 
#[implement(super::Service)] pub(super) async fn console_auto_start(&self) { @@ -22,7 +24,7 @@ pub(super) async fn console_auto_stop(&self) { /// Execute admin commands after startup #[implement(super::Service)] -pub(super) async fn startup_execute(&self) -> Result<()> { +pub(super) async fn startup_execute(&self) -> Result { // List of comamnds to execute let commands = &self.services.server.config.admin_execute; @@ -36,7 +38,7 @@ pub(super) async fn startup_execute(&self) -> Result<()> { sleep(Duration::from_millis(500)).await; for (i, command) in commands.iter().enumerate() { - if let Err(e) = self.startup_execute_command(i, command.clone()).await { + if let Err(e) = self.execute_command(i, command.clone()).await { if !errors { return Err(e); } @@ -59,16 +61,38 @@ pub(super) async fn startup_execute(&self) -> Result<()> { Ok(()) } -/// Execute one admin command after startup +/// Execute admin commands after signal #[implement(super::Service)] -async fn startup_execute_command(&self, i: usize, command: String) -> Result<()> { - debug!("Startup command #{i}: executing {command:?}"); +pub(super) async fn signal_execute(&self) -> Result { + // List of comamnds to execute + let commands = self.services.server.config.admin_signal_execute.clone(); + + // When true, errors are ignored and execution continues. + let ignore_errors = self.services.server.config.admin_execute_errors_ignore; + + for (i, command) in commands.iter().enumerate() { + if let Err(e) = self.execute_command(i, command.clone()).await { + if !ignore_errors { + return Err(e); + } + } + + tokio::task::yield_now().await; + } + + Ok(()) +} + +/// Execute one admin command after startup or signal +#[implement(super::Service)] +async fn execute_command(&self, i: usize, command: String) -> Result { + debug!("Execute command #{i}: executing {command:?}"); match self.command_in_place(command, None).await { - | Ok(Some(output)) => Self::startup_command_output(i, &output), - | Err(output) => Self::startup_command_error(i, &output), + | Ok(Some(output)) => Self::execute_command_output(i, &output), + | Err(output) => Self::execute_command_error(i, &output), | Ok(None) => { - info!("Startup command #{i} completed (no output)."); + info!("Execute command #{i} completed (no output)."); Ok(()) }, } @@ -76,28 +100,28 @@ async fn startup_execute_command(&self, i: usize, command: String) -> Result<()> #[cfg(feature = "console")] #[implement(super::Service)] -fn startup_command_output(i: usize, content: &RoomMessageEventContent) -> Result<()> { - debug_info!("Startup command #{i} completed:"); +fn execute_command_output(i: usize, content: &RoomMessageEventContent) -> Result { + debug_info!("Execute command #{i} completed:"); super::console::print(content.body()); Ok(()) } #[cfg(feature = "console")] #[implement(super::Service)] -fn startup_command_error(i: usize, content: &RoomMessageEventContent) -> Result<()> { +fn execute_command_error(i: usize, content: &RoomMessageEventContent) -> Result { super::console::print_err(content.body()); - Err!(debug_error!("Startup command #{i} failed.")) + Err!(debug_error!("Execute command #{i} failed.")) } #[cfg(not(feature = "console"))] #[implement(super::Service)] -fn startup_command_output(i: usize, content: &RoomMessageEventContent) -> Result<()> { - info!("Startup command #{i} completed:\n{:#}", content.body()); +fn execute_command_output(i: usize, content: &RoomMessageEventContent) -> Result { + info!("Execute command #{i} completed:\n{:#}", content.body()); Ok(()) } #[cfg(not(feature = "console"))] 
#[implement(super::Service)] -fn startup_command_error(i: usize, content: &RoomMessageEventContent) -> Result<()> { - Err!(error!("Startup command #{i} failed:\n{:#}", content.body())) +fn execute_command_error(i: usize, content: &RoomMessageEventContent) -> Result { + Err!(error!("Execute command #{i} failed:\n{:#}", content.body())) } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index bc410631..31b046b7 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -1,7 +1,7 @@ pub mod console; mod create; +mod execute; mod grant; -mod startup; use std::{ future::Future, @@ -183,7 +183,11 @@ impl Service { .map(|complete| complete(command)) } - async fn handle_signal(&self, #[allow(unused_variables)] sig: &'static str) { + async fn handle_signal(&self, sig: &'static str) { + if sig == execute::SIGNAL { + self.signal_execute().await.ok(); + } + #[cfg(feature = "console")] self.console.handle_signal(sig).await; } diff --git a/src/service/config/mod.rs b/src/service/config/mod.rs index ef98f176..8bd09a52 100644 --- a/src/service/config/mod.rs +++ b/src/service/config/mod.rs @@ -1,4 +1,4 @@ -use std::{iter, path::Path, sync::Arc}; +use std::{iter, ops::Deref, path::Path, sync::Arc}; use async_trait::async_trait; use conduwuit::{ @@ -33,6 +33,13 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } +impl Deref for Service { + type Target = Arc; + + #[inline] + fn deref(&self) -> &Self::Target { &self.server.config } +} + #[implement(Service)] fn handle_reload(&self) -> Result { if self.server.config.config_reload_signal { From ad0b0af955cda8b93b6d8c9c665905a2c4dd93d3 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 25 Jan 2025 23:07:50 +0000 Subject: [PATCH 115/328] combine state_accessor data into mod Signed-off-by: Jason Volk --- src/service/rooms/state_accessor/data.rs | 253 ----------------------- src/service/rooms/state_accessor/mod.rs | 183 +++++++++++++--- 2 files changed, 149 insertions(+), 287 deletions(-) delete mode 100644 src/service/rooms/state_accessor/data.rs diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs deleted file mode 100644 index 29b27a05..00000000 --- a/src/service/rooms/state_accessor/data.rs +++ /dev/null @@ -1,253 +0,0 @@ -use std::{borrow::Borrow, collections::HashMap, sync::Arc}; - -use conduwuit::{ - at, err, - utils::stream::{BroadbandExt, IterStream, ReadyExt}, - PduEvent, Result, -}; -use database::{Deserialized, Map}; -use futures::{FutureExt, StreamExt, TryFutureExt}; -use ruma::{events::StateEventType, EventId, OwnedEventId, RoomId}; -use serde::Deserialize; - -use crate::{ - rooms, - rooms::{ - short::{ShortEventId, ShortStateHash, ShortStateKey}, - state_compressor::parse_compressed_state_event, - }, - Dep, -}; - -pub(super) struct Data { - shorteventid_shortstatehash: Arc, - services: Services, -} - -struct Services { - short: Dep, - state: Dep, - state_compressor: Dep, - timeline: Dep, -} - -impl Data { - pub(super) fn new(args: &crate::Args<'_>) -> Self { - let db = &args.db; - Self { - shorteventid_shortstatehash: db["shorteventid_shortstatehash"].clone(), - services: Services { - short: args.depend::("rooms::short"), - state: args.depend::("rooms::state"), - state_compressor: args - .depend::("rooms::state_compressor"), - timeline: args.depend::("rooms::timeline"), - }, - } - } - - pub(super) async fn state_full( - &self, - shortstatehash: ShortStateHash, - ) -> Result> { - let state = self - 
.state_full_pdus(shortstatehash) - .await? - .into_iter() - .filter_map(|pdu| Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu))) - .collect(); - - Ok(state) - } - - pub(super) async fn state_full_pdus( - &self, - shortstatehash: ShortStateHash, - ) -> Result> { - let short_ids = self.state_full_shortids(shortstatehash).await?; - - let full_pdus = self - .services - .short - .multi_get_eventid_from_short(short_ids.into_iter().map(at!(1)).stream()) - .ready_filter_map(Result::ok) - .broad_filter_map(|event_id: OwnedEventId| async move { - self.services.timeline.get_pdu(&event_id).await.ok() - }) - .collect() - .await; - - Ok(full_pdus) - } - - pub(super) async fn state_full_ids( - &self, - shortstatehash: ShortStateHash, - ) -> Result> - where - Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, - ::Owned: Borrow, - { - let short_ids = self.state_full_shortids(shortstatehash).await?; - - let full_ids = self - .services - .short - .multi_get_eventid_from_short(short_ids.iter().map(at!(1)).stream()) - .zip(short_ids.iter().stream().map(at!(0))) - .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) - .collect() - .boxed() - .await; - - Ok(full_ids) - } - - pub(super) async fn state_full_shortids( - &self, - shortstatehash: ShortStateHash, - ) -> Result> { - let shortids = self - .services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .await - .map_err(|e| err!(Database("Missing state IDs: {e}")))? - .pop() - .expect("there is always one layer") - .full_state - .iter() - .copied() - .map(parse_compressed_state_event) - .collect(); - - Ok(shortids) - } - - /// Returns a single EventId from `room_id` with key - /// (`event_type`,`state_key`). - pub(super) async fn state_get_id( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - Id: for<'de> Deserialize<'de> + Sized + ToOwned, - ::Owned: Borrow, - { - let shortstatekey = self - .services - .short - .get_shortstatekey(event_type, state_key) - .await?; - - let full_state = self - .services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .await - .map_err(|e| err!(Database(error!(?event_type, ?state_key, "Missing state: {e:?}"))))? - .pop() - .expect("there is always one layer") - .full_state; - - let compressed = full_state - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .ok_or(err!(Database("No shortstatekey in compressed state")))?; - - let (_, shorteventid) = parse_compressed_state_event(*compressed); - - self.services - .short - .get_eventid_from_short(shorteventid) - .await - } - - /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). - pub(super) async fn state_get( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - self.state_get_id(shortstatehash, event_type, state_key) - .and_then(|event_id: OwnedEventId| async move { - self.services.timeline.get_pdu(&event_id).await - }) - .await - } - - /// Returns the state hash for this pdu. - pub(super) async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { - const BUFSIZE: usize = size_of::(); - - self.services - .short - .get_shorteventid(event_id) - .and_then(|shorteventid| { - self.shorteventid_shortstatehash - .aqry::(&shorteventid) - }) - .await - .deserialized() - } - - /// Returns the full room state. 
- pub(super) async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result> { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_full(shortstatehash)) - .map_err(|e| err!(Database("Missing state for {room_id:?}: {e:?}"))) - .await - } - - /// Returns the full room state's pdus. - #[allow(unused_qualifications)] // async traits - pub(super) async fn room_state_full_pdus(&self, room_id: &RoomId) -> Result> { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_full_pdus(shortstatehash)) - .map_err(|e| err!(Database("Missing state pdus for {room_id:?}: {e:?}"))) - .await - } - - /// Returns a single EventId from `room_id` with key - /// (`event_type`,`state_key`). - pub(super) async fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - Id: for<'de> Deserialize<'de> + Sized + ToOwned, - ::Owned: Borrow, - { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_get_id(shortstatehash, event_type, state_key)) - .await - } - - /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). - pub(super) async fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_get(shortstatehash, event_type, state_key)) - .await - } -} diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index d89c8835..3d87534b 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,5 +1,3 @@ -mod data; - use std::{ borrow::Borrow, collections::HashMap, @@ -8,16 +6,18 @@ use std::{ }; use conduwuit::{ - err, error, + at, err, error, pdu::PduBuilder, utils, utils::{ math::{usize_from_f64, Expected}, - ReadyExt, + stream::BroadbandExt, + IterStream, ReadyExt, }, Err, Error, PduEvent, Result, }; -use futures::StreamExt; +use database::{Deserialized, Map}; +use futures::{FutureExt, StreamExt, TryFutureExt}; use lru_cache::LruCache; use ruma::{ events::{ @@ -38,33 +38,40 @@ use ruma::{ }, room::RoomType, space::SpaceRoomJoinRule, - EventEncryptionAlgorithm, EventId, JsOption, OwnedRoomAliasId, OwnedRoomId, OwnedServerName, - OwnedUserId, RoomId, ServerName, UserId, + EventEncryptionAlgorithm, EventId, JsOption, OwnedEventId, OwnedRoomAliasId, OwnedRoomId, + OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; use serde::Deserialize; -use self::data::Data; use crate::{ rooms, rooms::{ short::{ShortEventId, ShortStateHash, ShortStateKey}, state::RoomMutexGuard, + state_compressor::parse_compressed_state_event, }, Dep, }; pub struct Service { - services: Services, - db: Data, pub server_visibility_cache: Mutex>, pub user_visibility_cache: Mutex>, + services: Services, + db: Data, } struct Services { + short: Dep, + state: Dep, + state_compressor: Dep, state_cache: Dep, timeline: Dep, } +struct Data { + shorteventid_shortstatehash: Arc, +} + impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let config = &args.server.config; @@ -74,17 +81,23 @@ impl crate::Service for Service { f64::from(config.user_visibility_cache_capacity) * config.cache_capacity_modifier; Ok(Arc::new(Self { - services: Services { - state_cache: args.depend::("rooms::state_cache"), - timeline: args.depend::("rooms::timeline"), - }, - db: 
Data::new(&args), server_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64( server_visibility_cache_capacity, )?)), user_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64( user_visibility_cache_capacity, )?)), + services: Services { + state_cache: args.depend::("rooms::state_cache"), + timeline: args.depend::("rooms::timeline"), + short: args.depend::("rooms::short"), + state: args.depend::("rooms::state"), + state_compressor: args + .depend::("rooms::state_compressor"), + }, + db: Data { + shorteventid_shortstatehash: args.db["shorteventid_shortstatehash"].clone(), + }, })) } @@ -130,6 +143,37 @@ impl crate::Service for Service { } impl Service { + pub async fn state_full( + &self, + shortstatehash: ShortStateHash, + ) -> Result> { + let state = self + .state_full_pdus(shortstatehash) + .await? + .into_iter() + .filter_map(|pdu| Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu))) + .collect(); + + Ok(state) + } + + pub async fn state_full_pdus(&self, shortstatehash: ShortStateHash) -> Result> { + let short_ids = self.state_full_shortids(shortstatehash).await?; + + let full_pdus = self + .services + .short + .multi_get_eventid_from_short(short_ids.into_iter().map(at!(1)).stream()) + .ready_filter_map(Result::ok) + .broad_filter_map(|event_id: OwnedEventId| async move { + self.services.timeline.get_pdu(&event_id).await.ok() + }) + .collect() + .await; + + Ok(full_pdus) + } + /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self), level = "debug")] @@ -141,7 +185,19 @@ impl Service { Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, ::Owned: Borrow, { - self.db.state_full_ids::(shortstatehash).await + let short_ids = self.state_full_shortids(shortstatehash).await?; + + let full_ids = self + .services + .short + .multi_get_eventid_from_short(short_ids.iter().map(at!(1)).stream()) + .zip(short_ids.iter().stream().map(at!(0))) + .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) + .collect() + .boxed() + .await; + + Ok(full_ids) } #[inline] @@ -149,14 +205,21 @@ impl Service { &self, shortstatehash: ShortStateHash, ) -> Result> { - self.db.state_full_shortids(shortstatehash).await - } + let shortids = self + .services + .state_compressor + .load_shortstatehash_info(shortstatehash) + .await + .map_err(|e| err!(Database("Missing state IDs: {e}")))? + .pop() + .expect("there is always one layer") + .full_state + .iter() + .copied() + .map(parse_compressed_state_event) + .collect(); - pub async fn state_full( - &self, - shortstatehash: ShortStateHash, - ) -> Result> { - self.db.state_full(shortstatehash).await + Ok(shortids) } /// Returns a single EventId from `room_id` with key (`event_type`, @@ -172,22 +235,47 @@ impl Service { Id: for<'de> Deserialize<'de> + Sized + ToOwned, ::Owned: Borrow, { - self.db - .state_get_id(shortstatehash, event_type, state_key) + let shortstatekey = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await?; + + let full_state = self + .services + .state_compressor + .load_shortstatehash_info(shortstatehash) + .await + .map_err(|e| err!(Database(error!(?event_type, ?state_key, "Missing state: {e:?}"))))? 
+ .pop() + .expect("there is always one layer") + .full_state; + + let compressed = full_state + .iter() + .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) + .ok_or(err!(Database("No shortstatekey in compressed state")))?; + + let (_, shorteventid) = parse_compressed_state_event(*compressed); + + self.services + .short + .get_eventid_from_short(shorteventid) .await } /// Returns a single PDU from `room_id` with key (`event_type`, /// `state_key`). - #[inline] pub async fn state_get( &self, shortstatehash: ShortStateHash, event_type: &StateEventType, state_key: &str, ) -> Result { - self.db - .state_get(shortstatehash, event_type, state_key) + self.state_get_id(shortstatehash, event_type, state_key) + .and_then(|event_id: OwnedEventId| async move { + self.services.timeline.get_pdu(&event_id).await + }) .await } @@ -375,7 +463,18 @@ impl Service { /// Returns the state hash for this pdu. pub async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { - self.db.pdu_shortstatehash(event_id).await + const BUFSIZE: usize = size_of::(); + + self.services + .short + .get_shorteventid(event_id) + .and_then(|shorteventid| { + self.db + .shorteventid_shortstatehash + .aqry::(&shorteventid) + }) + .await + .deserialized() } /// Returns the full room state. @@ -384,13 +483,23 @@ impl Service { &self, room_id: &RoomId, ) -> Result> { - self.db.room_state_full(room_id).await + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_full(shortstatehash)) + .map_err(|e| err!(Database("Missing state for {room_id:?}: {e:?}"))) + .await } /// Returns the full room state pdus #[tracing::instrument(skip(self), level = "debug")] pub async fn room_state_full_pdus(&self, room_id: &RoomId) -> Result> { - self.db.room_state_full_pdus(room_id).await + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_full_pdus(shortstatehash)) + .map_err(|e| err!(Database("Missing state pdus for {room_id:?}: {e:?}"))) + .await } /// Returns a single EventId from `room_id` with key (`event_type`, @@ -406,8 +515,10 @@ impl Service { Id: for<'de> Deserialize<'de> + Sized + ToOwned, ::Owned: Borrow, { - self.db - .room_state_get_id(room_id, event_type, state_key) + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_get_id(shortstatehash, event_type, state_key)) .await } @@ -420,7 +531,11 @@ impl Service { event_type: &StateEventType, state_key: &str, ) -> Result { - self.db.room_state_get(room_id, event_type, state_key).await + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_get(shortstatehash, event_type, state_key)) + .await } /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
From af399fd5179eed9c72bf0426858301af9ffc92d4 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 29 Jan 2025 01:04:02 +0000 Subject: [PATCH 116/328] flatten state accessor iterations Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 11 +- src/api/client/context.rs | 28 ++-- src/api/client/membership.rs | 18 +- src/api/client/message.rs | 6 +- src/api/client/room/initial_sync.rs | 9 +- src/api/client/search.rs | 12 +- src/api/client/state.rs | 12 +- src/api/client/sync/v3.rs | 48 +++--- src/api/client/sync/v4.rs | 8 +- src/api/client/sync/v5.rs | 8 +- src/api/server/send_join.rs | 14 +- src/api/server/state.rs | 10 +- src/api/server/state_ids.rs | 9 +- src/core/pdu/strip.rs | 12 +- .../rooms/event_handler/resolve_state.rs | 5 +- .../rooms/event_handler/state_at_incoming.rs | 17 +- src/service/rooms/spaces/mod.rs | 4 +- src/service/rooms/state_accessor/mod.rs | 155 ++++++++++-------- 18 files changed, 205 insertions(+), 181 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index cdd69c0f..cd892ded 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -9,7 +9,7 @@ use conduwuit::{ debug_error, err, info, trace, utils, utils::string::EMPTY, warn, Error, PduEvent, PduId, RawPduId, Result, }; -use futures::{FutureExt, StreamExt}; +use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ api::{client::error::ErrorKind, federation::event::get_room_state}, events::room::message::RoomMessageEventContent, @@ -327,11 +327,10 @@ pub(super) async fn get_room_state( .services .rooms .state_accessor - .room_state_full(&room_id) - .await? - .values() - .map(PduEvent::to_state_event) - .collect(); + .room_state_full_pdus(&room_id) + .map_ok(PduEvent::into_state_event) + .try_collect() + .await?; if room_state.is_empty() { return Ok(RoomMessageEventContent::text_plain( diff --git a/src/api/client/context.rs b/src/api/client/context.rs index 388bcf4d..7256683f 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -1,6 +1,6 @@ use axum::extract::State; use conduwuit::{ - at, deref_at, err, ref_at, + at, err, ref_at, utils::{ future::TryExtExt, stream::{BroadbandExt, ReadyExt, TryIgnore, WidebandExt}, @@ -10,10 +10,10 @@ use conduwuit::{ }; use futures::{ future::{join, join3, try_join3, OptionFuture}, - FutureExt, StreamExt, TryFutureExt, + FutureExt, StreamExt, TryFutureExt, TryStreamExt, }; use ruma::{api::client::context::get_context, events::StateEventType, OwnedEventId, UserId}; -use service::rooms::{lazy_loading, lazy_loading::Options}; +use service::rooms::{lazy_loading, lazy_loading::Options, short::ShortStateKey}; use crate::{ client::message::{event_filter, ignored_filter, lazy_loading_witness, visibility_filter}, @@ -132,21 +132,29 @@ pub(crate) async fn get_context_route( .state_accessor .pdu_shortstatehash(state_at) .or_else(|_| services.rooms.state.get_room_shortstatehash(room_id)) - .and_then(|shortstatehash| services.rooms.state_accessor.state_full_ids(shortstatehash)) + .map_ok(|shortstatehash| { + services + .rooms + .state_accessor + .state_full_ids(shortstatehash) + .map(Ok) + }) .map_err(|e| err!(Database("State not found: {e}"))) + .try_flatten_stream() + .try_collect() .boxed(); let (lazy_loading_witnessed, state_ids) = join(lazy_loading_witnessed, state_ids).await; - let state_ids = state_ids?; + let state_ids: Vec<(ShortStateKey, OwnedEventId)> = state_ids?; + let shortstatekeys = state_ids.iter().map(at!(0)).stream(); + let shorteventids = state_ids.iter().map(ref_at!(1)).stream(); let 
lazy_loading_witnessed = lazy_loading_witnessed.unwrap_or_default(); - let shortstatekeys = state_ids.iter().stream().map(deref_at!(0)); - let state: Vec<_> = services .rooms .short .multi_get_statekey_from_short(shortstatekeys) - .zip(state_ids.iter().stream().map(at!(1))) + .zip(shorteventids) .ready_filter_map(|item| Some((item.0.ok()?, item.1))) .ready_filter_map(|((event_type, state_key), event_id)| { if filter.lazy_load_options.is_enabled() @@ -162,9 +170,9 @@ pub(crate) async fn get_context_route( Some(event_id) }) .broad_filter_map(|event_id: &OwnedEventId| { - services.rooms.timeline.get_pdu(event_id).ok() + services.rooms.timeline.get_pdu(event_id.as_ref()).ok() }) - .map(|pdu| pdu.to_state_event()) + .map(PduEvent::into_state_event) .collect() .await; diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 2e23dab9..fccb9b53 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -8,14 +8,14 @@ use std::{ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - debug, debug_info, debug_warn, err, info, + at, debug, debug_info, debug_warn, err, info, pdu::{gen_event_id_canonical_json, PduBuilder}, result::FlatOk, trace, utils::{self, shuffle, IterStream, ReadyExt}, warn, Err, PduEvent, Result, }; -use futures::{join, FutureExt, StreamExt}; +use futures::{join, FutureExt, StreamExt, TryFutureExt}; use ruma::{ api::{ client::{ @@ -765,11 +765,12 @@ pub(crate) async fn get_member_events_route( .rooms .state_accessor .room_state_full(&body.room_id) - .await? - .iter() - .filter(|(key, _)| key.0 == StateEventType::RoomMember) - .map(|(_, pdu)| pdu.to_member_event()) - .collect(), + .ready_filter_map(Result::ok) + .ready_filter(|((ty, _), _)| *ty == StateEventType::RoomMember) + .map(at!(1)) + .map(PduEvent::into_member_event) + .collect() + .await, }) } @@ -1707,9 +1708,6 @@ pub async fn leave_room( room_id: &RoomId, reason: Option, ) -> Result<()> { - //use conduwuit::utils::stream::OptionStream; - use futures::TryFutureExt; - // Ask a remote server if we don't have this room and are not knocking on it if !services .rooms diff --git a/src/api/client/message.rs b/src/api/client/message.rs index a508b5da..321d8013 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -6,9 +6,9 @@ use conduwuit::{ stream::{BroadbandExt, TryIgnore, WidebandExt}, IterStream, ReadyExt, }, - Event, PduCount, Result, + Event, PduCount, PduEvent, Result, }; -use futures::{future::OptionFuture, pin_mut, FutureExt, StreamExt}; +use futures::{future::OptionFuture, pin_mut, FutureExt, StreamExt, TryFutureExt}; use ruma::{ api::{ client::{filter::RoomEventFilter, message::get_message_events}, @@ -220,8 +220,8 @@ async fn get_member_event( .rooms .state_accessor .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str()) + .map_ok(PduEvent::into_state_event) .await - .map(|member_event| member_event.to_state_event()) .ok() } diff --git a/src/api/client/room/initial_sync.rs b/src/api/client/room/initial_sync.rs index 301b6e8d..233d180f 100644 --- a/src/api/client/room/initial_sync.rs +++ b/src/api/client/room/initial_sync.rs @@ -2,7 +2,7 @@ use axum::extract::State; use conduwuit::{ at, utils::{stream::TryTools, BoolExt}, - Err, Result, + Err, PduEvent, Result, }; use futures::TryStreamExt; use ruma::api::client::room::initial_sync::v3::{PaginationChunk, Request, Response}; @@ -39,10 +39,9 @@ pub(crate) async fn room_initial_sync_route( .rooms .state_accessor .room_state_full_pdus(room_id) - .await? 
- .into_iter() - .map(|pdu| pdu.to_state_event()) - .collect(); + .map_ok(PduEvent::into_state_event) + .try_collect() + .await?; let messages = PaginationChunk { start: events.last().map(at!(0)).as_ref().map(ToString::to_string), diff --git a/src/api/client/search.rs b/src/api/client/search.rs index e60bd26d..898dfc7f 100644 --- a/src/api/client/search.rs +++ b/src/api/client/search.rs @@ -7,7 +7,7 @@ use conduwuit::{ utils::{stream::ReadyExt, IterStream}, Err, PduEvent, Result, }; -use futures::{future::OptionFuture, FutureExt, StreamExt, TryFutureExt}; +use futures::{future::OptionFuture, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{ api::client::search::search_events::{ self, @@ -181,15 +181,15 @@ async fn category_room_events( } async fn procure_room_state(services: &Services, room_id: &RoomId) -> Result { - let state_map = services + let state = services .rooms .state_accessor - .room_state_full(room_id) + .room_state_full_pdus(room_id) + .map_ok(PduEvent::into_state_event) + .try_collect() .await?; - let state_events = state_map.values().map(PduEvent::to_state_event).collect(); - - Ok(state_events) + Ok(state) } async fn check_room_visible( diff --git a/src/api/client/state.rs b/src/api/client/state.rs index d00ee5e5..8555f88b 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -1,5 +1,6 @@ use axum::extract::State; use conduwuit::{err, pdu::PduBuilder, utils::BoolExt, Err, PduEvent, Result}; +use futures::TryStreamExt; use ruma::{ api::client::state::{get_state_events, get_state_events_for_key, send_state_event}, events::{ @@ -82,11 +83,10 @@ pub(crate) async fn get_state_events_route( room_state: services .rooms .state_accessor - .room_state_full(&body.room_id) - .await? - .values() - .map(PduEvent::to_state_event) - .collect(), + .room_state_full_pdus(&body.room_id) + .map_ok(PduEvent::into_state_event) + .try_collect() + .await?, }) } @@ -133,7 +133,7 @@ pub(crate) async fn get_state_events_for_key_route( Ok(get_state_events_for_key::v3::Response { content: event_format.or(|| event.get_content_as_value()), - event: event_format.then(|| event.to_state_event_value()), + event: event_format.then(|| event.into_state_event_value()), }) } diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 7cca9616..cd4dfc90 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -28,7 +28,7 @@ use conduwuit_service::{ }; use futures::{ future::{join, join3, join4, join5, try_join, try_join4, OptionFuture}, - FutureExt, StreamExt, TryFutureExt, + FutureExt, StreamExt, TryFutureExt, TryStreamExt, }; use ruma::{ api::client::{ @@ -503,16 +503,20 @@ async fn handle_left_room( let mut left_state_events = Vec::new(); - let since_shortstatehash = services - .rooms - .user - .get_token_shortstatehash(room_id, since) - .await; + let since_shortstatehash = services.rooms.user.get_token_shortstatehash(room_id, since); - let since_state_ids = match since_shortstatehash { - | Ok(s) => services.rooms.state_accessor.state_full_ids(s).await?, - | Err(_) => HashMap::new(), - }; + let since_state_ids: HashMap<_, OwnedEventId> = since_shortstatehash + .map_ok(|since_shortstatehash| { + services + .rooms + .state_accessor + .state_full_ids(since_shortstatehash) + .map(Ok) + }) + .try_flatten_stream() + .try_collect() + .await + .unwrap_or_default(); let Ok(left_event_id): Result = services .rooms @@ -534,11 +538,12 @@ async fn handle_left_room( return Ok(None); }; - let mut left_state_ids = services + let mut left_state_ids: HashMap<_, _> = 
services .rooms .state_accessor .state_full_ids(left_shortstatehash) - .await?; + .collect() + .await; let leave_shortstatekey = services .rooms @@ -960,19 +965,18 @@ async fn calculate_state_initial( current_shortstatehash: ShortStateHash, witness: Option<&Witness>, ) -> Result { - let state_events = services + let (shortstatekeys, event_ids): (Vec<_>, Vec<_>) = services .rooms .state_accessor .state_full_ids(current_shortstatehash) - .await?; - - let shortstatekeys = state_events.keys().copied().stream(); + .unzip() + .await; let state_events = services .rooms .short - .multi_get_statekey_from_short(shortstatekeys) - .zip(state_events.values().cloned().stream()) + .multi_get_statekey_from_short(shortstatekeys.into_iter().stream()) + .zip(event_ids.into_iter().stream()) .ready_filter_map(|item| Some((item.0.ok()?, item.1))) .ready_filter_map(|((event_type, state_key), event_id)| { let lazy_load_enabled = filter.room.state.lazy_load_options.is_enabled() @@ -1036,17 +1040,19 @@ async fn calculate_state_incremental( let current_state_ids = services .rooms .state_accessor - .state_full_ids(current_shortstatehash); + .state_full_ids(current_shortstatehash) + .collect(); let since_state_ids = services .rooms .state_accessor - .state_full_ids(since_shortstatehash); + .state_full_ids(since_shortstatehash) + .collect(); let (current_state_ids, since_state_ids): ( HashMap<_, OwnedEventId>, HashMap<_, OwnedEventId>, - ) = try_join(current_state_ids, since_state_ids).await?; + ) = join(current_state_ids, since_state_ids).await; current_state_ids .iter() diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index a82e9309..b7967498 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -241,13 +241,15 @@ pub(crate) async fn sync_events_v4_route( .rooms .state_accessor .state_full_ids(current_shortstatehash) - .await?; + .collect() + .await; - let since_state_ids = services + let since_state_ids: HashMap<_, _> = services .rooms .state_accessor .state_full_ids(since_shortstatehash) - .await?; + .collect() + .await; for (key, id) in current_state_ids { if since_state_ids.get(&key) != Some(&id) { diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index 1c4f3504..66647f0e 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -748,13 +748,15 @@ async fn collect_e2ee<'a>( .rooms .state_accessor .state_full_ids(current_shortstatehash) - .await?; + .collect() + .await; - let since_state_ids = services + let since_state_ids: HashMap<_, _> = services .rooms .state_accessor .state_full_ids(since_shortstatehash) - .await?; + .collect() + .await; for (key, id) in current_state_ids { if since_state_ids.get(&key) != Some(&id) { diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index e62089b4..2b8a0eef 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -1,10 +1,10 @@ #![allow(deprecated)] -use std::{borrow::Borrow, collections::HashMap}; +use std::borrow::Borrow; use axum::extract::State; use conduwuit::{ - err, + at, err, pdu::gen_event_id_canonical_json, utils::stream::{IterStream, TryBroadbandExt}, warn, Err, Result, @@ -211,14 +211,16 @@ async fn create_join_event( drop(mutex_lock); - let state_ids: HashMap<_, OwnedEventId> = services + let state_ids: Vec = services .rooms .state_accessor .state_full_ids(shortstatehash) - .await?; + .map(at!(1)) + .collect() + .await; let state = state_ids - .values() + .iter() .try_stream() .broad_and_then(|event_id| services.rooms.timeline.get_pdu_json(event_id)) 
.broad_and_then(|pdu| { @@ -231,7 +233,7 @@ async fn create_join_event( .boxed() .await?; - let starting_events = state_ids.values().map(Borrow::borrow); + let starting_events = state_ids.iter().map(Borrow::borrow); let auth_chain = services .rooms .auth_chain diff --git a/src/api/server/state.rs b/src/api/server/state.rs index 42f7e538..eab1f138 100644 --- a/src/api/server/state.rs +++ b/src/api/server/state.rs @@ -1,7 +1,7 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; -use conduwuit::{err, result::LogErr, utils::IterStream, Result}; +use conduwuit::{at, err, utils::IterStream, Result}; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{api::federation::event::get_room_state, OwnedEventId}; @@ -35,11 +35,9 @@ pub(crate) async fn get_room_state_route( .rooms .state_accessor .state_full_ids(shortstatehash) - .await - .log_err() - .map_err(|_| err!(Request(NotFound("PDU state IDs not found."))))? - .into_values() - .collect(); + .map(at!(1)) + .collect() + .await; let pdus = state_ids .iter() diff --git a/src/api/server/state_ids.rs b/src/api/server/state_ids.rs index 186ef399..4973dd3a 100644 --- a/src/api/server/state_ids.rs +++ b/src/api/server/state_ids.rs @@ -1,7 +1,7 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; -use conduwuit::{err, Result}; +use conduwuit::{at, err, Result}; use futures::StreamExt; use ruma::{api::federation::event::get_room_state_ids, OwnedEventId}; @@ -36,10 +36,9 @@ pub(crate) async fn get_room_state_ids_route( .rooms .state_accessor .state_full_ids(shortstatehash) - .await - .map_err(|_| err!(Request(NotFound("State ids not found"))))? - .into_values() - .collect(); + .map(at!(1)) + .collect() + .await; let auth_chain_ids = services .rooms diff --git a/src/core/pdu/strip.rs b/src/core/pdu/strip.rs index 8e1045db..7d2fb1d6 100644 --- a/src/core/pdu/strip.rs +++ b/src/core/pdu/strip.rs @@ -116,7 +116,7 @@ pub fn to_message_like_event(&self) -> Raw { #[must_use] #[implement(super::Pdu)] -pub fn to_state_event_value(&self) -> JsonValue { +pub fn into_state_event_value(self) -> JsonValue { let mut json = json!({ "content": self.content, "type": self.kind, @@ -127,7 +127,7 @@ pub fn to_state_event_value(&self) -> JsonValue { "state_key": self.state_key, }); - if let Some(unsigned) = &self.unsigned { + if let Some(unsigned) = self.unsigned { json["unsigned"] = json!(unsigned); } @@ -136,8 +136,8 @@ pub fn to_state_event_value(&self) -> JsonValue { #[must_use] #[implement(super::Pdu)] -pub fn to_state_event(&self) -> Raw { - serde_json::from_value(self.to_state_event_value()).expect("Raw::from_value always works") +pub fn into_state_event(self) -> Raw { + serde_json::from_value(self.into_state_event_value()).expect("Raw::from_value always works") } #[must_use] @@ -188,7 +188,7 @@ pub fn to_stripped_spacechild_state_event(&self) -> Raw Raw> { +pub fn into_member_event(self) -> Raw> { let mut json = json!({ "content": self.content, "type": self.kind, @@ -200,7 +200,7 @@ pub fn to_member_event(&self) -> Raw> { "state_key": self.state_key, }); - if let Some(unsigned) = &self.unsigned { + if let Some(unsigned) = self.unsigned { json["unsigned"] = json!(unsigned); } diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 0526d31c..1fd91ac6 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -33,11 +33,12 @@ pub async fn resolve_state( .await .map_err(|e| err!(Database(error!("No state for 
{room_id:?}: {e:?}"))))?; - let current_state_ids = self + let current_state_ids: HashMap<_, _> = self .services .state_accessor .state_full_ids(current_sstatehash) - .await?; + .collect() + .await; let fork_states = [current_state_ids, incoming_state]; let auth_chain_sets: Vec> = fork_states diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 9e7f8d2a..7ef047ab 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -31,15 +31,12 @@ pub(super) async fn state_at_incoming_degree_one( return Ok(None); }; - let Ok(mut state) = self + let mut state: HashMap<_, _> = self .services .state_accessor .state_full_ids(prev_event_sstatehash) - .await - .log_err() - else { - return Ok(None); - }; + .collect() + .await; debug!("Using cached state"); let prev_pdu = self @@ -103,14 +100,12 @@ pub(super) async fn state_at_incoming_resolved( let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); for (sstatehash, prev_event) in extremity_sstatehashes { - let Ok(mut leaf_state) = self + let mut leaf_state: HashMap<_, _> = self .services .state_accessor .state_full_ids(sstatehash) - .await - else { - continue; - }; + .collect() + .await; if let Some(state_key) = &prev_event.state_key { let shortstatekey = self diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index d60c4c9e..d12a01ab 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -624,8 +624,8 @@ impl Service { .services .state_accessor .state_full_ids(current_shortstatehash) - .await - .map_err(|e| err!(Database("State in space not found: {e}")))?; + .collect() + .await; let mut children_pdus = Vec::with_capacity(state.len()); for (key, id) in state { diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 3d87534b..0f5520bb 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,6 +1,5 @@ use std::{ borrow::Borrow, - collections::HashMap, fmt::Write, sync::{Arc, Mutex as StdMutex, Mutex}, }; @@ -17,7 +16,7 @@ use conduwuit::{ Err, Error, PduEvent, Result, }; use database::{Deserialized, Map}; -use futures::{FutureExt, StreamExt, TryFutureExt}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt}; use lru_cache::LruCache; use ruma::{ events::{ @@ -143,83 +142,74 @@ impl crate::Service for Service { } impl Service { - pub async fn state_full( + pub fn state_full( &self, shortstatehash: ShortStateHash, - ) -> Result> { - let state = self - .state_full_pdus(shortstatehash) - .await? 
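// NOTE: illustrative sketch, not part of this patch. With `state_full_ids`
// returning a stream of `(shortstatekey, event_id)` pairs instead of a
// Result-wrapped map, callers like the ones above collect it directly
// (`services` and `shortstatehash` assumed in scope):
use std::collections::HashMap;
use ruma::OwnedEventId;

let state: HashMap<_, OwnedEventId> = services
    .rooms
    .state_accessor
    .state_full_ids(shortstatehash)
    .collect()
    .await;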
- .into_iter() - .filter_map(|pdu| Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu))) - .collect(); - - Ok(state) + ) -> impl Stream + Send + '_ { + self.state_full_pdus(shortstatehash) + .ready_filter_map(|pdu| { + Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu)) + }) } - pub async fn state_full_pdus(&self, shortstatehash: ShortStateHash) -> Result> { - let short_ids = self.state_full_shortids(shortstatehash).await?; + pub fn state_full_pdus( + &self, + shortstatehash: ShortStateHash, + ) -> impl Stream + Send + '_ { + let short_ids = self + .state_full_shortids(shortstatehash) + .map(|result| result.expect("missing shortstatehash")) + .map(Vec::into_iter) + .map(|iter| iter.map(at!(1))) + .map(IterStream::stream) + .flatten_stream() + .boxed(); - let full_pdus = self - .services + self.services .short - .multi_get_eventid_from_short(short_ids.into_iter().map(at!(1)).stream()) + .multi_get_eventid_from_short(short_ids) .ready_filter_map(Result::ok) - .broad_filter_map(|event_id: OwnedEventId| async move { + .broad_filter_map(move |event_id: OwnedEventId| async move { self.services.timeline.get_pdu(&event_id).await.ok() }) - .collect() - .await; - - Ok(full_pdus) } /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_full_ids( - &self, + pub fn state_full_ids<'a, Id>( + &'a self, shortstatehash: ShortStateHash, - ) -> Result> + ) -> impl Stream + Send + 'a where - Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, + Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned + 'a, ::Owned: Borrow, { - let short_ids = self.state_full_shortids(shortstatehash).await?; - - let full_ids = self - .services - .short - .multi_get_eventid_from_short(short_ids.iter().map(at!(1)).stream()) - .zip(short_ids.iter().stream().map(at!(0))) - .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) - .collect() - .boxed() - .await; - - Ok(full_ids) - } - - #[inline] - pub async fn state_full_shortids( - &self, - shortstatehash: ShortStateHash, - ) -> Result> { let shortids = self - .services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .await - .map_err(|e| err!(Database("Missing state IDs: {e}")))? - .pop() - .expect("there is always one layer") - .full_state - .iter() - .copied() - .map(parse_compressed_state_event) - .collect(); + .state_full_shortids(shortstatehash) + .map(|result| result.expect("missing shortstatehash")) + .map(|vec| vec.into_iter().unzip()) + .boxed() + .shared(); - Ok(shortids) + let shortstatekeys = shortids + .clone() + .map(at!(0)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + let shorteventids = shortids + .map(at!(1)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + self.services + .short + .multi_get_eventid_from_short(shorteventids) + .zip(shortstatekeys) + .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) } /// Returns a single EventId from `room_id` with key (`event_type`, @@ -264,6 +254,28 @@ impl Service { .await } + #[inline] + pub async fn state_full_shortids( + &self, + shortstatehash: ShortStateHash, + ) -> Result> { + let shortids = self + .services + .state_compressor + .load_shortstatehash_info(shortstatehash) + .await + .map_err(|e| err!(Database("Missing state IDs: {e}")))? 
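// NOTE: editor's aside, not part of this patch. In the new `state_full_ids`
// above, the single `state_full_shortids` future is made cloneable with
// `FutureExt::shared`, so one load of the compressed state can feed both the
// shortstatekey stream and the shorteventid stream. The same trick in
// miniature:
use futures::{future, FutureExt};

let pairs = future::ready((vec![1u64, 2], vec!["a", "b"])).boxed().shared();
let keys = pairs.clone().map(|(k, _)| k); // first consumer of the shared result
let vals = pairs.map(|(_, v)| v);         // second consumer, no recomputation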
+ .pop() + .expect("there is always one layer") + .full_state + .iter() + .copied() + .map(parse_compressed_state_event) + .collect(); + + Ok(shortids) + } + /// Returns a single PDU from `room_id` with key (`event_type`, /// `state_key`). pub async fn state_get( @@ -479,27 +491,30 @@ impl Service { /// Returns the full room state. #[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result> { + pub fn room_state_full<'a>( + &'a self, + room_id: &'a RoomId, + ) -> impl Stream> + Send + 'a { self.services .state .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_full(shortstatehash)) - .map_err(|e| err!(Database("Missing state for {room_id:?}: {e:?}"))) - .await + .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok)) + .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) + .try_flatten_stream() } /// Returns the full room state pdus #[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_full_pdus(&self, room_id: &RoomId) -> Result> { + pub fn room_state_full_pdus<'a>( + &'a self, + room_id: &'a RoomId, + ) -> impl Stream> + Send + 'a { self.services .state .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_full_pdus(shortstatehash)) - .map_err(|e| err!(Database("Missing state pdus for {room_id:?}: {e:?}"))) - .await + .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok)) + .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) + .try_flatten_stream() } /// Returns a single EventId from `room_id` with key (`event_type`, From 329925c661d6b166dfd6b73a94f7f076cf1ed9bc Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 04:46:10 +0000 Subject: [PATCH 117/328] additional info level span adjustments Signed-off-by: Jason Volk --- src/api/server/publicrooms.rs | 4 ++-- src/api/server/send.rs | 8 +++++--- src/core/debug.rs | 7 +++++++ src/service/federation/execute.rs | 7 ++++--- src/service/rooms/event_handler/handle_incoming_pdu.rs | 4 ++-- src/service/rooms/event_handler/handle_prev_pdu.rs | 5 +++-- src/service/rooms/spaces/mod.rs | 2 +- 7 files changed, 24 insertions(+), 13 deletions(-) diff --git a/src/api/server/publicrooms.rs b/src/api/server/publicrooms.rs index 2c09385b..ff74574a 100644 --- a/src/api/server/publicrooms.rs +++ b/src/api/server/publicrooms.rs @@ -13,7 +13,7 @@ use crate::{Error, Result, Ruma}; /// # `POST /_matrix/federation/v1/publicRooms` /// /// Lists the public rooms on this server. -#[tracing::instrument(skip_all, fields(%client), name = "publicrooms")] +#[tracing::instrument(name = "publicrooms", level = "debug", skip_all, fields(%client))] pub(crate) async fn get_public_rooms_filtered_route( State(services): State, InsecureClientIp(client): InsecureClientIp, @@ -51,7 +51,7 @@ pub(crate) async fn get_public_rooms_filtered_route( /// # `GET /_matrix/federation/v1/publicRooms` /// /// Lists the public rooms on this server. 
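// NOTE: editor's aside, not part of this patch. `#[tracing::instrument]`
// creates its span at Level::INFO unless told otherwise; adding
// `level = "debug"` (plus an explicit `name`) keeps these per-request spans
// out of info-level trace output, roughly:
#[tracing::instrument(name = "publicrooms", level = "debug", skip_all)]
async fn handler() { /* span is only recorded when debug spans are enabled */ }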
-#[tracing::instrument(skip_all, fields(%client), "publicrooms")] +#[tracing::instrument(name = "publicrooms", level = "debug", skip_all, fields(%client))] pub(crate) async fn get_public_rooms_route( State(services): State, InsecureClientIp(client): InsecureClientIp, diff --git a/src/api/server/send.rs b/src/api/server/send.rs index f4903447..2e615a0c 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -3,7 +3,9 @@ use std::{collections::BTreeMap, net::IpAddr, time::Instant}; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - debug, debug_warn, err, error, + debug, + debug::INFO_SPAN_LEVEL, + debug_warn, err, error, result::LogErr, trace, utils::{ @@ -49,8 +51,8 @@ type Pdu = (OwnedRoomId, OwnedEventId, CanonicalJsonObject); /// /// Push EDUs and PDUs to this server. #[tracing::instrument( - name = "send", - level = "debug", + name = "txn", + level = INFO_SPAN_LEVEL, skip_all, fields( %client, diff --git a/src/core/debug.rs b/src/core/debug.rs index aebfc833..ca0f2f2e 100644 --- a/src/core/debug.rs +++ b/src/core/debug.rs @@ -4,6 +4,7 @@ use std::{any::Any, panic}; // Export debug proc_macros pub use conduwuit_macros::recursion_depth; +use tracing::Level; // Export all of the ancillary tools from here as well. pub use crate::{result::DebugInspect, utils::debug::*}; @@ -51,6 +52,12 @@ macro_rules! debug_info { } } +pub const INFO_SPAN_LEVEL: Level = if cfg!(debug_assertions) { + Level::INFO +} else { + Level::DEBUG +}; + pub fn set_panic_trap() { let next = panic::take_hook(); panic::set_hook(Box::new(move |info| { diff --git a/src/service/federation/execute.rs b/src/service/federation/execute.rs index 27d98968..3146bb8a 100644 --- a/src/service/federation/execute.rs +++ b/src/service/federation/execute.rs @@ -2,8 +2,8 @@ use std::{fmt::Debug, mem}; use bytes::Bytes; use conduwuit::{ - debug, debug_error, debug_warn, err, error::inspect_debug_log, implement, trace, - utils::string::EMPTY, Err, Error, Result, + debug, debug::INFO_SPAN_LEVEL, debug_error, debug_warn, err, error::inspect_debug_log, + implement, trace, utils::string::EMPTY, Err, Error, Result, }; use http::{header::AUTHORIZATION, HeaderValue}; use ipaddress::IPAddress; @@ -48,7 +48,8 @@ where #[implement(super::Service)] #[tracing::instrument( - level = "debug" + name = "fed", + level = INFO_SPAN_LEVEL, skip(self, client, request), )] pub async fn execute_on( diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index 7db71961..31c7762d 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -3,7 +3,7 @@ use std::{ time::Instant, }; -use conduwuit::{debug, err, implement, warn, Err, Result}; +use conduwuit::{debug, debug::INFO_SPAN_LEVEL, err, implement, warn, Err, Result}; use futures::{ future::{try_join5, OptionFuture}, FutureExt, @@ -42,7 +42,7 @@ use crate::rooms::timeline::RawPduId; #[implement(super::Service)] #[tracing::instrument( name = "pdu", - level = "debug", + level = INFO_SPAN_LEVEL, skip_all, fields(%room_id, %event_id), )] diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index 32ab505f..f911f1fd 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -5,7 +5,8 @@ use std::{ }; use conduwuit::{ - debug, implement, utils::continue_exponential_backoff_secs, Err, PduEvent, Result, + debug, 
debug::INFO_SPAN_LEVEL, implement, utils::continue_exponential_backoff_secs, Err, + PduEvent, Result, }; use ruma::{CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName, UInt}; @@ -14,7 +15,7 @@ use ruma::{CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName, UInt}; #[allow(clippy::too_many_arguments)] #[tracing::instrument( name = "prev", - level = "debug", + level = INFO_SPAN_LEVEL, skip_all, fields(%prev_id), )] diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index d12a01ab..1ee2727c 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -268,7 +268,7 @@ impl Service { } /// Gets the summary of a space using solely federation - #[tracing::instrument(skip(self))] + #[tracing::instrument(level = "debug", skip(self))] async fn get_summary_and_children_federation( &self, current_room: &OwnedRoomId, From 936161d89ece2474dcba5424adaa159fc4e97b03 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 29 Jan 2025 01:49:10 +0000 Subject: [PATCH 118/328] reduce bottommost compression underrides Signed-off-by: Jason Volk --- src/database/engine/descriptor.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index c735f402..934ef831 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -83,7 +83,7 @@ pub(crate) static RANDOM: Descriptor = Descriptor { write_size: 1024 * 1024 * 32, cache_shards: 128, compression_level: -3, - bottommost_level: Some(4), + bottommost_level: Some(-1), compressed_index: true, ..BASE }; @@ -94,8 +94,8 @@ pub(crate) static SEQUENTIAL: Descriptor = Descriptor { level_size: 1024 * 1024 * 32, file_size: 1024 * 1024 * 2, cache_shards: 128, - compression_level: -1, - bottommost_level: Some(6), + compression_level: -2, + bottommost_level: Some(-1), compression_shape: [0, 0, 1, 1, 1, 1, 1], compressed_index: false, ..BASE @@ -111,7 +111,7 @@ pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { block_size: 512, cache_shards: 64, compression_level: -4, - bottommost_level: Some(1), + bottommost_level: Some(-1), compression_shape: [0, 0, 0, 0, 0, 1, 1], compressed_index: false, ..RANDOM @@ -126,8 +126,8 @@ pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { block_size: 512, cache_shards: 64, block_index_hashing: Some(false), - compression_level: -2, - bottommost_level: Some(4), + compression_level: -4, + bottommost_level: Some(-2), compression_shape: [0, 0, 0, 0, 1, 1, 1], compressed_index: false, ..SEQUENTIAL From eb7d893c8675f955fa770c8ae6f1c32a2394284c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 29 Jan 2025 06:36:14 +0000 Subject: [PATCH 119/328] fix malloc_conf feature-awareness Signed-off-by: Jason Volk --- src/core/alloc/je.rs | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 6bdf8b33..57143e85 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -8,6 +8,7 @@ use std::{ }; use arrayvec::ArrayVec; +use const_str::concat_bytes; use tikv_jemalloc_ctl as mallctl; use tikv_jemalloc_sys as ffi; use tikv_jemallocator as jemalloc; @@ -20,18 +21,24 @@ use crate::{ #[cfg(feature = "jemalloc_conf")] #[unsafe(no_mangle)] -pub static malloc_conf: &[u8] = b"\ -metadata_thp:always\ -,percpu_arena:percpu\ -,background_thread:true\ -,max_background_threads:-1\ -,lg_extent_max_active_fit:4\ -,oversize_threshold:16777216\ -,tcache_max:2097152\ 
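// NOTE: editor's aside, not part of this patch. The replacement below assembles
// `malloc_conf` at compile time with `const_str::concat_bytes!` and only
// appends the `prof_active` key when the `jemalloc_prof` feature is enabled,
// presumably so a jemalloc built without profiling support is never handed an
// option it does not recognize. The cfg-gated-const pattern in miniature
// (hypothetical `foo` feature, illustration only):
#[cfg(feature = "foo")]
const EXTRA: &str = ",foo:true";
#[cfg(not(feature = "foo"))]
const EXTRA: &str = "";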
-,dirty_decay_ms:16000\ -,muzzy_decay_ms:144000\ -,prof_active:false\ -\0"; +pub static malloc_conf: &[u8] = concat_bytes!( + "lg_extent_max_active_fit:4", + ",oversize_threshold:16777216", + ",tcache_max:2097152", + ",dirty_decay_ms:16000", + ",muzzy_decay_ms:144000", + ",percpu_arena:percpu", + ",metadata_thp:always", + ",background_thread:true", + ",max_background_threads:-1", + MALLOC_CONF_PROF, + 0 +); + +#[cfg(all(feature = "jemalloc_conf", feature = "jemalloc_prof"))] +const MALLOC_CONF_PROF: &str = ",prof_active:false"; +#[cfg(all(feature = "jemalloc_conf", not(feature = "jemalloc_prof")))] +const MALLOC_CONF_PROF: &str = ""; #[global_allocator] static JEMALLOC: jemalloc::Jemalloc = jemalloc::Jemalloc; From 50acfe783289e6b9b8deb20b3c34f32653f61f11 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 29 Jan 2025 08:39:44 +0000 Subject: [PATCH 120/328] flatten auth chain iterations Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 7 +- src/api/server/event_auth.rs | 4 +- src/api/server/send_join.rs | 2 - src/api/server/state.rs | 2 - src/api/server/state_ids.rs | 8 +- src/service/rooms/auth_chain/mod.rs | 154 +++++++++--------- .../rooms/event_handler/resolve_state.rs | 15 +- .../rooms/event_handler/state_at_incoming.rs | 9 +- 8 files changed, 90 insertions(+), 111 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index cd892ded..4e0ce2e3 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -6,8 +6,9 @@ use std::{ }; use conduwuit::{ - debug_error, err, info, trace, utils, utils::string::EMPTY, warn, Error, PduEvent, PduId, - RawPduId, Result, + debug_error, err, info, trace, utils, + utils::{stream::ReadyExt, string::EMPTY}, + warn, Error, PduEvent, PduId, RawPduId, Result, }; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ @@ -54,7 +55,7 @@ pub(super) async fn get_auth_chain( .rooms .auth_chain .event_ids_iter(room_id, once(event_id.as_ref())) - .await? + .ready_filter_map(Result::ok) .count() .await; diff --git a/src/api/server/event_auth.rs b/src/api/server/event_auth.rs index 93e867a0..49dcd718 100644 --- a/src/api/server/event_auth.rs +++ b/src/api/server/event_auth.rs @@ -1,7 +1,7 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; -use conduwuit::{Error, Result}; +use conduwuit::{utils::stream::ReadyExt, Error, Result}; use futures::StreamExt; use ruma::{ api::{client::error::ErrorKind, federation::authorization::get_event_authorization}, @@ -48,7 +48,7 @@ pub(crate) async fn get_event_authorization_route( .rooms .auth_chain .event_ids_iter(room_id, once(body.event_id.borrow())) - .await? + .ready_filter_map(Result::ok) .filter_map(|id| async move { services.rooms.timeline.get_pdu_json(&id).await.ok() }) .then(|pdu| services.sending.convert_to_outgoing_federation_event(pdu)) .collect() diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index 2b8a0eef..e81d7672 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -238,8 +238,6 @@ async fn create_join_event( .rooms .auth_chain .event_ids_iter(room_id, starting_events) - .await? - .map(Ok) .broad_and_then(|event_id| async move { services.rooms.timeline.get_pdu_json(&event_id).await }) diff --git a/src/api/server/state.rs b/src/api/server/state.rs index eab1f138..b16e61a0 100644 --- a/src/api/server/state.rs +++ b/src/api/server/state.rs @@ -56,8 +56,6 @@ pub(crate) async fn get_room_state_route( .rooms .auth_chain .event_ids_iter(&body.room_id, once(body.event_id.borrow())) - .await? 
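// NOTE: editor's aside, not part of this patch. The `at!(N)` macro used above
// (e.g. `.map(at!(1))` on `(shortstatekey, event_id)` pairs) projects the N-th
// tuple element, roughly equivalent to:
let event_ids = pairs.map(|(_, event_id)| event_id); // `pairs` is a hypothetical stream here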
- .map(Ok) .and_then(|id| async move { services.rooms.timeline.get_pdu_json(&id).await }) .and_then(|pdu| { services diff --git a/src/api/server/state_ids.rs b/src/api/server/state_ids.rs index 4973dd3a..7d0440bf 100644 --- a/src/api/server/state_ids.rs +++ b/src/api/server/state_ids.rs @@ -2,7 +2,7 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; use conduwuit::{at, err, Result}; -use futures::StreamExt; +use futures::{StreamExt, TryStreamExt}; use ruma::{api::federation::event::get_room_state_ids, OwnedEventId}; use super::AccessCheck; @@ -44,10 +44,8 @@ pub(crate) async fn get_room_state_ids_route( .rooms .auth_chain .event_ids_iter(&body.room_id, once(body.event_id.borrow())) - .await? - .map(|id| (*id).to_owned()) - .collect() - .await; + .try_collect() + .await?; Ok(get_room_state_ids::v1::Response { auth_chain_ids, pdu_ids }) } diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index df2663b2..0ff96846 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -4,6 +4,7 @@ use std::{ collections::{BTreeSet, HashSet, VecDeque}, fmt::Debug, sync::Arc, + time::Instant, }; use conduwuit::{ @@ -14,7 +15,7 @@ use conduwuit::{ }, validated, warn, Err, Result, }; -use futures::{Stream, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{EventId, OwnedEventId, RoomId}; use self::data::Data; @@ -30,6 +31,8 @@ struct Services { timeline: Dep, } +type Bucket<'a> = BTreeSet<(u64, &'a EventId)>; + impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { @@ -45,42 +48,22 @@ impl crate::Service for Service { } #[implement(Service)] -pub async fn event_ids_iter<'a, I>( +pub fn event_ids_iter<'a, I>( &'a self, - room_id: &RoomId, + room_id: &'a RoomId, starting_events: I, -) -> Result + Send + '_> +) -> impl Stream> + Send + 'a where I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, { - let stream = self - .get_event_ids(room_id, starting_events) - .await? - .into_iter() - .stream(); - - Ok(stream) -} - -#[implement(Service)] -pub async fn get_event_ids<'a, I>( - &'a self, - room_id: &RoomId, - starting_events: I, -) -> Result> -where - I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, -{ - let chain = self.get_auth_chain(room_id, starting_events).await?; - let event_ids = self - .services - .short - .multi_get_eventid_from_short(chain.into_iter().stream()) - .ready_filter_map(Result::ok) - .collect() - .await; - - Ok(event_ids) + self.get_auth_chain(room_id, starting_events) + .map_ok(|chain| { + self.services + .short + .multi_get_eventid_from_short(chain.into_iter().stream()) + .ready_filter(Result::is_ok) + }) + .try_flatten_stream() } #[implement(Service)] @@ -94,9 +77,9 @@ where I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, { const NUM_BUCKETS: usize = 50; //TODO: change possible w/o disrupting db? 
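// NOTE: illustrative sketch, not part of this patch. `event_ids_iter` now
// yields `Result<OwnedEventId>` items itself, so callers fold the fallibility
// into the stream rather than an up-front `.await?` (`services`, `room_id` and
// `event_id: &EventId` assumed in scope):
use std::iter::once;
use futures::TryStreamExt;
use ruma::OwnedEventId;

let auth_chain_ids: Vec<OwnedEventId> = services
    .rooms
    .auth_chain
    .event_ids_iter(room_id, once(event_id))
    .try_collect()
    .await?;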
- const BUCKET: BTreeSet<(u64, &EventId)> = BTreeSet::new(); + const BUCKET: Bucket<'_> = BTreeSet::new(); - let started = std::time::Instant::now(); + let started = Instant::now(); let mut starting_ids = self .services .short @@ -120,53 +103,7 @@ where let full_auth_chain: Vec = buckets .into_iter() .try_stream() - .broad_and_then(|chunk| async move { - let chunk_key: Vec = chunk.iter().map(at!(0)).collect(); - - if chunk_key.is_empty() { - return Ok(Vec::new()); - } - - if let Ok(cached) = self.get_cached_eventid_authchain(&chunk_key).await { - return Ok(cached.to_vec()); - } - - let chunk_cache: Vec<_> = chunk - .into_iter() - .try_stream() - .broad_and_then(|(shortid, event_id)| async move { - if let Ok(cached) = self.get_cached_eventid_authchain(&[shortid]).await { - return Ok(cached.to_vec()); - } - - let auth_chain = self.get_auth_chain_inner(room_id, event_id).await?; - self.cache_auth_chain_vec(vec![shortid], auth_chain.as_slice()); - debug!( - ?event_id, - elapsed = ?started.elapsed(), - "Cache missed event" - ); - - Ok(auth_chain) - }) - .try_collect() - .map_ok(|chunk_cache: Vec<_>| chunk_cache.into_iter().flatten().collect()) - .map_ok(|mut chunk_cache: Vec<_>| { - chunk_cache.sort_unstable(); - chunk_cache.dedup(); - chunk_cache - }) - .await?; - - self.cache_auth_chain_vec(chunk_key, chunk_cache.as_slice()); - debug!( - chunk_cache_length = ?chunk_cache.len(), - elapsed = ?started.elapsed(), - "Cache missed chunk", - ); - - Ok(chunk_cache) - }) + .broad_and_then(|chunk| self.get_auth_chain_outer(room_id, started, chunk)) .try_collect() .map_ok(|auth_chain: Vec<_>| auth_chain.into_iter().flatten().collect()) .map_ok(|mut full_auth_chain: Vec<_>| { @@ -174,6 +111,7 @@ where full_auth_chain.dedup(); full_auth_chain }) + .boxed() .await?; debug!( @@ -185,6 +123,60 @@ where Ok(full_auth_chain) } +#[implement(Service)] +async fn get_auth_chain_outer( + &self, + room_id: &RoomId, + started: Instant, + chunk: Bucket<'_>, +) -> Result> { + let chunk_key: Vec = chunk.iter().map(at!(0)).collect(); + + if chunk_key.is_empty() { + return Ok(Vec::new()); + } + + if let Ok(cached) = self.get_cached_eventid_authchain(&chunk_key).await { + return Ok(cached.to_vec()); + } + + let chunk_cache: Vec<_> = chunk + .into_iter() + .try_stream() + .broad_and_then(|(shortid, event_id)| async move { + if let Ok(cached) = self.get_cached_eventid_authchain(&[shortid]).await { + return Ok(cached.to_vec()); + } + + let auth_chain = self.get_auth_chain_inner(room_id, event_id).await?; + self.cache_auth_chain_vec(vec![shortid], auth_chain.as_slice()); + debug!( + ?event_id, + elapsed = ?started.elapsed(), + "Cache missed event" + ); + + Ok(auth_chain) + }) + .try_collect() + .map_ok(|chunk_cache: Vec<_>| chunk_cache.into_iter().flatten().collect()) + .map_ok(|mut chunk_cache: Vec<_>| { + chunk_cache.sort_unstable(); + chunk_cache.dedup(); + chunk_cache + }) + .await?; + + self.cache_auth_chain_vec(chunk_key, chunk_cache.as_slice()); + debug!( + chunk_cache_length = ?chunk_cache.len(), + elapsed = ?started.elapsed(), + "Cache missed chunk", + ); + + Ok(chunk_cache) +} + #[implement(Service)] #[tracing::instrument(name = "inner", level = "trace", skip(self, room_id))] async fn get_auth_chain_inner( diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 1fd91ac6..03f7e822 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -44,18 +44,11 @@ pub async fn resolve_state( let 
auth_chain_sets: Vec> = fork_states .iter() .try_stream() - .wide_and_then(|state| async move { - let starting_events = state.values().map(Borrow::borrow); - - let auth_chain = self - .services + .wide_and_then(|state| { + self.services .auth_chain - .get_event_ids(room_id, starting_events) - .await? - .into_iter() - .collect(); - - Ok(auth_chain) + .event_ids_iter(room_id, state.values().map(Borrow::borrow)) + .try_collect() }) .try_collect() .await?; diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 7ef047ab..8730232a 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -10,7 +10,7 @@ use conduwuit::{ utils::stream::{BroadbandExt, IterStream}, PduEvent, Result, }; -use futures::{FutureExt, StreamExt}; +use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{state_res::StateMap, OwnedEventId, RoomId, RoomVersionId}; // TODO: if we know the prev_events of the incoming event we can avoid the @@ -140,10 +140,9 @@ pub(super) async fn state_at_incoming_resolved( let auth_chain: HashSet = self .services .auth_chain - .get_event_ids(room_id, starting_events.into_iter()) - .await? - .into_iter() - .collect(); + .event_ids_iter(room_id, starting_events.into_iter()) + .try_collect() + .await?; auth_chain_sets.push(auth_chain); fork_states.push(state); From 3c8376d897e6a1b9b6b61f5ada05b2afec1ab937 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 29 Jan 2025 23:07:12 +0000 Subject: [PATCH 121/328] parallelize state-res pre-gathering Signed-off-by: Jason Volk --- .../rooms/event_handler/resolve_state.rs | 63 +++---- .../rooms/event_handler/state_at_incoming.rs | 173 +++++++++--------- 2 files changed, 123 insertions(+), 113 deletions(-) diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 03f7e822..c3de5f2f 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -5,11 +5,11 @@ use std::{ }; use conduwuit::{ - debug, err, implement, + err, implement, trace, utils::stream::{automatic_width, IterStream, ReadyExt, TryWidebandExt, WidebandExt}, - Result, + Error, Result, }; -use futures::{FutureExt, StreamExt, TryStreamExt}; +use futures::{future::try_join, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{ state_res::{self, StateMap}, OwnedEventId, RoomId, RoomVersionId, @@ -25,13 +25,13 @@ pub async fn resolve_state( room_version_id: &RoomVersionId, incoming_state: HashMap, ) -> Result>> { - debug!("Loading current room state ids"); + trace!("Loading current room state ids"); let current_sstatehash = self .services .state .get_room_shortstatehash(room_id) - .await - .map_err(|e| err!(Database(error!("No state for {room_id:?}: {e:?}"))))?; + .map_err(|e| err!(Database(error!("No state for {room_id:?}: {e:?}")))) + .await?; let current_state_ids: HashMap<_, _> = self .services @@ -40,8 +40,9 @@ pub async fn resolve_state( .collect() .await; + trace!("Loading fork states"); let fork_states = [current_state_ids, incoming_state]; - let auth_chain_sets: Vec> = fork_states + let auth_chain_sets = fork_states .iter() .try_stream() .wide_and_then(|state| { @@ -50,36 +51,33 @@ pub async fn resolve_state( .event_ids_iter(room_id, state.values().map(Borrow::borrow)) .try_collect() }) - .try_collect() - .await?; + .try_collect::>>(); - debug!("Loading fork states"); - let fork_states: Vec> = fork_states - .into_iter() 
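// NOTE: editor's aside, not part of this patch. The parallelized version below
// builds the fork states and their auth-chain sets as two lazy pipelines and
// drives them concurrently with `futures::future::try_join`; the same shape in
// miniature:
use futures::future::try_join;

async fn both() -> Result<(u32, u32), &'static str> {
    let a = async { Ok::<u32, &'static str>(1) };
    let b = async { Ok::<u32, &'static str>(2) };
    try_join(a, b).await // both futures make progress together
}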
+ let fork_states = fork_states + .iter() .stream() - .wide_then(|fork_state| async move { + .wide_then(|fork_state| { let shortstatekeys = fork_state.keys().copied().stream(); - - let event_ids = fork_state.values().cloned().stream().boxed(); - + let event_ids = fork_state.values().cloned().stream(); self.services .short .multi_get_statekey_from_short(shortstatekeys) .zip(event_ids) .ready_filter_map(|(ty_sk, id)| Some((ty_sk.ok()?, id))) .collect() - .await }) - .collect() - .await; + .map(Ok::<_, Error>) + .try_collect::>>(); - debug!("Resolving state"); + let (fork_states, auth_chain_sets) = try_join(fork_states, auth_chain_sets).await?; + + trace!("Resolving state"); let state = self - .state_resolution(room_version_id, &fork_states, &auth_chain_sets) + .state_resolution(room_version_id, fork_states.iter(), &auth_chain_sets) .boxed() .await?; - debug!("State resolution done."); + trace!("State resolution done."); let state_events: Vec<_> = state .iter() .stream() @@ -92,7 +90,7 @@ pub async fn resolve_state( .collect() .await; - debug!("Compressing state..."); + trace!("Compressing state..."); let new_room_state: HashSet<_> = self .services .state_compressor @@ -109,20 +107,23 @@ pub async fn resolve_state( #[implement(super::Service)] #[tracing::instrument(name = "ruma", level = "debug", skip_all)] -pub async fn state_resolution( - &self, - room_version: &RoomVersionId, - state_sets: &[StateMap], - auth_chain_sets: &[HashSet], -) -> Result> { +pub async fn state_resolution<'a, StateSets>( + &'a self, + room_version: &'a RoomVersionId, + state_sets: StateSets, + auth_chain_sets: &'a [HashSet], +) -> Result> +where + StateSets: Iterator> + Clone + Send, +{ state_res::resolve( room_version, - state_sets.iter(), + state_sets, auth_chain_sets, &|event_id| self.event_fetch(event_id), &|event_id| self.event_exists(event_id), automatic_width(), ) - .await .map_err(|e| err!(error!("State resolution failed: {e:?}"))) + .await } diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 8730232a..8ae6354c 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -1,18 +1,20 @@ use std::{ borrow::Borrow, collections::{HashMap, HashSet}, + iter::Iterator, sync::Arc, }; use conduwuit::{ - debug, err, implement, - result::LogErr, - utils::stream::{BroadbandExt, IterStream}, + debug, err, implement, trace, + utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryWidebandExt}, PduEvent, Result, }; -use futures::{FutureExt, StreamExt, TryStreamExt}; +use futures::{future::try_join, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{state_res::StateMap, OwnedEventId, RoomId, RoomVersionId}; +use crate::rooms::short::ShortStateHash; + // TODO: if we know the prev_events of the incoming event we can avoid the #[implement(super::Service)] // request and build the state from a known point and resolve if > 1 prev_event @@ -70,86 +72,44 @@ pub(super) async fn state_at_incoming_resolved( room_id: &RoomId, room_version_id: &RoomVersionId, ) -> Result>> { - debug!("Calculating state at event using state res"); - let mut extremity_sstatehashes = HashMap::with_capacity(incoming_pdu.prev_events.len()); - - let mut okay = true; - for prev_eventid in &incoming_pdu.prev_events { - let Ok(prev_event) = self.services.timeline.get_pdu(prev_eventid).await else { - okay = false; - break; - }; - - let Ok(sstatehash) = self - .services - .state_accessor - 
.pdu_shortstatehash(prev_eventid) - .await - else { - okay = false; - break; - }; - - extremity_sstatehashes.insert(sstatehash, prev_event); - } - - if !okay { + trace!("Calculating extremity statehashes..."); + let Ok(extremity_sstatehashes) = incoming_pdu + .prev_events + .iter() + .try_stream() + .broad_and_then(|prev_eventid| { + self.services + .timeline + .get_pdu(prev_eventid) + .map_ok(move |prev_event| (prev_eventid, prev_event)) + }) + .broad_and_then(|(prev_eventid, prev_event)| { + self.services + .state_accessor + .pdu_shortstatehash(prev_eventid) + .map_ok(move |sstatehash| (sstatehash, prev_event)) + }) + .try_collect::>() + .await + else { return Ok(None); - } + }; - let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); - let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); - for (sstatehash, prev_event) in extremity_sstatehashes { - let mut leaf_state: HashMap<_, _> = self - .services - .state_accessor - .state_full_ids(sstatehash) - .collect() - .await; - - if let Some(state_key) = &prev_event.state_key { - let shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&prev_event.kind.to_string().into(), state_key) - .await; - - let event_id = &prev_event.event_id; - leaf_state.insert(shortstatekey, event_id.clone()); - // Now it's the state after the pdu - } - - let mut state = StateMap::with_capacity(leaf_state.len()); - let mut starting_events = Vec::with_capacity(leaf_state.len()); - for (k, id) in &leaf_state { - if let Ok((ty, st_key)) = self - .services - .short - .get_statekey_from_short(*k) - .await - .log_err() - { - // FIXME: Undo .to_string().into() when StateMap - // is updated to use StateEventType - state.insert((ty.to_string().into(), st_key), id.clone()); - } - - starting_events.push(id.borrow()); - } - - let auth_chain: HashSet = self - .services - .auth_chain - .event_ids_iter(room_id, starting_events.into_iter()) + trace!("Calculating fork states..."); + let (fork_states, auth_chain_sets): (Vec>, Vec>) = + extremity_sstatehashes + .into_iter() + .try_stream() + .wide_and_then(|(sstatehash, prev_event)| { + self.state_at_incoming_fork(room_id, sstatehash, prev_event) + }) .try_collect() + .map_ok(Vec::into_iter) + .map_ok(Iterator::unzip) .await?; - auth_chain_sets.push(auth_chain); - fork_states.push(state); - } - let Ok(new_state) = self - .state_resolution(room_version_id, &fork_states, &auth_chain_sets) + .state_resolution(room_version_id, fork_states.iter(), &auth_chain_sets) .boxed() .await else { @@ -157,16 +117,65 @@ pub(super) async fn state_at_incoming_resolved( }; new_state - .iter() + .into_iter() .stream() - .broad_then(|((event_type, state_key), event_id)| { + .broad_then(|((event_type, state_key), event_id)| async move { self.services .short - .get_or_create_shortstatekey(event_type, state_key) - .map(move |shortstatekey| (shortstatekey, event_id.clone())) + .get_or_create_shortstatekey(&event_type, &state_key) + .map(move |shortstatekey| (shortstatekey, event_id)) + .await }) .collect() .map(Some) .map(Ok) .await } + +#[implement(super::Service)] +async fn state_at_incoming_fork( + &self, + room_id: &RoomId, + sstatehash: ShortStateHash, + prev_event: PduEvent, +) -> Result<(StateMap, HashSet)> { + let mut leaf_state: HashMap<_, _> = self + .services + .state_accessor + .state_full_ids(sstatehash) + .collect() + .await; + + if let Some(state_key) = &prev_event.state_key { + let shortstatekey = self + .services + .short + 
.get_or_create_shortstatekey(&prev_event.kind.to_string().into(), state_key) + .await; + + let event_id = &prev_event.event_id; + leaf_state.insert(shortstatekey, event_id.clone()); + // Now it's the state after the pdu + } + + let auth_chain = self + .services + .auth_chain + .event_ids_iter(room_id, leaf_state.values().map(Borrow::borrow)) + .try_collect(); + + let fork_state = leaf_state + .iter() + .stream() + .broad_then(|(k, id)| { + self.services + .short + .get_statekey_from_short(*k) + .map_ok(|(ty, sk)| ((ty, sk), id.clone())) + }) + .ready_filter_map(Result::ok) + .collect() + .map(Ok); + + try_join(fork_state, auth_chain).await +} From 31c2968bb29e7447e56531333fb330da4ac08ede Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 29 Jan 2025 21:10:33 +0000 Subject: [PATCH 122/328] move db files command w/ filter args; misc related cleanup Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 66 +++++++++++++++++++++++++++++------- src/admin/debug/mod.rs | 8 +++++ src/admin/server/commands.rs | 15 +++----- src/admin/server/mod.rs | 3 -- src/database/engine/files.rs | 35 +++++-------------- src/service/globals/data.rs | 3 -- 6 files changed, 75 insertions(+), 55 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 4e0ce2e3..dcf9879c 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -7,7 +7,10 @@ use std::{ use conduwuit::{ debug_error, err, info, trace, utils, - utils::{stream::ReadyExt, string::EMPTY}, + utils::{ + stream::{IterStream, ReadyExt}, + string::EMPTY, + }, warn, Error, PduEvent, PduId, RawPduId, Result, }; use futures::{FutureExt, StreamExt, TryStreamExt}; @@ -640,6 +643,7 @@ pub(super) async fn force_set_room_state_from_server( room_id: room_id.clone().into(), event_id: first_pdu.event_id.clone(), }) + .boxed() .await?; for pdu in remote_state_response.pdus.clone() { @@ -648,6 +652,7 @@ pub(super) async fn force_set_room_state_from_server( .rooms .event_handler .parse_incoming_pdu(&pdu) + .boxed() .await { | Ok(t) => t, @@ -711,6 +716,7 @@ pub(super) async fn force_set_room_state_from_server( .rooms .event_handler .resolve_state(&room_id, &room_version, state) + .boxed() .await?; info!("Forcing new room state"); @@ -946,21 +952,57 @@ pub(super) async fn database_stats( property: Option, map: Option, ) -> Result { - let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned()); let map_name = map.as_ref().map_or(EMPTY, String::as_str); + let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned()); + self.services + .db + .iter() + .filter(|(&name, _)| map_name.is_empty() || map_name == name) + .try_stream() + .try_for_each(|(&name, map)| { + let res = map.property(&property).expect("invalid property"); + writeln!(self, "##### {name}:\n```\n{}\n```", res.trim()) + }) + .await?; - let mut out = String::new(); - for (&name, map) in self.services.db.iter() { - if !map_name.is_empty() && map_name != name { - continue; - } + Ok(RoomMessageEventContent::notice_plain("")) +} - let res = map.property(&property)?; - let res = res.trim(); - writeln!(out, "##### {name}:\n```\n{res}\n```")?; - } +#[admin_command] +pub(super) async fn database_files( + &self, + map: Option, + level: Option, +) -> Result { + let mut files: Vec<_> = self.services.db.db.file_list().collect::>()?; - Ok(RoomMessageEventContent::notice_markdown(out)) + files.sort_by_key(|f| f.name.clone()); + + writeln!(self, "| lev | sst | keys | dels | size | column |").await?; + writeln!(self, "| ---: | :--- | ---: | ---: | ---: | 
:--- |").await?; + files + .into_iter() + .filter(|file| { + map.as_deref() + .is_none_or(|map| map == file.column_family_name) + }) + .filter(|file| level.as_ref().is_none_or(|&level| level == file.level)) + .try_stream() + .try_for_each(|file| { + writeln!( + self, + "| {} | {:<13} | {:7}+ | {:4}- | {:9} | {} |", + file.level, + file.name, + file.num_entries, + file.num_deletions, + file.size, + file.column_family_name, + ) + }) + .await?; + + Ok(RoomMessageEventContent::notice_plain("")) } #[admin_command] diff --git a/src/admin/debug/mod.rs b/src/admin/debug/mod.rs index 07f7296b..db04ccf4 100644 --- a/src/admin/debug/mod.rs +++ b/src/admin/debug/mod.rs @@ -226,6 +226,14 @@ pub(super) enum DebugCommand { /// - Trim memory usage TrimMemory, + /// - List database files + DatabaseFiles { + map: Option, + + #[arg(long)] + level: Option, + }, + /// - Developer test stubs #[command(subcommand)] #[allow(non_snake_case)] diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index 910dce6e..d4cfa7d5 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -92,7 +92,7 @@ pub(super) async fn clear_caches(&self) -> Result { #[admin_command] pub(super) async fn list_backups(&self) -> Result { - let result = self.services.globals.db.backup_list()?; + let result = self.services.db.db.backup_list()?; if result.is_empty() { Ok(RoomMessageEventContent::text_plain("No backups found.")) @@ -103,31 +103,24 @@ pub(super) async fn list_backups(&self) -> Result { #[admin_command] pub(super) async fn backup_database(&self) -> Result { - let globals = Arc::clone(&self.services.globals); + let db = Arc::clone(&self.services.db); let mut result = self .services .server .runtime() - .spawn_blocking(move || match globals.db.backup() { + .spawn_blocking(move || match db.db.backup() { | Ok(()) => String::new(), | Err(e) => e.to_string(), }) .await?; if result.is_empty() { - result = self.services.globals.db.backup_list()?; + result = self.services.db.db.backup_list()?; } Ok(RoomMessageEventContent::notice_markdown(result)) } -#[admin_command] -pub(super) async fn list_database_files(&self) -> Result { - let result = self.services.globals.db.file_list()?; - - Ok(RoomMessageEventContent::notice_markdown(result)) -} - #[admin_command] pub(super) async fn admin_notice(&self, message: Vec) -> Result { let message = message.join(" "); diff --git a/src/admin/server/mod.rs b/src/admin/server/mod.rs index 3f3d6c5e..60615365 100644 --- a/src/admin/server/mod.rs +++ b/src/admin/server/mod.rs @@ -46,9 +46,6 @@ pub(super) enum ServerCommand { /// - List database backups ListBackups, - /// - List database files - ListDatabaseFiles, - /// - Send a message to the admin room. 
AdminNotice { message: Vec, diff --git a/src/database/engine/files.rs b/src/database/engine/files.rs index f603c57b..33d6fdc4 100644 --- a/src/database/engine/files.rs +++ b/src/database/engine/files.rs @@ -1,32 +1,15 @@ -use std::fmt::Write; - use conduwuit::{implement, Result}; +use rocksdb::LiveFile as SstFile; use super::Engine; +use crate::util::map_err; #[implement(Engine)] -pub fn file_list(&self) -> Result { - match self.db.live_files() { - | Err(e) => Ok(String::from(e)), - | Ok(mut files) => { - files.sort_by_key(|f| f.name.clone()); - let mut res = String::new(); - writeln!(res, "| lev | sst | keys | dels | size | column |")?; - writeln!(res, "| ---: | :--- | ---: | ---: | ---: | :--- |")?; - for file in files { - writeln!( - res, - "| {} | {:<13} | {:7}+ | {:4}- | {:9} | {} |", - file.level, - file.name, - file.num_entries, - file.num_deletions, - file.size, - file.column_family_name, - )?; - } - - Ok(res) - }, - } +pub fn file_list(&self) -> impl Iterator> + Send { + self.db + .live_files() + .map_err(map_err) + .into_iter() + .flat_map(Vec::into_iter) + .map(Ok) } diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 07b4ac2c..39cb9be1 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -79,7 +79,4 @@ impl Data { #[inline] pub fn backup_list(&self) -> Result { self.db.db.backup_list() } - - #[inline] - pub fn file_list(&self) -> Result { self.db.db.file_list() } } From 1a8482b3b4865a7f38c342929489ba925a98e05c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 30 Jan 2025 04:39:24 +0000 Subject: [PATCH 123/328] refactor incoming extremities retention; broad filter, single pass Signed-off-by: Jason Volk --- src/api/client/membership.rs | 7 +- .../event_handler/upgrade_outlier_pdu.rs | 74 +++++++++---------- src/service/rooms/state/mod.rs | 17 +++-- src/service/rooms/timeline/mod.rs | 44 ++++++----- 4 files changed, 74 insertions(+), 68 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index fccb9b53..d80aff0c 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -1,6 +1,7 @@ use std::{ borrow::Borrow, collections::{BTreeMap, HashMap, HashSet}, + iter::once, net::IpAddr, sync::Arc, }; @@ -1216,7 +1217,7 @@ async fn join_room_by_id_helper_remote( .append_pdu( &parsed_join_pdu, join_event, - vec![(*parsed_join_pdu.event_id).to_owned()], + once(parsed_join_pdu.event_id.borrow()), &state_lock, ) .await?; @@ -2195,7 +2196,7 @@ async fn knock_room_helper_local( .append_pdu( &parsed_knock_pdu, knock_event, - vec![(*parsed_knock_pdu.event_id).to_owned()], + once(parsed_knock_pdu.event_id.borrow()), &state_lock, ) .await?; @@ -2394,7 +2395,7 @@ async fn knock_room_helper_remote( .append_pdu( &parsed_knock_pdu, knock_event, - vec![(*parsed_knock_pdu.event_id).to_owned()], + once(parsed_knock_pdu.event_id.borrow()), &state_lock, ) .await?; diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index f0c8f0c5..ca351981 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -1,14 +1,18 @@ use std::{ borrow::Borrow, collections::{BTreeMap, HashSet}, + iter::once, sync::Arc, time::Instant, }; -use conduwuit::{debug, debug_info, err, implement, trace, warn, Err, Error, PduEvent, Result}; -use futures::{future::ready, StreamExt}; +use conduwuit::{ + debug, debug_info, err, implement, trace, + utils::stream::{BroadbandExt, ReadyExt}, + warn, 
Err, PduEvent, Result, +}; +use futures::{future::ready, FutureExt, StreamExt}; use ruma::{ - api::client::error::ErrorKind, events::{room::redaction::RoomRedactionEventContent, StateEventType, TimelineEventType}, state_res::{self, EventTypeExt}, CanonicalJsonValue, RoomId, RoomVersionId, ServerName, @@ -174,42 +178,34 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( // Now we calculate the set of extremities this room has after the incoming // event has been applied. We start with the previous extremities (aka leaves) trace!("Calculating extremities"); - let mut extremities: HashSet<_> = self + let extremities: Vec<_> = self .services .state .get_forward_extremities(room_id) .map(ToOwned::to_owned) + .ready_filter(|event_id| { + // Remove any that are referenced by this incoming event's prev_events + !incoming_pdu.prev_events.contains(event_id) + }) + .broad_filter_map(|event_id| async move { + // Only keep those extremities were not referenced yet + self.services + .pdu_metadata + .is_event_referenced(room_id, &event_id) + .await + .eq(&false) + .then_some(event_id) + }) .collect() .await; - // Remove any forward extremities that are referenced by this incoming event's - // prev_events - trace!( - "Calculated {} extremities; checking against {} prev_events", + debug!( + "Retained {} extremities checked against {} prev_events", extremities.len(), incoming_pdu.prev_events.len() ); - for prev_event in &incoming_pdu.prev_events { - extremities.remove(&(**prev_event)); - } - // Only keep those extremities were not referenced yet - let mut retained = HashSet::new(); - for id in &extremities { - if !self - .services - .pdu_metadata - .is_event_referenced(room_id, id) - .await - { - retained.insert(id.clone()); - } - } - - extremities.retain(|id| retained.contains(id)); - debug!("Retained {} extremities. Compressing state", extremities.len()); - - let state_ids_compressed: HashSet<_> = self + let state_ids_compressed: Arc> = self .services .state_compressor .compress_state_events( @@ -218,10 +214,9 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( .map(|(ssk, eid)| (ssk, eid.borrow())), ) .collect() + .map(Arc::new) .await; - let state_ids_compressed = Arc::new(state_ids_compressed); - if incoming_pdu.state_key.is_some() { debug!("Event is a state-event. Deriving new room state"); @@ -260,12 +255,14 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( // if not soft fail it if soft_fail { debug!("Soft failing event"); + let extremities = extremities.iter().map(Borrow::borrow); + self.services .timeline .append_incoming_pdu( &incoming_pdu, val, - extremities.iter().map(|e| (**e).to_owned()).collect(), + extremities, state_ids_compressed, soft_fail, &state_lock, @@ -273,27 +270,30 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( .await?; // Soft fail, we keep the event as an outlier but don't add it to the timeline - warn!("Event was soft failed: {incoming_pdu:?}"); self.services .pdu_metadata .mark_event_soft_failed(&incoming_pdu.event_id); - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event has been soft failed")); + warn!("Event was soft failed: {incoming_pdu:?}"); + return Err!(Request(InvalidParam("Event has been soft failed"))); } - trace!("Appending pdu to timeline"); - extremities.insert(incoming_pdu.event_id.clone()); - // Now that the event has passed all auth it is added into the timeline. // We use the `state_at_event` instead of `state_after` so we accurately // represent the state for this event. 
+ trace!("Appending pdu to timeline"); + let extremities = extremities + .iter() + .map(Borrow::borrow) + .chain(once(incoming_pdu.event_id.borrow())); + let pdu_id = self .services .timeline .append_incoming_pdu( &incoming_pdu, val, - extremities.into_iter().collect(), + extremities, state_ids_compressed, soft_fail, &state_lock, diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index fd303667..8cb4e586 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -398,13 +398,14 @@ impl Service { .ignore_err() } - pub async fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: Vec, - _state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room - * state mutex */ - ) { + pub async fn set_forward_extremities<'a, I>( + &'a self, + room_id: &'a RoomId, + event_ids: I, + _state_lock: &'a RoomMutexGuard, + ) where + I: Iterator + Send + 'a, + { let prefix = (room_id, Interfix); self.db .roomid_pduleaves @@ -413,7 +414,7 @@ impl Service { .ready_for_each(|key| self.db.roomid_pduleaves.remove(key)) .await; - for event_id in &event_ids { + for event_id in event_ids { let key = (room_id, event_id); self.db.roomid_pduleaves.put_raw(key, event_id); } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index bf585a6b..8b3b67a7 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,6 +1,7 @@ mod data; use std::{ + borrow::Borrow, cmp, collections::{BTreeMap, HashSet}, fmt::Write, @@ -260,14 +261,16 @@ impl Service { /// /// Returns pdu id #[tracing::instrument(level = "debug", skip_all)] - pub async fn append_pdu( - &self, - pdu: &PduEvent, + pub async fn append_pdu<'a, Leafs>( + &'a self, + pdu: &'a PduEvent, mut pdu_json: CanonicalJsonObject, - leaves: Vec, - state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state - * mutex */ - ) -> Result { + leafs: Leafs, + state_lock: &'a RoomMutexGuard, + ) -> Result + where + Leafs: Iterator + Send + 'a, + { // Coalesce database writes for the remainder of this scope. let _cork = self.db.db.cork_and_flush(); @@ -335,7 +338,7 @@ impl Service { self.services .state - .set_forward_extremities(&pdu.room_id, leaves, state_lock) + .set_forward_extremities(&pdu.room_id, leafs, state_lock) .await; let insert_lock = self.mutex_insert.lock(&pdu.room_id).await; @@ -819,8 +822,7 @@ impl Service { pdu_builder: PduBuilder, sender: &UserId, room_id: &RoomId, - state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state - * mutex */ + state_lock: &RoomMutexGuard, ) -> Result { let (pdu, pdu_json) = self .create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock) @@ -896,7 +898,7 @@ impl Service { pdu_json, // Since this PDU references all pdu_leaves we can update the leaves // of the room - vec![(*pdu.event_id).to_owned()], + once(pdu.event_id.borrow()), state_lock, ) .boxed() @@ -943,16 +945,18 @@ impl Service { /// Append the incoming event setting the state snapshot to the state from /// the server that sent the event. 
#[tracing::instrument(level = "debug", skip_all)] - pub async fn append_incoming_pdu( - &self, - pdu: &PduEvent, + pub async fn append_incoming_pdu<'a, Leafs>( + &'a self, + pdu: &'a PduEvent, pdu_json: CanonicalJsonObject, - new_room_leaves: Vec, + new_room_leafs: Leafs, state_ids_compressed: Arc>, soft_fail: bool, - state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state - * mutex */ - ) -> Result> { + state_lock: &'a RoomMutexGuard, + ) -> Result> + where + Leafs: Iterator + Send + 'a, + { // We append to state before appending the pdu, so we don't have a moment in // time with the pdu without it's state. This is okay because append_pdu can't // fail. @@ -968,14 +972,14 @@ impl Service { self.services .state - .set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock) + .set_forward_extremities(&pdu.room_id, new_room_leafs, state_lock) .await; return Ok(None); } let pdu_id = self - .append_pdu(pdu, pdu_json, new_room_leaves, state_lock) + .append_pdu(pdu, pdu_json, new_room_leafs, state_lock) .await?; Ok(Some(pdu_id)) From ff8bbd4cfa6ad9426bd9efbe610547dd89030c85 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 30 Jan 2025 05:14:45 +0000 Subject: [PATCH 124/328] untwist the redaction check stanza Signed-off-by: Jason Volk --- src/core/pdu/redact.rs | 18 +++++++ .../event_handler/upgrade_outlier_pdu.rs | 52 ++++--------------- 2 files changed, 28 insertions(+), 42 deletions(-) diff --git a/src/core/pdu/redact.rs b/src/core/pdu/redact.rs index 5d33eeca..7c332719 100644 --- a/src/core/pdu/redact.rs +++ b/src/core/pdu/redact.rs @@ -90,3 +90,21 @@ pub fn copy_redacts(&self) -> (Option, Box) { (self.redacts.clone(), self.content.clone()) } + +#[implement(super::Pdu)] +#[must_use] +pub fn redacts_id(&self, room_version: &RoomVersionId) -> Option { + use RoomVersionId::*; + + if self.kind != TimelineEventType::RoomRedaction { + return None; + } + + match *room_version { + | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => self.redacts.clone(), + | _ => + self.get_content::() + .ok()? + .redacts, + } +} diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index ca351981..03697558 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -13,9 +13,9 @@ use conduwuit::{ }; use futures::{future::ready, FutureExt, StreamExt}; use ruma::{ - events::{room::redaction::RoomRedactionEventContent, StateEventType, TimelineEventType}, + events::StateEventType, state_res::{self, EventTypeExt}, - CanonicalJsonValue, RoomId, RoomVersionId, ServerName, + CanonicalJsonValue, RoomId, ServerName, }; use super::{get_room_version_id, to_room_version}; @@ -127,46 +127,14 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( // Soft fail check before doing state res debug!("Performing soft-fail check"); - let soft_fail = { - use RoomVersionId::*; - - !auth_check - || incoming_pdu.kind == TimelineEventType::RoomRedaction - && match room_version_id { - | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { - if let Some(redact_id) = &incoming_pdu.redacts { - !self - .services - .state_accessor - .user_can_redact( - redact_id, - &incoming_pdu.sender, - &incoming_pdu.room_id, - true, - ) - .await? 
- } else { - false - } - }, - | _ => { - let content: RoomRedactionEventContent = incoming_pdu.get_content()?; - if let Some(redact_id) = &content.redacts { - !self - .services - .state_accessor - .user_can_redact( - redact_id, - &incoming_pdu.sender, - &incoming_pdu.room_id, - true, - ) - .await? - } else { - false - } - }, - } + let soft_fail = match (auth_check, incoming_pdu.redacts_id(&room_version_id)) { + | (false, _) => true, + | (true, None) => false, + | (true, Some(redact_id)) => + self.services + .state_accessor + .user_can_redact(&redact_id, &incoming_pdu.sender, &incoming_pdu.room_id, true) + .await?, }; // 13. Use state resolution to find new room state From 69837671bbc02b1cfba351e1c1321be506ef88b1 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 30 Jan 2025 09:28:34 +0000 Subject: [PATCH 125/328] simplify request handler task base Signed-off-by: Jason Volk --- src/core/metrics/mod.rs | 4 --- src/router/layers.rs | 33 ++++++++++++---------- src/router/request.rs | 59 +++++++-------------------------------- src/router/run.rs | 1 - src/router/serve/plain.rs | 7 ----- src/router/serve/unix.rs | 7 ++++- 6 files changed, 35 insertions(+), 76 deletions(-) diff --git a/src/core/metrics/mod.rs b/src/core/metrics/mod.rs index f2022166..8f7a5571 100644 --- a/src/core/metrics/mod.rs +++ b/src/core/metrics/mod.rs @@ -19,8 +19,6 @@ pub struct Metrics { runtime_intervals: std::sync::Mutex>, // TODO: move stats - pub requests_spawn_active: AtomicU32, - pub requests_spawn_finished: AtomicU32, pub requests_handle_active: AtomicU32, pub requests_handle_finished: AtomicU32, pub requests_panic: AtomicU32, @@ -48,8 +46,6 @@ impl Metrics { #[cfg(tokio_unstable)] runtime_intervals: std::sync::Mutex::new(runtime_intervals), - requests_spawn_active: AtomicU32::new(0), - requests_spawn_finished: AtomicU32::new(0), requests_handle_active: AtomicU32::new(0), requests_handle_finished: AtomicU32::new(0), requests_panic: AtomicU32::new(0), diff --git a/src/router/layers.rs b/src/router/layers.rs index 96bca4fd..c5227c22 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -5,7 +5,7 @@ use axum::{ Router, }; use axum_client_ip::SecureClientIpSource; -use conduwuit::{error, Result, Server}; +use conduwuit::{debug, error, Result, Server}; use conduwuit_api::router::state::Guard; use conduwuit_service::Services; use http::{ @@ -50,7 +50,6 @@ pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { let layers = layers .layer(SetSensitiveHeadersLayer::new([header::AUTHORIZATION])) - .layer(axum::middleware::from_fn_with_state(Arc::clone(services), request::spawn)) .layer( TraceLayer::new_for_http() .make_span_with(tracing_span::<_>) @@ -196,20 +195,26 @@ fn catch_panic( } fn tracing_span(request: &http::Request) -> tracing::Span { - let path = request.extensions().get::().map_or_else( - || { - request - .uri() - .path_and_query() - .expect("all requests have a path") - .as_str() - }, - truncated_matched_path, - ); + let path = request + .extensions() + .get::() + .map_or_else(|| request_path_str(request), truncated_matched_path); - let method = request.method(); + tracing::span! 
{ + parent: None, + debug::INFO_SPAN_LEVEL, + "router", + method = %request.method(), + %path, + } +} - tracing::debug_span!(parent: None, "router", %method, %path) +fn request_path_str(request: &http::Request) -> &str { + request + .uri() + .path_and_query() + .expect("all requests have a path") + .as_str() } fn truncated_matched_path(path: &MatchedPath) -> &str { diff --git a/src/router/request.rs b/src/router/request.rs index ca063338..f7b94417 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -8,48 +8,6 @@ use conduwuit::{debug, debug_error, debug_warn, err, error, trace, Result}; use conduwuit_service::Services; use http::{Method, StatusCode, Uri}; -#[tracing::instrument( - parent = None, - level = "trace", - skip_all, - fields( - handled = %services - .server - .metrics - .requests_spawn_finished - .fetch_add(1, Ordering::Relaxed), - active = %services - .server - .metrics - .requests_spawn_active - .fetch_add(1, Ordering::Relaxed), - ) -)] -pub(crate) async fn spawn( - State(services): State>, - req: http::Request, - next: axum::middleware::Next, -) -> Result { - let server = &services.server; - - #[cfg(debug_assertions)] - conduwuit::defer! {{ - _ = server - .metrics - .requests_spawn_active - .fetch_sub(1, Ordering::Relaxed); - }}; - - if !server.running() { - debug_warn!("unavailable pending shutdown"); - return Err(StatusCode::SERVICE_UNAVAILABLE); - } - - let fut = next.run(req); - let task = server.runtime().spawn(fut); - task.await.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) -} - #[tracing::instrument( level = "debug", skip_all, @@ -71,17 +29,15 @@ pub(crate) async fn handle( req: http::Request, next: axum::middleware::Next, ) -> Result { - let server = &services.server; - #[cfg(debug_assertions)] conduwuit::defer! {{ - _ = server + _ = services.server .metrics .requests_handle_active .fetch_sub(1, Ordering::Relaxed); }}; - if !server.running() { + if !services.server.running() { debug_warn!( method = %req.method(), uri = %req.uri(), @@ -91,10 +47,15 @@ pub(crate) async fn handle( return Err(StatusCode::SERVICE_UNAVAILABLE); } - let uri = req.uri().clone(); let method = req.method().clone(); - let result = next.run(req).await; - handle_result(&method, &uri, result) + let uri = req.uri().clone(); + services + .server + .runtime() + .spawn(next.run(req)) + .await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) + .and_then(|result| handle_result(&method, &uri, result)) } fn handle_result(method: &Method, uri: &Uri, result: Response) -> Result { diff --git a/src/router/run.rs b/src/router/run.rs index ea8a7666..605168b8 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -125,7 +125,6 @@ async fn handle_shutdown(server: Arc, tx: Sender<()>, handle: axum_serve let timeout = Duration::from_secs(36); debug!( ?timeout, - spawn_active = ?server.metrics.requests_spawn_active.load(Ordering::Relaxed), handle_active = ?server.metrics.requests_handle_active.load(Ordering::Relaxed), "Notifying for graceful shutdown" ); diff --git a/src/router/serve/plain.rs b/src/router/serve/plain.rs index 0e971f3c..535282b9 100644 --- a/src/router/serve/plain.rs +++ b/src/router/serve/plain.rs @@ -24,27 +24,20 @@ pub(super) async fn serve( info!("Listening on {addrs:?}"); while join_set.join_next().await.is_some() {} - let spawn_active = server.metrics.requests_spawn_active.load(Ordering::Relaxed); let handle_active = server .metrics .requests_handle_active .load(Ordering::Relaxed); debug_info!( - spawn_finished = server - .metrics - .requests_spawn_finished - 
.load(Ordering::Relaxed), handle_finished = server .metrics .requests_handle_finished .load(Ordering::Relaxed), panics = server.metrics.requests_panic.load(Ordering::Relaxed), - spawn_active, handle_active, "Stopped listening on {addrs:?}", ); - debug_assert!(spawn_active == 0, "active request tasks are not joined"); debug_assert!(handle_active == 0, "active request handles still pending"); Ok(()) diff --git a/src/router/serve/unix.rs b/src/router/serve/unix.rs index 6855b34c..6a030c30 100644 --- a/src/router/serve/unix.rs +++ b/src/router/serve/unix.rs @@ -159,7 +159,12 @@ async fn fini(server: &Arc, listener: UnixListener, mut tasks: JoinSet<( drop(listener); debug!("Waiting for requests to finish..."); - while server.metrics.requests_spawn_active.load(Ordering::Relaxed) > 0 { + while server + .metrics + .requests_handle_active + .load(Ordering::Relaxed) + .gt(&0) + { tokio::select! { task = tasks.join_next() => if task.is_none() { break; }, () = sleep(FINI_POLL_INTERVAL) => {}, From f698254c412b5a142567f6b0ad710aa212c9b34d Mon Sep 17 00:00:00 2001 From: morguldir Date: Fri, 31 Jan 2025 02:36:14 +0100 Subject: [PATCH 126/328] make registration tokens reloadable, and allow configuring multiple Signed-off-by: morguldir --- conduwuit-example.toml | 5 +++-- src/admin/room/alias.rs | 15 +++++++++------ src/core/config/mod.rs | 5 +++-- src/service/uiaa/mod.rs | 35 ++++++++++++++++++++++++++--------- 4 files changed, 41 insertions(+), 19 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 4062ba99..3fd95044 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -406,8 +406,9 @@ # #registration_token = -# Path to a file on the system that gets read for the registration token. -# this config option takes precedence/priority over "registration_token". +# Path to a file on the system that gets read for additional registration +# tokens. Multiple tokens can be added if you separate them with +# whitespace # # conduwuit must be able to access the file, and it must not be empty # diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index 9710cfc8..d3b956e1 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -72,7 +72,7 @@ pub(super) async fn reprocess( ))), }; match command { - | RoomAliasCommand::Set { force, room_id, .. } => + | RoomAliasCommand::Set { force, room_id, .. } => { match (force, services.rooms.alias.resolve_local_alias(&room_alias).await) { | (true, Ok(id)) => { match services.rooms.alias.set_alias( @@ -106,8 +106,9 @@ pub(super) async fn reprocess( ))), } }, - }, - | RoomAliasCommand::Remove { .. } => + } + }, + | RoomAliasCommand::Remove { .. } => { match services.rooms.alias.resolve_local_alias(&room_alias).await { | Ok(id) => match services .rooms @@ -124,15 +125,17 @@ pub(super) async fn reprocess( }, | Err(_) => Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), - }, - | RoomAliasCommand::Which { .. } => + } + }, + | RoomAliasCommand::Which { .. } => { match services.rooms.alias.resolve_local_alias(&room_alias).await { | Ok(id) => Ok(RoomMessageEventContent::text_plain(format!( "Alias resolves to {id}" ))), | Err(_) => Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), - }, + } + }, | RoomAliasCommand::List { .. 
} => unreachable!(), } }, diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 415c9ba9..ff038975 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -510,8 +510,9 @@ pub struct Config { /// display: sensitive pub registration_token: Option, - /// Path to a file on the system that gets read for the registration token. - /// this config option takes precedence/priority over "registration_token". + /// Path to a file on the system that gets read for additional registration + /// tokens. Multiple tokens can be added if you separate them with + /// whitespace /// /// conduwuit must be able to access the file, and it must not be empty /// diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index f7e55251..7084f32a 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -1,5 +1,5 @@ use std::{ - collections::BTreeMap, + collections::{BTreeMap, HashSet}, sync::{Arc, RwLock}, }; @@ -17,7 +17,7 @@ use ruma::{ CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedUserId, UserId, }; -use crate::{globals, users, Dep}; +use crate::{config, globals, users, Dep}; pub struct Service { userdevicesessionid_uiaarequest: RwLock, @@ -28,6 +28,7 @@ pub struct Service { struct Services { globals: Dep, users: Dep, + config: Dep, } struct Data { @@ -49,6 +50,7 @@ impl crate::Service for Service { services: Services { globals: args.depend::("globals"), users: args.depend::("users"), + config: args.depend::("config"), }, })) } @@ -56,6 +58,26 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } +#[implement(Service)] +pub async fn read_tokens(&self) -> Result> { + let mut tokens = HashSet::new(); + if let Some(file) = &self.services.config.registration_token_file.as_ref() { + match std::fs::read_to_string(file) { + | Ok(text) => { + text.split_ascii_whitespace().for_each(|token| { + tokens.insert(token.to_owned()); + }); + }, + | Err(e) => error!("Failed to read the registration token file: {e}"), + } + }; + if let Some(token) = &self.services.config.registration_token { + tokens.insert(token.to_owned()); + } + + Ok(tokens) +} + /// Creates a new Uiaa session. Make sure the session token is unique. #[implement(Service)] pub fn create( @@ -152,13 +174,8 @@ pub async fn try_auth( uiaainfo.completed.push(AuthType::Password); }, | AuthData::RegistrationToken(t) => { - if self - .services - .globals - .registration_token - .as_ref() - .is_some_and(|reg_token| t.token.trim() == reg_token) - { + let tokens = self.read_tokens().await?; + if tokens.contains(t.token.trim()) { uiaainfo.completed.push(AuthType::RegistrationToken); } else { uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody { From e161e5dd61b006056ef35fbd034492130bffe150 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 00:54:00 +0000 Subject: [PATCH 127/328] add pair_of! macro Signed-off-by: Jason Volk --- src/core/utils/mod.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 1a4b52da..c2d8ed45 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -84,6 +84,17 @@ macro_rules! apply { }; } +#[macro_export] +macro_rules! pair_of { + ($decl:ty) => { + ($decl, $decl) + }; + + ($init:expr) => { + ($init, $init) + }; +} + /// Functor for truthy #[macro_export] macro_rules! 
is_true { From 4ff1155bf0aefddd02e34ed9c709db25c0da3ecd Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 01:23:27 +0000 Subject: [PATCH 128/328] reroll encrypted_room branch in incremental sync state Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 150 ++++++++++++++++++-------------------- 1 file changed, 69 insertions(+), 81 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index cd4dfc90..f5b612e4 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -6,7 +6,7 @@ use std::{ use axum::extract::State; use conduwuit::{ - at, err, error, extract_variant, is_equal_to, + at, err, error, extract_variant, is_equal_to, pair_of, pdu::EventHash, result::FlatOk, utils::{ @@ -16,7 +16,7 @@ use conduwuit::{ stream::{BroadbandExt, Tools, WidebandExt}, BoolExt, IterStream, ReadyExt, TryFutureExtExt, }, - Error, PduCount, PduEvent, Result, + PduCount, PduEvent, Result, }; use conduwuit_service::{ rooms::{ @@ -64,6 +64,8 @@ struct StateChanges { invited_member_count: Option, joined_since_last_sync: bool, state_events: Vec, + device_list_updates: HashSet, + left_encrypted_users: HashSet, } type PresenceUpdates = HashMap; @@ -325,18 +327,16 @@ pub(crate) async fn build_sync_events( // If the user doesn't share an encrypted room with the target anymore, we need // to tell them - let device_list_left = left_encrypted_users + let device_list_left: HashSet<_> = left_encrypted_users .into_iter() .stream() .broad_filter_map(|user_id| async move { - let no_shared_encrypted_room = - !share_encrypted_room(services, sender_user, &user_id, None).await; - no_shared_encrypted_room.then_some(user_id) - }) - .ready_fold(HashSet::new(), |mut device_list_left, user_id| { - device_list_left.insert(user_id); - device_list_left + share_encrypted_room(services, sender_user, &user_id, None) + .await + .eq(&false) + .then_some(user_id) }) + .collect() .await; let response = sync_events::v3::Response { @@ -730,14 +730,14 @@ async fn load_joined_room( .into(); let witness = witness.await; - let mut device_list_updates = HashSet::::new(); - let mut left_encrypted_users = HashSet::::new(); let StateChanges { heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events, + mut device_list_updates, + left_encrypted_users, } = if no_state_changes { StateChanges::default() } else { @@ -747,8 +747,6 @@ async fn load_joined_room( room_id, full_state, filter, - &mut device_list_updates, - &mut left_encrypted_users, since_shortstatehash, current_shortstatehash, joined_since_last_sync, @@ -919,8 +917,6 @@ async fn calculate_state_changes( room_id: &RoomId, full_state: bool, filter: &FilterDefinition, - device_list_updates: &mut HashSet, - left_encrypted_users: &mut HashSet, since_shortstatehash: Option, current_shortstatehash: ShortStateHash, joined_since_last_sync: bool, @@ -944,8 +940,6 @@ async fn calculate_state_changes( room_id, full_state, filter, - device_list_updates, - left_encrypted_users, since_shortstatehash, current_shortstatehash, joined_since_last_sync, @@ -1013,6 +1007,7 @@ async fn calculate_state_initial( invited_member_count, joined_since_last_sync: true, state_events, + ..Default::default() }) } @@ -1024,8 +1019,6 @@ async fn calculate_state_incremental( room_id: &RoomId, full_state: bool, _filter: &FilterDefinition, - device_list_updates: &mut HashSet, - left_encrypted_users: &mut HashSet, since_shortstatehash: Option, current_shortstatehash: ShortStateHash, joined_since_last_sync: bool, @@ -1063,79 +1056,72 @@ async fn 
calculate_state_incremental( .await; } - let encrypted_room = services - .rooms - .state_accessor - .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") - .is_ok(); - let since_encryption = services .rooms .state_accessor .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "") .is_ok(); - let (encrypted_room, since_encryption) = join(encrypted_room, since_encryption).await; + let encrypted_room = services + .rooms + .state_accessor + .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") + .is_ok() + .await; - // Calculations: - let new_encrypted_room = encrypted_room && !since_encryption; + let (mut device_list_updates, left_encrypted_users) = delta_state_events + .iter() + .stream() + .ready_filter(|_| encrypted_room) + .ready_filter(|state_event| state_event.kind == RoomMember) + .ready_filter_map(|state_event| { + let content = state_event.get_content().ok()?; + let user_id = state_event.state_key.as_ref()?.parse().ok()?; + Some((content, user_id)) + }) + .ready_filter(|(_, user_id): &(RoomMemberEventContent, OwnedUserId)| { + user_id != sender_user + }) + .fold_default(|(mut dlu, mut leu): pair_of!(HashSet<_>), (content, user_id)| async move { + use MembershipState::*; + + let shares_encrypted_room = + |user_id| share_encrypted_room(services, sender_user, user_id, Some(room_id)); + + match content.membership { + | Join if !shares_encrypted_room(&user_id).await => dlu.insert(user_id), + | Leave => leu.insert(user_id), + | _ => false, + }; + + (dlu, leu) + }) + .await; + + // If the user is in a new encrypted room, give them all joined users + let new_encrypted_room = encrypted_room && !since_encryption.await; + if joined_since_last_sync && encrypted_room || new_encrypted_room { + services + .rooms + .state_cache + .room_members(room_id) + .ready_filter(|&user_id| sender_user != user_id) + .map(ToOwned::to_owned) + .broad_filter_map(|user_id| async move { + share_encrypted_room(services, sender_user, &user_id, Some(room_id)) + .await + .or_some(user_id) + }) + .ready_for_each(|user_id| { + device_list_updates.insert(user_id); + }) + .await; + } let send_member_count = delta_state_events .iter() .any(|event| event.kind == RoomMember); - if encrypted_room { - for state_event in &delta_state_events { - if state_event.kind != RoomMember { - continue; - } - - if let Some(state_key) = &state_event.state_key { - let user_id = UserId::parse(state_key) - .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; - - if user_id == sender_user { - continue; - } - - let content: RoomMemberEventContent = state_event.get_content()?; - - match content.membership { - | MembershipState::Join => { - // A new user joined an encrypted room - if !share_encrypted_room(services, sender_user, user_id, Some(room_id)) - .await - { - device_list_updates.insert(user_id.into()); - } - }, - | MembershipState::Leave => { - // Write down users that have left encrypted rooms we are in - left_encrypted_users.insert(user_id.into()); - }, - | _ => {}, - } - } - } - } - - if joined_since_last_sync && encrypted_room || new_encrypted_room { - let updates: Vec = services - .rooms - .state_cache - .room_members(room_id) - .ready_filter(|user_id| sender_user != *user_id) - .filter_map(|user_id| { - share_encrypted_room(services, sender_user, user_id, Some(room_id)) - .map(|res| res.or_some(user_id.to_owned())) - }) - .collect() - .await; - - // If the user is in a new encrypted room, give them all joined users - device_list_updates.extend(updates); - } - let 
(joined_member_count, invited_member_count, heroes) = if send_member_count { calculate_counts(services, room_id, sender_user).await? } else { @@ -1148,6 +1134,8 @@ async fn calculate_state_incremental( invited_member_count, joined_since_last_sync, state_events: delta_state_events, + device_list_updates, + left_encrypted_users, }) } From 4e0cedbe5122c478e63e26b3f5156475629ada3e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 05:05:32 +0000 Subject: [PATCH 129/328] simplify v3 sync presence collecting Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 55 +++++++-------------------------------- 1 file changed, 10 insertions(+), 45 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index f5b612e4..cd95fa42 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -1,6 +1,6 @@ use std::{ cmp::{self}, - collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, time::Duration, }; @@ -45,7 +45,7 @@ use ruma::{ uiaa::UiaaResponse, }, events::{ - presence::PresenceEvent, + presence::{PresenceEvent, PresenceEventContent}, room::member::{MembershipState, RoomMemberEventContent}, AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType::*, @@ -68,7 +68,7 @@ struct StateChanges { left_encrypted_users: HashSet, } -type PresenceUpdates = HashMap; +type PresenceUpdates = HashMap; /// # `GET /_matrix/client/r0/sync` /// @@ -351,9 +351,11 @@ pub(crate) async fn build_sync_events( next_batch: next_batch.to_string(), presence: Presence { events: presence_updates - .unwrap_or_default() - .into_values() - .map(|v| Raw::new(&v).expect("PresenceEvent always serializes successfully")) + .into_iter() + .flat_map(IntoIterator::into_iter) + .map(|(sender, content)| PresenceEvent { content, sender }) + .map(|ref event| Raw::new(event)) + .filter_map(Result::ok) .collect(), }, rooms: Rooms { @@ -390,45 +392,8 @@ async fn process_presence_updates( .map_ok(move |event| (user_id, event)) .ok() }) - .ready_fold(PresenceUpdates::new(), |mut updates, (user_id, event)| { - match updates.entry(user_id.into()) { - | Entry::Vacant(slot) => { - let mut new_event = event; - new_event.content.last_active_ago = match new_event.content.currently_active { - | Some(true) => None, - | _ => new_event.content.last_active_ago, - }; - - slot.insert(new_event); - }, - | Entry::Occupied(mut slot) => { - let curr_event = slot.get_mut(); - let curr_content = &mut curr_event.content; - let new_content = event.content; - - // Update existing presence event with more info - curr_content.presence = new_content.presence; - curr_content.status_msg = new_content - .status_msg - .or_else(|| curr_content.status_msg.take()); - curr_content.displayname = new_content - .displayname - .or_else(|| curr_content.displayname.take()); - curr_content.avatar_url = new_content - .avatar_url - .or_else(|| curr_content.avatar_url.take()); - curr_content.currently_active = new_content - .currently_active - .or(curr_content.currently_active); - curr_content.last_active_ago = match curr_content.currently_active { - | Some(true) => None, - | _ => new_content.last_active_ago.or(curr_content.last_active_ago), - }; - }, - }; - - updates - }) + .map(|(user_id, event)| (user_id.to_owned(), event.content)) + .collect() .await } From a4ef04cd1427ea2eeb474775e7c4c86937d063ab Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 08:31:58 +0000 Subject: [PATCH 130/328] fix room join completion taking wrong sync branch 
Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index cd95fa42..e3f559f5 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -887,7 +887,7 @@ async fn calculate_state_changes( joined_since_last_sync: bool, witness: Option<&Witness>, ) -> Result { - if since_shortstatehash.is_none() || joined_since_last_sync { + if since_shortstatehash.is_none() { calculate_state_initial( services, sender_user, From 6983798487ec563be83a3ba8739afa9977d98741 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 08:34:32 +0000 Subject: [PATCH 131/328] implement lazy-loading for incremental sync Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 182 ++++++++++++++++++++------------------ 1 file changed, 98 insertions(+), 84 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index e3f559f5..49246514 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -55,7 +55,10 @@ use ruma::{ }; use super::{load_timeline, share_encrypted_room}; -use crate::{client::ignored_filter, Ruma, RumaResponse}; +use crate::{ + client::{ignored_filter, lazy_loading_witness}, + Ruma, RumaResponse, +}; #[derive(Default)] struct StateChanges { @@ -633,10 +636,6 @@ async fn load_joined_room( }) .into(); - let no_state_changes = timeline_pdus.is_empty() - && (since_shortstatehash.is_none() - || since_shortstatehash.is_some_and(is_equal_to!(current_shortstatehash))); - let since_sender_member: OptionFuture<_> = since_shortstatehash .map(|short| { services @@ -658,11 +657,7 @@ async fn load_joined_room( let lazy_loading_enabled = filter.room.state.lazy_load_options.is_enabled() || filter.room.timeline.lazy_load_options.is_enabled(); - let generate_witness = - lazy_loading_enabled && (since_shortstatehash.is_none() || joined_since_last_sync); - - let lazy_reset = lazy_loading_enabled && since_shortstatehash.is_none(); - + let lazy_reset = since_shortstatehash.is_none(); let lazy_loading_context = &lazy_loading::Context { user_id: sender_user, device_id: sender_device, @@ -677,24 +672,10 @@ async fn load_joined_room( .into(); lazy_load_reset.await; - let witness: Option = generate_witness.then(|| { - timeline_pdus - .iter() - .map(|(_, pdu)| pdu.sender.clone()) - .chain(receipt_events.keys().cloned()) - .collect() - }); - - let witness: OptionFuture<_> = witness - .map(|witness| { - services - .rooms - .lazy_loading - .witness_retain(witness, lazy_loading_context) - }) + let witness: OptionFuture<_> = lazy_loading_enabled + .then(|| lazy_loading_witness(services, lazy_loading_context, timeline_pdus.iter())) .into(); - let witness = witness.await; let StateChanges { heroes, joined_member_count, @@ -703,23 +684,19 @@ async fn load_joined_room( state_events, mut device_list_updates, left_encrypted_users, - } = if no_state_changes { - StateChanges::default() - } else { - calculate_state_changes( - services, - sender_user, - room_id, - full_state, - filter, - since_shortstatehash, - current_shortstatehash, - joined_since_last_sync, - witness.as_ref(), - ) - .boxed() - .await? 
- }; + } = calculate_state_changes( + services, + sender_user, + room_id, + full_state, + filter, + since_shortstatehash, + current_shortstatehash, + joined_since_last_sync, + witness.await.as_ref(), + ) + .boxed() + .await?; let account_data_events = services .account_data @@ -908,6 +885,7 @@ async fn calculate_state_changes( since_shortstatehash, current_shortstatehash, joined_since_last_sync, + witness, ) .await } @@ -920,7 +898,7 @@ async fn calculate_state_initial( sender_user: &UserId, room_id: &RoomId, full_state: bool, - filter: &FilterDefinition, + _filter: &FilterDefinition, current_shortstatehash: ShortStateHash, witness: Option<&Witness>, ) -> Result { @@ -938,20 +916,14 @@ async fn calculate_state_initial( .zip(event_ids.into_iter().stream()) .ready_filter_map(|item| Some((item.0.ok()?, item.1))) .ready_filter_map(|((event_type, state_key), event_id)| { - let lazy_load_enabled = filter.room.state.lazy_load_options.is_enabled() - || filter.room.timeline.lazy_load_options.is_enabled(); - - if lazy_load_enabled + let lazy = !full_state && event_type == StateEventType::RoomMember - && !full_state && state_key.as_str().try_into().is_ok_and(|user_id: &UserId| { sender_user != user_id && witness.is_some_and(|witness| !witness.contains(user_id)) - }) { - return None; - } + }); - Some(event_id) + lazy.or_some(event_id) }) .broad_filter_map(|event_id: OwnedEventId| async move { services.rooms.timeline.get_pdu(&event_id).await.ok() @@ -978,7 +950,7 @@ async fn calculate_state_initial( #[tracing::instrument(name = "incremental", level = "trace", skip_all)] #[allow(clippy::too_many_arguments)] -async fn calculate_state_incremental( +async fn calculate_state_incremental<'a>( services: &Services, sender_user: &UserId, room_id: &RoomId, @@ -987,39 +959,80 @@ async fn calculate_state_incremental( since_shortstatehash: Option, current_shortstatehash: ShortStateHash, joined_since_last_sync: bool, + witness: Option<&'a Witness>, ) -> Result { - // Incremental /sync - let since_shortstatehash = - since_shortstatehash.expect("missing since_shortstatehash on incremental sync"); + let since_shortstatehash = since_shortstatehash.unwrap_or(current_shortstatehash); - let mut delta_state_events = Vec::new(); + let state_changed = since_shortstatehash != current_shortstatehash; - if since_shortstatehash != current_shortstatehash { - let current_state_ids = services + let state_get_id = |user_id: &'a UserId| { + services .rooms .state_accessor - .state_full_ids(current_shortstatehash) - .collect(); + .state_get_id(current_shortstatehash, &StateEventType::RoomMember, user_id.as_str()) + .ok() + }; - let since_state_ids = services - .rooms - .state_accessor - .state_full_ids(since_shortstatehash) - .collect(); + let lazy_state_ids: OptionFuture<_> = witness + .map(|witness| { + witness + .iter() + .stream() + .broad_filter_map(|user_id| state_get_id(user_id)) + .collect::>() + }) + .into(); - let (current_state_ids, since_state_ids): ( - HashMap<_, OwnedEventId>, - HashMap<_, OwnedEventId>, - ) = join(current_state_ids, since_state_ids).await; + let current_state_ids: OptionFuture<_> = state_changed + .then(|| { + services + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .collect::>() + }) + .into(); - current_state_ids - .iter() - .stream() - .ready_filter(|(key, id)| full_state || since_state_ids.get(key) != Some(id)) - .wide_filter_map(|(_, id)| services.rooms.timeline.get_pdu(id).ok()) - .ready_for_each(|pdu| delta_state_events.push(pdu)) - .await; - } + let since_state_ids: 
OptionFuture<_> = (state_changed && !full_state) + .then(|| { + services + .rooms + .state_accessor + .state_full_ids(since_shortstatehash) + .collect::>() + }) + .into(); + + let lazy_state_ids = lazy_state_ids + .map(Option::into_iter) + .map(|iter| iter.flat_map(Vec::into_iter)) + .map(IterStream::stream) + .flatten_stream(); + + let ref since_state_ids = since_state_ids.shared(); + let delta_state_events = current_state_ids + .map(Option::into_iter) + .map(|iter| iter.flat_map(Vec::into_iter)) + .map(IterStream::stream) + .flatten_stream() + .filter_map(|(shortstatekey, event_id): (u64, OwnedEventId)| async move { + since_state_ids + .clone() + .await + .is_none_or(|since_state| since_state.get(&shortstatekey) != Some(&event_id)) + .then_some(event_id) + }) + .chain(lazy_state_ids) + .broad_filter_map(|event_id: OwnedEventId| async move { + services + .rooms + .timeline + .get_pdu(&event_id) + .await + .map(move |pdu| (event_id, pdu)) + .ok() + }) + .collect::>(); let since_encryption = services .rooms @@ -1031,11 +1044,12 @@ async fn calculate_state_incremental( .rooms .state_accessor .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") - .is_ok() - .await; + .is_ok(); + + let (delta_state_events, encrypted_room) = join(delta_state_events, encrypted_room).await; let (mut device_list_updates, left_encrypted_users) = delta_state_events - .iter() + .values() .stream() .ready_filter(|_| encrypted_room) .ready_filter(|state_event| state_event.kind == RoomMember) @@ -1084,7 +1098,7 @@ async fn calculate_state_incremental( } let send_member_count = delta_state_events - .iter() + .values() .any(|event| event.kind == RoomMember); let (joined_member_count, invited_member_count, heroes) = if send_member_count { @@ -1098,9 +1112,9 @@ async fn calculate_state_incremental( joined_member_count, invited_member_count, joined_since_last_sync, - state_events: delta_state_events, device_list_updates, left_encrypted_users, + state_events: delta_state_events.into_values().collect(), }) } From 09bc71caaba40321ec0f987574a94e788175c4f9 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 09:08:13 +0000 Subject: [PATCH 132/328] fix missed concurrent fetch opportunities in sender (ffd0fd42424a) Signed-off-by: Jason Volk --- src/service/sending/sender.rs | 41 +++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 363bb994..f19b69da 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -13,7 +13,12 @@ use conduwuit::{ debug, err, error, result::LogErr, trace, - utils::{calculate_hash, continue_exponential_backoff_secs, stream::IterStream, ReadyExt}, + utils::{ + calculate_hash, continue_exponential_backoff_secs, + future::TryExtExt, + stream::{BroadbandExt, IterStream, WidebandExt}, + ReadyExt, + }, warn, Error, Result, }; use futures::{ @@ -474,20 +479,25 @@ impl Service { since: (u64, u64), max_edu_count: &AtomicU64, ) -> Option { - let server_rooms = self.services.state_cache.server_rooms(server_name); - - pin_mut!(server_rooms); let mut num = 0; - let mut receipts = BTreeMap::::new(); - while let Some(room_id) = server_rooms.next().await { - let receipt_map = self - .select_edus_receipts_room(room_id, since, max_edu_count, &mut num) - .await; + let receipts: BTreeMap = self + .services + .state_cache + .server_rooms(server_name) + .map(ToOwned::to_owned) + .broad_filter_map(|room_id| async move { + let receipt_map = self + 
.select_edus_receipts_room(&room_id, since, max_edu_count, &mut num) + .await; - if !receipt_map.read.is_empty() { - receipts.insert(room_id.into(), receipt_map); - } - } + receipt_map + .read + .is_empty() + .eq(&false) + .then_some((room_id, receipt_map)) + }) + .collect() + .await; if receipts.is_empty() { return None; @@ -820,9 +830,8 @@ impl Service { | _ => None, }) .stream() - .then(|pdu_id| self.services.timeline.get_pdu_json_from_id(pdu_id)) - .ready_filter_map(Result::ok) - .then(|pdu| self.convert_to_outgoing_federation_event(pdu)) + .wide_filter_map(|pdu_id| self.services.timeline.get_pdu_json_from_id(pdu_id).ok()) + .wide_then(|pdu| self.convert_to_outgoing_federation_event(pdu)) .collect() .await; From 2fa9621f3a358740917af7a55c5d0be1e1d79ae4 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 11:54:06 +0000 Subject: [PATCH 133/328] flatten state_full_shortids Signed-off-by: Jason Volk --- src/service/rooms/state/mod.rs | 60 +++++++++++-------------- src/service/rooms/state_accessor/mod.rs | 47 +++++++++---------- 2 files changed, 48 insertions(+), 59 deletions(-) diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 8cb4e586..1b0d0d58 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -429,60 +429,54 @@ impl Service { sender: &UserId, state_key: Option<&str>, content: &serde_json::value::RawValue, - ) -> Result>> { + ) -> Result> { let Ok(shortstatehash) = self.get_room_shortstatehash(room_id).await else { return Ok(HashMap::new()); }; - let mut sauthevents: HashMap<_, _> = - state_res::auth_types_for_event(kind, sender, state_key, content)? - .iter() - .stream() - .broad_filter_map(|(event_type, state_key)| { - self.services - .short - .get_shortstatekey(event_type, state_key) - .map_ok(move |ssk| (ssk, (event_type, state_key))) - .map(Result::ok) - }) - .map(|(ssk, (event_type, state_key))| { - (ssk, (event_type.to_owned(), state_key.to_owned())) - }) - .collect() - .await; + let auth_types = state_res::auth_types_for_event(kind, sender, state_key, content)?; + + let sauthevents: HashMap<_, _> = auth_types + .iter() + .stream() + .broad_filter_map(|(event_type, state_key)| { + self.services + .short + .get_shortstatekey(event_type, state_key) + .map_ok(move |ssk| (ssk, (event_type, state_key))) + .map(Result::ok) + }) + .collect() + .await; let (state_keys, event_ids): (Vec<_>, Vec<_>) = self .services .state_accessor .state_full_shortids(shortstatehash) - .await - .map_err(|e| err!(Database(error!(?room_id, ?shortstatehash, "{e:?}"))))? 
- .into_iter() - .filter_map(|(shortstatekey, shorteventid)| { + .ready_filter_map(Result::ok) + .ready_filter_map(|(shortstatekey, shorteventid)| { sauthevents - .remove(&shortstatekey) - .map(|(event_type, state_key)| ((event_type, state_key), shorteventid)) + .get(&shortstatekey) + .map(|(ty, sk)| ((ty, sk), shorteventid)) }) - .unzip(); + .unzip() + .await; - let auth_pdus = self - .services + self.services .short .multi_get_eventid_from_short(event_ids.into_iter().stream()) .zip(state_keys.into_iter().stream()) - .ready_filter_map(|(event_id, tsk)| Some((tsk, event_id.ok()?))) - .broad_filter_map(|(tsk, event_id): (_, OwnedEventId)| async move { + .ready_filter_map(|(event_id, (ty, sk))| Some(((ty, sk), event_id.ok()?))) + .broad_filter_map(|((ty, sk), event_id): (_, OwnedEventId)| async move { self.services .timeline .get_pdu(&event_id) .await - .map(Arc::new) - .map(move |pdu| (tsk, pdu)) + .map(move |pdu| (((*ty).clone(), (*sk).clone()), pdu)) .ok() }) .collect() - .await; - - Ok(auth_pdus) + .map(Ok) + .await } } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 0f5520bb..98aac138 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,6 +1,7 @@ use std::{ borrow::Borrow, fmt::Write, + ops::Deref, sync::{Arc, Mutex as StdMutex, Mutex}, }; @@ -10,8 +11,7 @@ use conduwuit::{ utils, utils::{ math::{usize_from_f64, Expected}, - stream::BroadbandExt, - IterStream, ReadyExt, + stream::{BroadbandExt, IterStream, ReadyExt, TryExpect}, }, Err, Error, PduEvent, Result, }; @@ -158,12 +158,8 @@ impl Service { ) -> impl Stream + Send + '_ { let short_ids = self .state_full_shortids(shortstatehash) - .map(|result| result.expect("missing shortstatehash")) - .map(Vec::into_iter) - .map(|iter| iter.map(at!(1))) - .map(IterStream::stream) - .flatten_stream() - .boxed(); + .expect_ok() + .map(at!(1)); self.services .short @@ -187,9 +183,8 @@ impl Service { { let shortids = self .state_full_shortids(shortstatehash) - .map(|result| result.expect("missing shortstatehash")) - .map(|vec| vec.into_iter().unzip()) - .boxed() + .expect_ok() + .unzip() .shared(); let shortstatekeys = shortids @@ -255,25 +250,25 @@ impl Service { } #[inline] - pub async fn state_full_shortids( + pub fn state_full_shortids( &self, shortstatehash: ShortStateHash, - ) -> Result> { - let shortids = self - .services + ) -> impl Stream> + Send + '_ { + self.services .state_compressor .load_shortstatehash_info(shortstatehash) - .await - .map_err(|e| err!(Database("Missing state IDs: {e}")))? 
- .pop() - .expect("there is always one layer") - .full_state - .iter() - .copied() - .map(parse_compressed_state_event) - .collect(); - - Ok(shortids) + .map_err(|e| err!(Database("Missing state IDs: {e}"))) + .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) + .map_ok(|full_state| { + full_state + .deref() + .iter() + .copied() + .map(parse_compressed_state_event) + .collect() + }) + .map_ok(|vec: Vec<_>| vec.into_iter().try_stream()) + .try_flatten_stream() } /// Returns a single PDU from `room_id` with key (`event_type`, From ea49b60273c987cc673c3aad439c6fbb50bb795f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 1 Feb 2025 22:28:09 +0000 Subject: [PATCH 134/328] add Option support to database deserializer Signed-off-by: Jason Volk --- src/database/de.rs | 23 ++++-- src/database/tests.rs | 159 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 176 insertions(+), 6 deletions(-) diff --git a/src/database/de.rs b/src/database/de.rs index 7cc8f00a..8e914fcc 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -22,7 +22,7 @@ pub(crate) fn from_slice<'a, T>(buf: &'a [u8]) -> Result where T: Deserialize<'a>, { - let mut deserializer = Deserializer { buf, pos: 0, seq: false }; + let mut deserializer = Deserializer { buf, pos: 0, rec: 0, seq: false }; T::deserialize(&mut deserializer).debug_inspect(|_| { deserializer @@ -35,6 +35,7 @@ where pub(crate) struct Deserializer<'de> { buf: &'de [u8], pos: usize, + rec: usize, seq: bool, } @@ -107,7 +108,7 @@ impl<'de> Deserializer<'de> { /// consumed None is returned instead. #[inline] fn record_peek_byte(&self) -> Option { - let started = self.pos != 0; + let started = self.pos != 0 || self.rec > 0; let buf = &self.buf[self.pos..]; debug_assert!( !started || buf[0] == Self::SEP, @@ -121,13 +122,14 @@ impl<'de> Deserializer<'de> { /// the start of the next record. (Case for some sequences) #[inline] fn record_start(&mut self) { - let started = self.pos != 0; + let started = self.pos != 0 || self.rec > 0; debug_assert!( !started || self.buf[self.pos] == Self::SEP, "Missing expected record separator at current position" ); self.inc_pos(started.into()); + self.inc_rec(1); } /// Consume all remaining bytes, which may include record separators, @@ -157,6 +159,9 @@ impl<'de> Deserializer<'de> { debug_assert!(self.pos <= self.buf.len(), "pos out of range"); } + #[inline] + fn inc_rec(&mut self, n: usize) { self.rec = self.rec.saturating_add(n); } + /// Unconsumed input bytes. 
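// The new `rec` counter above and the Option handling added below work
// together: record_start/record_peek_byte treat the input as "started" once
// bytes have been consumed or a record has begun, and deserialize_option yields
// None whenever the cursor sits at the end of the buffer or directly on a field
// separator (an empty segment), and Some otherwise. The tuple tests added
// alongside exercise the None/Some combinations for two- and three-element keys.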
#[inline] fn remaining(&self) -> Result { @@ -270,8 +275,16 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { } #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_option>(self, _visitor: V) -> Result { - unhandled!("deserialize Option not implemented") + fn deserialize_option>(self, visitor: V) -> Result { + if self + .buf + .get(self.pos) + .is_none_or(|b| *b == Deserializer::SEP) + { + visitor.visit_none() + } else { + visitor.visit_some(self) + } } #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] diff --git a/src/database/tests.rs b/src/database/tests.rs index 2f143698..e6c85983 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -3,7 +3,7 @@ use std::fmt::Debug; use arrayvec::ArrayVec; -use conduwuit::ruma::{serde::Raw, RoomId, UserId}; +use conduwuit::ruma::{serde::Raw, EventId, RoomId, UserId}; use serde::Serialize; use crate::{ @@ -389,3 +389,160 @@ fn de_complex() { assert_eq!(arr, key, "deserialization of serialization does not match"); } + +#[test] +fn serde_tuple_option_value_some() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + let bb: (&RoomId, Option<&UserId>) = (room_id, Some(user_id)); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (&RoomId, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(bb.1, cc.1); + assert_eq!(cc.0, bb.0); +} + +#[test] +fn serde_tuple_option_value_none() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + + let bb: (&RoomId, Option<&UserId>) = (room_id, None); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (&RoomId, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(None, cc.1); + assert_eq!(cc.0, bb.0); +} + +#[test] +fn serde_tuple_option_none_value() { + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + let bb: (Option<&RoomId>, &UserId) = (None, user_id); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, &UserId) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(None, cc.0); + assert_eq!(cc.1, bb.1); +} + +#[test] +fn serde_tuple_option_some_value() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + let bb: (Option<&RoomId>, &UserId) = (Some(room_id), user_id); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, &UserId) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(bb.0, cc.0); + assert_eq!(cc.1, bb.1); +} + +#[test] +fn serde_tuple_option_some_some() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + let user_id: &UserId = 
"@user:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + let bb: (Option<&RoomId>, Option<&UserId>) = (Some(room_id), Some(user_id)); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(cc.0, bb.0); + assert_eq!(bb.1, cc.1); +} + +#[test] +fn serde_tuple_option_none_none() { + let aa = vec![0xFF]; + + let bb: (Option<&RoomId>, Option<&UserId>) = (None, None); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(cc.0, bb.0); + assert_eq!(None, cc.1); +} + +#[test] +fn serde_tuple_option_some_none_some() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + let bb: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) = + (Some(room_id), None, Some(user_id)); + + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(bb.0, cc.0); + assert_eq!(None, cc.1); + assert_eq!(bb.1, cc.1); + assert_eq!(bb.2, cc.2); +} + +#[test] +fn serde_tuple_option_none_none_none() { + let aa = vec![0xFF, 0xFF]; + + let bb: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) = (None, None, None); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(None, cc.0); + assert_eq!(bb, cc); +} From 4add39d0fedcbe7946c6dfffac33d1e48111ea8b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 15:50:09 +0000 Subject: [PATCH 135/328] cache compressed state in a sorted structure for logarithmic queries with partial keys Signed-off-by: Jason Volk --- src/api/client/membership.rs | 9 +- .../rooms/event_handler/resolve_state.rs | 6 +- .../event_handler/upgrade_outlier_pdu.rs | 15 ++- src/service/rooms/state/mod.rs | 28 +++--- src/service/rooms/state_accessor/mod.rs | 99 ++++++++++++++----- src/service/rooms/state_compressor/mod.rs | 30 +++--- src/service/rooms/timeline/mod.rs | 4 +- 7 files changed, 118 insertions(+), 73 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index d80aff0c..449d44d5 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -46,7 +46,10 @@ use ruma::{ use service::{ appservice::RegistrationInfo, pdu::gen_event_id, - rooms::{state::RoomMutexGuard, state_compressor::HashSetCompressStateEvent}, + rooms::{ + state::RoomMutexGuard, + state_compressor::{CompressedState, HashSetCompressStateEvent}, + }, Services, }; @@ -1169,7 +1172,7 @@ async fn join_room_by_id_helper_remote( } info!("Compressing state from send_join"); - let compressed: HashSet<_> = services + let compressed: CompressedState = services .rooms .state_compressor .compress_state_events(state.iter().map(|(ssk, eid)| (ssk, 
eid.borrow()))) @@ -2340,7 +2343,7 @@ async fn knock_room_helper_remote( } info!("Compressing state from send_knock"); - let compressed: HashSet<_> = services + let compressed: CompressedState = services .rooms .state_compressor .compress_state_events(state_map.iter().map(|(ssk, eid)| (ssk, eid.borrow()))) diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index c3de5f2f..4d99b088 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -15,7 +15,7 @@ use ruma::{ OwnedEventId, RoomId, RoomVersionId, }; -use crate::rooms::state_compressor::CompressedStateEvent; +use crate::rooms::state_compressor::CompressedState; #[implement(super::Service)] #[tracing::instrument(name = "resolve", level = "debug", skip_all)] @@ -24,7 +24,7 @@ pub async fn resolve_state( room_id: &RoomId, room_version_id: &RoomVersionId, incoming_state: HashMap, -) -> Result>> { +) -> Result> { trace!("Loading current room state ids"); let current_sstatehash = self .services @@ -91,7 +91,7 @@ pub async fn resolve_state( .await; trace!("Compressing state..."); - let new_room_state: HashSet<_> = self + let new_room_state: CompressedState = self .services .state_compressor .compress_state_events( diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index 03697558..132daca7 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -1,10 +1,4 @@ -use std::{ - borrow::Borrow, - collections::{BTreeMap, HashSet}, - iter::once, - sync::Arc, - time::Instant, -}; +use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::Instant}; use conduwuit::{ debug, debug_info, err, implement, trace, @@ -19,7 +13,10 @@ use ruma::{ }; use super::{get_room_version_id, to_room_version}; -use crate::rooms::{state_compressor::HashSetCompressStateEvent, timeline::RawPduId}; +use crate::rooms::{ + state_compressor::{CompressedState, HashSetCompressStateEvent}, + timeline::RawPduId, +}; #[implement(super::Service)] pub(super) async fn upgrade_outlier_to_timeline_pdu( @@ -173,7 +170,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( incoming_pdu.prev_events.len() ); - let state_ids_compressed: Arc> = self + let state_ids_compressed: Arc = self .services .state_compressor .compress_state_events( diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 1b0d0d58..de90a89c 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,9 +1,4 @@ -use std::{ - collections::{HashMap, HashSet}, - fmt::Write, - iter::once, - sync::Arc, -}; +use std::{collections::HashMap, fmt::Write, iter::once, sync::Arc}; use conduwuit::{ err, @@ -33,7 +28,7 @@ use crate::{ globals, rooms, rooms::{ short::{ShortEventId, ShortStateHash}, - state_compressor::{parse_compressed_state_event, CompressedStateEvent}, + state_compressor::{parse_compressed_state_event, CompressedState}, }, Dep, }; @@ -102,10 +97,9 @@ impl Service { &self, room_id: &RoomId, shortstatehash: u64, - statediffnew: Arc>, - _statediffremoved: Arc>, - state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state - * mutex */ + statediffnew: Arc, + _statediffremoved: Arc, + state_lock: &RoomMutexGuard, ) -> Result { let event_ids = statediffnew .iter() @@ -176,7 +170,7 @@ impl Service { &self, event_id: &EventId, room_id: &RoomId, - 
state_ids_compressed: Arc>, + state_ids_compressed: Arc, ) -> Result { const KEY_LEN: usize = size_of::(); const VAL_LEN: usize = size_of::(); @@ -209,12 +203,12 @@ impl Service { let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed + let statediffnew: CompressedState = state_ids_compressed .difference(&parent_stateinfo.full_state) .copied() .collect(); - let statediffremoved: HashSet<_> = parent_stateinfo + let statediffremoved: CompressedState = parent_stateinfo .full_state .difference(&state_ids_compressed) .copied() @@ -222,7 +216,7 @@ impl Service { (Arc::new(statediffnew), Arc::new(statediffremoved)) } else { - (state_ids_compressed, Arc::new(HashSet::new())) + (state_ids_compressed, Arc::new(CompressedState::new())) }; self.services.state_compressor.save_state_from_diff( shortstatehash, @@ -300,10 +294,10 @@ impl Service { // TODO: statehash with deterministic inputs let shortstatehash = self.services.globals.next_count()?; - let mut statediffnew = HashSet::new(); + let mut statediffnew = CompressedState::new(); statediffnew.insert(new); - let mut statediffremoved = HashSet::new(); + let mut statediffremoved = CompressedState::new(); if let Some(replaces) = replaces { statediffremoved.insert(*replaces); } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 98aac138..8b56c8b6 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -11,6 +11,7 @@ use conduwuit::{ utils, utils::{ math::{usize_from_f64, Expected}, + result::FlatOk, stream::{BroadbandExt, IterStream, ReadyExt, TryExpect}, }, Err, Error, PduEvent, Result, @@ -47,7 +48,7 @@ use crate::{ rooms::{ short::{ShortEventId, ShortStateHash, ShortStateKey}, state::RoomMutexGuard, - state_compressor::parse_compressed_state_event, + state_compressor::{compress_state_event, parse_compressed_state_event}, }, Dep, }; @@ -220,36 +221,88 @@ impl Service { Id: for<'de> Deserialize<'de> + Sized + ToOwned, ::Owned: Borrow, { - let shortstatekey = self - .services - .short - .get_shortstatekey(event_type, state_key) + let shorteventid = self + .state_get_shortid(shortstatehash, event_type, state_key) .await?; - let full_state = self - .services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .await - .map_err(|e| err!(Database(error!(?event_type, ?state_key, "Missing state: {e:?}"))))? - .pop() - .expect("there is always one layer") - .full_state; - - let compressed = full_state - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .ok_or(err!(Database("No shortstatekey in compressed state")))?; - - let (_, shorteventid) = parse_compressed_state_event(*compressed); - self.services .short .get_eventid_from_short(shorteventid) .await } - #[inline] + /// Returns a single EventId from `room_id` with key (`event_type`, + /// `state_key`). 
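// state_get_shortid and state_contains_shortstatekey below rely on
// CompressedState now being an ordered BTreeSet of fixed-width
// (shortstatekey, shorteventid) byte pairs with the big-endian state key as the
// prefix: bounding a range with compress_state_event(shortstatekey, 0) and
// compress_state_event(shortstatekey, u64::MAX) locates an entry for that state
// key in logarithmic time instead of scanning the whole compressed state.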
+ #[tracing::instrument(skip(self), level = "debug")] + pub async fn state_get_shortid( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> Result { + let shortstatekey = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await?; + + let start = compress_state_event(shortstatekey, 0); + let end = compress_state_event(shortstatekey, u64::MAX); + self.services + .state_compressor + .load_shortstatehash_info(shortstatehash) + .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) + .map_ok(|full_state| { + full_state + .range(start..end) + .next() + .copied() + .map(parse_compressed_state_event) + .map(at!(1)) + .ok_or(err!(Request(NotFound("Not found in room state")))) + }) + .await? + } + + #[tracing::instrument(skip(self), level = "debug")] + pub async fn state_contains( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> bool { + let Ok(shortstatekey) = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await + else { + return false; + }; + + self.state_contains_shortstatekey(shortstatehash, shortstatekey) + .await + } + + #[tracing::instrument(skip(self), level = "debug")] + pub async fn state_contains_shortstatekey( + &self, + shortstatehash: ShortStateHash, + shortstatekey: ShortStateKey, + ) -> bool { + let start = compress_state_event(shortstatekey, 0); + let end = compress_state_event(shortstatekey, u64::MAX); + + self.services + .state_compressor + .load_shortstatehash_info(shortstatehash) + .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) + .map_ok(|full_state| full_state.range(start..end).next().copied()) + .await + .flat_ok() + .is_some() + } + pub fn state_full_shortids( &self, shortstatehash: ShortStateHash, diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 532df360..3d68dff6 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -1,5 +1,5 @@ use std::{ - collections::{HashMap, HashSet}, + collections::{BTreeSet, HashMap}, fmt::{Debug, Write}, mem::size_of, sync::{Arc, Mutex}, @@ -63,8 +63,8 @@ type StateInfoLruCache = LruCache; type ShortStateInfoVec = Vec; type ParentStatesVec = Vec; -pub(crate) type CompressedState = HashSet; -pub(crate) type CompressedStateEvent = [u8; 2 * size_of::()]; +pub type CompressedState = BTreeSet; +pub type CompressedStateEvent = [u8; 2 * size_of::()]; impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { @@ -249,8 +249,8 @@ impl Service { pub fn save_state_from_diff( &self, shortstatehash: ShortStateHash, - statediffnew: Arc>, - statediffremoved: Arc>, + statediffnew: Arc, + statediffremoved: Arc, diff_to_sibling: usize, mut parent_states: ParentStatesVec, ) -> Result { @@ -363,7 +363,7 @@ impl Service { pub async fn save_state( &self, room_id: &RoomId, - new_state_ids_compressed: Arc>, + new_state_ids_compressed: Arc, ) -> Result { let previous_shortstatehash = self .services @@ -396,12 +396,12 @@ impl Service { let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = new_state_ids_compressed + let statediffnew: CompressedState = new_state_ids_compressed .difference(&parent_stateinfo.full_state) .copied() .collect(); - let statediffremoved: HashSet<_> = parent_stateinfo + let statediffremoved: CompressedState = parent_stateinfo .full_state 
.difference(&new_state_ids_compressed) .copied() @@ -409,7 +409,7 @@ impl Service { (Arc::new(statediffnew), Arc::new(statediffremoved)) } else { - (new_state_ids_compressed, Arc::new(HashSet::new())) + (new_state_ids_compressed, Arc::new(CompressedState::new())) }; if !already_existed { @@ -448,11 +448,11 @@ impl Service { .take_if(|parent| *parent != 0); debug_assert!(value.len() % STRIDE == 0, "value not aligned to stride"); - let num_values = value.len() / STRIDE; + let _num_values = value.len() / STRIDE; let mut add_mode = true; - let mut added = HashSet::with_capacity(num_values); - let mut removed = HashSet::with_capacity(num_values); + let mut added = CompressedState::new(); + let mut removed = CompressedState::new(); let mut i = STRIDE; while let Some(v) = value.get(i..expected!(i + 2 * STRIDE)) { @@ -469,8 +469,6 @@ impl Service { i = expected!(i + 2 * STRIDE); } - added.shrink_to_fit(); - removed.shrink_to_fit(); Ok(StateDiff { parent, added: Arc::new(added), @@ -507,7 +505,7 @@ impl Service { #[inline] #[must_use] -fn compress_state_event( +pub(crate) fn compress_state_event( shortstatekey: ShortStateKey, shorteventid: ShortEventId, ) -> CompressedStateEvent { @@ -523,7 +521,7 @@ fn compress_state_event( #[inline] #[must_use] -pub fn parse_compressed_state_event( +pub(crate) fn parse_compressed_state_event( compressed_event: CompressedStateEvent, ) -> (ShortStateKey, ShortEventId) { use utils::u64_from_u8; diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 8b3b67a7..a913034d 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -49,7 +49,7 @@ use crate::{ account_data, admin, appservice, appservice::NamespaceRegex, globals, pusher, rooms, - rooms::{short::ShortRoomId, state_compressor::CompressedStateEvent}, + rooms::{short::ShortRoomId, state_compressor::CompressedState}, sending, server_keys, users, Dep, }; @@ -950,7 +950,7 @@ impl Service { pdu: &'a PduEvent, pdu_json: CanonicalJsonObject, new_room_leafs: Leafs, - state_ids_compressed: Arc>, + state_ids_compressed: Arc, soft_fail: bool, state_lock: &'a RoomMutexGuard, ) -> Result> From 7ce782ddf4cb6989caff7a3781cfc667183b9b63 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 1 Feb 2025 01:17:28 +0000 Subject: [PATCH 136/328] fix jemalloc cfgs lacking msvc conditions Signed-off-by: Jason Volk --- src/core/config/check.rs | 2 +- src/database/pool.rs | 9 ++++++--- src/main/runtime.rs | 6 +++--- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/src/core/config/check.rs b/src/core/config/check.rs index 988d4143..5532c5a2 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -38,7 +38,7 @@ pub fn check(config: &Config) -> Result { )); } - if cfg!(all(feature = "hardened_malloc", feature = "jemalloc")) { + if cfg!(all(feature = "hardened_malloc", feature = "jemalloc", not(target_env = "msvc"))) { debug_warn!( "hardened_malloc and jemalloc compile-time features are both enabled, this causes \ jemalloc to be used." 
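The check.rs hunk above, like the pool.rs and runtime.rs hunks that follow, pairs the jemalloc feature check with not(target_env = "msvc") so that jemalloc-specific code paths are never selected when compiling for MSVC targets. A minimal standalone sketch of that cfg pattern (illustrative only, not part of the patch; it assumes nothing beyond a crate that declares a `jemalloc` cargo feature):

    #[cfg(all(feature = "jemalloc", not(target_env = "msvc")))]
    fn allocator_name() -> &'static str { "jemalloc" }

    // Complement of the predicate above, mirroring the fallback stubs in runtime.rs.
    #[cfg(any(not(feature = "jemalloc"), target_env = "msvc"))]
    fn allocator_name() -> &'static str { "system" }

    fn main() {
        // Exactly one of the two definitions is compiled for any feature/target combination.
        println!("allocator: {}", allocator_name());
    }
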
diff --git a/src/database/pool.rs b/src/database/pool.rs index 86516c31..c753855a 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -13,7 +13,7 @@ use std::{ use async_channel::{QueueStrategy, Receiver, RecvError, Sender}; use conduwuit::{ debug, debug_warn, err, error, implement, - result::{DebugInspect, LogDebugErr}, + result::DebugInspect, trace, utils::sys::compute::{get_affinity, nth_core_available, set_affinity}, Error, Result, Server, @@ -290,9 +290,12 @@ fn worker_init(&self, id: usize) { // affinity is empty (no-op) if there's only one queue set_affinity(affinity.clone()); - #[cfg(feature = "jemalloc")] + #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] if affinity.clone().count() == 1 && conduwuit::alloc::je::is_affine_arena() { - use conduwuit::alloc::je::this_thread::{arena_id, set_arena}; + use conduwuit::{ + alloc::je::this_thread::{arena_id, set_arena}, + result::LogDebugErr, + }; let id = affinity.clone().next().expect("at least one id"); diff --git a/src/main/runtime.rs b/src/main/runtime.rs index 9f4f60f8..02b9931f 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -122,7 +122,7 @@ fn set_worker_affinity() { set_worker_mallctl(id); } -#[cfg(feature = "jemalloc")] +#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] fn set_worker_mallctl(id: usize) { use conduwuit::alloc::je::{ is_affine_arena, @@ -143,7 +143,7 @@ fn set_worker_mallctl(id: usize) { } } -#[cfg(not(feature = "jemalloc"))] +#[cfg(any(not(feature = "jemalloc"), target_env = "msvc"))] fn set_worker_mallctl(_: usize) {} #[tracing::instrument( @@ -189,7 +189,7 @@ fn thread_park() { } fn gc_on_park() { - #[cfg(feature = "jemalloc")] + #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] conduwuit::alloc::je::this_thread::decay() .log_debug_err() .ok(); From b4d22bd05e3cf81476669cc2e37eef60eeade07e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 1 Feb 2025 23:41:05 +0000 Subject: [PATCH 137/328] remove unnecessary cf arc refcnt workaround log errors and panics propagating through the request task join Signed-off-by: Jason Volk --- src/database/engine.rs | 6 ++--- src/database/engine/open.rs | 6 ++--- src/database/map.rs | 12 ++++----- src/database/map/open.rs | 5 +--- src/router/request.rs | 52 ++++++++++++++++++++++++++++--------- 5 files changed, 53 insertions(+), 28 deletions(-) diff --git a/src/database/engine.rs b/src/database/engine.rs index be3d62cf..22e2b9c8 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -30,13 +30,13 @@ use crate::{ }; pub struct Engine { + pub(crate) db: Db, + pub(crate) pool: Arc, + pub(crate) ctx: Arc, pub(super) read_only: bool, pub(super) secondary: bool, pub(crate) checksums: bool, corks: AtomicU32, - pub(crate) db: Db, - pub(crate) pool: Arc, - pub(crate) ctx: Arc, } pub(crate) type Db = DBWithThreadMode; diff --git a/src/database/engine/open.rs b/src/database/engine/open.rs index ad724765..59dabce1 100644 --- a/src/database/engine/open.rs +++ b/src/database/engine/open.rs @@ -56,13 +56,13 @@ pub(crate) async fn open(ctx: Arc, desc: &[Descriptor]) -> Result, - cf: Arc, watchers: Watchers, - write_options: WriteOptions, + cf: Arc, + db: Arc, read_options: ReadOptions, cache_read_options: ReadOptions, + write_options: WriteOptions, } impl Map { pub(crate) fn open(db: &Arc, name: &'static str) -> Result> { Ok(Arc::new(Self { name, - db: db.clone(), - cf: open::open(db, name), watchers: Watchers::default(), - write_options: write_options_default(db), + cf: open::open(db, name), + db: db.clone(), read_options: 
read_options_default(db), cache_read_options: cache_read_options_default(db), + write_options: write_options_default(db), })) } diff --git a/src/database/map/open.rs b/src/database/map/open.rs index 6ecec044..07f7a0c6 100644 --- a/src/database/map/open.rs +++ b/src/database/map/open.rs @@ -30,8 +30,5 @@ pub(super) fn open(db: &Arc, name: &str) -> Arc { // lifetime parameter. We should not hold this handle, even in its Arc, after // closing the database (dropping `Engine`). Since `Arc` is a sibling // member along with this handle in `Map`, that is prevented. - unsafe { - Arc::increment_strong_count(cf_ptr); - Arc::from_raw(cf_ptr) - } + unsafe { Arc::from_raw(cf_ptr) } } diff --git a/src/router/request.rs b/src/router/request.rs index f7b94417..19cd751b 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -1,4 +1,7 @@ -use std::sync::{atomic::Ordering, Arc}; +use std::{ + fmt::Debug, + sync::{atomic::Ordering, Arc}, +}; use axum::{ extract::State, @@ -12,16 +15,16 @@ use http::{Method, StatusCode, Uri}; level = "debug", skip_all, fields( - handled = %services - .server - .metrics - .requests_handle_finished - .fetch_add(1, Ordering::Relaxed), active = %services .server .metrics .requests_handle_active .fetch_add(1, Ordering::Relaxed), + handled = %services + .server + .metrics + .requests_handle_finished + .load(Ordering::Relaxed), ) )] pub(crate) async fn handle( @@ -31,6 +34,10 @@ pub(crate) async fn handle( ) -> Result { #[cfg(debug_assertions)] conduwuit::defer! {{ + _ = services.server + .metrics + .requests_handle_finished + .fetch_add(1, Ordering::Relaxed); _ = services.server .metrics .requests_handle_active @@ -47,21 +54,35 @@ pub(crate) async fn handle( return Err(StatusCode::SERVICE_UNAVAILABLE); } - let method = req.method().clone(); let uri = req.uri().clone(); - services + let method = req.method().clone(); + let services_ = services.clone(); + let task = services .server .runtime() - .spawn(next.run(req)) - .await - .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) - .and_then(|result| handle_result(&method, &uri, result)) + .spawn(async move { execute(services_, req, next).await }); + + task.await + .map_err(unhandled) + .and_then(move |result| handle_result(&method, &uri, result)) +} + +async fn execute( + // we made a safety contract that Services will not go out of scope + // during the request; this ensures a reference is accounted for at + // the base frame of the task regardless of its detachment. 
+ _services: Arc, + req: http::Request, + next: axum::middleware::Next, +) -> Response { + next.run(req).await } fn handle_result(method: &Method, uri: &Uri, result: Response) -> Result { let status = result.status(); let reason = status.canonical_reason().unwrap_or("Unknown Reason"); let code = status.as_u16(); + if status.is_server_error() { error!(method = ?method, uri = ?uri, "{code} {reason}"); } else if status.is_client_error() { @@ -78,3 +99,10 @@ fn handle_result(method: &Method, uri: &Uri, result: Response) -> Result(e: Error) -> StatusCode { + error!("unhandled error or panic during request: {e:?}"); + + StatusCode::INTERNAL_SERVER_ERROR +} From bd6d4bc58f45251313b33e65947a4131ea9114e7 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 2 Feb 2025 10:07:00 +0000 Subject: [PATCH 138/328] enforce timeout on request layers Signed-off-by: Jason Volk --- Cargo.toml | 3 ++- conduwuit-example.toml | 12 ++++++++++++ src/core/config/mod.rs | 24 ++++++++++++++++++++++++ src/router/layers.rs | 4 ++++ src/router/request.rs | 23 ++++++++++++++++++----- 5 files changed, 60 insertions(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index c4af4a7c..1cf787c6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -127,12 +127,13 @@ version = "0.6.2" default-features = false features = [ "add-extension", + "catch-panic", "cors", "sensitive-headers", "set-header", + "timeout", "trace", "util", - "catch-panic", ] [workspace.dependencies.rustls] diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 3fd95044..f4f42365 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -377,6 +377,18 @@ # #pusher_idle_timeout = 15 +# Maximum time to receive a request from a client (seconds). +# +#client_receive_timeout = 75 + +# Maximum time to process a request received from a client (seconds). +# +#client_request_timeout = 180 + +# Maximum time to transmit a response to a client (seconds) +# +#client_response_timeout = 120 + # Enables registration. If set to false, no users can register on this # server. # diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index ff038975..b8cfd91b 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -480,6 +480,24 @@ pub struct Config { #[serde(default = "default_pusher_idle_timeout")] pub pusher_idle_timeout: u64, + /// Maximum time to receive a request from a client (seconds). + /// + /// default: 75 + #[serde(default = "default_client_receive_timeout")] + pub client_receive_timeout: u64, + + /// Maximum time to process a request received from a client (seconds). + /// + /// default: 180 + #[serde(default = "default_client_request_timeout")] + pub client_request_timeout: u64, + + /// Maximum time to transmit a response to a client (seconds) + /// + /// default: 120 + #[serde(default = "default_client_response_timeout")] + pub client_response_timeout: u64, + /// Enables registration. If set to false, no users can register on this /// server. 
/// @@ -2170,3 +2188,9 @@ fn default_stream_width_default() -> usize { 32 } fn default_stream_width_scale() -> f32 { 1.0 } fn default_stream_amplification() -> usize { 1024 } + +fn default_client_receive_timeout() -> u64 { 75 } + +fn default_client_request_timeout() -> u64 { 180 } + +fn default_client_response_timeout() -> u64 { 120 } diff --git a/src/router/layers.rs b/src/router/layers.rs index c5227c22..e8a8b7e8 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -18,6 +18,7 @@ use tower_http::{ cors::{self, CorsLayer}, sensitive_headers::SetSensitiveHeadersLayer, set_header::SetResponseHeaderLayer, + timeout::{RequestBodyTimeoutLayer, ResponseBodyTimeoutLayer, TimeoutLayer}, trace::{DefaultOnFailure, DefaultOnRequest, DefaultOnResponse, TraceLayer}, }; use tracing::Level; @@ -59,6 +60,9 @@ pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { ) .layer(axum::middleware::from_fn_with_state(Arc::clone(services), request::handle)) .layer(SecureClientIpSource::ConnectInfo.into_extension()) + .layer(ResponseBodyTimeoutLayer::new(Duration::from_secs(server.config.client_response_timeout))) + .layer(RequestBodyTimeoutLayer::new(Duration::from_secs(server.config.client_receive_timeout))) + .layer(TimeoutLayer::new(Duration::from_secs(server.config.client_request_timeout))) .layer(SetResponseHeaderLayer::if_not_present( HeaderName::from_static("origin-agent-cluster"), // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin-Agent-Cluster HeaderValue::from_static("?1"), diff --git a/src/router/request.rs b/src/router/request.rs index 19cd751b..68ea742c 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -10,8 +10,10 @@ use axum::{ use conduwuit::{debug, debug_error, debug_warn, err, error, trace, Result}; use conduwuit_service::Services; use http::{Method, StatusCode, Uri}; +use tracing::Span; #[tracing::instrument( + name = "request", level = "debug", skip_all, fields( @@ -57,23 +59,34 @@ pub(crate) async fn handle( let uri = req.uri().clone(); let method = req.method().clone(); let services_ = services.clone(); - let task = services - .server - .runtime() - .spawn(async move { execute(services_, req, next).await }); + let parent = Span::current(); + let task = services.server.runtime().spawn(async move { + tokio::select! { + response = execute(&services_, req, next, parent) => response, + () = services_.server.until_shutdown() => + StatusCode::SERVICE_UNAVAILABLE.into_response(), + } + }); task.await .map_err(unhandled) .and_then(move |result| handle_result(&method, &uri, result)) } +#[tracing::instrument( + name = "handle", + level = "debug", + parent = parent, + skip_all, +)] async fn execute( // we made a safety contract that Services will not go out of scope // during the request; this ensures a reference is accounted for at // the base frame of the task regardless of its detachment. 
- _services: Arc, + _services: &Arc, req: http::Request, next: axum::middleware::Next, + parent: Span, ) -> Response { next.run(req).await } From ffe3b0faf2740faa53415a661466c19d4fe722ad Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 2 Feb 2025 10:43:02 +0000 Subject: [PATCH 139/328] make shutdown grace periods configurable Signed-off-by: Jason Volk --- conduwuit-example.toml | 8 ++++++++ src/core/config/mod.rs | 16 ++++++++++++++++ src/router/request.rs | 13 +++++++++++-- src/router/run.rs | 3 ++- src/service/sending/sender.rs | 5 ++--- 5 files changed, 39 insertions(+), 6 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index f4f42365..3e64522c 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -389,6 +389,14 @@ # #client_response_timeout = 120 +# Grace period for clean shutdown of client requests (seconds). +# +#client_shutdown_timeout = 10 + +# Grace period for clean shutdown of federation requests (seconds). +# +#sender_shutdown_timeout = 5 + # Enables registration. If set to false, no users can register on this # server. # diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index b8cfd91b..ff80d1cf 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -498,6 +498,18 @@ pub struct Config { #[serde(default = "default_client_response_timeout")] pub client_response_timeout: u64, + /// Grace period for clean shutdown of client requests (seconds). + /// + /// default: 10 + #[serde(default = "default_client_shutdown_timeout")] + pub client_shutdown_timeout: u64, + + /// Grace period for clean shutdown of federation requests (seconds). + /// + /// default: 5 + #[serde(default = "default_sender_shutdown_timeout")] + pub sender_shutdown_timeout: u64, + /// Enables registration. If set to false, no users can register on this /// server. /// @@ -2194,3 +2206,7 @@ fn default_client_receive_timeout() -> u64 { 75 } fn default_client_request_timeout() -> u64 { 180 } fn default_client_response_timeout() -> u64 { 120 } + +fn default_client_shutdown_timeout() -> u64 { 15 } + +fn default_sender_shutdown_timeout() -> u64 { 5 } diff --git a/src/router/request.rs b/src/router/request.rs index 68ea742c..e0373646 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -1,6 +1,7 @@ use std::{ fmt::Debug, sync::{atomic::Ordering, Arc}, + time::Duration, }; use axum::{ @@ -9,7 +10,9 @@ use axum::{ }; use conduwuit::{debug, debug_error, debug_warn, err, error, trace, Result}; use conduwuit_service::Services; +use futures::FutureExt; use http::{Method, StatusCode, Uri}; +use tokio::time::sleep; use tracing::Span; #[tracing::instrument( @@ -63,8 +66,14 @@ pub(crate) async fn handle( let task = services.server.runtime().spawn(async move { tokio::select! 
{ response = execute(&services_, req, next, parent) => response, - () = services_.server.until_shutdown() => - StatusCode::SERVICE_UNAVAILABLE.into_response(), + response = services_.server.until_shutdown() + .then(|()| { + let timeout = services_.server.config.client_shutdown_timeout; + let timeout = Duration::from_secs(timeout); + sleep(timeout) + }) + .map(|()| StatusCode::SERVICE_UNAVAILABLE) + .map(IntoResponse::into_response) => response, } }); diff --git a/src/router/run.rs b/src/router/run.rs index 605168b8..26701735 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -122,7 +122,8 @@ async fn handle_shutdown(server: Arc, tx: Sender<()>, handle: axum_serve error!("failed sending shutdown transaction to channel: {e}"); } - let timeout = Duration::from_secs(36); + let timeout = server.config.client_shutdown_timeout; + let timeout = Duration::from_secs(timeout); debug!( ?timeout, handle_active = ?server.metrics.requests_handle_active.load(Ordering::Relaxed), diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index f19b69da..3e86de2d 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -67,8 +67,6 @@ type SendingFuture<'a> = BoxFuture<'a, SendingResult>; type SendingFutures<'a> = FuturesUnordered>; type CurTransactionStatus = HashMap; -const CLEANUP_TIMEOUT_MS: u64 = 3500; - const SELECT_PRESENCE_LIMIT: usize = 256; const SELECT_RECEIPT_LIMIT: usize = 256; const SELECT_EDU_LIMIT: usize = EDU_LIMIT - 2; @@ -216,8 +214,9 @@ impl Service { time::{sleep_until, Instant}, }; + let timeout = self.server.config.sender_shutdown_timeout; + let timeout = Duration::from_secs(timeout); let now = Instant::now(); - let timeout = Duration::from_millis(CLEANUP_TIMEOUT_MS); let deadline = now.checked_add(timeout).unwrap_or(now); loop { trace!("Waiting for {} requests to complete...", futures.len()); From a774afe8370bd6eed3deed6e663229e8457d73c7 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 2 Feb 2025 08:59:14 +0000 Subject: [PATCH 140/328] modernize remove_to_device_events Signed-off-by: Jason Volk --- src/service/users/mod.rs | 43 ++++++++++++++++------------------------ 1 file changed, 17 insertions(+), 26 deletions(-) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index b2d3a94a..e5caed47 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,12 +1,12 @@ -use std::{collections::BTreeMap, mem, mem::size_of, sync::Arc}; +use std::{collections::BTreeMap, mem, sync::Arc}; use conduwuit::{ debug_warn, err, trace, utils::{self, stream::TryIgnore, string::Unquoted, ReadyExt}, Err, Error, Result, Server, }; -use database::{Database, Deserialized, Ignore, Interfix, Json, Map}; -use futures::{FutureExt, Stream, StreamExt, TryFutureExt}; +use database::{Deserialized, Ignore, Interfix, Json, Map}; +use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ api::client::{device::Device, error::ErrorKind, filter::FilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, @@ -28,7 +28,6 @@ pub struct Service { struct Services { server: Arc, - db: Arc, account_data: Dep, admin: Dep, globals: Dep, @@ -64,7 +63,6 @@ impl crate::Service for Service { Ok(Arc::new(Self { services: Services { server: args.server.clone(), - db: args.db.clone(), account_data: args.depend::("account_data"), admin: args.depend::("admin"), globals: args.depend::("globals"), @@ -801,35 +799,28 @@ impl Service { .map(|(_, val): (Ignore, Raw)| val) } - pub async fn remove_to_device_events( + pub async fn remove_to_device_events( 
&self, user_id: &UserId, device_id: &DeviceId, - until: u64, - ) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xFF); + until: Until, + ) where + Until: Into> + Send, + { + type Key<'a> = (&'a UserId, &'a DeviceId, u64); - let mut last = prefix.clone(); - last.extend_from_slice(&until.to_be_bytes()); - - let _cork = self.services.db.cork_and_flush(); + let until = until.into().unwrap_or(u64::MAX); + let from = (user_id, device_id, until); self.db .todeviceid_events - .rev_raw_keys_from(&last) // this includes last + .rev_keys_from(&from) .ignore_err() - .ready_take_while(move |key| key.starts_with(&prefix)) - .map(|key| { - let len = key.len(); - let start = len.saturating_sub(size_of::()); - let count = utils::u64_from_u8(&key[start..len]); - (key, count) + .ready_take_while(move |(user_id_, device_id_, _): &Key<'_>| { + user_id == *user_id_ && device_id == *device_id_ + }) + .ready_for_each(|key: Key<'_>| { + self.db.todeviceid_events.del(key); }) - .ready_take_while(move |(_, count)| *count <= until) - .ready_for_each(|(key, _)| self.db.todeviceid_events.remove(&key)) - .boxed() .await; } From 5e59ce37c4799c24723997326e1ccc26bb3345b0 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 13:51:39 +0000 Subject: [PATCH 141/328] snapshot sync results at next_batch upper-bound Signed-off-by: Jason Volk --- src/admin/query/account_data.rs | 2 +- src/admin/query/users.rs | 2 +- src/api/client/sync/v3.rs | 10 +++++----- src/api/client/sync/v4.rs | 13 +++++++++---- src/api/client/sync/v5.rs | 8 ++++---- src/service/account_data/mod.rs | 12 +++++++----- src/service/users/mod.rs | 18 ++++++++++++++---- 7 files changed, 41 insertions(+), 24 deletions(-) diff --git a/src/admin/query/account_data.rs b/src/admin/query/account_data.rs index b75d8234..bb8ddeff 100644 --- a/src/admin/query/account_data.rs +++ b/src/admin/query/account_data.rs @@ -41,7 +41,7 @@ async fn changes_since( let results: Vec<_> = self .services .account_data - .changes_since(room_id.as_deref(), &user_id, since) + .changes_since(room_id.as_deref(), &user_id, since, None) .collect() .await; let query_time = timer.elapsed(); diff --git a/src/admin/query/users.rs b/src/admin/query/users.rs index 3715ac25..c517d9dd 100644 --- a/src/admin/query/users.rs +++ b/src/admin/query/users.rs @@ -413,7 +413,7 @@ async fn get_to_device_events( let result = self .services .users - .get_to_device_events(&user_id, &device_id) + .get_to_device_events(&user_id, &device_id, None, None) .collect::>() .await; let query_time = timer.elapsed(); diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 49246514..b548aa23 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -290,20 +290,20 @@ pub(crate) async fn build_sync_events( let account_data = services .account_data - .changes_since(None, sender_user, since) + .changes_since(None, sender_user, since, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) .collect(); // Look for device list updates of this account let keys_changed = services .users - .keys_changed(sender_user, since, None) + .keys_changed(sender_user, since, Some(next_batch)) .map(ToOwned::to_owned) .collect::>(); let to_device_events = services .users - .get_to_device_events(sender_user, sender_device) + .get_to_device_events(sender_user, sender_device, Some(since), Some(next_batch)) .collect::>(); let device_one_time_keys_count = services @@ -700,14 +700,14 @@ 
async fn load_joined_room( let account_data_events = services .account_data - .changes_since(Some(room_id), sender_user, since) + .changes_since(Some(room_id), sender_user, since, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect(); // Look for device list updates in this room let device_updates = services .users - .room_keys_changed(room_id, since, None) + .room_keys_changed(room_id, since, Some(next_batch)) .map(|(user_id, _)| user_id) .map(ToOwned::to_owned) .collect::>(); diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index b7967498..66793ba1 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -153,7 +153,7 @@ pub(crate) async fn sync_events_v4_route( if body.extensions.account_data.enabled.unwrap_or(false) { account_data.global = services .account_data - .changes_since(None, sender_user, globalsince) + .changes_since(None, sender_user, globalsince, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) .collect() .await; @@ -164,7 +164,7 @@ pub(crate) async fn sync_events_v4_route( room.clone(), services .account_data - .changes_since(Some(&room), sender_user, globalsince) + .changes_since(Some(&room), sender_user, globalsince, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect() .await, @@ -531,7 +531,7 @@ pub(crate) async fn sync_events_v4_route( room_id.to_owned(), services .account_data - .changes_since(Some(room_id), sender_user, *roomsince) + .changes_since(Some(room_id), sender_user, *roomsince, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect() .await, @@ -779,7 +779,12 @@ pub(crate) async fn sync_events_v4_route( Some(sync_events::v4::ToDevice { events: services .users - .get_to_device_events(sender_user, &sender_device) + .get_to_device_events( + sender_user, + &sender_device, + Some(globalsince), + Some(next_batch), + ) .collect() .await, next_batch: next_batch.to_string(), diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index 66647f0e..e7b5fe74 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -390,7 +390,7 @@ async fn process_rooms( room_id.to_owned(), services .account_data - .changes_since(Some(room_id), sender_user, *roomsince) + .changes_since(Some(room_id), sender_user, *roomsince, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect() .await, @@ -644,7 +644,7 @@ async fn collect_account_data( account_data.global = services .account_data - .changes_since(None, sender_user, globalsince) + .changes_since(None, sender_user, globalsince, None) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) .collect() .await; @@ -655,7 +655,7 @@ async fn collect_account_data( room.clone(), services .account_data - .changes_since(Some(room), sender_user, globalsince) + .changes_since(Some(room), sender_user, globalsince, None) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect() .await, @@ -876,7 +876,7 @@ async fn collect_to_device( next_batch: next_batch.to_string(), events: services .users - .get_to_device_events(sender_user, sender_device) + .get_to_device_events(sender_user, sender_device, None, Some(next_batch)) .collect() .await, }) diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index ddbc15a4..5a943f88 100644 --- a/src/service/account_data/mod.rs +++ 
b/src/service/account_data/mod.rs @@ -5,7 +5,7 @@ use conduwuit::{ utils::{result::LogErr, stream::TryIgnore, ReadyExt}, Err, Result, }; -use database::{Deserialized, Handle, Interfix, Json, Map}; +use database::{Deserialized, Handle, Ignore, Json, Map}; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ events::{ @@ -131,18 +131,20 @@ pub fn changes_since<'a>( room_id: Option<&'a RoomId>, user_id: &'a UserId, since: u64, + to: Option, ) -> impl Stream + Send + 'a { - let prefix = (room_id, user_id, Interfix); - let prefix = database::serialize_key(prefix).expect("failed to serialize prefix"); + type Key<'a> = (Option<&'a RoomId>, &'a UserId, u64, Ignore); // Skip the data that's exactly at since, because we sent that last time let first_possible = (room_id, user_id, since.saturating_add(1)); self.db .roomuserdataid_accountdata - .stream_from_raw(&first_possible) + .stream_from(&first_possible) .ignore_err() - .ready_take_while(move |(k, _)| k.starts_with(&prefix)) + .ready_take_while(move |((room_id_, user_id_, count, _), _): &(Key<'_>, _)| { + room_id == *room_id_ && user_id == *user_id_ && to.is_none_or(|to| *count <= to) + }) .map(move |(_, v)| { match room_id { | Some(_) => serde_json::from_slice::>(v) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index e5caed47..68b87541 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,7 +1,7 @@ use std::{collections::BTreeMap, mem, sync::Arc}; use conduwuit::{ - debug_warn, err, trace, + at, debug_warn, err, trace, utils::{self, stream::TryIgnore, string::Unquoted, ReadyExt}, Err, Error, Result, Server, }; @@ -790,13 +790,23 @@ impl Service { &'a self, user_id: &'a UserId, device_id: &'a DeviceId, + since: Option, + to: Option, ) -> impl Stream> + Send + 'a { - let prefix = (user_id, device_id, Interfix); + type Key<'a> = (&'a UserId, &'a DeviceId, u64); + + let from = (user_id, device_id, since.map_or(0, |since| since.saturating_add(1))); + self.db .todeviceid_events - .stream_prefix(&prefix) + .stream_from(&from) .ignore_err() - .map(|(_, val): (Ignore, Raw)| val) + .ready_take_while(move |((user_id_, device_id_, count), _): &(Key<'_>, _)| { + user_id == *user_id_ + && device_id == *device_id_ + && to.is_none_or(|to| *count <= to) + }) + .map(at!(1)) } pub async fn remove_to_device_events( From 32f990fc72c6bfbf4a869dac9f5b2b88ee334684 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 2 Feb 2025 23:19:35 +0000 Subject: [PATCH 142/328] fix the panic counter in the tower layer Signed-off-by: Jason Volk --- src/router/layers.rs | 18 +++++++------- src/router/request.rs | 56 ++++++++++++++++++++----------------------- 2 files changed, 35 insertions(+), 39 deletions(-) diff --git a/src/router/layers.rs b/src/router/layers.rs index e8a8b7e8..7ebec16e 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -49,6 +49,7 @@ pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { ))] let layers = layers.layer(compression_layer(server)); + let services_ = services.clone(); let layers = layers .layer(SetSensitiveHeadersLayer::new([header::AUTHORIZATION])) .layer( @@ -89,7 +90,7 @@ pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { )) .layer(cors_layer(server)) .layer(body_limit_layer(server)) - .layer(CatchPanicLayer::custom(catch_panic)); + .layer(CatchPanicLayer::custom(move |panic| catch_panic(panic, services_.clone()))); let (router, guard) = router::build(services); Ok((router.layer(layers), guard)) @@ -167,15 +168,14 @@ fn body_limit_layer(server: &Server) -> 
DefaultBodyLimit { #[allow(clippy::needless_pass_by_value)] fn catch_panic( err: Box, + services: Arc, ) -> http::Response> { - //TODO: XXX - /* - conduwuit_service::services() - .server - .metrics - .requests_panic - .fetch_add(1, std::sync::atomic::Ordering::Release); - */ + services + .server + .metrics + .requests_panic + .fetch_add(1, std::sync::atomic::Ordering::Release); + let details = if let Some(s) = err.downcast_ref::() { s.clone() } else if let Some(s) = err.downcast_ref::<&str>() { diff --git a/src/router/request.rs b/src/router/request.rs index e0373646..b6c22d45 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -15,40 +15,12 @@ use http::{Method, StatusCode, Uri}; use tokio::time::sleep; use tracing::Span; -#[tracing::instrument( - name = "request", - level = "debug", - skip_all, - fields( - active = %services - .server - .metrics - .requests_handle_active - .fetch_add(1, Ordering::Relaxed), - handled = %services - .server - .metrics - .requests_handle_finished - .load(Ordering::Relaxed), - ) -)] +#[tracing::instrument(name = "request", level = "debug", skip_all)] pub(crate) async fn handle( State(services): State>, req: http::Request, next: axum::middleware::Next, ) -> Result { - #[cfg(debug_assertions)] - conduwuit::defer! {{ - _ = services.server - .metrics - .requests_handle_finished - .fetch_add(1, Ordering::Relaxed); - _ = services.server - .metrics - .requests_handle_active - .fetch_sub(1, Ordering::Relaxed); - }}; - if !services.server.running() { debug_warn!( method = %req.method(), @@ -87,16 +59,40 @@ pub(crate) async fn handle( level = "debug", parent = parent, skip_all, + fields( + active = %services + .server + .metrics + .requests_handle_active + .fetch_add(1, Ordering::Relaxed), + handled = %services + .server + .metrics + .requests_handle_finished + .load(Ordering::Relaxed), + ) )] async fn execute( // we made a safety contract that Services will not go out of scope // during the request; this ensures a reference is accounted for at // the base frame of the task regardless of its detachment. - _services: &Arc, + services: &Arc, req: http::Request, next: axum::middleware::Next, parent: Span, ) -> Response { + #[cfg(debug_assertions)] + conduwuit::defer! 
{{ + _ = services.server + .metrics + .requests_handle_finished + .fetch_add(1, Ordering::Relaxed); + _ = services.server + .metrics + .requests_handle_active + .fetch_sub(1, Ordering::Relaxed); + }}; + next.run(req).await } From da4b94d80dc9939ad385860af764ed1a1837b84e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 2 Feb 2025 22:13:27 +0000 Subject: [PATCH 143/328] trap panics when running in gdb Signed-off-by: Jason Volk --- src/core/debug.rs | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/src/core/debug.rs b/src/core/debug.rs index ca0f2f2e..8a5eccfd 100644 --- a/src/core/debug.rs +++ b/src/core/debug.rs @@ -1,6 +1,6 @@ #![allow(clippy::disallowed_macros)] -use std::{any::Any, panic}; +use std::{any::Any, env, panic, sync::LazyLock}; // Export debug proc_macros pub use conduwuit_macros::recursion_depth; @@ -58,16 +58,26 @@ pub const INFO_SPAN_LEVEL: Level = if cfg!(debug_assertions) { Level::DEBUG }; -pub fn set_panic_trap() { +pub static DEBUGGER: LazyLock = + LazyLock::new(|| env::var("_").unwrap_or_default().ends_with("gdb")); + +#[cfg_attr(debug_assertions, crate::ctor)] +#[cfg_attr(not(debug_assertions), allow(dead_code))] +fn set_panic_trap() { + if !*DEBUGGER { + return; + } + let next = panic::take_hook(); panic::set_hook(Box::new(move |info| { panic_handler(info, &next); })); } -#[inline(always)] +#[cold] +#[inline(never)] #[allow(deprecated_in_future)] -fn panic_handler(info: &panic::PanicHookInfo<'_>, next: &dyn Fn(&panic::PanicHookInfo<'_>)) { +pub fn panic_handler(info: &panic::PanicHookInfo<'_>, next: &dyn Fn(&panic::PanicHookInfo<'_>)) { trap(); next(info); } From 106bcd30b75b6846be197fc5431063b0b82c4336 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 2 Feb 2025 07:40:08 +0000 Subject: [PATCH 144/328] optimize incremental sync state diff Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 366 +++++++++-------- src/service/rooms/state_accessor/mod.rs | 523 +++++++++++++----------- 2 files changed, 474 insertions(+), 415 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index b548aa23..a97e4329 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -7,13 +7,13 @@ use std::{ use axum::extract::State; use conduwuit::{ at, err, error, extract_variant, is_equal_to, pair_of, - pdu::EventHash, + pdu::{Event, EventHash}, + ref_at, result::FlatOk, utils::{ self, - future::OptionExt, math::ruma_from_u64, - stream::{BroadbandExt, Tools, WidebandExt}, + stream::{BroadbandExt, Tools, TryExpect, WidebandExt}, BoolExt, IterStream, ReadyExt, TryFutureExtExt, }, PduCount, PduEvent, Result, @@ -53,19 +53,16 @@ use ruma::{ serde::Raw, uint, DeviceId, EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, }; +use service::rooms::short::{ShortEventId, ShortStateKey}; use super::{load_timeline, share_encrypted_room}; -use crate::{ - client::{ignored_filter, lazy_loading_witness}, - Ruma, RumaResponse, -}; +use crate::{client::ignored_filter, Ruma, RumaResponse}; #[derive(Default)] struct StateChanges { heroes: Option>, joined_member_count: Option, invited_member_count: Option, - joined_since_last_sync: bool, state_events: Vec, device_list_updates: HashSet, left_encrypted_users: HashSet, @@ -625,6 +622,40 @@ async fn load_joined_room( .await?; let (timeline_pdus, limited) = timeline; + let initial = since_shortstatehash.is_none(); + let lazy_loading_enabled = filter.room.state.lazy_load_options.is_enabled() + || filter.room.timeline.lazy_load_options.is_enabled(); + + let 
lazy_loading_context = &lazy_loading::Context { + user_id: sender_user, + device_id: sender_device, + room_id, + token: Some(since), + options: Some(&filter.room.state.lazy_load_options), + }; + + // Reset lazy loading because this is an initial sync + let lazy_load_reset: OptionFuture<_> = initial + .then(|| services.rooms.lazy_loading.reset(lazy_loading_context)) + .into(); + + lazy_load_reset.await; + let witness: OptionFuture<_> = lazy_loading_enabled + .then(|| { + let witness: Witness = timeline_pdus + .iter() + .map(ref_at!(1)) + .map(Event::sender) + .map(Into::into) + .chain(receipt_events.keys().map(Into::into)) + .collect(); + + services + .rooms + .lazy_loading + .witness_retain(witness, lazy_loading_context) + }) + .into(); let last_notification_read: OptionFuture<_> = timeline_pdus .is_empty() @@ -646,41 +677,20 @@ async fn load_joined_room( }) .into(); + let (last_notification_read, since_sender_member, witness) = + join3(last_notification_read, since_sender_member, witness).await; + let joined_since_last_sync = since_sender_member - .await .flatten() .is_none_or(|content: RoomMemberEventContent| { content.membership != MembershipState::Join }); - let lazy_loading_enabled = filter.room.state.lazy_load_options.is_enabled() - || filter.room.timeline.lazy_load_options.is_enabled(); - - let lazy_reset = since_shortstatehash.is_none(); - let lazy_loading_context = &lazy_loading::Context { - user_id: sender_user, - device_id: sender_device, - room_id, - token: None, - options: Some(&filter.room.state.lazy_load_options), - }; - - // Reset lazy loading because this is an initial sync - let lazy_load_reset: OptionFuture<_> = lazy_reset - .then(|| services.rooms.lazy_loading.reset(lazy_loading_context)) - .into(); - - lazy_load_reset.await; - let witness: OptionFuture<_> = lazy_loading_enabled - .then(|| lazy_loading_witness(services, lazy_loading_context, timeline_pdus.iter())) - .into(); - let StateChanges { heroes, joined_member_count, invited_member_count, - joined_since_last_sync, state_events, mut device_list_updates, left_encrypted_users, @@ -693,7 +703,7 @@ async fn load_joined_room( since_shortstatehash, current_shortstatehash, joined_since_last_sync, - witness.await.as_ref(), + witness.as_ref(), ) .boxed() .await?; @@ -719,28 +729,7 @@ async fn load_joined_room( .map(|(_, pdu)| pdu.to_sync_room_event()) .collect(); - let typing_events = services - .rooms - .typing - .last_typing_update(room_id) - .and_then(|count| async move { - if count <= since { - return Ok(Vec::>::new()); - } - - let typings = services - .rooms - .typing - .typings_all(room_id, sender_user) - .await?; - - Ok(vec![serde_json::from_str(&serde_json::to_string(&typings)?)?]) - }) - .unwrap_or(Vec::new()); - - let send_notification_counts = last_notification_read - .is_none_or(|&count| count > since) - .await; + let send_notification_counts = last_notification_read.is_none_or(|count| count > since); let notification_count: OptionFuture<_> = send_notification_counts .then(|| { @@ -764,8 +753,27 @@ async fn load_joined_room( }) .into(); - let events = join3(room_events, account_data_events, typing_events); + let typing_events = services + .rooms + .typing + .last_typing_update(room_id) + .and_then(|count| async move { + if count <= since { + return Ok(Vec::>::new()); + } + + let typings = services + .rooms + .typing + .typings_all(room_id, sender_user) + .await?; + + Ok(vec![serde_json::from_str(&serde_json::to_string(&typings)?)?]) + }) + .unwrap_or(Vec::new()); + let unread_notifications = 
join(notification_count, highlight_count); + let events = join3(room_events, account_data_events, typing_events); let (unread_notifications, events, device_updates) = join3(unread_notifications, events, device_updates) .boxed() @@ -942,7 +950,6 @@ async fn calculate_state_initial( heroes, joined_member_count, invited_member_count, - joined_since_last_sync: true, state_events, ..Default::default() }) @@ -952,7 +959,7 @@ async fn calculate_state_initial( #[allow(clippy::too_many_arguments)] async fn calculate_state_incremental<'a>( services: &Services, - sender_user: &UserId, + sender_user: &'a UserId, room_id: &RoomId, full_state: bool, _filter: &FilterDefinition, @@ -965,102 +972,130 @@ async fn calculate_state_incremental<'a>( let state_changed = since_shortstatehash != current_shortstatehash; - let state_get_id = |user_id: &'a UserId| { - services - .rooms - .state_accessor - .state_get_id(current_shortstatehash, &StateEventType::RoomMember, user_id.as_str()) - .ok() - }; - - let lazy_state_ids: OptionFuture<_> = witness - .map(|witness| { - witness - .iter() - .stream() - .broad_filter_map(|user_id| state_get_id(user_id)) - .collect::>() - }) - .into(); - - let current_state_ids: OptionFuture<_> = state_changed - .then(|| { - services - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .collect::>() - }) - .into(); - - let since_state_ids: OptionFuture<_> = (state_changed && !full_state) - .then(|| { - services - .rooms - .state_accessor - .state_full_ids(since_shortstatehash) - .collect::>() - }) - .into(); - - let lazy_state_ids = lazy_state_ids - .map(Option::into_iter) - .map(|iter| iter.flat_map(Vec::into_iter)) - .map(IterStream::stream) - .flatten_stream(); - - let ref since_state_ids = since_state_ids.shared(); - let delta_state_events = current_state_ids - .map(Option::into_iter) - .map(|iter| iter.flat_map(Vec::into_iter)) - .map(IterStream::stream) - .flatten_stream() - .filter_map(|(shortstatekey, event_id): (u64, OwnedEventId)| async move { - since_state_ids - .clone() - .await - .is_none_or(|since_state| since_state.get(&shortstatekey) != Some(&event_id)) - .then_some(event_id) - }) - .chain(lazy_state_ids) - .broad_filter_map(|event_id: OwnedEventId| async move { - services - .rooms - .timeline - .get_pdu(&event_id) - .await - .map(move |pdu| (event_id, pdu)) - .ok() - }) - .collect::>(); - - let since_encryption = services - .rooms - .state_accessor - .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "") - .is_ok(); - let encrypted_room = services .rooms .state_accessor .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") - .is_ok(); + .is_ok() + .await; - let (delta_state_events, encrypted_room) = join(delta_state_events, encrypted_room).await; + let state_get_shorteventid = |user_id: &'a UserId| { + services + .rooms + .state_accessor + .state_get_shortid( + current_shortstatehash, + &StateEventType::RoomMember, + user_id.as_str(), + ) + .ok() + }; - let (mut device_list_updates, left_encrypted_users) = delta_state_events - .values() + let lazy_state_ids: OptionFuture<_> = witness + .filter(|_| !full_state && !encrypted_room) + .map(|witness| { + witness + .iter() + .stream() + .broad_filter_map(|user_id| state_get_shorteventid(user_id)) + .into_future() + }) + .into(); + + let state_diff: OptionFuture<_> = (!full_state && state_changed) + .then(|| { + services + .rooms + .state_accessor + .state_added((since_shortstatehash, current_shortstatehash)) + .boxed() + .into_future() + }) + .into(); + + let 
current_state_ids: OptionFuture<_> = full_state + .then(|| { + services + .rooms + .state_accessor + .state_full_shortids(current_shortstatehash) + .expect_ok() + .boxed() + .into_future() + }) + .into(); + + let lazy_state_ids = lazy_state_ids + .map(|opt| { + opt.map(|(curr, next)| { + let opt = curr; + let iter = Option::into_iter(opt); + IterStream::stream(iter).chain(next) + }) + }) + .map(Option::into_iter) + .map(IterStream::stream) + .flatten_stream() + .flatten(); + + let state_diff_ids = state_diff + .map(|opt| { + opt.map(|(curr, next)| { + let opt = curr; + let iter = Option::into_iter(opt); + IterStream::stream(iter).chain(next) + }) + }) + .map(Option::into_iter) + .map(IterStream::stream) + .flatten_stream() + .flatten(); + + let state_events = current_state_ids + .map(|opt| { + opt.map(|(curr, next)| { + let opt = curr; + let iter = Option::into_iter(opt); + IterStream::stream(iter).chain(next) + }) + }) + .map(Option::into_iter) + .map(IterStream::stream) + .flatten_stream() + .flatten() + .chain(state_diff_ids) + .broad_filter_map(|(shortstatekey, shorteventid)| async move { + if witness.is_none() || encrypted_room { + return Some(shorteventid); + } + + lazy_filter(services, sender_user, shortstatekey, shorteventid).await + }) + .chain(lazy_state_ids) + .broad_filter_map(|shorteventid| { + services + .rooms + .short + .get_eventid_from_short(shorteventid) + .ok() + }) + .broad_filter_map(|event_id: OwnedEventId| async move { + services.rooms.timeline.get_pdu(&event_id).await.ok() + }) + .collect::>() + .await; + + let (device_list_updates, left_encrypted_users) = state_events + .iter() .stream() .ready_filter(|_| encrypted_room) .ready_filter(|state_event| state_event.kind == RoomMember) .ready_filter_map(|state_event| { - let content = state_event.get_content().ok()?; - let user_id = state_event.state_key.as_ref()?.parse().ok()?; + let content: RoomMemberEventContent = state_event.get_content().ok()?; + let user_id: OwnedUserId = state_event.state_key.as_ref()?.parse().ok()?; + Some((content, user_id)) }) - .ready_filter(|(_, user_id): &(RoomMemberEventContent, OwnedUserId)| { - user_id != sender_user - }) .fold_default(|(mut dlu, mut leu): pair_of!(HashSet<_>), (content, user_id)| async move { use MembershipState::*; @@ -1068,8 +1103,9 @@ async fn calculate_state_incremental<'a>( |user_id| share_encrypted_room(services, sender_user, user_id, Some(room_id)); match content.membership { - | Join if !shares_encrypted_room(&user_id).await => dlu.insert(user_id), | Leave => leu.insert(user_id), + | Join if joined_since_last_sync || !shares_encrypted_room(&user_id).await => + dlu.insert(user_id), | _ => false, }; @@ -1077,29 +1113,7 @@ async fn calculate_state_incremental<'a>( }) .await; - // If the user is in a new encrypted room, give them all joined users - let new_encrypted_room = encrypted_room && !since_encryption.await; - if joined_since_last_sync && encrypted_room || new_encrypted_room { - services - .rooms - .state_cache - .room_members(room_id) - .ready_filter(|&user_id| sender_user != user_id) - .map(ToOwned::to_owned) - .broad_filter_map(|user_id| async move { - share_encrypted_room(services, sender_user, &user_id, Some(room_id)) - .await - .or_some(user_id) - }) - .ready_for_each(|user_id| { - device_list_updates.insert(user_id); - }) - .await; - } - - let send_member_count = delta_state_events - .values() - .any(|event| event.kind == RoomMember); + let send_member_count = state_events.iter().any(|event| event.kind == RoomMember); let (joined_member_count, 
invited_member_count, heroes) = if send_member_count { calculate_counts(services, room_id, sender_user).await? @@ -1111,13 +1125,29 @@ async fn calculate_state_incremental<'a>( heroes, joined_member_count, invited_member_count, - joined_since_last_sync, + state_events, device_list_updates, left_encrypted_users, - state_events: delta_state_events.into_values().collect(), }) } +async fn lazy_filter( + services: &Services, + sender_user: &UserId, + shortstatekey: ShortStateKey, + shorteventid: ShortEventId, +) -> Option { + let (event_type, state_key) = services + .rooms + .short + .get_statekey_from_short(shortstatekey) + .await + .ok()?; + + (event_type != StateEventType::RoomMember || state_key == sender_user.as_str()) + .then_some(shorteventid) +} + async fn calculate_counts( services: &Services, room_id: &RoomId, diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 8b56c8b6..bed8d210 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -6,7 +6,7 @@ use std::{ }; use conduwuit::{ - at, err, error, + at, err, error, pair_of, pdu::PduBuilder, utils, utils::{ @@ -17,7 +17,7 @@ use conduwuit::{ Err, Error, PduEvent, Result, }; use database::{Deserialized, Map}; -use futures::{FutureExt, Stream, StreamExt, TryFutureExt}; +use futures::{future::try_join, FutureExt, Stream, StreamExt, TryFutureExt}; use lru_cache::LruCache; use ruma::{ events::{ @@ -48,7 +48,7 @@ use crate::{ rooms::{ short::{ShortEventId, ShortStateHash, ShortStateKey}, state::RoomMutexGuard, - state_compressor::{compress_state_event, parse_compressed_state_event}, + state_compressor::{compress_state_event, parse_compressed_state_event, CompressedState}, }, Dep, }; @@ -143,6 +143,256 @@ impl crate::Service for Service { } impl Service { + /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). + pub async fn room_state_get_content( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, + ) -> Result + where + T: for<'de> Deserialize<'de>, + { + self.room_state_get(room_id, event_type, state_key) + .await + .and_then(|event| event.get_content()) + } + + /// Returns the full room state. + #[tracing::instrument(skip(self), level = "debug")] + pub fn room_state_full<'a>( + &'a self, + room_id: &'a RoomId, + ) -> impl Stream> + Send + 'a { + self.services + .state + .get_room_shortstatehash(room_id) + .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok)) + .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) + .try_flatten_stream() + } + + /// Returns the full room state pdus + #[tracing::instrument(skip(self), level = "debug")] + pub fn room_state_full_pdus<'a>( + &'a self, + room_id: &'a RoomId, + ) -> impl Stream> + Send + 'a { + self.services + .state + .get_room_shortstatehash(room_id) + .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok)) + .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) + .try_flatten_stream() + } + + /// Returns a single EventId from `room_id` with key (`event_type`, + /// `state_key`). 
+ #[tracing::instrument(skip(self), level = "debug")] + pub async fn room_state_get_id( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, + ) -> Result + where + Id: for<'de> Deserialize<'de> + Sized + ToOwned, + ::Owned: Borrow, + { + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_get_id(shortstatehash, event_type, state_key)) + .await + } + + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). + #[tracing::instrument(skip(self), level = "debug")] + pub async fn room_state_get( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, + ) -> Result { + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_get(shortstatehash, event_type, state_key)) + .await + } + + /// The user was a joined member at this state (potentially in the past) + #[inline] + async fn user_was_joined(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { + self.user_membership(shortstatehash, user_id).await == MembershipState::Join + } + + /// The user was an invited or joined room member at this state (potentially + /// in the past) + #[inline] + async fn user_was_invited(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { + let s = self.user_membership(shortstatehash, user_id).await; + s == MembershipState::Join || s == MembershipState::Invite + } + + /// Get membership for given user in state + async fn user_membership( + &self, + shortstatehash: ShortStateHash, + user_id: &UserId, + ) -> MembershipState { + self.state_get_content(shortstatehash, &StateEventType::RoomMember, user_id.as_str()) + .await + .map_or(MembershipState::Leave, |c: RoomMemberEventContent| c.membership) + } + + /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). + pub async fn state_get_content( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> Result + where + T: for<'de> Deserialize<'de>, + { + self.state_get(shortstatehash, event_type, state_key) + .await + .and_then(|event| event.get_content()) + } + + #[tracing::instrument(skip(self), level = "debug")] + pub async fn state_contains( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> bool { + let Ok(shortstatekey) = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await + else { + return false; + }; + + self.state_contains_shortstatekey(shortstatehash, shortstatekey) + .await + } + + #[tracing::instrument(skip(self), level = "debug")] + pub async fn state_contains_shortstatekey( + &self, + shortstatehash: ShortStateHash, + shortstatekey: ShortStateKey, + ) -> bool { + let start = compress_state_event(shortstatekey, 0); + let end = compress_state_event(shortstatekey, u64::MAX); + + self.load_full_state(shortstatehash) + .map_ok(|full_state| full_state.range(start..end).next().copied()) + .await + .flat_ok() + .is_some() + } + + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). + pub async fn state_get( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> Result { + self.state_get_id(shortstatehash, event_type, state_key) + .and_then(|event_id: OwnedEventId| async move { + self.services.timeline.get_pdu(&event_id).await + }) + .await + } + + /// Returns a single EventId from `room_id` with key (`event_type`, + /// `state_key`). 
+ #[tracing::instrument(skip(self), level = "debug")] + pub async fn state_get_id( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> Result + where + Id: for<'de> Deserialize<'de> + Sized + ToOwned, + ::Owned: Borrow, + { + let shorteventid = self + .state_get_shortid(shortstatehash, event_type, state_key) + .await?; + + self.services + .short + .get_eventid_from_short(shorteventid) + .await + } + + /// Returns a single EventId from `room_id` with key (`event_type`, + /// `state_key`). + #[tracing::instrument(skip(self), level = "debug")] + pub async fn state_get_shortid( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> Result { + let shortstatekey = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await?; + + let start = compress_state_event(shortstatekey, 0); + let end = compress_state_event(shortstatekey, u64::MAX); + self.load_full_state(shortstatehash) + .map_ok(|full_state| { + full_state + .range(start..end) + .next() + .copied() + .map(parse_compressed_state_event) + .map(at!(1)) + .ok_or(err!(Request(NotFound("Not found in room state")))) + }) + .await? + } + + /// Returns the state events removed between the interval (present in .0 but + /// not in .1) + #[inline] + pub fn state_removed( + &self, + shortstatehash: pair_of!(ShortStateHash), + ) -> impl Stream + Send + '_ { + self.state_added((shortstatehash.1, shortstatehash.0)) + } + + /// Returns the state events added between the interval (present in .1 but + /// not in .0) + #[tracing::instrument(skip(self), level = "debug")] + pub fn state_added<'a>( + &'a self, + shortstatehash: pair_of!(ShortStateHash), + ) -> impl Stream + Send + 'a { + let a = self.load_full_state(shortstatehash.0); + let b = self.load_full_state(shortstatehash.1); + try_join(a, b) + .map_ok(|(a, b)| b.difference(&a).copied().collect::>()) + .map_ok(IterStream::try_stream) + .try_flatten_stream() + .expect_ok() + .map(parse_compressed_state_event) + } + pub fn state_full( &self, shortstatehash: ShortStateHash, @@ -208,110 +458,11 @@ impl Service { .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) } - /// Returns a single EventId from `room_id` with key (`event_type`, - /// `state_key`). - #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_get_id( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - Id: for<'de> Deserialize<'de> + Sized + ToOwned, - ::Owned: Borrow, - { - let shorteventid = self - .state_get_shortid(shortstatehash, event_type, state_key) - .await?; - - self.services - .short - .get_eventid_from_short(shorteventid) - .await - } - - /// Returns a single EventId from `room_id` with key (`event_type`, - /// `state_key`). 
- #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_get_shortid( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - let shortstatekey = self - .services - .short - .get_shortstatekey(event_type, state_key) - .await?; - - let start = compress_state_event(shortstatekey, 0); - let end = compress_state_event(shortstatekey, u64::MAX); - self.services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) - .map_ok(|full_state| { - full_state - .range(start..end) - .next() - .copied() - .map(parse_compressed_state_event) - .map(at!(1)) - .ok_or(err!(Request(NotFound("Not found in room state")))) - }) - .await? - } - - #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_contains( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> bool { - let Ok(shortstatekey) = self - .services - .short - .get_shortstatekey(event_type, state_key) - .await - else { - return false; - }; - - self.state_contains_shortstatekey(shortstatehash, shortstatekey) - .await - } - - #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_contains_shortstatekey( - &self, - shortstatehash: ShortStateHash, - shortstatekey: ShortStateKey, - ) -> bool { - let start = compress_state_event(shortstatekey, 0); - let end = compress_state_event(shortstatekey, u64::MAX); - - self.services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) - .map_ok(|full_state| full_state.range(start..end).next().copied()) - .await - .flat_ok() - .is_some() - } - pub fn state_full_shortids( &self, shortstatehash: ShortStateHash, ) -> impl Stream> + Send + '_ { - self.services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .map_err(|e| err!(Database("Missing state IDs: {e}"))) - .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) + self.load_full_state(shortstatehash) .map_ok(|full_state| { full_state .deref() @@ -324,59 +475,32 @@ impl Service { .try_flatten_stream() } - /// Returns a single PDU from `room_id` with key (`event_type`, - /// `state_key`). - pub async fn state_get( + async fn load_full_state( &self, shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - self.state_get_id(shortstatehash, event_type, state_key) - .and_then(|event_id: OwnedEventId| async move { - self.services.timeline.get_pdu(&event_id).await + ) -> Result> { + self.services + .state_compressor + .load_shortstatehash_info(shortstatehash) + .map_err(|e| err!(Database("Missing state IDs: {e}"))) + .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) + .await + } + + /// Returns the state hash for this pdu. + pub async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { + const BUFSIZE: usize = size_of::(); + + self.services + .short + .get_shorteventid(event_id) + .and_then(|shorteventid| { + self.db + .shorteventid_shortstatehash + .aqry::(&shorteventid) }) .await - } - - /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
- pub async fn state_get_content( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - T: for<'de> Deserialize<'de>, - { - self.state_get(shortstatehash, event_type, state_key) - .await - .and_then(|event| event.get_content()) - } - - /// Get membership for given user in state - async fn user_membership( - &self, - shortstatehash: ShortStateHash, - user_id: &UserId, - ) -> MembershipState { - self.state_get_content(shortstatehash, &StateEventType::RoomMember, user_id.as_str()) - .await - .map_or(MembershipState::Leave, |c: RoomMemberEventContent| c.membership) - } - - /// The user was a joined member at this state (potentially in the past) - #[inline] - async fn user_was_joined(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { - self.user_membership(shortstatehash, user_id).await == MembershipState::Join - } - - /// The user was an invited or joined room member at this state (potentially - /// in the past) - #[inline] - async fn user_was_invited(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { - let s = self.user_membership(shortstatehash, user_id).await; - s == MembershipState::Join || s == MembershipState::Invite + .deserialized() } /// Whether a server is allowed to see an event through federation, based on @@ -521,101 +645,6 @@ impl Service { } } - /// Returns the state hash for this pdu. - pub async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { - const BUFSIZE: usize = size_of::(); - - self.services - .short - .get_shorteventid(event_id) - .and_then(|shorteventid| { - self.db - .shorteventid_shortstatehash - .aqry::(&shorteventid) - }) - .await - .deserialized() - } - - /// Returns the full room state. - #[tracing::instrument(skip(self), level = "debug")] - pub fn room_state_full<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream> + Send + 'a { - self.services - .state - .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok)) - .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) - .try_flatten_stream() - } - - /// Returns the full room state pdus - #[tracing::instrument(skip(self), level = "debug")] - pub fn room_state_full_pdus<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream> + Send + 'a { - self.services - .state - .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok)) - .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) - .try_flatten_stream() - } - - /// Returns a single EventId from `room_id` with key (`event_type`, - /// `state_key`). - #[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - Id: for<'de> Deserialize<'de> + Sized + ToOwned, - ::Owned: Borrow, - { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_get_id(shortstatehash, event_type, state_key)) - .await - } - - /// Returns a single PDU from `room_id` with key (`event_type`, - /// `state_key`). 
- #[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_get(shortstatehash, event_type, state_key)) - .await - } - - /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). - pub async fn room_state_get_content( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - T: for<'de> Deserialize<'de>, - { - self.room_state_get(room_id, event_type, state_key) - .await - .and_then(|event| event.get_content()) - } - pub async fn get_name(&self, room_id: &RoomId) -> Result { self.room_state_get_content(room_id, &StateEventType::RoomName, "") .await From b3271e0d653de1c585b1b5db95447045b0453b06 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 2 Feb 2025 17:27:39 +0000 Subject: [PATCH 145/328] split state_accessor Signed-off-by: Jason Volk --- src/service/rooms/state_accessor/mod.rs | 634 +----------------- .../rooms/state_accessor/room_state.rs | 90 +++ .../rooms/state_accessor/server_can.rs | 73 ++ src/service/rooms/state_accessor/state.rs | 320 +++++++++ src/service/rooms/state_accessor/user_can.rs | 187 ++++++ 5 files changed, 684 insertions(+), 620 deletions(-) create mode 100644 src/service/rooms/state_accessor/room_state.rs create mode 100644 src/service/rooms/state_accessor/server_can.rs create mode 100644 src/service/rooms/state_accessor/state.rs create mode 100644 src/service/rooms/state_accessor/user_can.rs diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index bed8d210..b7952ce6 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,23 +1,19 @@ +mod room_state; +mod server_can; +mod state; +mod user_can; + use std::{ - borrow::Borrow, fmt::Write, - ops::Deref, sync::{Arc, Mutex as StdMutex, Mutex}, }; use conduwuit::{ - at, err, error, pair_of, - pdu::PduBuilder, - utils, - utils::{ - math::{usize_from_f64, Expected}, - result::FlatOk, - stream::{BroadbandExt, IterStream, ReadyExt, TryExpect}, - }, - Err, Error, PduEvent, Result, + err, utils, + utils::math::{usize_from_f64, Expected}, + Result, }; -use database::{Deserialized, Map}; -use futures::{future::try_join, FutureExt, Stream, StreamExt, TryFutureExt}; +use database::Map; use lru_cache::LruCache; use ruma::{ events::{ @@ -29,29 +25,19 @@ use ruma::{ guest_access::{GuestAccess, RoomGuestAccessEventContent}, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent, RoomMembership}, - member::{MembershipState, RoomMemberEventContent}, + member::RoomMemberEventContent, name::RoomNameEventContent, - power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, topic::RoomTopicEventContent, }, - StateEventType, TimelineEventType, + StateEventType, }, room::RoomType, space::SpaceRoomJoinRule, - EventEncryptionAlgorithm, EventId, JsOption, OwnedEventId, OwnedRoomAliasId, OwnedRoomId, - OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, + EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, OwnedRoomId, OwnedServerName, + OwnedUserId, RoomId, UserId, }; -use serde::Deserialize; -use crate::{ - rooms, - rooms::{ - short::{ShortEventId, ShortStateHash, ShortStateKey}, - state::RoomMutexGuard, - state_compressor::{compress_state_event, parse_compressed_state_event, 
CompressedState}, - }, - Dep, -}; +use crate::{rooms, rooms::short::ShortStateHash, Dep}; pub struct Service { pub server_visibility_cache: Mutex>, @@ -143,508 +129,6 @@ impl crate::Service for Service { } impl Service { - /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). - pub async fn room_state_get_content( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - T: for<'de> Deserialize<'de>, - { - self.room_state_get(room_id, event_type, state_key) - .await - .and_then(|event| event.get_content()) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self), level = "debug")] - pub fn room_state_full<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream> + Send + 'a { - self.services - .state - .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok)) - .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) - .try_flatten_stream() - } - - /// Returns the full room state pdus - #[tracing::instrument(skip(self), level = "debug")] - pub fn room_state_full_pdus<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream> + Send + 'a { - self.services - .state - .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok)) - .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) - .try_flatten_stream() - } - - /// Returns a single EventId from `room_id` with key (`event_type`, - /// `state_key`). - #[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - Id: for<'de> Deserialize<'de> + Sized + ToOwned, - ::Owned: Borrow, - { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_get_id(shortstatehash, event_type, state_key)) - .await - } - - /// Returns a single PDU from `room_id` with key (`event_type`, - /// `state_key`). - #[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_get(shortstatehash, event_type, state_key)) - .await - } - - /// The user was a joined member at this state (potentially in the past) - #[inline] - async fn user_was_joined(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { - self.user_membership(shortstatehash, user_id).await == MembershipState::Join - } - - /// The user was an invited or joined room member at this state (potentially - /// in the past) - #[inline] - async fn user_was_invited(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { - let s = self.user_membership(shortstatehash, user_id).await; - s == MembershipState::Join || s == MembershipState::Invite - } - - /// Get membership for given user in state - async fn user_membership( - &self, - shortstatehash: ShortStateHash, - user_id: &UserId, - ) -> MembershipState { - self.state_get_content(shortstatehash, &StateEventType::RoomMember, user_id.as_str()) - .await - .map_or(MembershipState::Leave, |c: RoomMemberEventContent| c.membership) - } - - /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
- pub async fn state_get_content( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - T: for<'de> Deserialize<'de>, - { - self.state_get(shortstatehash, event_type, state_key) - .await - .and_then(|event| event.get_content()) - } - - #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_contains( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> bool { - let Ok(shortstatekey) = self - .services - .short - .get_shortstatekey(event_type, state_key) - .await - else { - return false; - }; - - self.state_contains_shortstatekey(shortstatehash, shortstatekey) - .await - } - - #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_contains_shortstatekey( - &self, - shortstatehash: ShortStateHash, - shortstatekey: ShortStateKey, - ) -> bool { - let start = compress_state_event(shortstatekey, 0); - let end = compress_state_event(shortstatekey, u64::MAX); - - self.load_full_state(shortstatehash) - .map_ok(|full_state| full_state.range(start..end).next().copied()) - .await - .flat_ok() - .is_some() - } - - /// Returns a single PDU from `room_id` with key (`event_type`, - /// `state_key`). - pub async fn state_get( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - self.state_get_id(shortstatehash, event_type, state_key) - .and_then(|event_id: OwnedEventId| async move { - self.services.timeline.get_pdu(&event_id).await - }) - .await - } - - /// Returns a single EventId from `room_id` with key (`event_type`, - /// `state_key`). - #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_get_id( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - Id: for<'de> Deserialize<'de> + Sized + ToOwned, - ::Owned: Borrow, - { - let shorteventid = self - .state_get_shortid(shortstatehash, event_type, state_key) - .await?; - - self.services - .short - .get_eventid_from_short(shorteventid) - .await - } - - /// Returns a single EventId from `room_id` with key (`event_type`, - /// `state_key`). - #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_get_shortid( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - let shortstatekey = self - .services - .short - .get_shortstatekey(event_type, state_key) - .await?; - - let start = compress_state_event(shortstatekey, 0); - let end = compress_state_event(shortstatekey, u64::MAX); - self.load_full_state(shortstatehash) - .map_ok(|full_state| { - full_state - .range(start..end) - .next() - .copied() - .map(parse_compressed_state_event) - .map(at!(1)) - .ok_or(err!(Request(NotFound("Not found in room state")))) - }) - .await? 
- } - - /// Returns the state events removed between the interval (present in .0 but - /// not in .1) - #[inline] - pub fn state_removed( - &self, - shortstatehash: pair_of!(ShortStateHash), - ) -> impl Stream + Send + '_ { - self.state_added((shortstatehash.1, shortstatehash.0)) - } - - /// Returns the state events added between the interval (present in .1 but - /// not in .0) - #[tracing::instrument(skip(self), level = "debug")] - pub fn state_added<'a>( - &'a self, - shortstatehash: pair_of!(ShortStateHash), - ) -> impl Stream + Send + 'a { - let a = self.load_full_state(shortstatehash.0); - let b = self.load_full_state(shortstatehash.1); - try_join(a, b) - .map_ok(|(a, b)| b.difference(&a).copied().collect::>()) - .map_ok(IterStream::try_stream) - .try_flatten_stream() - .expect_ok() - .map(parse_compressed_state_event) - } - - pub fn state_full( - &self, - shortstatehash: ShortStateHash, - ) -> impl Stream + Send + '_ { - self.state_full_pdus(shortstatehash) - .ready_filter_map(|pdu| { - Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu)) - }) - } - - pub fn state_full_pdus( - &self, - shortstatehash: ShortStateHash, - ) -> impl Stream + Send + '_ { - let short_ids = self - .state_full_shortids(shortstatehash) - .expect_ok() - .map(at!(1)); - - self.services - .short - .multi_get_eventid_from_short(short_ids) - .ready_filter_map(Result::ok) - .broad_filter_map(move |event_id: OwnedEventId| async move { - self.services.timeline.get_pdu(&event_id).await.ok() - }) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self), level = "debug")] - pub fn state_full_ids<'a, Id>( - &'a self, - shortstatehash: ShortStateHash, - ) -> impl Stream + Send + 'a - where - Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned + 'a, - ::Owned: Borrow, - { - let shortids = self - .state_full_shortids(shortstatehash) - .expect_ok() - .unzip() - .shared(); - - let shortstatekeys = shortids - .clone() - .map(at!(0)) - .map(Vec::into_iter) - .map(IterStream::stream) - .flatten_stream(); - - let shorteventids = shortids - .map(at!(1)) - .map(Vec::into_iter) - .map(IterStream::stream) - .flatten_stream(); - - self.services - .short - .multi_get_eventid_from_short(shorteventids) - .zip(shortstatekeys) - .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) - } - - pub fn state_full_shortids( - &self, - shortstatehash: ShortStateHash, - ) -> impl Stream> + Send + '_ { - self.load_full_state(shortstatehash) - .map_ok(|full_state| { - full_state - .deref() - .iter() - .copied() - .map(parse_compressed_state_event) - .collect() - }) - .map_ok(|vec: Vec<_>| vec.into_iter().try_stream()) - .try_flatten_stream() - } - - async fn load_full_state( - &self, - shortstatehash: ShortStateHash, - ) -> Result> { - self.services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .map_err(|e| err!(Database("Missing state IDs: {e}"))) - .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) - .await - } - - /// Returns the state hash for this pdu. 
- pub async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { - const BUFSIZE: usize = size_of::(); - - self.services - .short - .get_shorteventid(event_id) - .and_then(|shorteventid| { - self.db - .shorteventid_shortstatehash - .aqry::(&shorteventid) - }) - .await - .deserialized() - } - - /// Whether a server is allowed to see an event through federation, based on - /// the room's history_visibility at that event's state. - #[tracing::instrument(skip_all, level = "trace")] - pub async fn server_can_see_event( - &self, - origin: &ServerName, - room_id: &RoomId, - event_id: &EventId, - ) -> bool { - let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { - return true; - }; - - if let Some(visibility) = self - .server_visibility_cache - .lock() - .expect("locked") - .get_mut(&(origin.to_owned(), shortstatehash)) - { - return *visibility; - } - - let history_visibility = self - .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") - .await - .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { - c.history_visibility - }); - - let current_server_members = self - .services - .state_cache - .room_members(room_id) - .ready_filter(|member| member.server_name() == origin); - - let visibility = match history_visibility { - | HistoryVisibility::WorldReadable | HistoryVisibility::Shared => true, - | HistoryVisibility::Invited => { - // Allow if any member on requesting server was AT LEAST invited, else deny - current_server_members - .any(|member| self.user_was_invited(shortstatehash, member)) - .await - }, - | HistoryVisibility::Joined => { - // Allow if any member on requested server was joined, else deny - current_server_members - .any(|member| self.user_was_joined(shortstatehash, member)) - .await - }, - | _ => { - error!("Unknown history visibility {history_visibility}"); - false - }, - }; - - self.server_visibility_cache - .lock() - .expect("locked") - .insert((origin.to_owned(), shortstatehash), visibility); - - visibility - } - - /// Whether a user is allowed to see an event, based on - /// the room's history_visibility at that event's state. 
- #[tracing::instrument(skip_all, level = "trace")] - pub async fn user_can_see_event( - &self, - user_id: &UserId, - room_id: &RoomId, - event_id: &EventId, - ) -> bool { - let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { - return true; - }; - - if let Some(visibility) = self - .user_visibility_cache - .lock() - .expect("locked") - .get_mut(&(user_id.to_owned(), shortstatehash)) - { - return *visibility; - } - - let currently_member = self.services.state_cache.is_joined(user_id, room_id).await; - - let history_visibility = self - .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") - .await - .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { - c.history_visibility - }); - - let visibility = match history_visibility { - | HistoryVisibility::WorldReadable => true, - | HistoryVisibility::Shared => currently_member, - | HistoryVisibility::Invited => { - // Allow if any member on requesting server was AT LEAST invited, else deny - self.user_was_invited(shortstatehash, user_id).await - }, - | HistoryVisibility::Joined => { - // Allow if any member on requested server was joined, else deny - self.user_was_joined(shortstatehash, user_id).await - }, - | _ => { - error!("Unknown history visibility {history_visibility}"); - false - }, - }; - - self.user_visibility_cache - .lock() - .expect("locked") - .insert((user_id.to_owned(), shortstatehash), visibility); - - visibility - } - - /// Whether a user is allowed to see an event, based on - /// the room's history_visibility at that event's state. - #[tracing::instrument(skip_all, level = "trace")] - pub async fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> bool { - if self.services.state_cache.is_joined(user_id, room_id).await { - return true; - } - - let history_visibility = self - .room_state_get_content(room_id, &StateEventType::RoomHistoryVisibility, "") - .await - .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { - c.history_visibility - }); - - match history_visibility { - | HistoryVisibility::Invited => - self.services.state_cache.is_invited(user_id, room_id).await, - | HistoryVisibility::WorldReadable => true, - | _ => false, - } - } - pub async fn get_name(&self, room_id: &RoomId) -> Result { self.room_state_get_content(room_id, &StateEventType::RoomName, "") .await @@ -669,28 +153,6 @@ impl Service { .await } - pub async fn user_can_invite( - &self, - room_id: &RoomId, - sender: &UserId, - target_user: &UserId, - state_lock: &RoomMutexGuard, - ) -> bool { - self.services - .timeline - .create_hash_and_sign_event( - PduBuilder::state( - target_user.into(), - &RoomMemberEventContent::new(MembershipState::Invite), - ), - sender, - room_id, - state_lock, - ) - .await - .is_ok() - } - /// Checks if guests are able to view room content without joining pub async fn is_world_readable(&self, room_id: &RoomId) -> bool { self.room_state_get_content(room_id, &StateEventType::RoomHistoryVisibility, "") @@ -726,74 +188,6 @@ impl Service { .map(|c: RoomTopicEventContent| c.topic) } - /// Checks if a given user can redact a given event - /// - /// If federation is true, it allows redaction events from any user of the - /// same server as the original event sender - pub async fn user_can_redact( - &self, - redacts: &EventId, - sender: &UserId, - room_id: &RoomId, - federation: bool, - ) -> Result { - let redacting_event = self.services.timeline.get_pdu(redacts).await; - - if redacting_event - .as_ref() - .is_ok_and(|pdu| pdu.kind == 
TimelineEventType::RoomCreate) - { - return Err!(Request(Forbidden("Redacting m.room.create is not safe, forbidding."))); - } - - if redacting_event - .as_ref() - .is_ok_and(|pdu| pdu.kind == TimelineEventType::RoomServerAcl) - { - return Err!(Request(Forbidden( - "Redacting m.room.server_acl will result in the room being inaccessible for \ - everyone (empty allow key), forbidding." - ))); - } - - if let Ok(pl_event_content) = self - .room_state_get_content::( - room_id, - &StateEventType::RoomPowerLevels, - "", - ) - .await - { - let pl_event: RoomPowerLevels = pl_event_content.into(); - Ok(pl_event.user_can_redact_event_of_other(sender) - || pl_event.user_can_redact_own_event(sender) - && if let Ok(redacting_event) = redacting_event { - if federation { - redacting_event.sender.server_name() == sender.server_name() - } else { - redacting_event.sender == sender - } - } else { - false - }) - } else { - // Falling back on m.room.create to judge power level - if let Ok(room_create) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "") - .await - { - Ok(room_create.sender == sender - || redacting_event - .as_ref() - .is_ok_and(|redacting_event| redacting_event.sender == sender)) - } else { - Err(Error::bad_database( - "No m.room.power_levels or m.room.create events in database for room", - )) - } - } - } - /// Returns the join rule (`SpaceRoomJoinRule`) for a given room pub async fn get_join_rule( &self, diff --git a/src/service/rooms/state_accessor/room_state.rs b/src/service/rooms/state_accessor/room_state.rs new file mode 100644 index 00000000..98a82cea --- /dev/null +++ b/src/service/rooms/state_accessor/room_state.rs @@ -0,0 +1,90 @@ +use std::borrow::Borrow; + +use conduwuit::{err, implement, PduEvent, Result}; +use futures::{Stream, StreamExt, TryFutureExt}; +use ruma::{events::StateEventType, EventId, RoomId}; +use serde::Deserialize; + +/// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). +#[implement(super::Service)] +pub async fn room_state_get_content( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, +) -> Result +where + T: for<'de> Deserialize<'de>, +{ + self.room_state_get(room_id, event_type, state_key) + .await + .and_then(|event| event.get_content()) +} + +/// Returns the full room state. +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn room_state_full<'a>( + &'a self, + room_id: &'a RoomId, +) -> impl Stream> + Send + 'a { + self.services + .state + .get_room_shortstatehash(room_id) + .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok)) + .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) + .try_flatten_stream() +} + +/// Returns the full room state pdus +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn room_state_full_pdus<'a>( + &'a self, + room_id: &'a RoomId, +) -> impl Stream> + Send + 'a { + self.services + .state + .get_room_shortstatehash(room_id) + .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok)) + .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) + .try_flatten_stream() +} + +/// Returns a single EventId from `room_id` with key (`event_type`, +/// `state_key`). 
+#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn room_state_get_id( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, +) -> Result +where + Id: for<'de> Deserialize<'de> + Sized + ToOwned, + ::Owned: Borrow, +{ + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_get_id(shortstatehash, event_type, state_key)) + .await +} + +/// Returns a single PDU from `room_id` with key (`event_type`, +/// `state_key`). +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn room_state_get( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, +) -> Result { + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_get(shortstatehash, event_type, state_key)) + .await +} diff --git a/src/service/rooms/state_accessor/server_can.rs b/src/service/rooms/state_accessor/server_can.rs new file mode 100644 index 00000000..4d834227 --- /dev/null +++ b/src/service/rooms/state_accessor/server_can.rs @@ -0,0 +1,73 @@ +use conduwuit::{error, implement, utils::stream::ReadyExt}; +use futures::StreamExt; +use ruma::{ + events::{ + room::history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + StateEventType, + }, + EventId, RoomId, ServerName, +}; + +/// Whether a server is allowed to see an event through federation, based on +/// the room's history_visibility at that event's state. +#[implement(super::Service)] +#[tracing::instrument(skip_all, level = "trace")] +pub async fn server_can_see_event( + &self, + origin: &ServerName, + room_id: &RoomId, + event_id: &EventId, +) -> bool { + let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { + return true; + }; + + if let Some(visibility) = self + .server_visibility_cache + .lock() + .expect("locked") + .get_mut(&(origin.to_owned(), shortstatehash)) + { + return *visibility; + } + + let history_visibility = self + .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") + .await + .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { + c.history_visibility + }); + + let current_server_members = self + .services + .state_cache + .room_members(room_id) + .ready_filter(|member| member.server_name() == origin); + + let visibility = match history_visibility { + | HistoryVisibility::WorldReadable | HistoryVisibility::Shared => true, + | HistoryVisibility::Invited => { + // Allow if any member on requesting server was AT LEAST invited, else deny + current_server_members + .any(|member| self.user_was_invited(shortstatehash, member)) + .await + }, + | HistoryVisibility::Joined => { + // Allow if any member on requested server was joined, else deny + current_server_members + .any(|member| self.user_was_joined(shortstatehash, member)) + .await + }, + | _ => { + error!("Unknown history visibility {history_visibility}"); + false + }, + }; + + self.server_visibility_cache + .lock() + .expect("locked") + .insert((origin.to_owned(), shortstatehash), visibility); + + visibility +} diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs new file mode 100644 index 00000000..c47a5693 --- /dev/null +++ b/src/service/rooms/state_accessor/state.rs @@ -0,0 +1,320 @@ +use std::{borrow::Borrow, ops::Deref, sync::Arc}; + +use conduwuit::{ + at, err, implement, pair_of, + utils::{ + result::FlatOk, + stream::{BroadbandExt, IterStream, ReadyExt, 
TryExpect}, + }, + PduEvent, Result, +}; +use database::Deserialized; +use futures::{future::try_join, FutureExt, Stream, StreamExt, TryFutureExt}; +use ruma::{ + events::{ + room::member::{MembershipState, RoomMemberEventContent}, + StateEventType, + }, + EventId, OwnedEventId, UserId, +}; +use serde::Deserialize; + +use crate::rooms::{ + short::{ShortEventId, ShortStateHash, ShortStateKey}, + state_compressor::{compress_state_event, parse_compressed_state_event, CompressedState}, +}; + +/// The user was a joined member at this state (potentially in the past) +#[implement(super::Service)] +#[inline] +pub async fn user_was_joined(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { + self.user_membership(shortstatehash, user_id).await == MembershipState::Join +} + +/// The user was an invited or joined room member at this state (potentially +/// in the past) +#[implement(super::Service)] +#[inline] +pub async fn user_was_invited(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { + let s = self.user_membership(shortstatehash, user_id).await; + s == MembershipState::Join || s == MembershipState::Invite +} + +/// Get membership for given user in state +#[implement(super::Service)] +pub async fn user_membership( + &self, + shortstatehash: ShortStateHash, + user_id: &UserId, +) -> MembershipState { + self.state_get_content(shortstatehash, &StateEventType::RoomMember, user_id.as_str()) + .await + .map_or(MembershipState::Leave, |c: RoomMemberEventContent| c.membership) +} + +/// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). +#[implement(super::Service)] +pub async fn state_get_content( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, +) -> Result +where + T: for<'de> Deserialize<'de>, +{ + self.state_get(shortstatehash, event_type, state_key) + .await + .and_then(|event| event.get_content()) +} + +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn state_contains( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, +) -> bool { + let Ok(shortstatekey) = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await + else { + return false; + }; + + self.state_contains_shortstatekey(shortstatehash, shortstatekey) + .await +} + +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn state_contains_shortstatekey( + &self, + shortstatehash: ShortStateHash, + shortstatekey: ShortStateKey, +) -> bool { + let start = compress_state_event(shortstatekey, 0); + let end = compress_state_event(shortstatekey, u64::MAX); + + self.load_full_state(shortstatehash) + .map_ok(|full_state| full_state.range(start..=end).next().copied()) + .await + .flat_ok() + .is_some() +} + +/// Returns a single PDU from `room_id` with key (`event_type`, +/// `state_key`). +#[implement(super::Service)] +pub async fn state_get( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, +) -> Result { + self.state_get_id(shortstatehash, event_type, state_key) + .and_then(|event_id: OwnedEventId| async move { + self.services.timeline.get_pdu(&event_id).await + }) + .await +} + +/// Returns a single EventId from `room_id` with key (`event_type`, +/// `state_key`). 
+#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn state_get_id( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, +) -> Result +where + Id: for<'de> Deserialize<'de> + Sized + ToOwned, + ::Owned: Borrow, +{ + let shorteventid = self + .state_get_shortid(shortstatehash, event_type, state_key) + .await?; + + self.services + .short + .get_eventid_from_short(shorteventid) + .await +} + +/// Returns a single EventId from `room_id` with key (`event_type`, +/// `state_key`). +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn state_get_shortid( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, +) -> Result { + let shortstatekey = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await?; + + let start = compress_state_event(shortstatekey, 0); + let end = compress_state_event(shortstatekey, u64::MAX); + self.load_full_state(shortstatehash) + .map_ok(|full_state| { + full_state + .range(start..=end) + .next() + .copied() + .map(parse_compressed_state_event) + .map(at!(1)) + .ok_or(err!(Request(NotFound("Not found in room state")))) + }) + .await? +} + +/// Returns the state events removed between the interval (present in .0 but +/// not in .1) +#[implement(super::Service)] +#[inline] +pub fn state_removed( + &self, + shortstatehash: pair_of!(ShortStateHash), +) -> impl Stream + Send + '_ { + self.state_added((shortstatehash.1, shortstatehash.0)) +} + +/// Returns the state events added between the interval (present in .1 but +/// not in .0) +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn state_added<'a>( + &'a self, + shortstatehash: pair_of!(ShortStateHash), +) -> impl Stream + Send + 'a { + let a = self.load_full_state(shortstatehash.0); + let b = self.load_full_state(shortstatehash.1); + try_join(a, b) + .map_ok(|(a, b)| b.difference(&a).copied().collect::>()) + .map_ok(IterStream::try_stream) + .try_flatten_stream() + .expect_ok() + .map(parse_compressed_state_event) +} + +#[implement(super::Service)] +pub fn state_full( + &self, + shortstatehash: ShortStateHash, +) -> impl Stream + Send + '_ { + self.state_full_pdus(shortstatehash) + .ready_filter_map(|pdu| { + Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu)) + }) +} + +#[implement(super::Service)] +pub fn state_full_pdus( + &self, + shortstatehash: ShortStateHash, +) -> impl Stream + Send + '_ { + let short_ids = self + .state_full_shortids(shortstatehash) + .expect_ok() + .map(at!(1)); + + self.services + .short + .multi_get_eventid_from_short(short_ids) + .ready_filter_map(Result::ok) + .broad_filter_map(move |event_id: OwnedEventId| async move { + self.services.timeline.get_pdu(&event_id).await.ok() + }) +} + +/// Builds a StateMap by iterating over all keys that start +/// with state_hash, this gives the full state for the given state_hash. 
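// Illustrative note (not part of this patch): `state_added` above loads the
// full compressed state of both snapshots and streams `b.difference(&a)`, so it
// yields only the compressed (ShortStateKey, ShortEventId) pairs present in
// `.1` but not in `.0`; `state_removed` reuses it with the pair swapped. A
// minimal sketch of the same idea over plain sets (names here are
// hypothetical, for illustration only):
//
//     use std::collections::BTreeSet;
//
//     fn added(a: &BTreeSet<u64>, b: &BTreeSet<u64>) -> Vec<u64> {
//         // elements in `b` that are not in `a`
//         b.difference(a).copied().collect()
//     }
//
//     fn removed(a: &BTreeSet<u64>, b: &BTreeSet<u64>) -> Vec<u64> {
//         // "removed between a and b" is just "added" with the pair swapped
//         added(b, a)
//     }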
+#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn state_full_ids<'a, Id>( + &'a self, + shortstatehash: ShortStateHash, +) -> impl Stream + Send + 'a +where + Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned + 'a, + ::Owned: Borrow, +{ + let shortids = self + .state_full_shortids(shortstatehash) + .expect_ok() + .unzip() + .shared(); + + let shortstatekeys = shortids + .clone() + .map(at!(0)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + let shorteventids = shortids + .map(at!(1)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + self.services + .short + .multi_get_eventid_from_short(shorteventids) + .zip(shortstatekeys) + .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) +} + +#[implement(super::Service)] +pub fn state_full_shortids( + &self, + shortstatehash: ShortStateHash, +) -> impl Stream> + Send + '_ { + self.load_full_state(shortstatehash) + .map_ok(|full_state| { + full_state + .deref() + .iter() + .copied() + .map(parse_compressed_state_event) + .collect() + }) + .map_ok(|vec: Vec<_>| vec.into_iter().try_stream()) + .try_flatten_stream() +} + +#[implement(super::Service)] +async fn load_full_state(&self, shortstatehash: ShortStateHash) -> Result> { + self.services + .state_compressor + .load_shortstatehash_info(shortstatehash) + .map_err(|e| err!(Database("Missing state IDs: {e}"))) + .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) + .await +} + +/// Returns the state hash for this pdu. +#[implement(super::Service)] +pub async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { + const BUFSIZE: usize = size_of::(); + + self.services + .short + .get_shorteventid(event_id) + .and_then(|shorteventid| { + self.db + .shorteventid_shortstatehash + .aqry::(&shorteventid) + }) + .await + .deserialized() +} diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs new file mode 100644 index 00000000..725a4fba --- /dev/null +++ b/src/service/rooms/state_accessor/user_can.rs @@ -0,0 +1,187 @@ +use conduwuit::{error, implement, pdu::PduBuilder, Err, Error, Result}; +use ruma::{ + events::{ + room::{ + history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + member::{MembershipState, RoomMemberEventContent}, + power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, + }, + StateEventType, TimelineEventType, + }, + EventId, RoomId, UserId, +}; + +use crate::rooms::state::RoomMutexGuard; + +/// Checks if a given user can redact a given event +/// +/// If federation is true, it allows redaction events from any user of the +/// same server as the original event sender +#[implement(super::Service)] +pub async fn user_can_redact( + &self, + redacts: &EventId, + sender: &UserId, + room_id: &RoomId, + federation: bool, +) -> Result { + let redacting_event = self.services.timeline.get_pdu(redacts).await; + + if redacting_event + .as_ref() + .is_ok_and(|pdu| pdu.kind == TimelineEventType::RoomCreate) + { + return Err!(Request(Forbidden("Redacting m.room.create is not safe, forbidding."))); + } + + if redacting_event + .as_ref() + .is_ok_and(|pdu| pdu.kind == TimelineEventType::RoomServerAcl) + { + return Err!(Request(Forbidden( + "Redacting m.room.server_acl will result in the room being inaccessible for \ + everyone (empty allow key), forbidding." 
+ ))); + } + + if let Ok(pl_event_content) = self + .room_state_get_content::( + room_id, + &StateEventType::RoomPowerLevels, + "", + ) + .await + { + let pl_event: RoomPowerLevels = pl_event_content.into(); + Ok(pl_event.user_can_redact_event_of_other(sender) + || pl_event.user_can_redact_own_event(sender) + && if let Ok(redacting_event) = redacting_event { + if federation { + redacting_event.sender.server_name() == sender.server_name() + } else { + redacting_event.sender == sender + } + } else { + false + }) + } else { + // Falling back on m.room.create to judge power level + if let Ok(room_create) = self + .room_state_get(room_id, &StateEventType::RoomCreate, "") + .await + { + Ok(room_create.sender == sender + || redacting_event + .as_ref() + .is_ok_and(|redacting_event| redacting_event.sender == sender)) + } else { + Err(Error::bad_database( + "No m.room.power_levels or m.room.create events in database for room", + )) + } + } +} + +/// Whether a user is allowed to see an event, based on +/// the room's history_visibility at that event's state. +#[implement(super::Service)] +#[tracing::instrument(skip_all, level = "trace")] +pub async fn user_can_see_event( + &self, + user_id: &UserId, + room_id: &RoomId, + event_id: &EventId, +) -> bool { + let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { + return true; + }; + + if let Some(visibility) = self + .user_visibility_cache + .lock() + .expect("locked") + .get_mut(&(user_id.to_owned(), shortstatehash)) + { + return *visibility; + } + + let currently_member = self.services.state_cache.is_joined(user_id, room_id).await; + + let history_visibility = self + .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") + .await + .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { + c.history_visibility + }); + + let visibility = match history_visibility { + | HistoryVisibility::WorldReadable => true, + | HistoryVisibility::Shared => currently_member, + | HistoryVisibility::Invited => { + // Allow if any member on requesting server was AT LEAST invited, else deny + self.user_was_invited(shortstatehash, user_id).await + }, + | HistoryVisibility::Joined => { + // Allow if any member on requested server was joined, else deny + self.user_was_joined(shortstatehash, user_id).await + }, + | _ => { + error!("Unknown history visibility {history_visibility}"); + false + }, + }; + + self.user_visibility_cache + .lock() + .expect("locked") + .insert((user_id.to_owned(), shortstatehash), visibility); + + visibility +} + +/// Whether a user is allowed to see an event, based on +/// the room's history_visibility at that event's state. 
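// Illustrative note (not part of this patch): in `user_can_redact` above, `&&`
// binds tighter than `||`, so the power-level branch reads as
// `can_redact_other || (can_redact_own && sender_matches_redacted_event)`;
// only the "redact own event" path is additionally constrained by the sender
// check, and `federation == true` relaxes that check from an exact sender
// match to a same-server-name match.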
+#[implement(super::Service)] +#[tracing::instrument(skip_all, level = "trace")] +pub async fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> bool { + if self.services.state_cache.is_joined(user_id, room_id).await { + return true; + } + + let history_visibility = self + .room_state_get_content(room_id, &StateEventType::RoomHistoryVisibility, "") + .await + .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { + c.history_visibility + }); + + match history_visibility { + | HistoryVisibility::Invited => + self.services.state_cache.is_invited(user_id, room_id).await, + | HistoryVisibility::WorldReadable => true, + | _ => false, + } +} + +#[implement(super::Service)] +pub async fn user_can_invite( + &self, + room_id: &RoomId, + sender: &UserId, + target_user: &UserId, + state_lock: &RoomMutexGuard, +) -> bool { + self.services + .timeline + .create_hash_and_sign_event( + PduBuilder::state( + target_user.into(), + &RoomMemberEventContent::new(MembershipState::Invite), + ), + sender, + room_id, + state_lock, + ) + .await + .is_ok() +} From d32534164c0092a30ac351337b7dd34aa8f5d456 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 4 Feb 2025 20:30:33 +0000 Subject: [PATCH 146/328] fix soft-failed redaction regression (ff8bbd4cfa) Signed-off-by: Jason Volk --- src/service/rooms/event_handler/upgrade_outlier_pdu.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index 132daca7..b33b0388 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -128,7 +128,8 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( | (false, _) => true, | (true, None) => false, | (true, Some(redact_id)) => - self.services + !self + .services .state_accessor .user_can_redact(&redact_id, &incoming_pdu.sender, &incoming_pdu.room_id, true) .await?, From 80277f6aa2629a8b9dc2b4e96a64d8e508d47270 Mon Sep 17 00:00:00 2001 From: Nineko Date: Tue, 4 Feb 2025 16:46:00 -0500 Subject: [PATCH 147/328] Adds .gitattributes to the projects to prevent LN and CLRF conflicts. (#681) --- .gitattributes | 87 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 .gitattributes diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..3dfaca65 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,87 @@ +# taken from https://github.com/gitattributes/gitattributes/blob/46a8961ad73f5bd4d8d193708840fbc9e851d702/Rust.gitattributes +# Auto detect text files and perform normalization +* text=auto + +*.rs text diff=rust +*.toml text diff=toml +Cargo.lock text + +# taken from https://github.com/gitattributes/gitattributes/blob/46a8961ad73f5bd4d8d193708840fbc9e851d702/Common.gitattributes +# Documents +*.bibtex text diff=bibtex +*.doc diff=astextplain +*.DOC diff=astextplain +*.docx diff=astextplain +*.DOCX diff=astextplain +*.dot diff=astextplain +*.DOT diff=astextplain +*.pdf diff=astextplain +*.PDF diff=astextplain +*.rtf diff=astextplain +*.RTF diff=astextplain +*.md text diff=markdown +*.mdx text diff=markdown +*.tex text diff=tex +*.adoc text +*.textile text +*.mustache text +*.csv text eol=crlf +*.tab text +*.tsv text +*.txt text +*.sql text +*.epub diff=astextplain + +# Graphics +*.png binary +*.jpg binary +*.jpeg binary +*.gif binary +*.tif binary +*.tiff binary +*.ico binary +# SVG treated as text by default. 
+*.svg text +*.eps binary + +# Scripts +*.bash text eol=lf +*.fish text eol=lf +*.ksh text eol=lf +*.sh text eol=lf +*.zsh text eol=lf +# These are explicitly windows files and should use crlf +*.bat text eol=crlf +*.cmd text eol=crlf +*.ps1 text eol=crlf + +# Serialisation +*.json text +*.toml text +*.xml text +*.yaml text +*.yml text + +# Archives +*.7z binary +*.bz binary +*.bz2 binary +*.bzip2 binary +*.gz binary +*.lz binary +*.lzma binary +*.rar binary +*.tar binary +*.taz binary +*.tbz binary +*.tbz2 binary +*.tgz binary +*.tlz binary +*.txz binary +*.xz binary +*.Z binary +*.zip binary +*.zst binary + +# Text files where line endings should be preserved +*.patch -text \ No newline at end of file From 62180897c02d9c306b2179f3685e60ffdc615c1f Mon Sep 17 00:00:00 2001 From: Niko Date: Sat, 1 Feb 2025 18:35:23 -0500 Subject: [PATCH 148/328] Added blurhash.rs to fascilitate blurhashing. Signed-off-by: Niko --- Cargo.lock | 373 +++++++++++++++++++++++++++++++++- Cargo.toml | 8 +- conduwuit-example.toml | 18 ++ src/api/Cargo.toml | 1 + src/api/client/media.rs | 21 ++ src/core/Cargo.toml | 1 + src/core/config/mod.rs | 40 +++- src/main/Cargo.toml | 1 + src/service/Cargo.toml | 3 + src/service/media/blurhash.rs | 159 +++++++++++++++ src/service/media/mod.rs | 3 +- 11 files changed, 621 insertions(+), 7 deletions(-) create mode 100644 src/service/media/blurhash.rs diff --git a/Cargo.lock b/Cargo.lock index e379aebb..b710d6fc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -26,6 +26,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "aligned-vec" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4aa90d7ce82d4be67b64039a3d588d38dbcc6736577de4a847025ce5b0c468d1" + [[package]] name = "alloc-no-stdlib" version = "2.0.4" @@ -53,12 +59,29 @@ version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" +[[package]] +name = "arbitrary" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" + [[package]] name = "arc-swap" version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" +[[package]] +name = "arg_enum_proc_macro" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ae92a5119aa49cdbcf6b9f893fe4e1d98b04ccbf82ee0584ad948a44a734dea" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "argon2" version = "0.5.3" @@ -173,6 +196,29 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +[[package]] +name = "av1-grain" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6678909d8c5d46a42abcf571271e15fdbc0a225e3646cf23762cd415046c78bf" +dependencies = [ + "anyhow", + "arrayvec", + "log", + "nom", + "num-rational", + "v_frame", +] + +[[package]] +name = "avif-serialize" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e335041290c43101ca215eed6f43ec437eb5a42125573f600fc3fa42b9bddd62" +dependencies = [ + "arrayvec", +] + [[package]] name = "aws-lc-rs" version = "1.12.1" @@ -385,6 +431,12 @@ dependencies = [ "which", ] +[[package]] +name = "bit_field" +version = "0.10.2" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc827186963e592360843fb5ba4b973e145841266c1357f7180c43526f2e5b61" + [[package]] name = "bitflags" version = "1.3.2" @@ -397,6 +449,12 @@ version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" +[[package]] +name = "bitstream-io" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6099cdc01846bc367c4e7dd630dc5966dccf36b652fae7a74e17b640411a91b2" + [[package]] name = "blake2" version = "0.10.6" @@ -415,6 +473,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "blurhash" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e79769241dcd44edf79a732545e8b5cec84c247ac060f5252cd51885d093a8fc" +dependencies = [ + "image", +] + [[package]] name = "brotli" version = "7.0.0" @@ -436,6 +503,12 @@ dependencies = [ "alloc-stdlib", ] +[[package]] +name = "built" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c360505aed52b7ec96a3636c3f039d99103c37d1d9b4f7a8c743d3ea9ffcd03b" + [[package]] name = "bumpalo" version = "3.16.0" @@ -513,6 +586,16 @@ dependencies = [ "nom", ] +[[package]] +name = "cfg-expr" +version = "0.15.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d067ad48b8650848b989a59a86c6c36a995d02d2bf778d45c3c5d57bc2718f02" +dependencies = [ + "smallvec", + "target-lexicon", +] + [[package]] name = "cfg-if" version = "1.0.0" @@ -822,6 +905,7 @@ dependencies = [ "arrayvec", "async-trait", "base64 0.22.1", + "blurhash", "bytes", "conduwuit_core", "conduwuit_database", @@ -1071,6 +1155,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "crunchy" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" + [[package]] name = "crypto-common" version = "0.1.6" @@ -1252,7 +1342,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -1275,6 +1365,21 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "exr" +version = "1.73.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83197f59927b46c04a183a619b7c29df34e63e63c7869320862268c0ef687e0" +dependencies = [ + "bit_field", + "half", + "lebe", + "miniz_oxide", + "rayon-core", + "smallvec", + "zune-inflate", +] + [[package]] name = "fdeflate" version = "0.3.7" @@ -1519,6 +1624,16 @@ dependencies = [ "tracing", ] +[[package]] +name = "half" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", +] + [[package]] name = "hardened_malloc-rs" version = "0.1.2+12" @@ -1973,10 +2088,16 @@ dependencies = [ "bytemuck", "byteorder-lite", "color_quant", + "exr", "gif", "image-webp", "num-traits", "png", + "qoi", + "ravif", + "rayon", + "rgb", + "tiff", "zune-core", "zune-jpeg", ] @@ -1991,6 +2112,12 @@ dependencies = [ "quick-error 2.0.1", ] +[[package]] +name = "imgref" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0263a3d970d5c054ed9312c0057b4f3bde9c0b33836d3637361d4a9e6e7a408" 
+ [[package]] name = "indexmap" version = "1.9.3" @@ -2024,6 +2151,17 @@ version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" +[[package]] +name = "interpolate_name" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c34819042dc3d3971c46c2190835914dfbe0c3c13f61449b2997f4e9722dfa60" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "ipaddress" version = "0.1.3" @@ -2089,6 +2227,12 @@ dependencies = [ "libc", ] +[[package]] +name = "jpeg-decoder" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5d4a7da358eff58addd2877a45865158f0d78c911d43a5784ceb7bbf52833b0" + [[package]] name = "js-sys" version = "0.3.77" @@ -2172,12 +2316,28 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" +[[package]] +name = "lebe" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8" + [[package]] name = "libc" version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" +[[package]] +name = "libfuzzer-sys" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf78f52d400cf2d84a3a973a78a592b4adc535739e0a5597a0da6f0c357adc75" +dependencies = [ + "arbitrary", + "cc", +] + [[package]] name = "libloading" version = "0.8.6" @@ -2185,7 +2345,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -2243,6 +2403,15 @@ dependencies = [ "futures-sink", ] +[[package]] +name = "loop9" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fae87c125b03c1d2c0150c90365d7d6bcc53fb73a9acaef207d2d065860f062" +dependencies = [ + "imgref", +] + [[package]] name = "lru-cache" version = "0.1.2" @@ -2321,6 +2490,16 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" +[[package]] +name = "maybe-rayon" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea1f30cedd69f0a2954655f7188c6a834246d2bcf1e315e2ac40c4b24dc9519" +dependencies = [ + "cfg-if", + "rayon", +] + [[package]] name = "memchr" version = "2.7.4" @@ -2434,6 +2613,12 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" +[[package]] +name = "noop_proc_macro" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0676bb32a98c1a483ce53e500a81ad9c3d5b3f7c920c28c24e9cb0980d0b5bc8" + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -2483,6 +2668,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +[[package]] +name = "num-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "num-integer" version = "0.1.46" @@ -2907,6 +3103,25 @@ dependencies = [ "yansi", ] +[[package]] +name = "profiling" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afbdc74edc00b6f6a218ca6a5364d6226a259d4b8ea1af4a0ea063f27e179f4d" +dependencies = [ + "profiling-procmacros", +] + +[[package]] +name = "profiling-procmacros" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a65f2e60fbf1063868558d69c6beacf412dc755f9fc020f514b7955fc914fe30" +dependencies = [ + "quote", + "syn 2.0.96", +] + [[package]] name = "prost" version = "0.13.4" @@ -2957,6 +3172,15 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "007d8adb5ddab6f8e3f491ac63566a7d5002cc7ed73901f72057943fa71ae1ae" +[[package]] +name = "qoi" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f6d64c71eb498fe9eae14ce4ec935c555749aef511cca85b5568910d6e48001" +dependencies = [ + "bytemuck", +] + [[package]] name = "quick-error" version = "1.2.3" @@ -3018,7 +3242,7 @@ dependencies = [ "once_cell", "socket2", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3060,6 +3284,76 @@ dependencies = [ "getrandom", ] +[[package]] +name = "rav1e" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd87ce80a7665b1cce111f8a16c1f3929f6547ce91ade6addf4ec86a8dda5ce9" +dependencies = [ + "arbitrary", + "arg_enum_proc_macro", + "arrayvec", + "av1-grain", + "bitstream-io", + "built", + "cfg-if", + "interpolate_name", + "itertools 0.12.1", + "libc", + "libfuzzer-sys", + "log", + "maybe-rayon", + "new_debug_unreachable", + "noop_proc_macro", + "num-derive", + "num-traits", + "once_cell", + "paste", + "profiling", + "rand", + "rand_chacha", + "simd_helpers", + "system-deps", + "thiserror 1.0.69", + "v_frame", + "wasm-bindgen", +] + +[[package]] +name = "ravif" +version = "0.11.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2413fd96bd0ea5cdeeb37eaf446a22e6ed7b981d792828721e74ded1980a45c6" +dependencies = [ + "avif-serialize", + "imgref", + "loop9", + "quick-error 2.0.1", + "rav1e", + "rayon", + "rgb", +] + +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + [[package]] name = "redox_syscall" version = "0.5.8" @@ -3172,6 +3466,12 @@ dependencies = [ "quick-error 1.2.3", ] +[[package]] +name = "rgb" +version = "0.8.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57397d16646700483b67d2dd6511d79318f9d057fdbd21a4066aeac8b41d310a" + [[package]] name = "ring" version = "0.17.8" @@ -3479,7 +3779,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3945,6 +4245,15 @@ version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" +[[package]] +name = "simd_helpers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95890f873bec569a0362c235787f3aca6e1e887302ba4840839bcc6459c42da6" +dependencies = [ + "quote", +] + [[package]] name = "siphasher" version = "0.3.11" @@ -4096,6 +4405,25 @@ dependencies = [ "syn 2.0.96", ] +[[package]] +name = "system-deps" +version = "6.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3e535eb8dded36d55ec13eddacd30dec501792ff23a0b1682c38601b8cf2349" +dependencies = [ + "cfg-expr", + "heck", + "pkg-config", + "toml", + "version-compare", +] + +[[package]] +name = "target-lexicon" +version = "0.12.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" + [[package]] name = "tendril" version = "0.4.3" @@ -4205,6 +4533,17 @@ dependencies = [ "threadpool", ] +[[package]] +name = "tiff" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba1310fcea54c6a9a4fd1aad794ecc02c31682f6bfbecdf460bf19533eed1e3e" +dependencies = [ + "flate2", + "jpeg-decoder", + "weezl", +] + [[package]] name = "tikv-jemalloc-ctl" version = "0.6.0" @@ -4744,6 +5083,17 @@ dependencies = [ "serde", ] +[[package]] +name = "v_frame" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6f32aaa24bacd11e488aa9ba66369c7cd514885742c9fe08cfe85884db3e92b" +dependencies = [ + "aligned-vec", + "num-traits", + "wasm-bindgen", +] + [[package]] name = "valuable" version = "0.1.1" @@ -4756,6 +5106,12 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" +[[package]] +name = "version-compare" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "852e951cb7832cb45cb1169900d19760cfa39b82bc0ea9c0e5a14ae88411c98b" + [[package]] name = "version_check" version = "0.9.5" @@ -5324,6 +5680,15 @@ version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f423a2c17029964870cfaabb1f13dfab7d092a62a29a89264f4d36990ca414a" +[[package]] +name = "zune-inflate" +version = "0.2.54" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73ab332fe2f6680068f3582b16a24f90ad7096d5d39b974d1c0aff0125116f02" +dependencies = [ + "simd-adler32", +] + [[package]] name = "zune-jpeg" version = "0.4.14" diff --git a/Cargo.toml b/Cargo.toml index 1cf787c6..c580d22d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -179,7 +179,7 @@ version = "0.5.3" features = ["alloc", "rand"] default-features = false -# Used to generate thumbnails for images +# Used to generate thumbnails for images & blurhashes [workspace.dependencies.image] version = "0.25.5" default-features = false @@ -190,6 +190,12 @@ features = [ "webp", ] +[workspace.dependencies.blurhash] +version = "0.2.3" +default-features = false +features = [ + "fast-linear-to-srgb","image" +] # logging [workspace.dependencies.log] version = "0.4.22" diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 3e64522c..f9da856d 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1607,3 +1607,21 @@ # This item is undocumented. Please contribute documentation for it. 
# #support_mxid = + +[global.blurhashing] + +# blurhashing x component, 4 is recommended by https://blurha.sh/ +# +#components_x = 4 + +# blurhashing y component, 3 is recommended by https://blurha.sh/ +# +#components_y = 3 + +# Max raw size that the server will blurhash, this is the size of the +# image after converting it to raw data, it should be higher than the +# upload limit but not too high. The higher it is the higher the +# potential load will be for clients requesting blurhashes. The default +# is 33.55MB. Setting it to 0 disables blurhashing. +# +#blurhash_max_raw_size = 33554432 diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 385e786f..8a5ef3f0 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -17,6 +17,7 @@ crate-type = [ ] [features] +blurhashing=[] element_hacks = [] release_max_log_level = [ "tracing/max_level_trace", diff --git a/src/api/client/media.rs b/src/api/client/media.rs index afbc218a..115f2581 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -62,6 +62,27 @@ pub(crate) async fn create_content_route( media_id: &utils::random_string(MXC_LENGTH), }; + #[cfg(feature = "blurhashing")] + { + if body.generate_blurhash { + let (blurhash, create_media_result) = tokio::join!( + services + .media + .create_blurhash(&body.file, content_type, filename), + services.media.create( + &mxc, + Some(user), + Some(&content_disposition), + content_type, + &body.file + ) + ); + return create_media_result.map(|()| create_content::v3::Response { + content_uri: mxc.to_string().into(), + blurhash, + }); + } + } services .media .create(&mxc, Some(user), Some(&content_disposition), content_type, &body.file) diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index ef2df4ff..5d46ec3b 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -54,6 +54,7 @@ sentry_telemetry = [] conduwuit_mods = [ "dep:libloading" ] +blurhashing = [] [dependencies] argon2.workspace = true diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index ff80d1cf..9514f7a0 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -52,7 +52,7 @@ use crate::{err, error::Error, utils::sys, Result}; ### For more information, see: ### https://conduwuit.puppyirl.gay/configuration.html "#, - ignore = "catchall well_known tls" + ignore = "catchall well_known tls blurhashing" )] pub struct Config { /// The server_name is the pretty name of this server. 
It is used as a @@ -1789,6 +1789,9 @@ pub struct Config { #[serde(default = "true_fn")] pub config_reload_signal: bool, + // external structure; separate section + #[serde(default)] + pub blurhashing: BlurhashConfig, #[serde(flatten)] #[allow(clippy::zero_sized_map_values)] // this is a catchall, the map shouldn't be zero at runtime @@ -1839,6 +1842,31 @@ pub struct WellKnownConfig { pub support_mxid: Option, } +#[derive(Clone, Copy, Debug, Deserialize, Default)] +#[allow(rustdoc::broken_intra_doc_links, rustdoc::bare_urls)] +#[config_example_generator(filename = "conduwuit-example.toml", section = "global.blurhashing")] +pub struct BlurhashConfig { + /// blurhashing x component, 4 is recommended by https://blurha.sh/ + /// + /// default: 4 + #[serde(default = "default_blurhash_x_component")] + pub components_x: u32, + /// blurhashing y component, 3 is recommended by https://blurha.sh/ + /// + /// default: 3 + #[serde(default = "default_blurhash_y_component")] + pub components_y: u32, + /// Max raw size that the server will blurhash, this is the size of the + /// image after converting it to raw data, it should be higher than the + /// upload limit but not too high. The higher it is the higher the + /// potential load will be for clients requesting blurhashes. The default + /// is 33.55MB. Setting it to 0 disables blurhashing. + /// + /// default: 33554432 + #[serde(default = "default_blurhash_max_raw_size")] + pub blurhash_max_raw_size: u64, +} + #[derive(Deserialize, Clone, Debug)] #[serde(transparent)] struct ListeningPort { @@ -2210,3 +2238,13 @@ fn default_client_response_timeout() -> u64 { 120 } fn default_client_shutdown_timeout() -> u64 { 15 } fn default_sender_shutdown_timeout() -> u64 { 5 } + +// blurhashing defaults recommended by https://blurha.sh/ +// 2^25 +pub(super) fn default_blurhash_max_raw_size() -> u64 { 33_554_432 } + +pub(super) fn default_blurhash_x_component() -> u32 { 4 } + +pub(super) fn default_blurhash_y_component() -> u32 { 3 } + +// end recommended & blurhashing defaults diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index f774c37a..7e1cb86b 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -101,6 +101,7 @@ perf_measurements = [ "conduwuit-core/perf_measurements", "conduwuit-core/sentry_telemetry", ] +blurhashing =["conduwuit-service/blurhashing","conduwuit-core/blurhashing","conduwuit-api/blurhashing"] # increases performance, reduces build times, and reduces binary size by not compiling or # genreating code for log level filters that users will generally not use (debug and trace) release_max_log_level = [ diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml index c4f75453..30183179 100644 --- a/src/service/Cargo.toml +++ b/src/service/Cargo.toml @@ -44,6 +44,7 @@ url_preview = [ zstd_compression = [ "reqwest/zstd", ] +blurhashing = ["dep:image","dep:blurhash"] [dependencies] arrayvec.workspace = true @@ -82,6 +83,8 @@ tracing.workspace = true url.workspace = true webpage.workspace = true webpage.optional = true +blurhash.workspace = true +blurhash.optional = true [lints] workspace = true diff --git a/src/service/media/blurhash.rs b/src/service/media/blurhash.rs new file mode 100644 index 00000000..c470925c --- /dev/null +++ b/src/service/media/blurhash.rs @@ -0,0 +1,159 @@ +use std::{fmt::Display, io::Cursor, path::Path}; + +use blurhash::encode_image; +use conduwuit::{config::BlurhashConfig as CoreBlurhashConfig, debug_error, implement, trace}; +use image::{DynamicImage, ImageDecoder, ImageError, ImageFormat, ImageReader}; + +use 
super::Service; +#[implement(Service)] +pub async fn create_blurhash( + &self, + file: &[u8], + content_type: Option<&str>, + file_name: Option<&str>, +) -> Option { + let config = BlurhashConfig::from(self.services.server.config.blurhashing); + if config.size_limit == 0 { + trace!("since 0 means disabled blurhashing, skipped blurhashing logic"); + return None; + } + let file_data = file.to_owned(); + let content_type = content_type.map(String::from); + let file_name = file_name.map(String::from); + + let blurhashing_result = tokio::task::spawn_blocking(move || { + get_blurhash_from_request(&file_data, content_type, file_name, config) + }) + .await + .expect("no join error"); + + match blurhashing_result { + | Ok(result) => Some(result), + | Err(e) => { + debug_error!("Error when blurhashing: {e}"); + None + }, + } +} + +/// Returns the blurhash or a blurhash error which implements Display. +fn get_blurhash_from_request( + data: &[u8], + mime: Option, + filename: Option, + config: BlurhashConfig, +) -> Result { + // Get format image is supposed to be in + let format = get_format_from_data_mime_and_filename(data, mime, filename)?; + // Get the image reader for said image format + let decoder = get_image_decoder_with_format_and_data(format, data)?; + // Check image size makes sense before unpacking whole image + if is_image_above_size_limit(&decoder, config) { + return Err(BlurhashingError::ImageTooLarge); + } + // decode the image finally + let image = DynamicImage::from_decoder(decoder)?; + + blurhash_an_image(&image, config) +} + +/// Gets the Image Format value from the data,mime, and filename +/// It first checks if the mime is a valid image format +/// Then it checks if the filename has a format, otherwise just guess based on +/// the binary data Assumes that mime and filename extension won't be for a +/// different file format than file. +fn get_format_from_data_mime_and_filename( + data: &[u8], + mime: Option, + filename: Option, +) -> Result { + let mut image_format = None; + if let Some(mime) = mime { + image_format = ImageFormat::from_mime_type(mime); + } + if let (Some(filename), None) = (filename, image_format) { + if let Some(extension) = Path::new(&filename).extension() { + image_format = ImageFormat::from_mime_type(extension.to_string_lossy()); + } + } + + if let Some(format) = image_format { + Ok(format) + } else { + image::guess_format(data).map_err(Into::into) + } +} + +fn get_image_decoder_with_format_and_data( + image_format: ImageFormat, + data: &[u8], +) -> Result, BlurhashingError> { + let mut image_reader = ImageReader::new(Cursor::new(data)); + image_reader.set_format(image_format); + Ok(Box::new(image_reader.into_decoder()?)) +} + +fn is_image_above_size_limit( + decoder: &T, + blurhash_config: BlurhashConfig, +) -> bool { + decoder.total_bytes() >= blurhash_config.size_limit +} +#[inline] +fn blurhash_an_image( + image: &DynamicImage, + blurhash_config: BlurhashConfig, +) -> Result { + Ok(encode_image( + blurhash_config.components_x, + blurhash_config.components_y, + &image.to_rgba8(), + )?) 
+} +#[derive(Clone, Copy)] +pub struct BlurhashConfig { + components_x: u32, + components_y: u32, + /// size limit in bytes + size_limit: u64, +} + +impl From for BlurhashConfig { + fn from(value: CoreBlurhashConfig) -> Self { + Self { + components_x: value.components_x, + components_y: value.components_y, + size_limit: value.blurhash_max_raw_size, + } + } +} + +#[derive(Debug)] +pub(crate) enum BlurhashingError { + ImageError(Box), + HashingLibError(Box), + ImageTooLarge, +} +impl From for BlurhashingError { + fn from(value: ImageError) -> Self { Self::ImageError(Box::new(value)) } +} + +impl From for BlurhashingError { + fn from(value: blurhash::Error) -> Self { Self::HashingLibError(Box::new(value)) } +} + +impl Display for BlurhashingError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Blurhash Error:")?; + match &self { + | Self::ImageTooLarge => write!(f, "Image was too large to blurhash")?, + | Self::HashingLibError(e) => + write!(f, "There was an error with the blurhashing library => {e}")?, + + | Self::ImageError(e) => + write!(f, "There was an error with the image loading library => {e}")?, + }; + + Ok(()) + } +} diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 0d98853d..7775173a 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -1,10 +1,11 @@ +#[cfg(feature = "blurhashing")] +pub mod blurhash; mod data; pub(super) mod migrations; mod preview; mod remote; mod tests; mod thumbnail; - use std::{path::PathBuf, sync::Arc, time::SystemTime}; use async_trait::async_trait; From 442bb9889c45e5b17cdf5c7fd90e4751f7582400 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 4 Feb 2025 02:24:50 +0000 Subject: [PATCH 149/328] improvements on blurhashing feature Signed-off-by: Jason Volk --- Cargo.toml | 4 +- src/api/Cargo.toml | 1 - src/api/client/media.rs | 44 +++++-------- src/core/Cargo.toml | 1 - src/main/Cargo.toml | 4 +- src/service/media/blurhash.rs | 113 +++++++++++++++++++--------------- src/service/media/mod.rs | 1 - 7 files changed, 87 insertions(+), 81 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index c580d22d..b25d9175 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -194,8 +194,10 @@ features = [ version = "0.2.3" default-features = false features = [ - "fast-linear-to-srgb","image" + "fast-linear-to-srgb", + "image", ] + # logging [workspace.dependencies.log] version = "0.4.22" diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 8a5ef3f0..385e786f 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -17,7 +17,6 @@ crate-type = [ ] [features] -blurhashing=[] element_hacks = [] release_max_log_level = [ "tracing/max_level_trace", diff --git a/src/api/client/media.rs b/src/api/client/media.rs index 115f2581..0cff8185 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -57,40 +57,28 @@ pub(crate) async fn create_content_route( let filename = body.filename.as_deref(); let content_type = body.content_type.as_deref(); let content_disposition = make_content_disposition(None, content_type, filename); - let mxc = Mxc { + let ref mxc = Mxc { server_name: services.globals.server_name(), media_id: &utils::random_string(MXC_LENGTH), }; - #[cfg(feature = "blurhashing")] - { - if body.generate_blurhash { - let (blurhash, create_media_result) = tokio::join!( - services - .media - .create_blurhash(&body.file, content_type, filename), - services.media.create( - &mxc, - Some(user), - Some(&content_disposition), - content_type, - &body.file - ) - ); - return 
create_media_result.map(|()| create_content::v3::Response { - content_uri: mxc.to_string().into(), - blurhash, - }); - } - } services .media - .create(&mxc, Some(user), Some(&content_disposition), content_type, &body.file) - .await - .map(|()| create_content::v3::Response { - content_uri: mxc.to_string().into(), - blurhash: None, - }) + .create(mxc, Some(user), Some(&content_disposition), content_type, &body.file) + .await?; + + let blurhash = body.generate_blurhash.then(|| { + services + .media + .create_blurhash(&body.file, content_type, filename) + .ok() + .flatten() + }); + + Ok(create_content::v3::Response { + content_uri: mxc.to_string().into(), + blurhash: blurhash.flatten(), + }) } /// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}` diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index 5d46ec3b..ef2df4ff 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -54,7 +54,6 @@ sentry_telemetry = [] conduwuit_mods = [ "dep:libloading" ] -blurhashing = [] [dependencies] argon2.workspace = true diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index 7e1cb86b..87ca48c8 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -49,6 +49,9 @@ default = [ "zstd_compression", ] +blurhashing = [ + "conduwuit-service/blurhashing", +] brotli_compression = [ "conduwuit-api/brotli_compression", "conduwuit-core/brotli_compression", @@ -101,7 +104,6 @@ perf_measurements = [ "conduwuit-core/perf_measurements", "conduwuit-core/sentry_telemetry", ] -blurhashing =["conduwuit-service/blurhashing","conduwuit-core/blurhashing","conduwuit-api/blurhashing"] # increases performance, reduces build times, and reduces binary size by not compiling or # genreating code for log level filters that users will generally not use (debug and trace) release_max_log_level = [ diff --git a/src/service/media/blurhash.rs b/src/service/media/blurhash.rs index c470925c..aa6685b2 100644 --- a/src/service/media/blurhash.rs +++ b/src/service/media/blurhash.rs @@ -1,56 +1,58 @@ -use std::{fmt::Display, io::Cursor, path::Path}; +use std::{error::Error, ffi::OsStr, fmt::Display, io::Cursor, path::Path}; -use blurhash::encode_image; -use conduwuit::{config::BlurhashConfig as CoreBlurhashConfig, debug_error, implement, trace}; +use conduwuit::{config::BlurhashConfig as CoreBlurhashConfig, err, implement, Result}; use image::{DynamicImage, ImageDecoder, ImageError, ImageFormat, ImageReader}; use super::Service; #[implement(Service)] -pub async fn create_blurhash( +pub fn create_blurhash( &self, file: &[u8], content_type: Option<&str>, file_name: Option<&str>, -) -> Option { +) -> Result> { + if !cfg!(feature = "blurhashing") { + return Ok(None); + } + let config = BlurhashConfig::from(self.services.server.config.blurhashing); + + // since 0 means disabled blurhashing, skipped blurhashing if config.size_limit == 0 { - trace!("since 0 means disabled blurhashing, skipped blurhashing logic"); - return None; + return Ok(None); } - let file_data = file.to_owned(); - let content_type = content_type.map(String::from); - let file_name = file_name.map(String::from); - let blurhashing_result = tokio::task::spawn_blocking(move || { - get_blurhash_from_request(&file_data, content_type, file_name, config) - }) - .await - .expect("no join error"); - - match blurhashing_result { - | Ok(result) => Some(result), - | Err(e) => { - debug_error!("Error when blurhashing: {e}"); - None - }, - } + get_blurhash_from_request(file, content_type, file_name, config) + .map_err(|e| err!(debug_error!("blurhashing error: {e}"))) 
+ .map(Some) } /// Returns the blurhash or a blurhash error which implements Display. +#[tracing::instrument( + name = "blurhash", + level = "debug", + skip(data), + fields( + bytes = data.len(), + ), +)] fn get_blurhash_from_request( data: &[u8], - mime: Option, - filename: Option, + mime: Option<&str>, + filename: Option<&str>, config: BlurhashConfig, ) -> Result { // Get format image is supposed to be in let format = get_format_from_data_mime_and_filename(data, mime, filename)?; + // Get the image reader for said image format let decoder = get_image_decoder_with_format_and_data(format, data)?; + // Check image size makes sense before unpacking whole image if is_image_above_size_limit(&decoder, config) { return Err(BlurhashingError::ImageTooLarge); } + // decode the image finally let image = DynamicImage::from_decoder(decoder)?; @@ -64,24 +66,17 @@ fn get_blurhash_from_request( /// different file format than file. fn get_format_from_data_mime_and_filename( data: &[u8], - mime: Option, - filename: Option, + mime: Option<&str>, + filename: Option<&str>, ) -> Result { - let mut image_format = None; - if let Some(mime) = mime { - image_format = ImageFormat::from_mime_type(mime); - } - if let (Some(filename), None) = (filename, image_format) { - if let Some(extension) = Path::new(&filename).extension() { - image_format = ImageFormat::from_mime_type(extension.to_string_lossy()); - } - } + let extension = filename + .map(Path::new) + .and_then(Path::extension) + .map(OsStr::to_string_lossy); - if let Some(format) = image_format { - Ok(format) - } else { - image::guess_format(data).map_err(Into::into) - } + mime.or(extension.as_deref()) + .and_then(ImageFormat::from_mime_type) + .map_or_else(|| image::guess_format(data).map_err(Into::into), Ok) } fn get_image_decoder_with_format_and_data( @@ -99,23 +94,37 @@ fn is_image_above_size_limit( ) -> bool { decoder.total_bytes() >= blurhash_config.size_limit } + +#[cfg(feature = "blurhashing")] +#[tracing::instrument(name = "encode", level = "debug", skip_all)] #[inline] fn blurhash_an_image( image: &DynamicImage, blurhash_config: BlurhashConfig, ) -> Result { - Ok(encode_image( + Ok(blurhash::encode_image( blurhash_config.components_x, blurhash_config.components_y, &image.to_rgba8(), )?) 
} -#[derive(Clone, Copy)] + +#[cfg(not(feature = "blurhashing"))] +#[inline] +fn blurhash_an_image( + _image: &DynamicImage, + _blurhash_config: BlurhashConfig, +) -> Result { + Err(BlurhashingError::Unavailable) +} + +#[derive(Clone, Copy, Debug)] pub struct BlurhashConfig { - components_x: u32, - components_y: u32, + pub components_x: u32, + pub components_y: u32, + /// size limit in bytes - size_limit: u64, + pub size_limit: u64, } impl From for BlurhashConfig { @@ -129,15 +138,20 @@ impl From for BlurhashConfig { } #[derive(Debug)] -pub(crate) enum BlurhashingError { +pub enum BlurhashingError { + HashingLibError(Box), ImageError(Box), - HashingLibError(Box), ImageTooLarge, + + #[cfg(not(feature = "blurhashing"))] + Unavailable, } + impl From for BlurhashingError { fn from(value: ImageError) -> Self { Self::ImageError(Box::new(value)) } } +#[cfg(feature = "blurhashing")] impl From for BlurhashingError { fn from(value: blurhash::Error) -> Self { Self::HashingLibError(Box::new(value)) } } @@ -152,6 +166,9 @@ impl Display for BlurhashingError { | Self::ImageError(e) => write!(f, "There was an error with the image loading library => {e}")?, + + #[cfg(not(feature = "blurhashing"))] + | Self::Unavailable => write!(f, "Blurhashing is not supported")?, }; Ok(()) diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 7775173a..f5913f43 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -1,4 +1,3 @@ -#[cfg(feature = "blurhashing")] pub mod blurhash; mod data; pub(super) mod migrations; From 04656a78865dfd60176965c5ae531d1939e0dd7d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 5 Feb 2025 03:00:47 +0000 Subject: [PATCH 150/328] fix spaces pagination bug Signed-off-by: Jason Volk --- src/service/rooms/spaces/mod.rs | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 1ee2727c..11794752 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -582,7 +582,7 @@ impl Service { parents.pop_front(); parents.push_back(room); - let short_room_ids: Vec<_> = parents + let next_short_room_ids: Vec<_> = parents .iter() .stream() .filter_map(|room_id| async move { @@ -591,16 +591,18 @@ impl Service { .collect() .await; - Some( - PaginationToken { - short_room_ids, - limit: UInt::new(max_depth) - .expect("When sent in request it must have been valid UInt"), - max_depth: UInt::new(max_depth) - .expect("When sent in request it must have been valid UInt"), - suggested_only, - } - .to_string(), + (next_short_room_ids != short_room_ids && !next_short_room_ids.is_empty()).then( + || { + PaginationToken { + short_room_ids: next_short_room_ids, + limit: UInt::new(max_depth) + .expect("When sent in request it must have been valid UInt"), + max_depth: UInt::new(max_depth) + .expect("When sent in request it must have been valid UInt"), + suggested_only, + } + .to_string() + }, ) } else { None From 9158edfb7c98229af43b2124e972723b1ab4e75a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 5 Feb 2025 05:10:30 +0000 Subject: [PATCH 151/328] fix empty join timeline bug Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 48 ++++++++++++++++++++++++++++----------- 1 file changed, 35 insertions(+), 13 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index a97e4329..1d1a91ba 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -691,7 +691,7 @@ async fn load_joined_room( heroes, 
joined_member_count, invited_member_count, - state_events, + mut state_events, mut device_list_updates, left_encrypted_users, } = calculate_state_changes( @@ -708,6 +708,39 @@ async fn load_joined_room( .boxed() .await?; + let is_sender_membership = |pdu: &PduEvent| { + pdu.kind == StateEventType::RoomMember.into() + && pdu + .state_key + .as_deref() + .is_some_and(is_equal_to!(sender_user.as_str())) + }; + + let joined_sender_member: Option<_> = (joined_since_last_sync && timeline_pdus.is_empty()) + .then(|| { + state_events + .iter() + .position(is_sender_membership) + .map(|pos| state_events.swap_remove(pos)) + }) + .flatten(); + + let prev_batch = timeline_pdus.first().map(at!(0)).or_else(|| { + joined_sender_member + .is_some() + .then_some(since) + .map(Into::into) + }); + + let room_events = timeline_pdus + .into_iter() + .stream() + .wide_filter_map(|item| ignored_filter(services, item, sender_user)) + .map(at!(1)) + .chain(joined_sender_member.into_iter().stream()) + .map(|pdu| pdu.to_sync_room_event()) + .collect::>(); + let account_data_events = services .account_data .changes_since(Some(room_id), sender_user, since, Some(next_batch)) @@ -722,13 +755,6 @@ async fn load_joined_room( .map(ToOwned::to_owned) .collect::>(); - let room_events = timeline_pdus - .iter() - .stream() - .wide_filter_map(|item| ignored_filter(services, item.clone(), sender_user)) - .map(|(_, pdu)| pdu.to_sync_room_event()) - .collect(); - let send_notification_counts = last_notification_read.is_none_or(|count| count > since); let notification_count: OptionFuture<_> = send_notification_counts @@ -830,12 +856,8 @@ async fn load_joined_room( unread_notifications: UnreadNotificationsCount { highlight_count, notification_count }, timeline: Timeline { limited: limited || joined_since_last_sync, + prev_batch: prev_batch.as_ref().map(ToString::to_string), events: room_events, - prev_batch: timeline_pdus - .first() - .map(at!(0)) - .as_ref() - .map(ToString::to_string), }, state: RoomState { events: state_events From f80d85e1076f1f155a7484d5ad80acbb58a9b1ac Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 5 Feb 2025 01:43:27 -0500 Subject: [PATCH 152/328] add SIGUSR1 systemctl reload config support to systemd units Signed-off-by: strawberry --- arch/conduwuit.service | 3 ++- debian/conduwuit.service | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/conduwuit.service b/arch/conduwuit.service index 7c05c259..4b7853e3 100644 --- a/arch/conduwuit.service +++ b/arch/conduwuit.service @@ -7,7 +7,8 @@ RequiresMountsFor=/var/lib/private/conduwuit [Service] DynamicUser=yes -Type=notify +Type=notify-reload +ReloadSignal=SIGUSR1 AmbientCapabilities= CapabilityBoundingSet= diff --git a/debian/conduwuit.service b/debian/conduwuit.service index 3c2ec49d..452544bf 100644 --- a/debian/conduwuit.service +++ b/debian/conduwuit.service @@ -8,7 +8,8 @@ Documentation=https://conduwuit.puppyirl.gay/ DynamicUser=yes User=conduwuit Group=conduwuit -Type=notify +Type=notify-reload +ReloadSignal=SIGUSR1 Environment="CONDUWUIT_CONFIG=/etc/conduwuit/conduwuit.toml" From f6dfc9538f8b625c2ae28a462ecf4b7e3d208f85 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 5 Feb 2025 01:44:49 -0500 Subject: [PATCH 153/328] bump ruwuma to stop erroring on duplicate yaml values on appservice EDUs (we dont implement this atm anyways) Signed-off-by: strawberry --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 
b710d6fc..926099b5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3490,7 +3490,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "assign", "js_int", @@ -3512,7 +3512,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "js_int", "ruma-common", @@ -3524,7 +3524,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "as_variant", "assign", @@ -3547,7 +3547,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "as_variant", "base64 0.22.1", @@ -3578,7 +3578,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "as_variant", "indexmap 2.7.0", @@ -3603,7 +3603,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "bytes", "http", @@ -3621,7 +3621,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3630,7 +3630,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "js_int", "ruma-common", @@ -3640,7 +3640,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3655,7 +3655,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "js_int", "ruma-common", @@ -3667,7 +3667,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "headers", "http", @@ -3680,7 +3680,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3696,7 +3696,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index b25d9175..ce483bbc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -342,7 +342,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "b560338b2a50dbf61ecfe80808b9b095ad4cec00" +rev = "517ac4572276a2e0ad587113776c544b51166f08" features = [ "compat", "rand", From fda8b3680986dc8e038d51b93f7d36bf5c991ef6 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 5 Feb 2025 01:45:21 -0500 Subject: [PATCH 154/328] add more systemd notify integration with stopping/reloading/ready states Signed-off-by: strawberry --- src/core/server.rs | 12 ++++++++++-- src/router/run.rs | 4 ---- src/service/config/mod.rs | 8 ++++++++ 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/src/core/server.rs b/src/core/server.rs index 45ba7420..80493c94 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -69,6 +69,10 @@ impl Server { return Err!("Reloading not enabled"); } + #[cfg(all(feature = "systemd", target_os = "linux"))] + sd_notify::notify(true, &[sd_notify::NotifyState::Reloading]) + .expect("failed to notify systemd of reloading state"); + if self.reloading.swap(true, Ordering::AcqRel) { return Err!("Reloading already in progress"); } @@ -83,7 +87,7 @@ impl Server { }) } - pub fn restart(&self) -> Result<()> { + pub fn restart(&self) -> Result { if self.restarting.swap(true, Ordering::AcqRel) { return Err!("Restart already in progress"); } @@ -93,7 +97,11 @@ impl Server { }) } - pub fn shutdown(&self) -> Result<()> { + 
pub fn shutdown(&self) -> Result { + #[cfg(all(feature = "systemd", target_os = "linux"))] + sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]) + .expect("failed to notify systemd of stopping state"); + if self.stopping.swap(true, Ordering::AcqRel) { return Err!("Shutdown already in progress"); } diff --git a/src/router/run.rs b/src/router/run.rs index 26701735..024cb813 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -100,10 +100,6 @@ pub(crate) async fn stop(services: Arc) -> Result<()> { ); } - #[cfg(all(feature = "systemd", target_os = "linux"))] - sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]) - .expect("failed to notify systemd of stopping state"); - info!("Shutdown complete."); Ok(()) } diff --git a/src/service/config/mod.rs b/src/service/config/mod.rs index 8bd09a52..c9ac37a3 100644 --- a/src/service/config/mod.rs +++ b/src/service/config/mod.rs @@ -43,7 +43,15 @@ impl Deref for Service { #[implement(Service)] fn handle_reload(&self) -> Result { if self.server.config.config_reload_signal { + #[cfg(all(feature = "systemd", target_os = "linux"))] + sd_notify::notify(true, &[sd_notify::NotifyState::Reloading]) + .expect("failed to notify systemd of reloading state"); + self.reload(iter::empty())?; + + #[cfg(all(feature = "systemd", target_os = "linux"))] + sd_notify::notify(true, &[sd_notify::NotifyState::Ready]) + .expect("failed to notify systemd of ready state"); } Ok(()) From 62d80b97e65237539a103ded87f4e650ddafe4b8 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 03:14:37 +0000 Subject: [PATCH 155/328] add systemd unit logging mode Signed-off-by: Jason Volk --- src/core/log/console.rs | 77 +++++++++++++++++++++++++++++++++--- src/core/log/mod.rs | 4 +- src/main/logging.rs | 4 +- src/service/admin/console.rs | 5 ++- 4 files changed, 78 insertions(+), 12 deletions(-) diff --git a/src/core/log/console.rs b/src/core/log/console.rs index 0bc44fa7..1f04ba26 100644 --- a/src/core/log/console.rs +++ b/src/core/log/console.rs @@ -1,3 +1,5 @@ +use std::{env, io, sync::LazyLock}; + use tracing::{ field::{Field, Visit}, Event, Level, Subscriber, @@ -7,12 +9,59 @@ use tracing_subscriber::{ fmt, fmt::{ format::{Compact, DefaultVisitor, Format, Full, Pretty, Writer}, - FmtContext, FormatEvent, FormatFields, + FmtContext, FormatEvent, FormatFields, MakeWriter, }, registry::LookupSpan, }; -use crate::{Config, Result}; +use crate::{apply, Config, Result}; + +static SYSTEMD_MODE: LazyLock = + LazyLock::new(|| env::var("SYSTEMD_EXEC_PID").is_ok() && env::var("JOURNAL_STREAM").is_ok()); + +pub struct ConsoleWriter { + stdout: io::Stdout, + stderr: io::Stderr, + _journal_stream: [u64; 2], + use_stderr: bool, +} + +impl ConsoleWriter { + #[must_use] + pub fn new(_config: &Config) -> Self { + let journal_stream = get_journal_stream(); + Self { + stdout: io::stdout(), + stderr: io::stderr(), + _journal_stream: journal_stream.into(), + use_stderr: journal_stream.0 != 0, + } + } +} + +impl<'a> MakeWriter<'a> for ConsoleWriter { + type Writer = &'a Self; + + fn make_writer(&'a self) -> Self::Writer { self } +} + +impl io::Write for &'_ ConsoleWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + if self.use_stderr { + self.stderr.lock().write(buf) + } else { + self.stdout.lock().write(buf) + } + } + + fn flush(&mut self) -> io::Result<()> { + if self.use_stderr { + self.stderr.lock().flush() + } else { + self.stdout.lock().flush() + } + } +} pub struct ConsoleFormat { _compact: Format, @@ -20,10 +69,6 @@ pub struct ConsoleFormat { pretty: Format, } 
-struct ConsoleVisitor<'a> { - visitor: DefaultVisitor<'a>, -} - impl ConsoleFormat { #[must_use] pub fn new(config: &Config) -> Self { @@ -68,6 +113,10 @@ where } } +struct ConsoleVisitor<'a> { + visitor: DefaultVisitor<'a>, +} + impl<'writer> FormatFields<'writer> for ConsoleFormat { fn format_fields(&self, writer: Writer<'writer>, fields: R) -> Result<(), std::fmt::Error> where @@ -92,3 +141,19 @@ impl Visit for ConsoleVisitor<'_> { self.visitor.record_debug(field, value); } } + +#[must_use] +fn get_journal_stream() -> (u64, u64) { + is_systemd_mode() + .then(|| env::var("JOURNAL_STREAM").ok()) + .flatten() + .as_deref() + .and_then(|s| s.split_once(':')) + .map(apply!(2, str::parse)) + .map(apply!(2, Result::unwrap_or_default)) + .unwrap_or((0, 0)) +} + +#[inline] +#[must_use] +pub fn is_systemd_mode() -> bool { *SYSTEMD_MODE } diff --git a/src/core/log/mod.rs b/src/core/log/mod.rs index 0c51a383..0c1840d0 100644 --- a/src/core/log/mod.rs +++ b/src/core/log/mod.rs @@ -2,14 +2,14 @@ pub mod capture; pub mod color; -mod console; +pub mod console; pub mod fmt; pub mod fmt_span; mod reload; mod suppress; pub use capture::Capture; -pub use console::ConsoleFormat; +pub use console::{is_systemd_mode, ConsoleFormat, ConsoleWriter}; pub use reload::{LogLevelReloadHandles, ReloadHandle}; pub use suppress::Suppress; pub use tracing::Level; diff --git a/src/main/logging.rs b/src/main/logging.rs index 85945e8a..35e482de 100644 --- a/src/main/logging.rs +++ b/src/main/logging.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use conduwuit::{ config::Config, debug_warn, err, - log::{capture, fmt_span, ConsoleFormat, LogLevelReloadHandles}, + log::{capture, fmt_span, ConsoleFormat, ConsoleWriter, LogLevelReloadHandles}, result::UnwrapOrErr, Result, }; @@ -30,7 +30,7 @@ pub(crate) fn init( .with_span_events(console_span_events) .event_format(ConsoleFormat::new(config)) .fmt_fields(ConsoleFormat::new(config)) - .map_writer(|w| w); + .with_writer(ConsoleWriter::new(config)); let (console_reload_filter, console_reload_handle) = reload::Layer::new(console_filter.clone()); diff --git a/src/service/admin/console.rs b/src/service/admin/console.rs index de201f4b..59b9a31b 100644 --- a/src/service/admin/console.rs +++ b/src/service/admin/console.rs @@ -1,10 +1,11 @@ #![cfg(feature = "console")] + use std::{ collections::VecDeque, sync::{Arc, Mutex}, }; -use conduwuit::{debug, defer, error, log, Server}; +use conduwuit::{debug, defer, error, log, log::is_systemd_mode, Server}; use futures::future::{AbortHandle, Abortable}; use ruma::events::room::message::RoomMessageEventContent; use rustyline_async::{Readline, ReadlineError, ReadlineEvent}; @@ -123,7 +124,7 @@ impl Console { } async fn readline(self: &Arc) -> Result { - let _suppression = log::Suppress::new(&self.server); + let _suppression = (!is_systemd_mode()).then(|| log::Suppress::new(&self.server)); let (mut readline, _writer) = Readline::new(PROMPT.to_owned())?; let self_ = Arc::clone(self); From 16b07ae3ecf6dee591b79dd6198cb3e5a99410be Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 6 Feb 2025 16:47:10 -0500 Subject: [PATCH 156/328] add default systemd support for a TTY to use console mode from Signed-off-by: strawberry --- arch/conduwuit.service | 12 ++++++++++++ debian/conduwuit.service | 12 ++++++++++++ 2 files changed, 24 insertions(+) diff --git a/arch/conduwuit.service b/arch/conduwuit.service index 4b7853e3..fa3616d8 100644 --- a/arch/conduwuit.service +++ b/arch/conduwuit.service @@ -10,6 +10,18 @@ DynamicUser=yes Type=notify-reload 
ReloadSignal=SIGUSR1 +TTYPath=/dev/tty25 +DeviceAllow=char-tty +StandardInput=tty-force +StandardOutput=tty +StandardError=journal+console +TTYReset=yes +# uncomment to allow buffer to be cleared every restart +TTYVTDisallocate=no + +TTYColumns=120 +TTYRows=40 + AmbientCapabilities= CapabilityBoundingSet= diff --git a/debian/conduwuit.service b/debian/conduwuit.service index 452544bf..4d6f4eef 100644 --- a/debian/conduwuit.service +++ b/debian/conduwuit.service @@ -11,6 +11,18 @@ Group=conduwuit Type=notify-reload ReloadSignal=SIGUSR1 +TTYPath=/dev/tty25 +DeviceAllow=char-tty +StandardInput=tty-force +StandardOutput=tty +StandardError=journal+console +TTYReset=yes +# uncomment to allow buffer to be cleared every restart +TTYVTDisallocate=no + +TTYColumns=120 +TTYRows=40 + Environment="CONDUWUIT_CONFIG=/etc/conduwuit/conduwuit.toml" ExecStart=/usr/sbin/conduwuit From f761d4d5c9e347699725bff0437a8df3b1b3db59 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 6 Feb 2025 16:48:19 -0500 Subject: [PATCH 157/328] bump db version to 17, cleanup, rerun old migrations for users who downgraded Signed-off-by: strawberry --- src/service/globals/data.rs | 3 +-- src/service/migrations.rs | 32 +++++++++++++------------------- 2 files changed, 14 insertions(+), 21 deletions(-) diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 39cb9be1..26a18607 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -69,9 +69,8 @@ impl Data { } #[inline] - pub fn bump_database_version(&self, new_version: u64) -> Result<()> { + pub fn bump_database_version(&self, new_version: u64) { self.global.raw_put(b"version", new_version); - Ok(()) } #[inline] diff --git a/src/service/migrations.rs b/src/service/migrations.rs index 27b4ab5a..9c3ea293 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -27,15 +27,7 @@ use crate::{media, Services}; /// - If database is opened at lesser version we apply migrations up to this. /// Note that named-feature migrations may also be performed when opening at /// equal or lesser version. These are expected to be backward-compatible. -pub(crate) const DATABASE_VERSION: u64 = 13; - -/// Conduit's database version. -/// -/// Conduit bumped the database version to 16, but did not introduce any -/// breaking changes. Their database migrations are extremely fragile and risky, -/// and also do not really apply to us, so just to retain Conduit -> conduwuit -/// compatibility we'll check for both versions. 
-pub(crate) const CONDUIT_DATABASE_VERSION: u64 = 16; +pub(crate) const DATABASE_VERSION: u64 = 17; pub(crate) async fn migrations(services: &Services) -> Result<()> { let users_count = services.users.count().await; @@ -63,10 +55,7 @@ pub(crate) async fn migrations(services: &Services) -> Result<()> { async fn fresh(services: &Services) -> Result<()> { let db = &services.db; - services - .globals - .db - .bump_database_version(DATABASE_VERSION)?; + services.globals.db.bump_database_version(DATABASE_VERSION); db["global"].insert(b"feat_sha256_media", []); db["global"].insert(b"fix_bad_double_separator_in_state_cache", []); @@ -130,6 +119,7 @@ async fn migrate(services: &Services) -> Result<()> { .get(b"fix_referencedevents_missing_sep") .await .is_not_found() + || services.globals.db.database_version().await < 17 { fix_referencedevents_missing_sep(services).await?; } @@ -138,15 +128,19 @@ async fn migrate(services: &Services) -> Result<()> { .get(b"fix_readreceiptid_readreceipt_duplicates") .await .is_not_found() + || services.globals.db.database_version().await < 17 { fix_readreceiptid_readreceipt_duplicates(services).await?; } - let version_match = services.globals.db.database_version().await == DATABASE_VERSION - || services.globals.db.database_version().await == CONDUIT_DATABASE_VERSION; + if services.globals.db.database_version().await < 17 { + services.globals.db.bump_database_version(17); + info!("Migration: Bumped database version to 17"); + } - assert!( - version_match, + assert_eq!( + services.globals.db.database_version().await, + DATABASE_VERSION, "Failed asserting local database version {} is equal to known latest conduwuit database \ version {}", services.globals.db.database_version().await, @@ -290,7 +284,7 @@ async fn db_lt_12(services: &Services) -> Result<()> { .await?; } - services.globals.db.bump_database_version(12)?; + services.globals.db.bump_database_version(12); info!("Migration: 11 -> 12 finished"); Ok(()) } @@ -335,7 +329,7 @@ async fn db_lt_13(services: &Services) -> Result<()> { .await?; } - services.globals.db.bump_database_version(13)?; + services.globals.db.bump_database_version(13); info!("Migration: 12 -> 13 finished"); Ok(()) } From ef2d307c15dba1731dc6b4d67e758f27590640c6 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 6 Feb 2025 16:48:48 -0500 Subject: [PATCH 158/328] fix warnings and errors when building with no features Signed-off-by: strawberry --- src/main/runtime.rs | 11 ++--- src/service/media/blurhash.rs | 87 +++++++++++++++++---------------- src/service/media/migrations.rs | 8 +-- 3 files changed, 51 insertions(+), 55 deletions(-) diff --git a/src/main/runtime.rs b/src/main/runtime.rs index 02b9931f..474b373b 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -8,13 +8,11 @@ use std::{ time::Duration, }; +#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] +use conduwuit::result::LogDebugErr; use conduwuit::{ is_true, - result::LogDebugErr, - utils::{ - available_parallelism, - sys::compute::{nth_core_available, set_affinity}, - }, + utils::sys::compute::{nth_core_available, set_affinity}, Result, }; use tokio::runtime::Builder; @@ -25,6 +23,7 @@ const WORKER_NAME: &str = "conduwuit:worker"; const WORKER_MIN: usize = 2; const WORKER_KEEPALIVE: u64 = 36; const MAX_BLOCKING_THREADS: usize = 1024; +#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] const DISABLE_MUZZY_THRESHOLD: usize = 4; static WORKER_AFFINITY: OnceLock = OnceLock::new(); @@ -137,7 +136,7 @@ fn set_worker_mallctl(id: usize) { .get() 
.expect("GC_MUZZY initialized by runtime::new()"); - let muzzy_auto_disable = available_parallelism() >= DISABLE_MUZZY_THRESHOLD; + let muzzy_auto_disable = conduwuit::utils::available_parallelism() >= DISABLE_MUZZY_THRESHOLD; if matches!(muzzy_option, Some(false) | None if muzzy_auto_disable) { set_muzzy_decay(-1).log_debug_err().ok(); } diff --git a/src/service/media/blurhash.rs b/src/service/media/blurhash.rs index aa6685b2..60ade723 100644 --- a/src/service/media/blurhash.rs +++ b/src/service/media/blurhash.rs @@ -1,20 +1,30 @@ -use std::{error::Error, ffi::OsStr, fmt::Display, io::Cursor, path::Path}; - -use conduwuit::{config::BlurhashConfig as CoreBlurhashConfig, err, implement, Result}; -use image::{DynamicImage, ImageDecoder, ImageError, ImageFormat, ImageReader}; +#[cfg(feature = "blurhashing")] +use conduwuit::config::BlurhashConfig as CoreBlurhashConfig; +use conduwuit::{implement, Result}; use super::Service; + #[implement(Service)] +#[cfg(not(feature = "blurhashing"))] +pub fn create_blurhash( + &self, + _file: &[u8], + _content_type: Option<&str>, + _file_name: Option<&str>, +) -> Result> { + conduwuit::debug_warn!("blurhashing on upload support was not compiled"); + + Ok(None) +} + +#[implement(Service)] +#[cfg(feature = "blurhashing")] pub fn create_blurhash( &self, file: &[u8], content_type: Option<&str>, file_name: Option<&str>, ) -> Result> { - if !cfg!(feature = "blurhashing") { - return Ok(None); - } - let config = BlurhashConfig::from(self.services.server.config.blurhashing); // since 0 means disabled blurhashing, skipped blurhashing @@ -23,7 +33,7 @@ pub fn create_blurhash( } get_blurhash_from_request(file, content_type, file_name, config) - .map_err(|e| err!(debug_error!("blurhashing error: {e}"))) + .map_err(|e| conduwuit::err!(debug_error!("blurhashing error: {e}"))) .map(Some) } @@ -36,6 +46,7 @@ pub fn create_blurhash( bytes = data.len(), ), )] +#[cfg(feature = "blurhashing")] fn get_blurhash_from_request( data: &[u8], mime: Option<&str>, @@ -53,8 +64,7 @@ fn get_blurhash_from_request( return Err(BlurhashingError::ImageTooLarge); } - // decode the image finally - let image = DynamicImage::from_decoder(decoder)?; + let image = image::DynamicImage::from_decoder(decoder)?; blurhash_an_image(&image, config) } @@ -64,31 +74,34 @@ fn get_blurhash_from_request( /// Then it checks if the filename has a format, otherwise just guess based on /// the binary data Assumes that mime and filename extension won't be for a /// different file format than file. 
+#[cfg(feature = "blurhashing")] fn get_format_from_data_mime_and_filename( data: &[u8], mime: Option<&str>, filename: Option<&str>, -) -> Result { +) -> Result { let extension = filename - .map(Path::new) - .and_then(Path::extension) - .map(OsStr::to_string_lossy); + .map(std::path::Path::new) + .and_then(std::path::Path::extension) + .map(std::ffi::OsStr::to_string_lossy); mime.or(extension.as_deref()) - .and_then(ImageFormat::from_mime_type) + .and_then(image::ImageFormat::from_mime_type) .map_or_else(|| image::guess_format(data).map_err(Into::into), Ok) } +#[cfg(feature = "blurhashing")] fn get_image_decoder_with_format_and_data( - image_format: ImageFormat, + image_format: image::ImageFormat, data: &[u8], -) -> Result, BlurhashingError> { - let mut image_reader = ImageReader::new(Cursor::new(data)); +) -> Result, BlurhashingError> { + let mut image_reader = image::ImageReader::new(std::io::Cursor::new(data)); image_reader.set_format(image_format); Ok(Box::new(image_reader.into_decoder()?)) } -fn is_image_above_size_limit( +#[cfg(feature = "blurhashing")] +fn is_image_above_size_limit( decoder: &T, blurhash_config: BlurhashConfig, ) -> bool { @@ -99,7 +112,7 @@ fn is_image_above_size_limit( #[tracing::instrument(name = "encode", level = "debug", skip_all)] #[inline] fn blurhash_an_image( - image: &DynamicImage, + image: &image::DynamicImage, blurhash_config: BlurhashConfig, ) -> Result { Ok(blurhash::encode_image( @@ -109,15 +122,6 @@ fn blurhash_an_image( )?) } -#[cfg(not(feature = "blurhashing"))] -#[inline] -fn blurhash_an_image( - _image: &DynamicImage, - _blurhash_config: BlurhashConfig, -) -> Result { - Err(BlurhashingError::Unavailable) -} - #[derive(Clone, Copy, Debug)] pub struct BlurhashConfig { pub components_x: u32, @@ -127,6 +131,7 @@ pub struct BlurhashConfig { pub size_limit: u64, } +#[cfg(feature = "blurhashing")] impl From for BlurhashConfig { fn from(value: CoreBlurhashConfig) -> Self { Self { @@ -138,17 +143,17 @@ impl From for BlurhashConfig { } #[derive(Debug)] +#[cfg(feature = "blurhashing")] pub enum BlurhashingError { - HashingLibError(Box), - ImageError(Box), + HashingLibError(Box), + #[cfg(feature = "blurhashing")] + ImageError(Box), ImageTooLarge, - - #[cfg(not(feature = "blurhashing"))] - Unavailable, } -impl From for BlurhashingError { - fn from(value: ImageError) -> Self { Self::ImageError(Box::new(value)) } +#[cfg(feature = "blurhashing")] +impl From for BlurhashingError { + fn from(value: image::ImageError) -> Self { Self::ImageError(Box::new(value)) } } #[cfg(feature = "blurhashing")] @@ -156,19 +161,17 @@ impl From for BlurhashingError { fn from(value: blurhash::Error) -> Self { Self::HashingLibError(Box::new(value)) } } -impl Display for BlurhashingError { +#[cfg(feature = "blurhashing")] +impl std::fmt::Display for BlurhashingError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "Blurhash Error:")?; match &self { | Self::ImageTooLarge => write!(f, "Image was too large to blurhash")?, | Self::HashingLibError(e) => write!(f, "There was an error with the blurhashing library => {e}")?, - + #[cfg(feature = "blurhashing")] | Self::ImageError(e) => write!(f, "There was an error with the image loading library => {e}")?, - - #[cfg(not(feature = "blurhashing"))] - | Self::Unavailable => write!(f, "Blurhashing is not supported")?, }; Ok(()) diff --git a/src/service/media/migrations.rs b/src/service/media/migrations.rs index 9555edd7..8526ffcd 100644 --- a/src/service/media/migrations.rs +++ b/src/service/media/migrations.rs @@ 
-13,7 +13,7 @@ use conduwuit::{ warn, Config, Result, }; -use crate::{migrations, Services}; +use crate::Services; /// Migrates a media directory from legacy base64 file names to sha2 file names. /// All errors are fatal. Upon success the database is keyed to not perform this @@ -48,12 +48,6 @@ pub(crate) async fn migrate_sha256_media(services: &Services) -> Result<()> { } } - // Apply fix from when sha256_media was backward-incompat and bumped the schema - // version from 13 to 14. For users satisfying these conditions we can go back. - if services.globals.db.database_version().await == 14 && migrations::DATABASE_VERSION == 13 { - services.globals.db.bump_database_version(13)?; - } - db["global"].insert(b"feat_sha256_media", []); info!("Finished applying sha256_media"); Ok(()) From c7c9f0e4a60ffd4b497bb8e426ffc34c5e118913 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 6 Feb 2025 16:57:30 -0500 Subject: [PATCH 159/328] catch clippy lints for --no-default-features builds Signed-off-by: strawberry --- engage.toml | 50 +++++++++++++++++++++++++++++++------------------- 1 file changed, 31 insertions(+), 19 deletions(-) diff --git a/engage.toml b/engage.toml index 1d6a5475..279e999c 100644 --- a/engage.toml +++ b/engage.toml @@ -101,7 +101,6 @@ direnv exec . \ cargo clippy \ --workspace \ --profile test \ - --all-targets \ --color=always \ -- \ -D warnings @@ -116,13 +115,27 @@ env DIRENV_DEVSHELL=all-features \ cargo clippy \ --workspace \ --profile test \ - --all-targets \ --all-features \ --color=always \ -- \ -D warnings """ +[[task]] +name = "clippy/no-features" +group = "lints" +script = """ +env DIRENV_DEVSHELL=no-features \ + direnv exec . \ + cargo clippy \ + --workspace \ + --profile test \ + --no-default-features \ + --color=always \ + -- \ + -D warnings +""" + [[task]] name = "clippy/jemalloc" group = "lints" @@ -131,26 +144,12 @@ direnv exec . \ cargo clippy \ --workspace \ --profile test \ - --features jemalloc \ - --all-targets \ + --features=jemalloc \ --color=always \ -- \ -D warnings """ -#[[task]] -#name = "clippy/hardened_malloc" -#group = "lints" -#script = """ -#cargo clippy \ -# --workspace \ -# --features hardened_malloc \ -# --all-targets \ -# --color=always \ -# -- \ -# -D warnings -#""" - [[task]] name = "lychee" group = "lints" @@ -170,7 +169,6 @@ env DIRENV_DEVSHELL=all-features \ cargo test \ --workspace \ --profile test \ - --all-targets \ --all-features \ --color=always \ -- \ @@ -186,7 +184,21 @@ env DIRENV_DEVSHELL=default \ cargo test \ --workspace \ --profile test \ - --all-targets \ + --color=always \ + -- \ + --color=always +""" + +[[task]] +name = "cargo/no-features" +group = "tests" +script = """ +env DIRENV_DEVSHELL=no-features \ + direnv exec . 
\ + cargo test \ + --workspace \ + --profile test \ + --no-default-features \ --color=always \ -- \ --color=always From 43e6c27bb772461722409e9c56146a106d6c6343 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 6 Feb 2025 18:07:49 -0500 Subject: [PATCH 160/328] misc nix tweaks to maybe speedup ci Signed-off-by: strawberry --- bin/complement | 3 +- flake.nix | 14 +------- nix/pkgs/complement/config.toml | 21 +++++++++--- nix/pkgs/complement/default.nix | 6 ---- nix/pkgs/main/default.nix | 58 +++++++++++++++++---------------- src/router/serve/tls.rs | 9 +++-- 6 files changed, 54 insertions(+), 57 deletions(-) diff --git a/bin/complement b/bin/complement index a1db4b32..a4c62856 100755 --- a/bin/complement +++ b/bin/complement @@ -34,7 +34,8 @@ toplevel="$(git rev-parse --show-toplevel)" pushd "$toplevel" > /dev/null -bin/nix-build-and-cache just .#linux-complement +#bin/nix-build-and-cache just .#linux-complement +bin/nix-build-and-cache just .#complement docker load < result popd > /dev/null diff --git a/flake.nix b/flake.nix index 920d3d14..3cef1af5 100644 --- a/flake.nix +++ b/flake.nix @@ -169,21 +169,9 @@ # used for rust caching in CI to speed it up sccache - - # needed so we can get rid of gcc and other unused deps that bloat OCI images - removeReferencesTo ] # liburing is Linux-exclusive - ++ lib.optional stdenv.hostPlatform.isLinux liburing - # needed to build Rust applications on macOS - ++ lib.optionals stdenv.hostPlatform.isDarwin [ - # https://github.com/NixOS/nixpkgs/issues/206242 - # ld: library not found for -liconv - libiconv - # https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell - # https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612 - pkgsBuildHost.darwin.apple_sdk.frameworks.Security - ]) + ++ lib.optional stdenv.hostPlatform.isLinux liburing) ++ scope.main.buildInputs ++ scope.main.propagatedBuildInputs ++ scope.main.nativeBuildInputs; diff --git a/nix/pkgs/complement/config.toml b/nix/pkgs/complement/config.toml index f20abee2..99c151c5 100644 --- a/nix/pkgs/complement/config.toml +++ b/nix/pkgs/complement/config.toml @@ -17,19 +17,32 @@ ip_range_denylist = [] url_preview_domain_contains_allowlist = ["*"] url_preview_domain_explicit_denylist = ["*"] media_compat_file_link = false -media_startup_check = false -prune_missing_media = false +media_startup_check = true +prune_missing_media = true log_colors = false admin_room_notices = false allow_check_for_updates = false -allow_unstable_room_versions = true +intentionally_unknown_config_option_for_testing = true rocksdb_log_level = "debug" rocksdb_max_log_files = 1 rocksdb_recovery_mode = 0 rocksdb_paranoid_file_checks = true log_guest_registrations = false allow_legacy_media = true -startup_netburst = false +startup_netburst = true +startup_netburst_keep = -1 + +# valgrind makes things so slow +dns_timeout = 60 +dns_attempts = 20 +request_conn_timeout = 60 +request_timeout = 120 +well_known_conn_timeout = 60 +well_known_timeout = 60 +federation_idle_timeout = 300 +sender_timeout = 300 +sender_idle_timeout = 300 +sender_retry_backoff_limit = 300 [global.tls] certs = "/certificate.crt" diff --git a/nix/pkgs/complement/default.nix b/nix/pkgs/complement/default.nix index e35cbf04..d9af0779 100644 --- a/nix/pkgs/complement/default.nix +++ b/nix/pkgs/complement/default.nix @@ -18,18 +18,12 @@ let all_features = true; disable_release_max_log_level = true; disable_features = [ - # no reason to use jemalloc for complement, just has compatibility/build issues - 
"jemalloc" - "jemalloc_stats" - "jemalloc_prof" # console/CLI stuff isn't used or relevant for complement "console" "tokio_console" # sentry telemetry isn't useful for complement, disabled by default anyways "sentry_telemetry" "perf_measurements" - # the containers don't use or need systemd signal support - "systemd" # this is non-functional on nix for some reason "hardened_malloc" # dont include experimental features diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix index d7424d11..4150b389 100644 --- a/nix/pkgs/main/default.nix +++ b/nix/pkgs/main/default.nix @@ -82,7 +82,7 @@ rust-jemalloc-sys' = (rust-jemalloc-sys.override { buildDepsOnlyEnv = let rocksdb' = (rocksdb.override { - jemalloc = rust-jemalloc-sys'; + jemalloc = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys'; # rocksdb fails to build with prefixed jemalloc, which is required on # darwin due to [1]. In this case, fall back to building rocksdb with # libc malloc. This should not cause conflicts, because all of the @@ -103,6 +103,12 @@ buildDepsOnlyEnv = ++ [ "-DPORTABLE=haswell" ]) else ([ "-DPORTABLE=1" ]) ) ++ old.cmakeFlags; + + # outputs has "tools" which we dont need or use + outputs = [ "out" ]; + + # preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use + preInstall = ""; }); in { @@ -156,6 +162,19 @@ commonAttrs = { ]; }; + # This is redundant with CI + doCheck = false; + + cargoTestCommand = "cargo test --locked "; + cargoExtraArgs = "--no-default-features --locked " + + lib.optionalString + (features'' != []) + "--features " + (builtins.concatStringsSep "," features''); + cargoTestExtraArgs = "--no-default-features --locked " + + lib.optionalString + (features'' != []) + "--features " + (builtins.concatStringsSep "," features''); + dontStrip = profile == "dev" || profile == "test"; dontPatchELF = profile == "dev" || profile == "test"; @@ -181,27 +200,7 @@ commonAttrs = { # differing values for `NIX_CFLAGS_COMPILE`, which contributes to spurious # rebuilds of bindgen and its depedents. 
jq - - # needed so we can get rid of gcc and other unused deps that bloat OCI images - removeReferencesTo - ] - # needed to build Rust applications on macOS - ++ lib.optionals stdenv.hostPlatform.isDarwin [ - # https://github.com/NixOS/nixpkgs/issues/206242 - # ld: library not found for -liconv - libiconv - - # https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell - # https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612 - pkgsBuildHost.darwin.apple_sdk.frameworks.Security - ]; - - # for some reason gcc and other weird deps are added to OCI images and bloats it up - # - # - postInstall = with pkgsBuildHost; '' - find "$out" -type f -exec remove-references-to -t ${stdenv.cc} -t ${gcc} -t ${llvm} -t ${rustc.unwrapped} -t ${rustc} '{}' + - ''; + ]; }; in @@ -210,15 +209,18 @@ craneLib.buildPackage ( commonAttrs // { env = buildDepsOnlyEnv; }); - cargoExtraArgs = "--no-default-features " + # This is redundant with CI + doCheck = false; + + cargoTestCommand = "cargo test --locked "; + cargoExtraArgs = "--no-default-features --locked " + + lib.optionalString + (features'' != []) + "--features " + (builtins.concatStringsSep "," features''); + cargoTestExtraArgs = "--no-default-features --locked " + lib.optionalString (features'' != []) "--features " + (builtins.concatStringsSep "," features''); - - # This is redundant with CI - cargoTestCommand = ""; - cargoCheckCommand = ""; - doCheck = false; env = buildPackageEnv; diff --git a/src/router/serve/tls.rs b/src/router/serve/tls.rs index 9d3fbd3b..ab1a9371 100644 --- a/src/router/serve/tls.rs +++ b/src/router/serve/tls.rs @@ -17,14 +17,13 @@ pub(super) async fn serve( addrs: Vec, ) -> Result { let tls = &server.config.tls; - let certs = tls - .certs - .as_ref() - .ok_or(err!(Config("tls.certs", "Missing required value in tls config section")))?; + let certs = tls.certs.as_ref().ok_or_else(|| { + err!(Config("tls.certs", "Missing required value in tls config section")) + })?; let key = tls .key .as_ref() - .ok_or(err!(Config("tls.key", "Missing required value in tls config section")))?; + .ok_or_else(|| err!(Config("tls.key", "Missing required value in tls config section")))?; // we use ring for ruma and hashing state, but aws-lc-rs is the new default. // without this, TLS mode will panic. 
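
The tls.rs hunk above swaps `ok_or(err!(...))` for `ok_or_else(|| err!(...))`: `ok_or` builds its error argument eagerly, so the formatting work inside the `err!` macro runs even when the option is `Some`, while `ok_or_else` defers that work to the `None` path. A minimal sketch of the difference, using plain `String` errors and a made-up `tls_cert_path` helper rather than conduwuit's `err!`/`Config` error types:

    fn tls_cert_path(config: Option<&str>) -> Result<&str, String> {
        // Eager variant: the error String would be built even when `config` is Some(_):
        //     let path = config.ok_or(String::from("tls.certs: missing required value"))?;

        // Lazy variant: the closure only runs on the None path.
        let path = config.ok_or_else(|| String::from("tls.certs: missing required value"))?;
        Ok(path)
    }

    fn main() {
        assert_eq!(tls_cert_path(Some("/certificate.crt")).unwrap(), "/certificate.crt");
        assert!(tls_cert_path(None).is_err());
    }
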
From add2e0e9eefc2cfcc154b1e4877988f15ca682a7 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 6 Feb 2025 18:20:02 -0500 Subject: [PATCH 161/328] bump rust-rocksdb Signed-off-by: strawberry --- Cargo.lock | 4 ++-- deps/rust-rocksdb/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 926099b5..82962421 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3711,7 +3711,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.32.0+9.10.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1f032427d3a0e7b0f13c04b4e34712bd8610291b#1f032427d3a0e7b0f13c04b4e34712bd8610291b" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=7b0e1bbe395a41ba8a11347a4921da590e3ad0d9#7b0e1bbe395a41ba8a11347a4921da590e3ad0d9" dependencies = [ "bindgen", "bzip2-sys", @@ -3728,7 +3728,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.36.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1f032427d3a0e7b0f13c04b4e34712bd8610291b#1f032427d3a0e7b0f13c04b4e34712bd8610291b" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=7b0e1bbe395a41ba8a11347a4921da590e3ad0d9#7b0e1bbe395a41ba8a11347a4921da590e3ad0d9" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index ba8259a3..c6af428d 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -27,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "1f032427d3a0e7b0f13c04b4e34712bd8610291b" +rev = "7b0e1bbe395a41ba8a11347a4921da590e3ad0d9" #branch = "master" default-features = false From 8345ea2cd31d26bcf5c5eb61bbda5cd9958c11c5 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 6 Feb 2025 18:47:54 -0500 Subject: [PATCH 162/328] add --locked and --no-fail-fast to cargo test, add other feature test Signed-off-by: strawberry --- engage.toml | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/engage.toml b/engage.toml index 279e999c..c1a2be1f 100644 --- a/engage.toml +++ b/engage.toml @@ -86,6 +86,7 @@ env DIRENV_DEVSHELL=all-features \ direnv exec . \ cargo doc \ --workspace \ + --locked \ --profile test \ --all-features \ --no-deps \ @@ -100,6 +101,7 @@ script = """ direnv exec . \ cargo clippy \ --workspace \ + --locked \ --profile test \ --color=always \ -- \ @@ -114,6 +116,7 @@ env DIRENV_DEVSHELL=all-features \ direnv exec . \ cargo clippy \ --workspace \ + --locked \ --profile test \ --all-features \ --color=always \ @@ -129,6 +132,7 @@ env DIRENV_DEVSHELL=no-features \ direnv exec . \ cargo clippy \ --workspace \ + --locked \ --profile test \ --no-default-features \ --color=always \ @@ -137,14 +141,16 @@ env DIRENV_DEVSHELL=no-features \ """ [[task]] -name = "clippy/jemalloc" +name = "clippy/other-features" group = "lints" script = """ direnv exec . \ cargo clippy \ --workspace \ + --locked \ --profile test \ - --features=jemalloc \ + --no-default-features \ + --features=console,systemd,element_hacks,direct_tls,perf_measurements,brotli_compression,blurhashing \ --color=always \ -- \ -D warnings @@ -168,7 +174,10 @@ env DIRENV_DEVSHELL=all-features \ direnv exec . \ cargo test \ --workspace \ + --locked \ --profile test \ + --all-targets \ + --no-fail-fast \ --all-features \ --color=always \ -- \ @@ -183,7 +192,10 @@ env DIRENV_DEVSHELL=default \ direnv exec . 
\ cargo test \ --workspace \ + --locked \ --profile test \ + --all-targets \ + --no-fail-fast \ --color=always \ -- \ --color=always @@ -197,7 +209,10 @@ env DIRENV_DEVSHELL=no-features \ direnv exec . \ cargo test \ --workspace \ + --locked \ --profile test \ + --all-targets \ + --no-fail-fast \ --no-default-features \ --color=always \ -- \ From 88e7e50daff94ef8e3fe3d67e72214f002fdb22b Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 7 Feb 2025 11:49:00 -0500 Subject: [PATCH 163/328] add missing source OCI image label metadata Signed-off-by: strawberry --- nix/pkgs/oci-image/default.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/nix/pkgs/oci-image/default.nix b/nix/pkgs/oci-image/default.nix index 5520c920..1650053d 100644 --- a/nix/pkgs/oci-image/default.nix +++ b/nix/pkgs/oci-image/default.nix @@ -36,6 +36,7 @@ dockerTools.buildLayeredImage { "org.opencontainers.image.documentation" = "https://conduwuit.puppyirl.gay/"; "org.opencontainers.image.licenses" = "Apache-2.0"; "org.opencontainers.image.revision" = inputs.self.rev or inputs.self.dirtyRev or ""; + "org.opencontainers.image.source" = "https://github.com/girlbossceo/conduwuit"; "org.opencontainers.image.title" = main.pname; "org.opencontainers.image.url" = "https://conduwuit.puppyirl.gay/"; "org.opencontainers.image.vendor" = "girlbossceo"; From cfcd6eb1a6a117db94e6f9e631a0d881a62d3299 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 7 Feb 2025 18:00:58 -0500 Subject: [PATCH 164/328] bump ruwuma to stop erroring on empty push response body Signed-off-by: strawberry --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 82962421..caef5859 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3490,7 +3490,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "assign", "js_int", @@ -3512,7 +3512,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "js_int", "ruma-common", @@ -3524,7 +3524,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "as_variant", "assign", @@ -3547,7 +3547,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "as_variant", "base64 0.22.1", @@ -3578,7 +3578,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "as_variant", "indexmap 2.7.0", @@ -3603,7 +3603,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "bytes", "http", @@ -3621,7 +3621,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3630,7 +3630,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "js_int", "ruma-common", @@ -3640,7 +3640,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3655,7 +3655,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "js_int", "ruma-common", @@ -3667,7 +3667,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "headers", "http", @@ -3680,7 +3680,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3696,7 +3696,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index ce483bbc..38654be3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -342,7 +342,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "517ac4572276a2e0ad587113776c544b51166f08" +rev = "f5667c6292adb43fbe4725d31d6b5127a0cf60ce" features = [ "compat", "rand", From b6e9dc3d98704c56027219d3775336910a0136c6 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 9 Feb 2025 10:17:28 -0500 Subject: [PATCH 165/328] comment out borked ci thing for now Signed-off-by: strawberry --- .github/workflows/ci.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 35d60aa1..24f2db45 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -128,7 +128,7 @@ jobs: - name: Restore and cache Nix store # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags - if: ${{ !startsWith(github.ref, 'refs/tags/') }} + #if: ${{ !startsWith(github.ref, 'refs/tags/') }} uses: nix-community/cache-nix-action@v5.1.0 with: # restore and save a cache using this key @@ -191,14 +191,14 @@ jobs: - name: Run sccache-cache # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags - if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} + #if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} uses: mozilla-actions/sccache-action@main # use rust-cache - uses: Swatinem/rust-cache@v2 # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags - if: ${{ !startsWith(github.ref, 'refs/tags/') }} + #if: ${{ !startsWith(github.ref, 'refs/tags/') }} with: cache-all-crates: "true" cache-on-failure: "true" @@ -323,7 +323,7 @@ jobs: - name: Restore and cache Nix store # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags - if: ${{ !startsWith(github.ref, 'refs/tags/') }} + #if: ${{ !startsWith(github.ref, 'refs/tags/') }} uses: nix-community/cache-nix-action@v5.1.0 with: # restore and save a cache using this key @@ -379,14 +379,14 @@ jobs: - name: Run sccache-cache # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags - if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} + #if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} uses: mozilla-actions/sccache-action@main # use rust-cache - uses: Swatinem/rust-cache@v2 # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags - if: ${{ !startsWith(github.ref, 'refs/tags/') }} + #if: ${{ !startsWith(github.ref, 'refs/tags/') }} with: cache-all-crates: "true" cache-on-failure: "true" @@ -679,7 +679,7 @@ jobs: - name: Run sccache-cache # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags - if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} + #if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && 
!startsWith(github.ref, 'refs/tags/') }} uses: mozilla-actions/sccache-action@main # use rust-cache From e3b81f7b6488b5c483e8b13e3959fe591bf4cb92 Mon Sep 17 00:00:00 2001 From: Dzming Li Date: Mon, 10 Feb 2025 22:45:57 +0800 Subject: [PATCH 166/328] Fix in caddyfile guide If the reverse_proxy directive is omitted before 127.0.0.1:6167 in your Caddyfile, enabling the service with systemctl enable will result in an error. --- docs/deploying/generic.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index cc50544e..8ca2f387 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -216,7 +216,7 @@ your server name). ```caddyfile your.server.name, your.server.name:8448 { # TCP reverse_proxy - 127.0.0.1:6167 + reverse_proxy 127.0.0.1:6167 # UNIX socket #reverse_proxy unix//run/conduwuit/conduwuit.sock } From 3ec43be95965488d720403264c4edc6170c67c02 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 04:30:17 +0000 Subject: [PATCH 167/328] join initial fetches in get_relations() skip recursion for max_depth=0 Signed-off-by: Jason Volk --- src/service/rooms/pdu_metadata/mod.rs | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index 4cb14ebc..ba289f9b 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -2,7 +2,7 @@ mod data; use std::sync::Arc; use conduwuit::{PduCount, Result}; -use futures::StreamExt; +use futures::{future::try_join, StreamExt}; use ruma::{api::Direction, EventId, RoomId, UserId}; use self::data::{Data, PdusIterItem}; @@ -54,10 +54,16 @@ impl Service { max_depth: u8, dir: Direction, ) -> Vec { - let room_id = self.services.short.get_or_create_shortroomid(room_id).await; + let room_id = self.services.short.get_shortroomid(room_id); - let target = match self.services.timeline.get_pdu_count(target).await { - | Ok(PduCount::Normal(c)) => c, + let target = self.services.timeline.get_pdu_count(target); + + let Ok((room_id, target)) = try_join(room_id, target).await else { + return Vec::new(); + }; + + let target = match target { + | PduCount::Normal(c) => c, // TODO: Support backfilled relations | _ => 0, // This will result in an empty iterator }; @@ -68,7 +74,11 @@ impl Service { .collect() .await; - let mut stack: Vec<_> = pdus.iter().map(|pdu| (pdu.clone(), 1)).collect(); + let mut stack: Vec<_> = pdus + .iter() + .filter(|_| max_depth > 0) + .map(|pdu| (pdu.clone(), 1)) + .collect(); 'limit: while let Some(stack_pdu) = stack.pop() { let target = match stack_pdu.0 .0 { From 2d71d5590a81cd26f22181131d2e5a6439fe391d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 09:53:53 +0000 Subject: [PATCH 168/328] fix pdu add_relation() helper Signed-off-by: Jason Volk --- src/core/pdu/unsigned.rs | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/src/core/pdu/unsigned.rs b/src/core/pdu/unsigned.rs index fa305d71..fe4d6a1c 100644 --- a/src/core/pdu/unsigned.rs +++ b/src/core/pdu/unsigned.rs @@ -46,23 +46,26 @@ pub fn add_age(&mut self) -> Result { } #[implement(Pdu)] -pub fn add_relation(&mut self, name: &str, pdu: &Pdu) -> Result { - let mut unsigned: BTreeMap = self +pub fn add_relation(&mut self, name: &str, pdu: Option<&Pdu>) -> Result { + use serde_json::Map; + + let mut unsigned: Map = self .unsigned .as_ref() - .map_or_else(|| Ok(BTreeMap::new()), |u| 
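
The get_relations() change above fetches the shortroomid and the target's PDU count with `futures::future::try_join` rather than two sequential awaits, so both lookups are polled concurrently and a failure in either one returns early; it also seeds the recursion stack only when `max_depth > 0`. A small self-contained sketch of the `try_join` pattern, where the two `fetch_*` functions are stand-ins and not conduwuit APIs:

    use futures::{executor::block_on, future::try_join};

    async fn fetch_short_room_id(room_id: &str) -> Result<u64, &'static str> {
        (!room_id.is_empty()).then_some(42).ok_or("unknown room")
    }

    async fn fetch_pdu_count(event_id: &str) -> Result<u64, &'static str> {
        (!event_id.is_empty()).then_some(7).ok_or("unknown event")
    }

    fn main() {
        // Both futures are driven together; the first error short-circuits the join.
        let joined = block_on(try_join(
            fetch_short_room_id("!room:example.org"),
            fetch_pdu_count("$event:example.org"),
        ));
        match joined {
            Ok((room_id, target)) => println!("room_id={room_id} target={target}"),
            Err(e) => eprintln!("lookup failed: {e}"),
        }
    }

In the patch itself the joined result instead feeds a `let Ok((room_id, target)) = ... else { return Vec::new() }` early return, which keeps the happy path flat.
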
serde_json::from_str(u.get())) + .map_or_else(|| Ok(Map::new()), |u| serde_json::from_str(u.get())) .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; - let relations: &mut JsonValue = unsigned.entry("m.relations".into()).or_default(); - if relations.as_object_mut().is_none() { - let mut object = serde_json::Map::::new(); - _ = relations.as_object_mut().insert(&mut object); - } + let pdu = pdu + .map(serde_json::to_value) + .transpose()? + .unwrap_or_else(|| JsonValue::Object(Map::new())); - relations + unsigned + .entry("m.relations") + .or_insert(JsonValue::Object(Map::new())) .as_object_mut() - .expect("we just created it") - .insert(name.to_owned(), serde_json::to_value(pdu)?); + .unwrap() + .insert(name.to_owned(), pdu); self.unsigned = to_raw_value(&unsigned) .map(Some) From 565837ad753bbd6d346157c5b52a6a0275984e50 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 04:21:39 +0000 Subject: [PATCH 169/328] request auth media first Signed-off-by: Jason Volk --- src/service/media/remote.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs index ca73c3ef..72f1184e 100644 --- a/src/service/media/remote.rs +++ b/src/service/media/remote.rs @@ -32,12 +32,12 @@ pub async fn fetch_remote_thumbnail( self.check_fetch_authorized(mxc)?; let result = self - .fetch_thumbnail_unauthenticated(mxc, user, server, timeout_ms, dim) + .fetch_thumbnail_authenticated(mxc, user, server, timeout_ms, dim) .await; if let Err(Error::Request(NotFound, ..)) = &result { return self - .fetch_thumbnail_authenticated(mxc, user, server, timeout_ms, dim) + .fetch_thumbnail_unauthenticated(mxc, user, server, timeout_ms, dim) .await; } @@ -55,12 +55,12 @@ pub async fn fetch_remote_content( self.check_fetch_authorized(mxc)?; let result = self - .fetch_content_unauthenticated(mxc, user, server, timeout_ms) + .fetch_content_authenticated(mxc, user, server, timeout_ms) .await; if let Err(Error::Request(NotFound, ..)) = &result { return self - .fetch_content_authenticated(mxc, user, server, timeout_ms) + .fetch_content_unauthenticated(mxc, user, server, timeout_ms) .await; } From 31ab84e9284ce7d5b6ec9fb212970b1a9e18fe7f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 10:23:17 +0000 Subject: [PATCH 170/328] simplify client event endpoint Signed-off-by: Jason Volk --- src/api/client/message.rs | 53 ++++++++++++++++++++++++++---------- src/api/client/room/event.rs | 40 +++++++++++---------------- src/core/pdu/unsigned.rs | 31 +++++++++++---------- 3 files changed, 71 insertions(+), 53 deletions(-) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 321d8013..bb4e72dd 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,6 +1,6 @@ use axum::extract::State; use conduwuit::{ - at, is_equal_to, + at, utils::{ result::{FlatOk, LogErr}, stream::{BroadbandExt, TryIgnore, WidebandExt}, @@ -30,7 +30,7 @@ use service::{ use crate::Ruma; /// list of safe and common non-state events to ignore if the user is ignored -const IGNORED_MESSAGE_TYPES: &[TimelineEventType; 17] = &[ +const IGNORED_MESSAGE_TYPES: &[TimelineEventType] = &[ Audio, CallInvite, Emote, @@ -225,34 +225,50 @@ async fn get_member_event( .ok() } +#[inline] pub(crate) async fn ignored_filter( services: &Services, item: PdusIterItem, user_id: &UserId, ) -> Option { - let (_, pdu) = &item; + let (_, ref pdu) = item; + is_ignored_pdu(services, pdu, user_id) + .await + .eq(&false) + .then_some(item) +} + 
+#[inline] +pub(crate) async fn is_ignored_pdu( + services: &Services, + pdu: &PduEvent, + user_id: &UserId, +) -> bool { // exclude Synapse's dummy events from bloating up response bodies. clients // don't need to see this. if pdu.kind.to_cow_str() == "org.matrix.dummy_event" { - return None; + return true; } - if IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok() - && (services.users.user_is_ignored(&pdu.sender, user_id).await - || services - .server - .config - .forbidden_remote_server_names - .iter() - .any(is_equal_to!(pdu.sender().server_name()))) + let ignored_type = IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok(); + + let ignored_server = services + .server + .config + .forbidden_remote_server_names + .contains(pdu.sender().server_name()); + + if ignored_type + && (ignored_server || services.users.user_is_ignored(&pdu.sender, user_id).await) { - return None; + return true; } - Some(item) + false } +#[inline] pub(crate) async fn visibility_filter( services: &Services, item: PdusIterItem, @@ -268,7 +284,16 @@ pub(crate) async fn visibility_filter( .then_some(item) } +#[inline] pub(crate) fn event_filter(item: PdusIterItem, filter: &RoomEventFilter) -> Option { let (_, pdu) = &item; pdu.matches(filter).then_some(item) } + +#[cfg_attr(debug_assertions, conduwuit::ctor)] +fn _is_sorted() { + debug_assert!( + IGNORED_MESSAGE_TYPES.is_sorted(), + "IGNORED_MESSAGE_TYPES must be sorted by the developer" + ); +} diff --git a/src/api/client/room/event.rs b/src/api/client/room/event.rs index bc5ec0d7..f0ae64dd 100644 --- a/src/api/client/room/event.rs +++ b/src/api/client/room/event.rs @@ -1,52 +1,44 @@ use axum::extract::State; use conduwuit::{err, Err, Event, Result}; -use futures::{try_join, FutureExt, TryFutureExt}; +use futures::{future::try_join, FutureExt, TryFutureExt}; use ruma::api::client::room::get_room_event; -use crate::{client::ignored_filter, Ruma}; +use crate::{client::is_ignored_pdu, Ruma}; /// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}` /// /// Gets a single event. 
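// Illustrative aside (hypothetical usage, not part of the committed hunk):
// callers that screen a whole timeline can keep using `ignored_filter` as a
// stream filter, roughly:
//
//     let filtered: Vec<_> = items
//         .stream()
//         .filter_map(|item| ignored_filter(&services, item, sender_user))
//         .collect()
//         .await;
//
// while `is_ignored_pdu` below covers the single-PDU case directly, as the
// room event endpoint change later in this patch does.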
pub(crate) async fn get_room_event_route( - State(services): State, + State(ref services): State, ref body: Ruma, ) -> Result { + let event_id = &body.event_id; + let room_id = &body.room_id; + let event = services .rooms .timeline - .get_pdu(&body.event_id) - .map_err(|_| err!(Request(NotFound("Event {} not found.", &body.event_id)))); - - let token = services - .rooms - .timeline - .get_pdu_count(&body.event_id) - .map_err(|_| err!(Request(NotFound("Event not found.")))); + .get_pdu(event_id) + .map_err(|_| err!(Request(NotFound("Event {} not found.", event_id)))); let visible = services .rooms .state_accessor - .user_can_see_event(body.sender_user(), &body.room_id, &body.event_id) + .user_can_see_event(body.sender_user(), room_id, event_id) .map(Ok); - let (token, mut event, visible) = try_join!(token, event, visible)?; + let (mut event, visible) = try_join(event, visible).await?; - if !visible - || ignored_filter(&services, (token, event.clone()), body.sender_user()) - .await - .is_none() - { + if !visible || is_ignored_pdu(services, &event, body.sender_user()).await { return Err!(Request(Forbidden("You don't have permission to view this event."))); } - if event.event_id() != &body.event_id || event.room_id() != body.room_id { - return Err!(Request(NotFound("Event not found"))); - } + debug_assert!( + event.event_id() == event_id && event.room_id() == room_id, + "Fetched PDU must match requested" + ); event.add_age().ok(); - let event = event.to_room_event(); - - Ok(get_room_event::v3::Response { event }) + Ok(get_room_event::v3::Response { event: event.to_room_event() }) } diff --git a/src/core/pdu/unsigned.rs b/src/core/pdu/unsigned.rs index fe4d6a1c..8482a48a 100644 --- a/src/core/pdu/unsigned.rs +++ b/src/core/pdu/unsigned.rs @@ -9,11 +9,13 @@ use crate::{err, implement, is_true, Result}; #[implement(Pdu)] pub fn remove_transaction_id(&mut self) -> Result { + use BTreeMap as Map; + let Some(unsigned) = &self.unsigned else { return Ok(()); }; - let mut unsigned: BTreeMap> = serde_json::from_str(unsigned.get()) + let mut unsigned: Map<&str, Box> = serde_json::from_str(unsigned.get()) .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; unsigned.remove("transaction_id"); @@ -26,10 +28,13 @@ pub fn remove_transaction_id(&mut self) -> Result { #[implement(Pdu)] pub fn add_age(&mut self) -> Result { - let mut unsigned: BTreeMap> = self + use BTreeMap as Map; + + let mut unsigned: Map<&str, Box> = self .unsigned - .as_ref() - .map_or_else(|| Ok(BTreeMap::new()), |u| serde_json::from_str(u.get())) + .as_deref() + .map(RawJsonValue::get) + .map_or_else(|| Ok(Map::new()), serde_json::from_str) .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; // deliberately allowing for the possibility of negative age @@ -37,10 +42,8 @@ pub fn add_age(&mut self) -> Result { let then: i128 = self.origin_server_ts.into(); let this_age = now.saturating_sub(then); - unsigned.insert("age".to_owned(), to_raw_value(&this_age).expect("age is valid")); - self.unsigned = to_raw_value(&unsigned) - .map(Some) - .expect("unsigned is valid"); + unsigned.insert("age", to_raw_value(&this_age)?); + self.unsigned = Some(to_raw_value(&unsigned)?); Ok(()) } @@ -51,8 +54,9 @@ pub fn add_relation(&mut self, name: &str, pdu: Option<&Pdu>) -> Result { let mut unsigned: Map = self .unsigned - .as_ref() - .map_or_else(|| Ok(Map::new()), |u| serde_json::from_str(u.get())) + .as_deref() + .map(RawJsonValue::get) + .map_or_else(|| Ok(Map::new()), serde_json::from_str) .map_err(|e| 
err!(Database("Invalid unsigned in pdu event: {e}")))?; let pdu = pdu @@ -64,12 +68,9 @@ pub fn add_relation(&mut self, name: &str, pdu: Option<&Pdu>) -> Result { .entry("m.relations") .or_insert(JsonValue::Object(Map::new())) .as_object_mut() - .unwrap() - .insert(name.to_owned(), pdu); + .map(|object| object.insert(name.to_owned(), pdu)); - self.unsigned = to_raw_value(&unsigned) - .map(Some) - .expect("unsigned is valid"); + self.unsigned = Some(to_raw_value(&unsigned)?); Ok(()) } From d8e94ee965d961fd7c8a042b0ed32d7a38190668 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 20:08:00 +0000 Subject: [PATCH 171/328] split spaces service Signed-off-by: Jason Volk --- src/api/client/space.rs | 183 ++++++++++- src/api/server/hierarchy.rs | 70 ++++- src/service/rooms/spaces/mod.rs | 311 +------------------ src/service/rooms/spaces/pagination_token.rs | 76 +++++ 4 files changed, 318 insertions(+), 322 deletions(-) create mode 100644 src/service/rooms/spaces/pagination_token.rs diff --git a/src/api/client/space.rs b/src/api/client/space.rs index 409c9083..8f54de2a 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -1,9 +1,15 @@ -use std::str::FromStr; +use std::{collections::VecDeque, str::FromStr}; use axum::extract::State; +use conduwuit::{checked, pdu::ShortRoomId, utils::stream::IterStream}; +use futures::{StreamExt, TryFutureExt}; use ruma::{ api::client::{error::ErrorKind, space::get_hierarchy}, - UInt, + OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, +}; +use service::{ + rooms::spaces::{get_parent_children_via, summary_to_chunk, SummaryAccessibility}, + Services, }; use crate::{service::rooms::spaces::PaginationToken, Error, Result, Ruma}; @@ -16,8 +22,6 @@ pub(crate) async fn get_hierarchy_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let limit = body .limit .unwrap_or_else(|| UInt::from(10_u32)) @@ -43,16 +47,163 @@ pub(crate) async fn get_hierarchy_route( } } - services - .rooms - .spaces - .get_client_hierarchy( - sender_user, - &body.room_id, - limit.try_into().unwrap_or(10), - key.map_or(vec![], |token| token.short_room_ids), - max_depth.into(), - body.suggested_only, - ) - .await + get_client_hierarchy( + &services, + body.sender_user(), + &body.room_id, + limit.try_into().unwrap_or(10), + key.map_or(vec![], |token| token.short_room_ids), + max_depth.into(), + body.suggested_only, + ) + .await +} + +async fn get_client_hierarchy( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + limit: usize, + short_room_ids: Vec, + max_depth: u64, + suggested_only: bool, +) -> Result { + let mut parents = VecDeque::new(); + + // Don't start populating the results if we have to start at a specific room. 
+ let mut populate_results = short_room_ids.is_empty(); + + let mut stack = vec![vec![(room_id.to_owned(), match room_id.server_name() { + | Some(server_name) => vec![server_name.into()], + | None => vec![], + })]]; + + let mut results = Vec::with_capacity(limit); + + while let Some((current_room, via)) = { next_room_to_traverse(&mut stack, &mut parents) } { + if results.len() >= limit { + break; + } + + match ( + services + .rooms + .spaces + .get_summary_and_children_client(¤t_room, suggested_only, sender_user, &via) + .await?, + current_room == room_id, + ) { + | (Some(SummaryAccessibility::Accessible(summary)), _) => { + let mut children: Vec<(OwnedRoomId, Vec)> = + get_parent_children_via(&summary, suggested_only) + .into_iter() + .filter(|(room, _)| parents.iter().all(|parent| parent != room)) + .rev() + .collect(); + + if populate_results { + results.push(summary_to_chunk(*summary.clone())); + } else { + children = children + .iter() + .rev() + .stream() + .skip_while(|(room, _)| { + services + .rooms + .short + .get_shortroomid(room) + .map_ok(|short| Some(&short) != short_room_ids.get(parents.len())) + .unwrap_or_else(|_| false) + }) + .map(Clone::clone) + .collect::)>>() + .await + .into_iter() + .rev() + .collect(); + + if children.is_empty() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Room IDs in token were not found.", + )); + } + + // We have reached the room after where we last left off + let parents_len = parents.len(); + if checked!(parents_len + 1)? == short_room_ids.len() { + populate_results = true; + } + } + + let parents_len: u64 = parents.len().try_into()?; + if !children.is_empty() && parents_len < max_depth { + parents.push_back(current_room.clone()); + stack.push(children); + } + // Root room in the space hierarchy, we return an error + // if this one fails. 
+ }, + | (Some(SummaryAccessibility::Inaccessible), true) => { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "The requested room is inaccessible", + )); + }, + | (None, true) => { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "The requested room was not found", + )); + }, + // Just ignore other unavailable rooms + | (None | Some(SummaryAccessibility::Inaccessible), false) => (), + } + } + + Ok(get_hierarchy::v1::Response { + next_batch: if let Some((room, _)) = next_room_to_traverse(&mut stack, &mut parents) { + parents.pop_front(); + parents.push_back(room); + + let next_short_room_ids: Vec<_> = parents + .iter() + .stream() + .filter_map(|room_id| async move { + services.rooms.short.get_shortroomid(room_id).await.ok() + }) + .collect() + .await; + + (next_short_room_ids != short_room_ids && !next_short_room_ids.is_empty()).then( + || { + PaginationToken { + short_room_ids: next_short_room_ids, + limit: UInt::new(max_depth) + .expect("When sent in request it must have been valid UInt"), + max_depth: UInt::new(max_depth) + .expect("When sent in request it must have been valid UInt"), + suggested_only, + } + .to_string() + }, + ) + } else { + None + }, + rooms: results, + }) +} + +fn next_room_to_traverse( + stack: &mut Vec)>>, + parents: &mut VecDeque, +) -> Option<(OwnedRoomId, Vec)> { + while stack.last().is_some_and(Vec::is_empty) { + stack.pop(); + parents.pop_back(); + } + + stack.last_mut().and_then(Vec::pop) } diff --git a/src/api/server/hierarchy.rs b/src/api/server/hierarchy.rs index a10df6ac..bcf2f7bc 100644 --- a/src/api/server/hierarchy.rs +++ b/src/api/server/hierarchy.rs @@ -1,7 +1,12 @@ use axum::extract::State; -use ruma::api::{client::error::ErrorKind, federation::space::get_hierarchy}; +use conduwuit::{Err, Result}; +use ruma::{api::federation::space::get_hierarchy, RoomId, ServerName}; +use service::{ + rooms::spaces::{get_parent_children_via, Identifier, SummaryAccessibility}, + Services, +}; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/hierarchy/{roomId}` /// @@ -11,13 +16,58 @@ pub(crate) async fn get_hierarchy_route( State(services): State, body: Ruma, ) -> Result { - if services.rooms.metadata.exists(&body.room_id).await { - services - .rooms - .spaces - .get_federation_hierarchy(&body.room_id, body.origin(), body.suggested_only) - .await - } else { - Err(Error::BadRequest(ErrorKind::NotFound, "Room does not exist.")) + if !services.rooms.metadata.exists(&body.room_id).await { + return Err!(Request(NotFound("Room does not exist."))); + } + + get_hierarchy(&services, &body.room_id, body.origin(), body.suggested_only).await +} + +/// Gets the response for the space hierarchy over federation request +/// +/// Errors if the room does not exist, so a check if the room exists should +/// be done +async fn get_hierarchy( + services: &Services, + room_id: &RoomId, + server_name: &ServerName, + suggested_only: bool, +) -> Result { + match services + .rooms + .spaces + .get_summary_and_children_local(&room_id.to_owned(), Identifier::ServerName(server_name)) + .await? + { + | Some(SummaryAccessibility::Accessible(room)) => { + let mut children = Vec::new(); + let mut inaccessible_children = Vec::new(); + + for (child, _via) in get_parent_children_via(&room, suggested_only) { + match services + .rooms + .spaces + .get_summary_and_children_local(&child, Identifier::ServerName(server_name)) + .await? 
+ { + | Some(SummaryAccessibility::Accessible(summary)) => { + children.push((*summary).into()); + }, + | Some(SummaryAccessibility::Inaccessible) => { + inaccessible_children.push(child); + }, + | None => (), + } + } + + Ok(get_hierarchy::v1::Response { + room: *room, + children, + inaccessible_children, + }) + }, + | Some(SummaryAccessibility::Inaccessible) => + Err!(Request(NotFound("The requested room is inaccessible"))), + | None => Err!(Request(NotFound("The requested room was not found"))), } } diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 11794752..1e2b0a9f 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -1,22 +1,14 @@ +mod pagination_token; mod tests; -use std::{ - collections::{HashMap, VecDeque}, - fmt::{Display, Formatter}, - str::FromStr, - sync::Arc, -}; +use std::{collections::HashMap, sync::Arc}; -use conduwuit::{ - checked, debug_info, err, - utils::{math::usize_from_f64, IterStream}, - Error, Result, -}; -use futures::{StreamExt, TryFutureExt}; +use conduwuit::{debug_info, err, utils::math::usize_from_f64, Error, Result}; +use futures::StreamExt; use lru_cache::LruCache; use ruma::{ api::{ - client::{self, error::ErrorKind, space::SpaceHierarchyRoomsChunk}, + client::{error::ErrorKind, space::SpaceHierarchyRoomsChunk}, federation::{ self, space::{SpaceHierarchyChildSummary, SpaceHierarchyParentSummary}, @@ -29,11 +21,12 @@ use ruma::{ }, serde::Raw, space::SpaceRoomJoinRule, - OwnedRoomId, OwnedServerName, RoomId, ServerName, UInt, UserId, + OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, }; use tokio::sync::Mutex; -use crate::{rooms, rooms::short::ShortRoomId, sending, Dep}; +pub use self::pagination_token::PaginationToken; +use crate::{rooms, sending, Dep}; pub struct CachedSpaceHierarchySummary { summary: SpaceHierarchyParentSummary, @@ -44,81 +37,10 @@ pub enum SummaryAccessibility { Inaccessible, } -// TODO: perhaps use some better form of token rather than just room count -#[derive(Debug, Eq, PartialEq)] -pub struct PaginationToken { - /// Path down the hierarchy of the room to start the response at, - /// excluding the root space. - pub short_room_ids: Vec, - pub limit: UInt, - pub max_depth: UInt, - pub suggested_only: bool, -} - -impl FromStr for PaginationToken { - type Err = Error; - - fn from_str(value: &str) -> Result { - let mut values = value.split('_'); - - let mut pag_tok = || { - let rooms = values - .next()? - .split(',') - .filter_map(|room_s| u64::from_str(room_s).ok()) - .collect(); - - Some(Self { - short_room_ids: rooms, - limit: UInt::from_str(values.next()?).ok()?, - max_depth: UInt::from_str(values.next()?).ok()?, - suggested_only: { - let slice = values.next()?; - - if values.next().is_none() { - if slice == "true" { - true - } else if slice == "false" { - false - } else { - None? - } - } else { - None? 
- } - }, - }) - }; - - if let Some(token) = pag_tok() { - Ok(token) - } else { - Err(Error::BadRequest(ErrorKind::InvalidParam, "invalid token")) - } - } -} - -impl Display for PaginationToken { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!( - f, - "{}_{}_{}_{}", - self.short_room_ids - .iter() - .map(ToString::to_string) - .collect::>() - .join(","), - self.limit, - self.max_depth, - self.suggested_only - ) - } -} - /// Identifier used to check if rooms are accessible /// /// None is used if you want to return the room, no matter if accessible or not -enum Identifier<'a> { +pub enum Identifier<'a> { UserId(&'a UserId), ServerName(&'a ServerName), } @@ -164,60 +86,8 @@ impl crate::Service for Service { } impl Service { - /// Gets the response for the space hierarchy over federation request - /// - /// Errors if the room does not exist, so a check if the room exists should - /// be done - pub async fn get_federation_hierarchy( - &self, - room_id: &RoomId, - server_name: &ServerName, - suggested_only: bool, - ) -> Result { - match self - .get_summary_and_children_local( - &room_id.to_owned(), - Identifier::ServerName(server_name), - ) - .await? - { - | Some(SummaryAccessibility::Accessible(room)) => { - let mut children = Vec::new(); - let mut inaccessible_children = Vec::new(); - - for (child, _via) in get_parent_children_via(&room, suggested_only) { - match self - .get_summary_and_children_local( - &child, - Identifier::ServerName(server_name), - ) - .await? - { - | Some(SummaryAccessibility::Accessible(summary)) => { - children.push((*summary).into()); - }, - | Some(SummaryAccessibility::Inaccessible) => { - inaccessible_children.push(child); - }, - | None => (), - } - } - - Ok(federation::space::get_hierarchy::v1::Response { - room: *room, - children, - inaccessible_children, - }) - }, - | Some(SummaryAccessibility::Inaccessible) => - Err(Error::BadRequest(ErrorKind::NotFound, "The requested room is inaccessible")), - | None => - Err(Error::BadRequest(ErrorKind::NotFound, "The requested room was not found")), - } - } - /// Gets the summary of a space using solely local information - async fn get_summary_and_children_local( + pub async fn get_summary_and_children_local( &self, current_room: &OwnedRoomId, identifier: Identifier<'_>, @@ -366,7 +236,7 @@ impl Service { /// Gets the summary of a space using either local or remote (federation) /// sources - async fn get_summary_and_children_client( + pub async fn get_summary_and_children_client( &self, current_room: &OwnedRoomId, suggested_only: bool, @@ -470,147 +340,6 @@ impl Service { }) } - pub async fn get_client_hierarchy( - &self, - sender_user: &UserId, - room_id: &RoomId, - limit: usize, - short_room_ids: Vec, - max_depth: u64, - suggested_only: bool, - ) -> Result { - let mut parents = VecDeque::new(); - - // Don't start populating the results if we have to start at a specific room. 
- let mut populate_results = short_room_ids.is_empty(); - - let mut stack = vec![vec![(room_id.to_owned(), match room_id.server_name() { - | Some(server_name) => vec![server_name.into()], - | None => vec![], - })]]; - - let mut results = Vec::with_capacity(limit); - - while let Some((current_room, via)) = { next_room_to_traverse(&mut stack, &mut parents) } - { - if results.len() >= limit { - break; - } - - match ( - self.get_summary_and_children_client( - ¤t_room, - suggested_only, - sender_user, - &via, - ) - .await?, - current_room == room_id, - ) { - | (Some(SummaryAccessibility::Accessible(summary)), _) => { - let mut children: Vec<(OwnedRoomId, Vec)> = - get_parent_children_via(&summary, suggested_only) - .into_iter() - .filter(|(room, _)| parents.iter().all(|parent| parent != room)) - .rev() - .collect(); - - if populate_results { - results.push(summary_to_chunk(*summary.clone())); - } else { - children = children - .iter() - .rev() - .stream() - .skip_while(|(room, _)| { - self.services - .short - .get_shortroomid(room) - .map_ok(|short| { - Some(&short) != short_room_ids.get(parents.len()) - }) - .unwrap_or_else(|_| false) - }) - .map(Clone::clone) - .collect::)>>() - .await - .into_iter() - .rev() - .collect(); - - if children.is_empty() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Room IDs in token were not found.", - )); - } - - // We have reached the room after where we last left off - let parents_len = parents.len(); - if checked!(parents_len + 1)? == short_room_ids.len() { - populate_results = true; - } - } - - let parents_len: u64 = parents.len().try_into()?; - if !children.is_empty() && parents_len < max_depth { - parents.push_back(current_room.clone()); - stack.push(children); - } - // Root room in the space hierarchy, we return an error - // if this one fails. 
- }, - | (Some(SummaryAccessibility::Inaccessible), true) => { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "The requested room is inaccessible", - )); - }, - | (None, true) => { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "The requested room was not found", - )); - }, - // Just ignore other unavailable rooms - | (None | Some(SummaryAccessibility::Inaccessible), false) => (), - } - } - - Ok(client::space::get_hierarchy::v1::Response { - next_batch: if let Some((room, _)) = next_room_to_traverse(&mut stack, &mut parents) { - parents.pop_front(); - parents.push_back(room); - - let next_short_room_ids: Vec<_> = parents - .iter() - .stream() - .filter_map(|room_id| async move { - self.services.short.get_shortroomid(room_id).await.ok() - }) - .collect() - .await; - - (next_short_room_ids != short_room_ids && !next_short_room_ids.is_empty()).then( - || { - PaginationToken { - short_room_ids: next_short_room_ids, - limit: UInt::new(max_depth) - .expect("When sent in request it must have been valid UInt"), - max_depth: UInt::new(max_depth) - .expect("When sent in request it must have been valid UInt"), - suggested_only, - } - .to_string() - }, - ) - } else { - None - }, - rooms: results, - }) - } - /// Simply returns the stripped m.space.child events of a room async fn get_stripped_space_child_events( &self, @@ -757,7 +486,8 @@ impl From for SpaceHierarchyRoomsChunk { /// Here because cannot implement `From` across ruma-federation-api and /// ruma-client-api types -fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyRoomsChunk { +#[must_use] +pub fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyRoomsChunk { let SpaceHierarchyParentSummary { canonical_alias, name, @@ -790,7 +520,8 @@ fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyRooms /// Returns the children of a SpaceHierarchyParentSummary, making use of the /// children_state field -fn get_parent_children_via( +#[must_use] +pub fn get_parent_children_via( parent: &SpaceHierarchyParentSummary, suggested_only: bool, ) -> Vec<(OwnedRoomId, Vec)> { @@ -808,15 +539,3 @@ fn get_parent_children_via( }) .collect() } - -fn next_room_to_traverse( - stack: &mut Vec)>>, - parents: &mut VecDeque, -) -> Option<(OwnedRoomId, Vec)> { - while stack.last().is_some_and(Vec::is_empty) { - stack.pop(); - parents.pop_back(); - } - - stack.last_mut().and_then(Vec::pop) -} diff --git a/src/service/rooms/spaces/pagination_token.rs b/src/service/rooms/spaces/pagination_token.rs new file mode 100644 index 00000000..8f019e8d --- /dev/null +++ b/src/service/rooms/spaces/pagination_token.rs @@ -0,0 +1,76 @@ +use std::{ + fmt::{Display, Formatter}, + str::FromStr, +}; + +use conduwuit::{Error, Result}; +use ruma::{api::client::error::ErrorKind, UInt}; + +use crate::rooms::short::ShortRoomId; + +// TODO: perhaps use some better form of token rather than just room count +#[derive(Debug, Eq, PartialEq)] +pub struct PaginationToken { + /// Path down the hierarchy of the room to start the response at, + /// excluding the root space. + pub short_room_ids: Vec, + pub limit: UInt, + pub max_depth: UInt, + pub suggested_only: bool, +} + +impl FromStr for PaginationToken { + type Err = Error; + + fn from_str(value: &str) -> Result { + let mut values = value.split('_'); + let mut pag_tok = || { + let short_room_ids = values + .next()? 
+ .split(',') + .filter_map(|room_s| u64::from_str(room_s).ok()) + .collect(); + + let limit = UInt::from_str(values.next()?).ok()?; + let max_depth = UInt::from_str(values.next()?).ok()?; + let slice = values.next()?; + let suggested_only = if values.next().is_none() { + if slice == "true" { + true + } else if slice == "false" { + false + } else { + None? + } + } else { + None? + }; + + Some(Self { + short_room_ids, + limit, + max_depth, + suggested_only, + }) + }; + + if let Some(token) = pag_tok() { + Ok(token) + } else { + Err(Error::BadRequest(ErrorKind::InvalidParam, "invalid token")) + } + } +} + +impl Display for PaginationToken { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + let short_room_ids = self + .short_room_ids + .iter() + .map(ToString::to_string) + .collect::>() + .join(","); + + write!(f, "{short_room_ids}_{}_{}_{}", self.limit, self.max_depth, self.suggested_only) + } +} From 5428526120cf49efda7b129d48b5a35ea1d87dde Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 23:03:24 +0000 Subject: [PATCH 172/328] add tail-efficient logic extension Signed-off-by: Jason Volk --- src/core/utils/future/bool_ext.rs | 82 +++++++++++++++++++++++++++++++ src/core/utils/future/mod.rs | 2 + 2 files changed, 84 insertions(+) create mode 100644 src/core/utils/future/bool_ext.rs diff --git a/src/core/utils/future/bool_ext.rs b/src/core/utils/future/bool_ext.rs new file mode 100644 index 00000000..6cb2f1fe --- /dev/null +++ b/src/core/utils/future/bool_ext.rs @@ -0,0 +1,82 @@ +//! Extended external extensions to futures::FutureExt + +use std::marker::Unpin; + +use futures::{ + future::{select_ok, try_join, try_join_all, try_select}, + Future, FutureExt, +}; + +pub trait BoolExt +where + Self: Future + Send, +{ + fn and(self, b: B) -> impl Future + Send + where + B: Future + Send, + Self: Sized; + + fn or(self, b: B) -> impl Future + Send + where + B: Future + Send + Unpin, + Self: Sized + Unpin; +} + +pub async fn and(args: I) -> impl Future + Send +where + I: Iterator + Send, + F: Future + Send, +{ + type Result = crate::Result<(), ()>; + + let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(())))); + + try_join_all(args).map(|result| result.is_ok()) +} + +pub async fn or(args: I) -> impl Future + Send +where + I: Iterator + Send, + F: Future + Send + Unpin, +{ + type Result = crate::Result<(), ()>; + + let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(())))); + + select_ok(args).map(|result| result.is_ok()) +} + +impl BoolExt for Fut +where + Fut: Future + Send, +{ + #[inline] + fn and(self, b: B) -> impl Future + Send + where + B: Future + Send, + Self: Sized, + { + type Result = crate::Result<(), ()>; + + let a = self.map(|a| a.then_some(()).ok_or(Result::Err(()))); + + let b = b.map(|b| b.then_some(()).ok_or(Result::Err(()))); + + try_join(a, b).map(|result| result.is_ok()) + } + + #[inline] + fn or(self, b: B) -> impl Future + Send + where + B: Future + Send + Unpin, + Self: Sized + Unpin, + { + type Result = crate::Result<(), ()>; + + let a = self.map(|a| a.then_some(()).ok_or(Result::Err(()))); + + let b = b.map(|b| b.then_some(()).ok_or(Result::Err(()))); + + try_select(a, b).map(|result| result.is_ok()) + } +} diff --git a/src/core/utils/future/mod.rs b/src/core/utils/future/mod.rs index 153dcfe1..2198a84f 100644 --- a/src/core/utils/future/mod.rs +++ b/src/core/utils/future/mod.rs @@ -1,7 +1,9 @@ +mod bool_ext; mod ext_ext; mod option_ext; mod try_ext_ext; +pub use bool_ext::{and, or, BoolExt}; pub use 
ext_ext::ExtExt; pub use option_ext::OptionExt; pub use try_ext_ext::TryExtExt; From 59c073d0d86ca8a6b9606037e2278890b5b84821 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 23:58:45 +0000 Subject: [PATCH 173/328] add unconstrained feature to service worker Signed-off-by: Jason Volk --- src/service/manager.rs | 9 +++++++-- src/service/sending/mod.rs | 13 +++++++++++-- src/service/service.rs | 5 +++++ 3 files changed, 23 insertions(+), 4 deletions(-) diff --git a/src/service/manager.rs b/src/service/manager.rs index ea33d285..e0d885c2 100644 --- a/src/service/manager.rs +++ b/src/service/manager.rs @@ -1,7 +1,7 @@ use std::{panic::AssertUnwindSafe, sync::Arc, time::Duration}; use conduwuit::{debug, debug_warn, error, trace, utils::time, warn, Err, Error, Result, Server}; -use futures::FutureExt; +use futures::{FutureExt, TryFutureExt}; use tokio::{ sync::{Mutex, MutexGuard}, task::{JoinHandle, JoinSet}, @@ -183,9 +183,14 @@ async fn worker(service: Arc) -> WorkerResult { let service_ = Arc::clone(&service); let result = AssertUnwindSafe(service_.worker()) .catch_unwind() - .await .map_err(Error::from_panic); + let result = if service.unconstrained() { + tokio::task::unconstrained(result).await + } else { + result.await + }; + // flattens JoinError for panic into worker's Error (service, result.unwrap_or_else(Err)) } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index b146ad49..86b219f7 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -22,7 +22,7 @@ use ruma::{ RoomId, ServerName, UserId, }; use smallvec::SmallVec; -use tokio::task::JoinSet; +use tokio::{task, task::JoinSet}; use self::data::Data; pub use self::{ @@ -111,8 +111,15 @@ impl crate::Service for Service { .enumerate() .fold(JoinSet::new(), |mut joinset, (id, _)| { let self_ = self.clone(); + let worker = self_.sender(id); + let worker = if self.unconstrained() { + task::unconstrained(worker).boxed() + } else { + worker.boxed() + }; + let runtime = self.server.runtime(); - let _abort = joinset.spawn_on(self_.sender(id).boxed(), runtime); + let _abort = joinset.spawn_on(worker, runtime); joinset }); @@ -139,6 +146,8 @@ impl crate::Service for Service { } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } + + fn unconstrained(&self) -> bool { true } } impl Service { diff --git a/src/service/service.rs b/src/service/service.rs index 7adb189e..cad01437 100644 --- a/src/service/service.rs +++ b/src/service/service.rs @@ -39,6 +39,11 @@ pub(crate) trait Service: Any + Send + Sync { /// Return the name of the service. /// i.e. `crate::service::make_name(std::module_path!())` fn name(&self) -> &str; + + /// Return true if the service worker opts out of the tokio cooperative + /// budgeting. This can reduce tail latency at the risk of event loop + /// starvation. + fn unconstrained(&self) -> bool { false } } /// Args are passed to `Service::build` when a service is constructed. 
This From e123a5b660a21ae444e154ac60812468c878ec58 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 7 Feb 2025 01:16:46 +0000 Subject: [PATCH 174/328] add state accessories for iterating state_keys of a type Signed-off-by: Jason Volk --- src/service/rooms/state_accessor/state.rs | 124 ++++++++++++++++++++-- 1 file changed, 114 insertions(+), 10 deletions(-) diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs index c47a5693..3cf168c1 100644 --- a/src/service/rooms/state_accessor/state.rs +++ b/src/service/rooms/state_accessor/state.rs @@ -9,7 +9,7 @@ use conduwuit::{ PduEvent, Result, }; use database::Deserialized; -use futures::{future::try_join, FutureExt, Stream, StreamExt, TryFutureExt}; +use futures::{future::try_join, pin_mut, FutureExt, Stream, StreamExt, TryFutureExt}; use ruma::{ events::{ room::member::{MembershipState, RoomMemberEventContent}, @@ -69,7 +69,6 @@ where } #[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] pub async fn state_contains( &self, shortstatehash: ShortStateHash, @@ -90,7 +89,18 @@ pub async fn state_contains( } #[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] +pub async fn state_contains_type( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, +) -> bool { + let state_keys = self.state_keys(shortstatehash, event_type); + + pin_mut!(state_keys); + state_keys.next().await.is_some() +} + +#[implement(super::Service)] pub async fn state_contains_shortstatekey( &self, shortstatehash: ShortStateHash, @@ -125,7 +135,6 @@ pub async fn state_get( /// Returns a single EventId from `room_id` with key (`event_type`, /// `state_key`). #[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] pub async fn state_get_id( &self, shortstatehash: ShortStateHash, @@ -149,7 +158,6 @@ where /// Returns a single EventId from `room_id` with key (`event_type`, /// `state_key`). #[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] pub async fn state_get_shortid( &self, shortstatehash: ShortStateHash, @@ -177,6 +185,103 @@ pub async fn state_get_shortid( .await? } +/// Iterates the state_keys for an event_type in the state; current state +/// event_id included. +#[implement(super::Service)] +pub fn state_keys_with_ids<'a, Id>( + &'a self, + shortstatehash: ShortStateHash, + event_type: &'a StateEventType, +) -> impl Stream + Send + 'a +where + Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned + 'a, + ::Owned: Borrow, +{ + let state_keys_with_short_ids = self + .state_keys_with_shortids(shortstatehash, event_type) + .unzip() + .map(|(ssks, sids): (Vec, Vec)| (ssks, sids)) + .shared(); + + let state_keys = state_keys_with_short_ids + .clone() + .map(at!(0)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + let shorteventids = state_keys_with_short_ids + .map(at!(1)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + self.services + .short + .multi_get_eventid_from_short(shorteventids) + .zip(state_keys) + .ready_filter_map(|(eid, sk)| eid.map(move |eid| (sk, eid)).ok()) +} + +/// Iterates the state_keys for an event_type in the state; current state +/// event_id included. 
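// Editor's sketch (toy data, plain `futures` crate API only): both
// state_keys_with_* accessors use the same trick of resolving one future into
// two parallel streams via `shared()` + `flatten_stream()`, then zipping the
// halves back together after the short-id lookups.
use futures::{future, FutureExt, StreamExt};

fn main() {
    futures::executor::block_on(async {
        // Stand-in for the (state_key, shorteventid) pairs loaded from the db.
        let pairs =
            future::ready(vec![("@alice:example.org", 1_u64), ("@bob:example.org", 2)]).shared();

        // Each half of the pair becomes its own stream over the shared result.
        let state_keys = pairs
            .clone()
            .map(|v| futures::stream::iter(v.into_iter().map(|(k, _)| k)))
            .flatten_stream();
        let short_ids = pairs
            .map(|v| futures::stream::iter(v.into_iter().map(|(_, id)| id)))
            .flatten_stream();

        // Zipping restores the original pairing.
        let zipped: Vec<_> = state_keys.zip(short_ids).collect().await;
        assert_eq!(zipped, vec![("@alice:example.org", 1), ("@bob:example.org", 2)]);
    });
}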
+#[implement(super::Service)] +pub fn state_keys_with_shortids<'a>( + &'a self, + shortstatehash: ShortStateHash, + event_type: &'a StateEventType, +) -> impl Stream + Send + 'a { + let short_ids = self + .state_full_shortids(shortstatehash) + .expect_ok() + .unzip() + .map(|(ssks, sids): (Vec, Vec)| (ssks, sids)) + .shared(); + + let shortstatekeys = short_ids + .clone() + .map(at!(0)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + let shorteventids = short_ids + .map(at!(1)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + self.services + .short + .multi_get_statekey_from_short(shortstatekeys) + .zip(shorteventids) + .ready_filter_map(|(res, id)| res.map(|res| (res, id)).ok()) + .ready_filter_map(move |((event_type_, state_key), event_id)| { + event_type_.eq(event_type).then_some((state_key, event_id)) + }) +} + +/// Iterates the state_keys for an event_type in the state +#[implement(super::Service)] +pub fn state_keys<'a>( + &'a self, + shortstatehash: ShortStateHash, + event_type: &'a StateEventType, +) -> impl Stream + Send + 'a { + let short_ids = self + .state_full_shortids(shortstatehash) + .expect_ok() + .map(at!(0)); + + self.services + .short + .multi_get_statekey_from_short(short_ids) + .ready_filter_map(Result::ok) + .ready_filter_map(move |(event_type_, state_key)| { + event_type_.eq(event_type).then_some(state_key) + }) +} + /// Returns the state events removed between the interval (present in .0 but /// not in .1) #[implement(super::Service)] @@ -191,11 +296,10 @@ pub fn state_removed( /// Returns the state events added between the interval (present in .1 but /// not in .0) #[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] -pub fn state_added<'a>( - &'a self, +pub fn state_added( + &self, shortstatehash: pair_of!(ShortStateHash), -) -> impl Stream + Send + 'a { +) -> impl Stream + Send + '_ { let a = self.load_full_state(shortstatehash.0); let b = self.load_full_state(shortstatehash.1); try_join(a, b) @@ -239,7 +343,6 @@ pub fn state_full_pdus( /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] pub fn state_full_ids<'a, Id>( &'a self, shortstatehash: ShortStateHash, @@ -293,6 +396,7 @@ pub fn state_full_shortids( } #[implement(super::Service)] +#[tracing::instrument(name = "load", level = "debug", skip(self))] async fn load_full_state(&self, shortstatehash: ShortStateHash) -> Result> { self.services .state_compressor From ecc9099127cc6779cd74723ae6169f7a22276ab7 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 7 Feb 2025 23:18:02 +0000 Subject: [PATCH 175/328] add conf item to re-disable atomic flush Signed-off-by: Jason Volk --- conduwuit-example.toml | 7 +++++++ src/core/config/mod.rs | 7 +++++++ src/database/engine/db_opts.rs | 4 ++-- 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index f9da856d..9b6f6ce0 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -925,6 +925,13 @@ # #rocksdb_checksums = true +# Enables the "atomic flush" mode in rocksdb. This option is not intended +# for users. It may be removed or ignored in future versions. Atomic flush +# may be enabled by the paranoid to possibly improve database integrity at +# the cost of performance. +# +#rocksdb_atomic_flush = false + # Database repair mode (for RocksDB SST corruption). 
# # Use this option when the server reports corruption while running or diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 9514f7a0..e66532ee 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1089,6 +1089,13 @@ pub struct Config { #[serde(default = "true_fn")] pub rocksdb_checksums: bool, + /// Enables the "atomic flush" mode in rocksdb. This option is not intended + /// for users. It may be removed or ignored in future versions. Atomic flush + /// may be enabled by the paranoid to possibly improve database integrity at + /// the cost of performance. + #[serde(default)] + pub rocksdb_atomic_flush: bool, + /// Database repair mode (for RocksDB SST corruption). /// /// Use this option when the server reports corruption while running or diff --git a/src/database/engine/db_opts.rs b/src/database/engine/db_opts.rs index 01847257..6abeb4b0 100644 --- a/src/database/engine/db_opts.rs +++ b/src/database/engine/db_opts.rs @@ -29,9 +29,9 @@ pub(crate) fn db_options(config: &Config, env: &Env, row_cache: &Cache) -> Resul opts.set_max_file_opening_threads(0); // IO - opts.set_atomic_flush(true); opts.set_manual_wal_flush(true); - opts.set_enable_pipelined_write(false); + opts.set_atomic_flush(config.rocksdb_atomic_flush); + opts.set_enable_pipelined_write(!config.rocksdb_atomic_flush); if config.rocksdb_direct_io { opts.set_use_direct_reads(true); opts.set_use_direct_io_for_flush_and_compaction(true); From b872f8e593afaee437331edd429a2d801f069aab Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 8 Feb 2025 00:16:37 +0000 Subject: [PATCH 176/328] optimize with SmallString; consolidate related re-exports Signed-off-by: Jason Volk --- Cargo.lock | 16 ++++++++++++---- Cargo.toml | 4 ++++ src/api/client/room/create.rs | 8 ++++---- src/api/client/room/upgrade.rs | 12 ++++++------ src/api/client/state.rs | 2 +- src/api/client/sync/v3.rs | 2 +- src/api/client/sync/v4.rs | 17 +++++++---------- src/api/client/sync/v5.rs | 18 +++++++----------- src/core/Cargo.toml | 2 ++ src/core/mod.rs | 5 ++++- src/core/pdu/builder.rs | 9 ++++++--- src/core/pdu/mod.rs | 8 +++++--- src/core/pdu/state_key.rs | 8 ++++++++ src/database/Cargo.toml | 2 -- src/database/de.rs | 5 +++-- src/database/keyval.rs | 3 +-- src/database/map/contains.rs | 2 +- src/database/map/insert.rs | 3 +-- src/database/map/qry.rs | 3 +-- src/database/map/remove.rs | 3 +-- src/database/pool.rs | 2 +- src/database/tests.rs | 6 ++++-- src/service/Cargo.toml | 2 -- src/service/migrations.rs | 4 +++- src/service/resolver/cache.rs | 2 +- src/service/resolver/fed.rs | 3 +-- src/service/resolver/mod.rs | 3 +-- .../rooms/event_handler/handle_outlier_pdu.rs | 10 ++++------ .../rooms/event_handler/resolve_state.rs | 1 + .../rooms/event_handler/state_at_incoming.rs | 1 + src/service/rooms/pdu_metadata/data.rs | 2 +- src/service/rooms/search/mod.rs | 2 +- src/service/rooms/short/mod.rs | 9 ++++----- src/service/rooms/state_accessor/room_state.rs | 4 ++-- src/service/rooms/state_accessor/state.rs | 12 ++++++------ src/service/rooms/state_accessor/user_can.rs | 2 +- src/service/rooms/state_compressor/mod.rs | 2 +- src/service/rooms/timeline/mod.rs | 8 ++++---- src/service/sending/mod.rs | 2 +- 39 files changed, 113 insertions(+), 96 deletions(-) create mode 100644 src/core/pdu/state_key.rs diff --git a/Cargo.lock b/Cargo.lock index caef5859..5981a2a6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -822,6 +822,8 @@ dependencies = [ "serde_json", "serde_regex", "serde_yaml", + "smallstr", + "smallvec", "thiserror 2.0.11", 
"tikv-jemalloc-ctl", "tikv-jemalloc-sys", @@ -839,7 +841,6 @@ dependencies = [ name = "conduwuit_database" version = "0.5.0" dependencies = [ - "arrayvec", "async-channel", "conduwuit_core", "const-str", @@ -850,7 +851,6 @@ dependencies = [ "rust-rocksdb-uwu", "serde", "serde_json", - "smallvec", "tokio", "tracing", ] @@ -902,7 +902,6 @@ dependencies = [ name = "conduwuit_service" version = "0.5.0" dependencies = [ - "arrayvec", "async-trait", "base64 0.22.1", "blurhash", @@ -929,7 +928,6 @@ dependencies = [ "serde_json", "serde_yaml", "sha2", - "smallvec", "termimad", "tokio", "tracing", @@ -4275,6 +4273,16 @@ dependencies = [ "autocfg", ] +[[package]] +name = "smallstr" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63b1aefdf380735ff8ded0b15f31aab05daf1f70216c01c02a12926badd1df9d" +dependencies = [ + "serde", + "smallvec", +] + [[package]] name = "smallvec" version = "1.13.2" diff --git a/Cargo.toml b/Cargo.toml index 38654be3..b93877bd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,6 +40,10 @@ features = [ "write", ] +[workspace.dependencies.smallstr] +version = "0.3" +features = ["ffi", "std", "union"] + [workspace.dependencies.const-str] version = "0.5.7" diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index a401b63d..e362b3b3 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -2,7 +2,7 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ - debug_info, debug_warn, err, error, info, pdu::PduBuilder, warn, Err, Error, Result, + debug_info, debug_warn, err, error, info, pdu::PduBuilder, warn, Err, Error, Result, StateKey, }; use futures::FutureExt; use ruma::{ @@ -198,7 +198,7 @@ pub(crate) async fn create_room_route( event_type: TimelineEventType::RoomCreate, content: to_raw_value(&create_content) .expect("create event content serialization"), - state_key: Some(String::new()), + state_key: Some(StateKey::new()), ..Default::default() }, sender_user, @@ -267,7 +267,7 @@ pub(crate) async fn create_room_route( event_type: TimelineEventType::RoomPowerLevels, content: to_raw_value(&power_levels_content) .expect("serialized power_levels event content"), - state_key: Some(String::new()), + state_key: Some(StateKey::new()), ..Default::default() }, sender_user, @@ -371,7 +371,7 @@ pub(crate) async fn create_room_route( } // Implicit state key defaults to "" - pdu_builder.state_key.get_or_insert_with(String::new); + pdu_builder.state_key.get_or_insert_with(StateKey::new); // Silently skip encryption events if they are not allowed if pdu_builder.event_type == TimelineEventType::RoomEncryption diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index 2f9706f4..a624f95f 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -1,7 +1,7 @@ use std::cmp::max; use axum::extract::State; -use conduwuit::{err, info, pdu::PduBuilder, Error, Result}; +use conduwuit::{err, info, pdu::PduBuilder, Error, Result, StateKey}; use futures::StreamExt; use ruma::{ api::client::{error::ErrorKind, room::upgrade_room}, @@ -77,7 +77,7 @@ pub(crate) async fn upgrade_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomTombstoneEventContent { + PduBuilder::state(StateKey::new(), &RoomTombstoneEventContent { body: "This room has been replaced".to_owned(), replacement_room: replacement_room.clone(), }), @@ -159,7 +159,7 @@ pub(crate) async fn upgrade_room_route( content: 
to_raw_value(&create_event_content) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some(String::new()), + state_key: Some(StateKey::new()), redacts: None, timestamp: None, }, @@ -188,7 +188,7 @@ pub(crate) async fn upgrade_room_route( }) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some(sender_user.to_string()), + state_key: Some(sender_user.as_str().into()), redacts: None, timestamp: None, }, @@ -217,7 +217,7 @@ pub(crate) async fn upgrade_room_route( PduBuilder { event_type: event_type.to_string().into(), content: event_content, - state_key: Some(String::new()), + state_key: Some(StateKey::new()), ..Default::default() }, sender_user, @@ -272,7 +272,7 @@ pub(crate) async fn upgrade_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomPowerLevelsEventContent { + PduBuilder::state(StateKey::new(), &RoomPowerLevelsEventContent { events_default: new_level, invite: new_level, ..power_levels_event_content diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 8555f88b..f73ffa46 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -172,7 +172,7 @@ async fn send_state_event_for_key_helper( PduBuilder { event_type: event_type.to_string().into(), content: serde_json::from_str(json.json().get())?, - state_key: Some(String::from(state_key)), + state_key: Some(state_key.into()), timestamp, ..Default::default() }, diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 1d1a91ba..f9dcd5ec 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -441,7 +441,7 @@ async fn handle_left_room( kind: RoomMember, content: serde_json::from_str(r#"{"membership":"leave"}"#) .expect("this is valid JSON"), - state_key: Some(sender_user.to_string()), + state_key: Some(sender_user.as_str().into()), unsigned: None, // The following keys are dropped on conversion room_id: room_id.clone(), diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 66793ba1..4e474ef3 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -29,7 +29,7 @@ use ruma::{ TimelineEventType::*, }, serde::Raw, - uint, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UInt, + uint, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, }; use service::rooms::read_receipt::pack_receipts; @@ -258,12 +258,9 @@ pub(crate) async fn sync_events_v4_route( continue; }; if pdu.kind == RoomMember { - if let Some(state_key) = &pdu.state_key { - let user_id = - OwnedUserId::parse(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; - + if let Some(Ok(user_id)) = + pdu.state_key.as_deref().map(UserId::parse) + { if user_id == *sender_user { continue; } @@ -275,18 +272,18 @@ pub(crate) async fn sync_events_v4_route( if !share_encrypted_room( &services, sender_user, - &user_id, + user_id, Some(room_id), ) .await { - device_list_changes.insert(user_id); + device_list_changes.insert(user_id.to_owned()); } }, | MembershipState::Leave => { // Write down users that have left encrypted rooms we // are in - left_encrypted_users.insert(user_id); + left_encrypted_users.insert(user_id.to_owned()); }, | _ => {}, } diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index e7b5fe74..f8ee1047 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -25,7 +25,7 @@ use ruma::{ }, serde::Raw, state_res::TypeStateKey, - uint, DeviceId, OwnedEventId, OwnedRoomId, 
OwnedUserId, RoomId, UInt, UserId, + uint, DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, }; use service::{rooms::read_receipt::pack_receipts, PduCount}; @@ -765,13 +765,9 @@ async fn collect_e2ee<'a>( continue; }; if pdu.kind == TimelineEventType::RoomMember { - if let Some(state_key) = &pdu.state_key { - let user_id = - OwnedUserId::parse(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; - - if user_id == *sender_user { + if let Some(Ok(user_id)) = pdu.state_key.as_deref().map(UserId::parse) + { + if user_id == sender_user { continue; } @@ -782,18 +778,18 @@ async fn collect_e2ee<'a>( if !share_encrypted_room( &services, sender_user, - &user_id, + user_id, Some(room_id), ) .await { - device_list_changes.insert(user_id); + device_list_changes.insert(user_id.to_owned()); } }, | MembershipState::Leave => { // Write down users that have left encrypted rooms we // are in - left_encrypted_users.insert(user_id); + left_encrypted_users.insert(user_id.to_owned()); }, | _ => {}, } diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index ef2df4ff..d4b0c83b 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -92,6 +92,8 @@ serde_json.workspace = true serde_regex.workspace = true serde_yaml.workspace = true serde.workspace = true +smallvec.workspace = true +smallstr.workspace = true thiserror.workspace = true tikv-jemallocator.optional = true tikv-jemallocator.workspace = true diff --git a/src/core/mod.rs b/src/core/mod.rs index 1416ed9e..ee128628 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -10,14 +10,17 @@ pub mod pdu; pub mod server; pub mod utils; +pub use ::arrayvec; pub use ::http; pub use ::ruma; +pub use ::smallstr; +pub use ::smallvec; pub use ::toml; pub use ::tracing; pub use config::Config; pub use error::Error; pub use info::{rustc_flags_capture, version, version::version}; -pub use pdu::{Event, PduBuilder, PduCount, PduEvent, PduId, RawPduId}; +pub use pdu::{Event, PduBuilder, PduCount, PduEvent, PduId, RawPduId, StateKey}; pub use server::Server; pub use utils::{ctor, dtor, implement, result, result::Result}; diff --git a/src/core/pdu/builder.rs b/src/core/pdu/builder.rs index b25d4e9e..0efee128 100644 --- a/src/core/pdu/builder.rs +++ b/src/core/pdu/builder.rs @@ -7,6 +7,8 @@ use ruma::{ use serde::Deserialize; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use super::StateKey; + /// Build the start of a PDU in order to add it to the Database. 
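// Editor's sketch of the new call-site ergonomics (paths assume the
// `conduwuit` core crate alias used throughout this tree): `PduBuilder::state`
// becomes generic over `Into<StateKey>`, so callers hand it a `&str` or a
// `StateKey` directly instead of allocating a `String` per state key.
use conduwuit::{PduBuilder, StateKey};
use ruma::events::room::member::{MembershipState, RoomMemberEventContent};

fn invite_builder() -> PduBuilder {
    // A member state event keyed by the target user; the &str converts
    // straight into the SmallString-backed StateKey.
    PduBuilder::state(
        "@bob:example.org",
        &RoomMemberEventContent::new(MembershipState::Invite),
    )
}

fn empty_state_key() -> StateKey {
    // m.room.create, m.room.power_levels etc. use the empty state key.
    StateKey::new()
}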
#[derive(Debug, Deserialize)] pub struct Builder { @@ -17,7 +19,7 @@ pub struct Builder { pub unsigned: Option, - pub state_key: Option, + pub state_key: Option, pub redacts: Option, @@ -29,15 +31,16 @@ pub struct Builder { type Unsigned = BTreeMap; impl Builder { - pub fn state(state_key: String, content: &T) -> Self + pub fn state(state_key: S, content: &T) -> Self where T: EventContent, + S: Into, { Self { event_type: content.event_type().into(), content: to_raw_value(content) .expect("Builder failed to serialize state event content to RawValue"), - state_key: Some(state_key), + state_key: Some(state_key.into()), ..Self::default() } } diff --git a/src/core/pdu/mod.rs b/src/core/pdu/mod.rs index 1a8f6a70..9cb42239 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/pdu/mod.rs @@ -8,6 +8,7 @@ mod id; mod raw_id; mod redact; mod relation; +mod state_key; mod strip; #[cfg(test)] mod tests; @@ -17,7 +18,7 @@ use std::cmp::Ordering; use ruma::{ events::TimelineEventType, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, - OwnedRoomId, OwnedUserId, UInt, + OwnedRoomId, OwnedServerName, OwnedUserId, UInt, }; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue as RawJsonValue; @@ -29,6 +30,7 @@ pub use self::{ event_id::*, id::*, raw_id::*, + state_key::{ShortStateKey, StateKey}, Count as PduCount, Id as PduId, Pdu as PduEvent, RawId as RawPduId, }; use crate::Result; @@ -40,13 +42,13 @@ pub struct Pdu { pub room_id: OwnedRoomId, pub sender: OwnedUserId, #[serde(skip_serializing_if = "Option::is_none")] - pub origin: Option, + pub origin: Option, pub origin_server_ts: UInt, #[serde(rename = "type")] pub kind: TimelineEventType, pub content: Box, #[serde(skip_serializing_if = "Option::is_none")] - pub state_key: Option, + pub state_key: Option, pub prev_events: Vec, pub depth: UInt, pub auth_events: Vec, diff --git a/src/core/pdu/state_key.rs b/src/core/pdu/state_key.rs new file mode 100644 index 00000000..4af4fcf7 --- /dev/null +++ b/src/core/pdu/state_key.rs @@ -0,0 +1,8 @@ +use smallstr::SmallString; + +use super::ShortId; + +pub type StateKey = SmallString<[u8; INLINE_SIZE]>; +pub type ShortStateKey = ShortId; + +const INLINE_SIZE: usize = 48; diff --git a/src/database/Cargo.toml b/src/database/Cargo.toml index 557c9a3e..067c6f5f 100644 --- a/src/database/Cargo.toml +++ b/src/database/Cargo.toml @@ -34,7 +34,6 @@ zstd_compression = [ ] [dependencies] -arrayvec.workspace = true async-channel.workspace = true conduwuit-core.workspace = true const-str.workspace = true @@ -45,7 +44,6 @@ minicbor-serde.workspace = true rust-rocksdb.workspace = true serde.workspace = true serde_json.workspace = true -smallvec.workspace = true tokio.workspace = true tracing.workspace = true diff --git a/src/database/de.rs b/src/database/de.rs index 8e914fcc..441bb4ec 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -1,5 +1,6 @@ -use arrayvec::ArrayVec; -use conduwuit::{checked, debug::DebugInspect, err, utils::string, Error, Result}; +use conduwuit::{ + arrayvec::ArrayVec, checked, debug::DebugInspect, err, utils::string, Error, Result, +}; use serde::{ de, de::{DeserializeSeed, Visitor}, diff --git a/src/database/keyval.rs b/src/database/keyval.rs index 056e53d1..f572d15f 100644 --- a/src/database/keyval.rs +++ b/src/database/keyval.rs @@ -1,6 +1,5 @@ -use conduwuit::Result; +use conduwuit::{smallvec::SmallVec, Result}; use serde::{Deserialize, Serialize}; -use smallvec::SmallVec; use crate::{de, ser}; diff --git a/src/database/map/contains.rs b/src/database/map/contains.rs 
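// Editor's sketch mirroring the `state_key.rs` definition above: state keys of
// at most 48 bytes (the common case: "", event-type keys, most user IDs) are
// stored inline, so no heap allocation is made for them.
type StateKey = smallstr::SmallString<[u8; 48]>;

fn demo() {
    let empty = StateKey::new();
    let user: StateKey = "@alice:example.org".into();

    assert!(empty.is_empty());
    assert_eq!(user.as_str(), "@alice:example.org");
    assert_eq!(user.len(), 18);
}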
index 424f8970..7a09b358 100644 --- a/src/database/map/contains.rs +++ b/src/database/map/contains.rs @@ -1,7 +1,7 @@ use std::{convert::AsRef, fmt::Debug, future::Future, io::Write, sync::Arc}; -use arrayvec::ArrayVec; use conduwuit::{ + arrayvec::ArrayVec, err, implement, utils::{future::TryExtExt, result::FlatOk}, Result, diff --git a/src/database/map/insert.rs b/src/database/map/insert.rs index 68c305af..6f010097 100644 --- a/src/database/map/insert.rs +++ b/src/database/map/insert.rs @@ -5,8 +5,7 @@ use std::{convert::AsRef, fmt::Debug, io::Write}; -use arrayvec::ArrayVec; -use conduwuit::implement; +use conduwuit::{arrayvec::ArrayVec, implement}; use rocksdb::WriteBatchWithTransaction; use serde::Serialize; diff --git a/src/database/map/qry.rs b/src/database/map/qry.rs index 401eba43..178f4a61 100644 --- a/src/database/map/qry.rs +++ b/src/database/map/qry.rs @@ -1,7 +1,6 @@ use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc}; -use arrayvec::ArrayVec; -use conduwuit::{implement, Result}; +use conduwuit::{arrayvec::ArrayVec, implement, Result}; use futures::Future; use serde::Serialize; diff --git a/src/database/map/remove.rs b/src/database/map/remove.rs index ec37bbfe..a7ae9133 100644 --- a/src/database/map/remove.rs +++ b/src/database/map/remove.rs @@ -1,7 +1,6 @@ use std::{convert::AsRef, fmt::Debug, io::Write}; -use arrayvec::ArrayVec; -use conduwuit::implement; +use conduwuit::{arrayvec::ArrayVec, implement}; use serde::Serialize; use crate::{keyval::KeyBuf, ser, util::or_else}; diff --git a/src/database/pool.rs b/src/database/pool.rs index c753855a..7636ff5e 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -14,6 +14,7 @@ use async_channel::{QueueStrategy, Receiver, RecvError, Sender}; use conduwuit::{ debug, debug_warn, err, error, implement, result::DebugInspect, + smallvec::SmallVec, trace, utils::sys::compute::{get_affinity, nth_core_available, set_affinity}, Error, Result, Server, @@ -21,7 +22,6 @@ use conduwuit::{ use futures::{channel::oneshot, TryFutureExt}; use oneshot::Sender as ResultSender; use rocksdb::Direction; -use smallvec::SmallVec; use self::configure::configure; use crate::{keyval::KeyBuf, stream, Handle, Map}; diff --git a/src/database/tests.rs b/src/database/tests.rs index e6c85983..594170e8 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -2,8 +2,10 @@ use std::fmt::Debug; -use arrayvec::ArrayVec; -use conduwuit::ruma::{serde::Raw, EventId, RoomId, UserId}; +use conduwuit::{ + arrayvec::ArrayVec, + ruma::{serde::Raw, EventId, RoomId, UserId}, +}; use serde::Serialize; use crate::{ diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml index 30183179..caeea318 100644 --- a/src/service/Cargo.toml +++ b/src/service/Cargo.toml @@ -47,7 +47,6 @@ zstd_compression = [ blurhashing = ["dep:image","dep:blurhash"] [dependencies] -arrayvec.workspace = true async-trait.workspace = true base64.workspace = true bytes.workspace = true @@ -75,7 +74,6 @@ serde_json.workspace = true serde.workspace = true serde_yaml.workspace = true sha2.workspace = true -smallvec.workspace = true termimad.workspace = true termimad.optional = true tokio.workspace = true diff --git a/src/service/migrations.rs b/src/service/migrations.rs index 9c3ea293..69b1be4e 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -507,8 +507,10 @@ async fn fix_referencedevents_missing_sep(services: &Services) -> Result { } async fn fix_readreceiptid_readreceipt_duplicates(services: &Services) -> Result { + use conduwuit::arrayvec::ArrayString; 
use ruma::identifiers_validation::MAX_BYTES; - type ArrayId = arrayvec::ArrayString; + + type ArrayId = ArrayString; type Key<'a> = (&'a RoomId, u64, &'a UserId); warn!("Fixing undeleted entries in readreceiptid_readreceipt..."); diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index 22a92865..7b4f104d 100644 --- a/src/service/resolver/cache.rs +++ b/src/service/resolver/cache.rs @@ -1,7 +1,7 @@ use std::{net::IpAddr, sync::Arc, time::SystemTime}; -use arrayvec::ArrayVec; use conduwuit::{ + arrayvec::ArrayVec, at, err, implement, utils::{math::Expected, rand, stream::TryIgnore}, Result, diff --git a/src/service/resolver/fed.rs b/src/service/resolver/fed.rs index bfe100e7..e5bee9ac 100644 --- a/src/service/resolver/fed.rs +++ b/src/service/resolver/fed.rs @@ -4,8 +4,7 @@ use std::{ net::{IpAddr, SocketAddr}, }; -use arrayvec::ArrayString; -use conduwuit::utils::math::Expected; +use conduwuit::{arrayvec::ArrayString, utils::math::Expected}; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs index 090e562d..6be9d42d 100644 --- a/src/service/resolver/mod.rs +++ b/src/service/resolver/mod.rs @@ -6,8 +6,7 @@ mod tests; use std::sync::Arc; -use arrayvec::ArrayString; -use conduwuit::{utils::MutexMap, Result, Server}; +use conduwuit::{arrayvec::ArrayString, utils::MutexMap, Result, Server}; use self::{cache::Cache, dns::Resolver}; use crate::{client, Dep}; diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index a35aabe0..b7c38313 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -6,10 +6,8 @@ use std::{ use conduwuit::{debug, debug_info, err, implement, trace, warn, Err, Error, PduEvent, Result}; use futures::{future::ready, TryFutureExt}; use ruma::{ - api::client::error::ErrorKind, - events::StateEventType, - state_res::{self, EventTypeExt}, - CanonicalJsonObject, CanonicalJsonValue, EventId, RoomId, ServerName, + api::client::error::ErrorKind, events::StateEventType, state_res, CanonicalJsonObject, + CanonicalJsonValue, EventId, RoomId, ServerName, }; use super::{check_room_id, get_room_version_id, to_room_version}; @@ -123,7 +121,7 @@ pub(super) async fn handle_outlier_pdu<'a>( // The original create event must be in the auth events if !matches!( auth_events - .get(&(StateEventType::RoomCreate, String::new())) + .get(&(StateEventType::RoomCreate, String::new().into())) .map(AsRef::as_ref), Some(_) | None ) { @@ -134,7 +132,7 @@ pub(super) async fn handle_outlier_pdu<'a>( } let state_fetch = |ty: &'static StateEventType, sk: &str| { - let key = ty.with_state_key(sk); + let key = (ty.to_owned(), sk.into()); ready(auth_events.get(&key)) }; diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 4d99b088..eb9ca01f 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -64,6 +64,7 @@ pub async fn resolve_state( .multi_get_statekey_from_short(shortstatekeys) .zip(event_ids) .ready_filter_map(|(ty_sk, id)| Some((ty_sk.ok()?, id))) + .map(|((ty, sk), id)| ((ty, sk.as_str().to_owned()), id)) .collect() }) .map(Ok::<_, Error>) diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 8ae6354c..7bf3b8f8 100644 --- 
a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -172,6 +172,7 @@ async fn state_at_incoming_fork( .short .get_statekey_from_short(*k) .map_ok(|(ty, sk)| ((ty, sk), id.clone())) + .map_ok(|((ty, sk), id)| ((ty, sk.as_str().to_owned()), id)) }) .ready_filter_map(Result::ok) .collect() diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 2e6ecbb5..26e11ded 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -1,7 +1,7 @@ use std::{mem::size_of, sync::Arc}; -use arrayvec::ArrayVec; use conduwuit::{ + arrayvec::ArrayVec, result::LogErr, utils::{ stream::{TryIgnore, WidebandExt}, diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 35cfd444..cc015237 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,7 +1,7 @@ use std::sync::Arc; -use arrayvec::ArrayVec; use conduwuit::{ + arrayvec::ArrayVec, implement, utils::{ set, diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index dd586d02..8728325a 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -1,7 +1,7 @@ use std::{borrow::Borrow, fmt::Debug, mem::size_of_val, sync::Arc}; -pub use conduwuit::pdu::{ShortEventId, ShortId, ShortRoomId}; -use conduwuit::{err, implement, utils, utils::IterStream, Result}; +pub use conduwuit::pdu::{ShortEventId, ShortId, ShortRoomId, ShortStateKey}; +use conduwuit::{err, implement, utils, utils::IterStream, Result, StateKey}; use database::{Deserialized, Get, Map, Qry}; use futures::{Stream, StreamExt}; use ruma::{events::StateEventType, EventId, RoomId}; @@ -28,7 +28,6 @@ struct Services { } pub type ShortStateHash = ShortId; -pub type ShortStateKey = ShortId; impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { @@ -181,7 +180,7 @@ where pub async fn get_statekey_from_short( &self, shortstatekey: ShortStateKey, -) -> Result<(StateEventType, String)> { +) -> Result<(StateEventType, StateKey)> { const BUFSIZE: usize = size_of::(); self.db @@ -200,7 +199,7 @@ pub async fn get_statekey_from_short( pub fn multi_get_statekey_from_short<'a, S>( &'a self, shortstatekey: S, -) -> impl Stream> + Send + 'a +) -> impl Stream> + Send + 'a where S: Stream + Send + 'a, { diff --git a/src/service/rooms/state_accessor/room_state.rs b/src/service/rooms/state_accessor/room_state.rs index 98a82cea..e3ec55fe 100644 --- a/src/service/rooms/state_accessor/room_state.rs +++ b/src/service/rooms/state_accessor/room_state.rs @@ -1,6 +1,6 @@ use std::borrow::Borrow; -use conduwuit::{err, implement, PduEvent, Result}; +use conduwuit::{err, implement, PduEvent, Result, StateKey}; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{events::StateEventType, EventId, RoomId}; use serde::Deserialize; @@ -27,7 +27,7 @@ where pub fn room_state_full<'a>( &'a self, room_id: &'a RoomId, -) -> impl Stream> + Send + 'a { +) -> impl Stream> + Send + 'a { self.services .state .get_room_shortstatehash(room_id) diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs index 3cf168c1..da1500cb 100644 --- a/src/service/rooms/state_accessor/state.rs +++ b/src/service/rooms/state_accessor/state.rs @@ -6,7 +6,7 @@ use conduwuit::{ result::FlatOk, stream::{BroadbandExt, IterStream, ReadyExt, TryExpect}, }, - PduEvent, Result, + PduEvent, Result, StateKey, }; use database::Deserialized; use 
futures::{future::try_join, pin_mut, FutureExt, Stream, StreamExt, TryFutureExt}; @@ -192,7 +192,7 @@ pub fn state_keys_with_ids<'a, Id>( &'a self, shortstatehash: ShortStateHash, event_type: &'a StateEventType, -) -> impl Stream + Send + 'a +) -> impl Stream + Send + 'a where Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned + 'a, ::Owned: Borrow, @@ -200,7 +200,7 @@ where let state_keys_with_short_ids = self .state_keys_with_shortids(shortstatehash, event_type) .unzip() - .map(|(ssks, sids): (Vec, Vec)| (ssks, sids)) + .map(|(ssks, sids): (Vec, Vec)| (ssks, sids)) .shared(); let state_keys = state_keys_with_short_ids @@ -230,7 +230,7 @@ pub fn state_keys_with_shortids<'a>( &'a self, shortstatehash: ShortStateHash, event_type: &'a StateEventType, -) -> impl Stream + Send + 'a { +) -> impl Stream + Send + 'a { let short_ids = self .state_full_shortids(shortstatehash) .expect_ok() @@ -267,7 +267,7 @@ pub fn state_keys<'a>( &'a self, shortstatehash: ShortStateHash, event_type: &'a StateEventType, -) -> impl Stream + Send + 'a { +) -> impl Stream + Send + 'a { let short_ids = self .state_full_shortids(shortstatehash) .expect_ok() @@ -314,7 +314,7 @@ pub fn state_added( pub fn state_full( &self, shortstatehash: ShortStateHash, -) -> impl Stream + Send + '_ { +) -> impl Stream + Send + '_ { self.state_full_pdus(shortstatehash) .ready_filter_map(|pdu| { Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu)) diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs index 725a4fba..0332c227 100644 --- a/src/service/rooms/state_accessor/user_can.rs +++ b/src/service/rooms/state_accessor/user_can.rs @@ -175,7 +175,7 @@ pub async fn user_can_invite( .timeline .create_hash_and_sign_event( PduBuilder::state( - target_user.into(), + target_user.as_str(), &RoomMemberEventContent::new(MembershipState::Invite), ), sender, diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 3d68dff6..18731809 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -5,8 +5,8 @@ use std::{ sync::{Arc, Mutex}, }; -use arrayvec::ArrayVec; use conduwuit::{ + arrayvec::ArrayVec, at, checked, err, expected, utils, utils::{bytes, math::usize_from_f64, stream::IterStream}, Result, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index a913034d..a7edd4a4 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -38,7 +38,7 @@ use ruma::{ push::{Action, Ruleset, Tweak}, state_res::{self, Event, RoomVersion}, uint, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, - OwnedServerName, OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, + OwnedServerName, RoomId, RoomVersionId, ServerName, UserId, }; use serde::Deserialize; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; @@ -387,10 +387,10 @@ impl Service { if pdu.kind == TimelineEventType::RoomMember { if let Some(state_key) = &pdu.state_key { - let target_user_id = OwnedUserId::parse(state_key)?; + let target_user_id = UserId::parse(state_key)?; - if self.services.users.is_active_local(&target_user_id).await { - push_target.insert(target_user_id); + if self.services.users.is_active_local(target_user_id).await { + push_target.insert(target_user_id.to_owned()); } } } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 86b219f7..b46ce7a8 100644 --- a/src/service/sending/mod.rs +++ 
b/src/service/sending/mod.rs @@ -13,6 +13,7 @@ use std::{ use async_trait::async_trait; use conduwuit::{ debug, debug_warn, err, error, + smallvec::SmallVec, utils::{available_parallelism, math::usize_from_u64_truncated, ReadyExt, TryReadyExt}, warn, Result, Server, }; @@ -21,7 +22,6 @@ use ruma::{ api::{appservice::Registration, OutgoingRequest}, RoomId, ServerName, UserId, }; -use smallvec::SmallVec; use tokio::{task, task::JoinSet}; use self::data::Data; From 0a9a9b3c92852cae269aaf2cb3894658b5e35a54 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 5 Feb 2025 12:22:22 +0000 Subject: [PATCH 177/328] larcen state-res from ruma --- Cargo.toml | 1 - src/api/client/membership.rs | 6 +- src/api/client/sync/v5.rs | 3 +- src/core/error/mod.rs | 2 +- src/core/mod.rs | 2 + src/core/pdu/event.rs | 2 +- src/core/state_res/LICENSE | 17 + src/core/state_res/error.rs | 23 + src/core/state_res/event_auth.rs | 1418 ++++++++++++++ src/core/state_res/mod.rs | 1644 +++++++++++++++++ src/core/state_res/outcomes.txt | 104 ++ src/core/state_res/power_levels.rs | 256 +++ src/core/state_res/room_version.rs | 149 ++ src/core/state_res/state_event.rs | 102 + src/core/state_res/state_res_bench.rs | 648 +++++++ src/core/state_res/test_utils.rs | 688 +++++++ src/service/rooms/event_handler/fetch_prev.rs | 11 +- .../rooms/event_handler/handle_outlier_pdu.rs | 6 +- src/service/rooms/event_handler/mod.rs | 6 +- .../rooms/event_handler/resolve_state.rs | 9 +- .../rooms/event_handler/state_at_incoming.rs | 4 +- .../event_handler/upgrade_outlier_pdu.rs | 10 +- src/service/rooms/state/mod.rs | 2 +- src/service/rooms/timeline/mod.rs | 2 +- 24 files changed, 5082 insertions(+), 33 deletions(-) create mode 100644 src/core/state_res/LICENSE create mode 100644 src/core/state_res/error.rs create mode 100644 src/core/state_res/event_auth.rs create mode 100644 src/core/state_res/mod.rs create mode 100644 src/core/state_res/outcomes.txt create mode 100644 src/core/state_res/power_levels.rs create mode 100644 src/core/state_res/room_version.rs create mode 100644 src/core/state_res/state_event.rs create mode 100644 src/core/state_res/state_res_bench.rs create mode 100644 src/core/state_res/test_utils.rs diff --git a/Cargo.toml b/Cargo.toml index b93877bd..d8f34544 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -355,7 +355,6 @@ features = [ "federation-api", "markdown", "push-gateway-api-c", - "state-res", "server-util", "unstable-exhaustive-types", "ring-compat", diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 449d44d5..1045b014 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -12,7 +12,7 @@ use conduwuit::{ at, debug, debug_info, debug_warn, err, info, pdu::{gen_event_id_canonical_json, PduBuilder}, result::FlatOk, - trace, + state_res, trace, utils::{self, shuffle, IterStream, ReadyExt}, warn, Err, PduEvent, Result, }; @@ -40,8 +40,8 @@ use ruma::{ }, StateEventType, }, - state_res, CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, - OwnedServerName, OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, + CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, + OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, }; use service::{ appservice::RegistrationInfo, diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index f8ee1047..63731688 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -11,7 +11,7 @@ use conduwuit::{ math::{ruma_from_usize, usize_from_ruma}, BoolExt, IterStream, 
ReadyExt, TryFutureExtExt, }, - warn, Error, Result, + warn, Error, Result, TypeStateKey, }; use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ @@ -24,7 +24,6 @@ use ruma::{ AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType, }, serde::Raw, - state_res::TypeStateKey, uint, DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, }; use service::{rooms::read_receipt::pack_receipts, PduCount}; diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index 88ac6d09..16613b7e 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -121,7 +121,7 @@ pub enum Error { #[error(transparent)] Signatures(#[from] ruma::signatures::Error), #[error(transparent)] - StateRes(#[from] ruma::state_res::Error), + StateRes(#[from] crate::state_res::Error), #[error("uiaa")] Uiaa(ruma::api::client::uiaa::UiaaInfo), diff --git a/src/core/mod.rs b/src/core/mod.rs index ee128628..cd56774a 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -8,6 +8,7 @@ pub mod metrics; pub mod mods; pub mod pdu; pub mod server; +pub mod state_res; pub mod utils; pub use ::arrayvec; @@ -22,6 +23,7 @@ pub use error::Error; pub use info::{rustc_flags_capture, version, version::version}; pub use pdu::{Event, PduBuilder, PduCount, PduEvent, PduId, RawPduId, StateKey}; pub use server::Server; +pub use state_res::{EventTypeExt, RoomVersion, StateMap, TypeStateKey}; pub use utils::{ctor, dtor, implement, result, result::Result}; pub use crate as conduwuit_core; diff --git a/src/core/pdu/event.rs b/src/core/pdu/event.rs index 6a92afe8..d5c0561e 100644 --- a/src/core/pdu/event.rs +++ b/src/core/pdu/event.rs @@ -1,8 +1,8 @@ -pub use ruma::state_res::Event; use ruma::{events::TimelineEventType, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId}; use serde_json::value::RawValue as RawJsonValue; use super::Pdu; +pub use crate::state_res::Event; impl Event for Pdu { type Id = OwnedEventId; diff --git a/src/core/state_res/LICENSE b/src/core/state_res/LICENSE new file mode 100644 index 00000000..c103a044 --- /dev/null +++ b/src/core/state_res/LICENSE @@ -0,0 +1,17 @@ +//! Permission is hereby granted, free of charge, to any person obtaining a copy +//! of this software and associated documentation files (the "Software"), to +//! deal in the Software without restriction, including without limitation the +//! rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +//! sell copies of the Software, and to permit persons to whom the Software is +//! furnished to do so, subject to the following conditions: + +//! The above copyright notice and this permission notice shall be included in +//! all copies or substantial portions of the Software. + +//! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +//! IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +//! FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +//! AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +//! LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +//! FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +//! IN THE SOFTWARE. diff --git a/src/core/state_res/error.rs b/src/core/state_res/error.rs new file mode 100644 index 00000000..7711d878 --- /dev/null +++ b/src/core/state_res/error.rs @@ -0,0 +1,23 @@ +use serde_json::Error as JsonError; +use thiserror::Error; + +/// Represents the various errors that arise when resolving state. 
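// Editor's illustration only: the vendored error type (defined just below)
// keeps ruma's state-res variants, so existing handling carries over after the
// move into conduwuit-core. The path and the #[non_exhaustive] catch-all arm
// are assumptions based on this patch, not verified against the full tree.
fn describe(err: &conduwuit::state_res::Error) -> String {
    use conduwuit::state_res::Error;

    match err {
        | Error::SerdeJson(e) => format!("invalid event JSON: {e}"),
        | Error::Unsupported(version) => format!("unsupported room version: {version}"),
        | Error::NotFound(what) => format!("event not found: {what}"),
        | Error::InvalidPdu(why) => format!("invalid PDU: {why}"),
        | _ => "unrecognised state resolution failure".to_owned(),
    }
}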
+#[derive(Error, Debug)] +#[non_exhaustive] +pub enum Error { + /// A deserialization error. + #[error(transparent)] + SerdeJson(#[from] JsonError), + + /// The given option or version is unsupported. + #[error("Unsupported room version: {0}")] + Unsupported(String), + + /// The given event was not found. + #[error("Not found error: {0}")] + NotFound(String), + + /// Invalid fields in the given PDU. + #[error("Invalid PDU: {0}")] + InvalidPdu(String), +} diff --git a/src/core/state_res/event_auth.rs b/src/core/state_res/event_auth.rs new file mode 100644 index 00000000..72a0216c --- /dev/null +++ b/src/core/state_res/event_auth.rs @@ -0,0 +1,1418 @@ +use std::{borrow::Borrow, collections::BTreeSet}; + +use futures::{ + future::{join3, OptionFuture}, + Future, +}; +use ruma::{ + events::room::{ + create::RoomCreateEventContent, + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, ThirdPartyInvite}, + power_levels::RoomPowerLevelsEventContent, + third_party_invite::RoomThirdPartyInviteEventContent, + }, + int, + serde::{Base64, Raw}, + Int, OwnedUserId, RoomVersionId, UserId, +}; +use serde::{ + de::{Error as _, IgnoredAny}, + Deserialize, +}; +use serde_json::{from_str as from_json_str, value::RawValue as RawJsonValue}; +use tracing::{debug, error, instrument, trace, warn}; + +use super::{ + power_levels::{ + deserialize_power_levels, deserialize_power_levels_content_fields, + deserialize_power_levels_content_invite, deserialize_power_levels_content_redact, + }, + room_version::RoomVersion, + Error, Event, Result, StateEventType, TimelineEventType, +}; + +// FIXME: field extracting could be bundled for `content` +#[derive(Deserialize)] +struct GetMembership { + membership: MembershipState, +} + +#[derive(Deserialize)] +struct RoomMemberContentFields { + membership: Option>, + join_authorised_via_users_server: Option>, +} + +/// For the given event `kind` what are the relevant auth events that are needed +/// to authenticate this `content`. +/// +/// # Errors +/// +/// This function will return an error if the supplied `content` is not a JSON +/// object. 
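// Editor's usage sketch for the function defined just below. Assumptions: the
// vendored module re-exports `auth_types_for_event` (as ruma's state-res
// does) and serde_json's `raw_value` feature is enabled, as elsewhere in this
// workspace.
use conduwuit::state_res::auth_types_for_event;
use ruma::{
    events::{StateEventType, TimelineEventType},
    user_id,
};
use serde_json::{json, value::to_raw_value};

fn main() -> serde_json::Result<()> {
    let sender = user_id!("@alice:example.org");
    let content = to_raw_value(&json!({ "membership": "join" }))?;

    let auth = auth_types_for_event(
        &TimelineEventType::RoomMember,
        sender,
        Some(sender.as_str()),
        &content,
    )?;

    // A join requires the create event, power levels, join rules and both the
    // sender's and the target's membership (here the same key) to authorize.
    assert!(auth.contains(&(StateEventType::RoomCreate, String::new())));
    assert!(auth.contains(&(StateEventType::RoomJoinRules, String::new())));
    assert!(auth.contains(&(StateEventType::RoomMember, sender.to_string())));

    Ok(())
}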
+pub fn auth_types_for_event( + kind: &TimelineEventType, + sender: &UserId, + state_key: Option<&str>, + content: &RawJsonValue, +) -> serde_json::Result> { + if kind == &TimelineEventType::RoomCreate { + return Ok(vec![]); + } + + let mut auth_types = vec![ + (StateEventType::RoomPowerLevels, String::new()), + (StateEventType::RoomMember, sender.to_string()), + (StateEventType::RoomCreate, String::new()), + ]; + + if kind == &TimelineEventType::RoomMember { + #[derive(Deserialize)] + struct RoomMemberContentFields { + membership: Option>, + third_party_invite: Option>, + join_authorised_via_users_server: Option>, + } + + if let Some(state_key) = state_key { + let content: RoomMemberContentFields = from_json_str(content.get())?; + + if let Some(Ok(membership)) = content.membership.map(|m| m.deserialize()) { + if [MembershipState::Join, MembershipState::Invite, MembershipState::Knock] + .contains(&membership) + { + let key = (StateEventType::RoomJoinRules, String::new()); + if !auth_types.contains(&key) { + auth_types.push(key); + } + + if let Some(Ok(u)) = content + .join_authorised_via_users_server + .map(|m| m.deserialize()) + { + let key = (StateEventType::RoomMember, u.to_string()); + if !auth_types.contains(&key) { + auth_types.push(key); + } + } + } + + let key = (StateEventType::RoomMember, state_key.to_owned()); + if !auth_types.contains(&key) { + auth_types.push(key); + } + + if membership == MembershipState::Invite { + if let Some(Ok(t_id)) = content.third_party_invite.map(|t| t.deserialize()) { + let key = (StateEventType::RoomThirdPartyInvite, t_id.signed.token); + if !auth_types.contains(&key) { + auth_types.push(key); + } + } + } + } + } + } + + Ok(auth_types) +} + +/// Authenticate the incoming `event`. +/// +/// The steps of authentication are: +/// +/// * check that the event is being authenticated for the correct room +/// * then there are checks for specific event types +/// +/// The `fetch_state` closure should gather state from a state snapshot. We need +/// to know if the event passes auth against some state not a recursive +/// collection of auth_events fields. +#[instrument(level = "debug", skip_all, fields(event_id = incoming_event.event_id().borrow().as_str()))] +pub async fn auth_check( + room_version: &RoomVersion, + incoming_event: &Incoming, + current_third_party_invite: Option<&Incoming>, + fetch_state: F, +) -> Result +where + F: Fn(&'static StateEventType, &str) -> Fut, + Fut: Future> + Send, + Fetched: Event + Send, + Incoming: Event + Send, +{ + debug!( + "auth_check beginning for {} ({})", + incoming_event.event_id(), + incoming_event.event_type() + ); + + // [synapse] check that all the events are in the same room as `incoming_event` + + // [synapse] do_sig_check check the event has valid signatures for member events + + // TODO do_size_check is false when called by `iterative_auth_check` + // do_size_check is also mostly accomplished by ruma with the exception of + // checking event_type, state_key, and json are below a certain size (255 and + // 65_536 respectively) + + let sender = incoming_event.sender(); + + // Implementation of https://spec.matrix.org/latest/rooms/v1/#authorization-rules + // + // 1. 
If type is m.room.create: + if *incoming_event.event_type() == TimelineEventType::RoomCreate { + #[derive(Deserialize)] + struct RoomCreateContentFields { + room_version: Option>, + creator: Option>, + } + + debug!("start m.room.create check"); + + // If it has any previous events, reject + if incoming_event.prev_events().next().is_some() { + warn!("the room creation event had previous events"); + return Ok(false); + } + + // If the domain of the room_id does not match the domain of the sender, reject + let Some(room_id_server_name) = incoming_event.room_id().server_name() else { + warn!("room ID has no servername"); + return Ok(false); + }; + + if room_id_server_name != sender.server_name() { + warn!("servername of room ID does not match servername of sender"); + return Ok(false); + } + + // If content.room_version is present and is not a recognized version, reject + let content: RoomCreateContentFields = from_json_str(incoming_event.content().get())?; + if content + .room_version + .is_some_and(|v| v.deserialize().is_err()) + { + warn!("invalid room version found in m.room.create event"); + return Ok(false); + } + + if !room_version.use_room_create_sender { + // If content has no creator field, reject + if content.creator.is_none() { + warn!("no creator field found in m.room.create content"); + return Ok(false); + } + } + + debug!("m.room.create event was allowed"); + return Ok(true); + } + + /* + // TODO: In the past this code caused problems federating with synapse, maybe this has been + // resolved already. Needs testing. + // + // 2. Reject if auth_events + // a. auth_events cannot have duplicate keys since it's a BTree + // b. All entries are valid auth events according to spec + let expected_auth = auth_types_for_event( + incoming_event.kind, + sender, + incoming_event.state_key, + incoming_event.content().clone(), + ); + + dbg!(&expected_auth); + + for ev_key in auth_events.keys() { + // (b) + if !expected_auth.contains(ev_key) { + warn!("auth_events contained invalid auth event"); + return Ok(false); + } + } + */ + + let (room_create_event, power_levels_event, sender_member_event) = join3( + fetch_state(&StateEventType::RoomCreate, ""), + fetch_state(&StateEventType::RoomPowerLevels, ""), + fetch_state(&StateEventType::RoomMember, sender.as_str()), + ) + .await; + + let room_create_event = match room_create_event { + | None => { + warn!("no m.room.create event in auth chain"); + return Ok(false); + }, + | Some(e) => e, + }; + + // 3. If event does not have m.room.create in auth_events reject + if !incoming_event + .auth_events() + .any(|id| id.borrow() == room_create_event.event_id().borrow()) + { + warn!("no m.room.create event in auth events"); + return Ok(false); + } + + // If the create event content has the field m.federate set to false and the + // sender domain of the event does not match the sender domain of the create + // event, reject. + #[derive(Deserialize)] + struct RoomCreateContentFederate { + #[serde(rename = "m.federate", default = "ruma::serde::default_true")] + federate: bool, + } + let room_create_content: RoomCreateContentFederate = + from_json_str(room_create_event.content().get())?; + if !room_create_content.federate + && room_create_event.sender().server_name() != incoming_event.sender().server_name() + { + warn!( + "room is not federated and event's sender domain does not match create event's \ + sender domain" + ); + return Ok(false); + } + + // Only in some room versions 6 and below + if room_version.special_case_aliases_auth { + // 4. 
If type is m.room.aliases + if *incoming_event.event_type() == TimelineEventType::RoomAliases { + debug!("starting m.room.aliases check"); + + // If sender's domain doesn't matches state_key, reject + if incoming_event.state_key() != Some(sender.server_name().as_str()) { + warn!("state_key does not match sender"); + return Ok(false); + } + + debug!("m.room.aliases event was allowed"); + return Ok(true); + } + } + + // If type is m.room.member + if *incoming_event.event_type() == TimelineEventType::RoomMember { + debug!("starting m.room.member check"); + let state_key = match incoming_event.state_key() { + | None => { + warn!("no statekey in member event"); + return Ok(false); + }, + | Some(s) => s, + }; + + let content: RoomMemberContentFields = from_json_str(incoming_event.content().get())?; + if content + .membership + .as_ref() + .and_then(|m| m.deserialize().ok()) + .is_none() + { + warn!("no valid membership field found for m.room.member event content"); + return Ok(false); + } + + let target_user = + <&UserId>::try_from(state_key).map_err(|e| Error::InvalidPdu(format!("{e}")))?; + + let user_for_join_auth = content + .join_authorised_via_users_server + .as_ref() + .and_then(|u| u.deserialize().ok()); + + let user_for_join_auth_event: OptionFuture<_> = user_for_join_auth + .as_ref() + .map(|auth_user| fetch_state(&StateEventType::RoomMember, auth_user.as_str())) + .into(); + + let target_user_member_event = + fetch_state(&StateEventType::RoomMember, target_user.as_str()); + + let join_rules_event = fetch_state(&StateEventType::RoomJoinRules, ""); + + let (join_rules_event, target_user_member_event, user_for_join_auth_event) = + join3(join_rules_event, target_user_member_event, user_for_join_auth_event).await; + + let user_for_join_auth_membership = user_for_join_auth_event + .and_then(|mem| from_json_str::(mem?.content().get()).ok()) + .map_or(MembershipState::Leave, |mem| mem.membership); + + if !valid_membership_change( + room_version, + target_user, + target_user_member_event.as_ref(), + sender, + sender_member_event.as_ref(), + incoming_event, + current_third_party_invite, + power_levels_event.as_ref(), + join_rules_event.as_ref(), + user_for_join_auth.as_deref(), + &user_for_join_auth_membership, + room_create_event, + )? 
{ + return Ok(false); + } + + debug!("m.room.member event was allowed"); + return Ok(true); + } + + // If the sender's current membership state is not join, reject + let sender_member_event = match sender_member_event { + | Some(mem) => mem, + | None => { + warn!("sender not found in room"); + return Ok(false); + }, + }; + + let sender_membership_event_content: RoomMemberContentFields = + from_json_str(sender_member_event.content().get())?; + let membership_state = sender_membership_event_content + .membership + .expect("we should test before that this field exists") + .deserialize()?; + + if !matches!(membership_state, MembershipState::Join) { + warn!("sender's membership is not join"); + return Ok(false); + } + + // If type is m.room.third_party_invite + let sender_power_level = if let Some(pl) = &power_levels_event { + let content = deserialize_power_levels_content_fields(pl.content().get(), room_version)?; + if let Some(level) = content.get_user_power(sender) { + *level + } else { + content.users_default + } + } else { + // If no power level event found the creator gets 100 everyone else gets 0 + let is_creator = if room_version.use_room_create_sender { + room_create_event.sender() == sender + } else { + #[allow(deprecated)] + from_json_str::(room_create_event.content().get()) + .is_ok_and(|create| create.creator.unwrap() == *sender) + }; + + if is_creator { + int!(100) + } else { + int!(0) + } + }; + + // Allow if and only if sender's current power level is greater than + // or equal to the invite level + if *incoming_event.event_type() == TimelineEventType::RoomThirdPartyInvite { + let invite_level = match &power_levels_event { + | Some(power_levels) => + deserialize_power_levels_content_invite( + power_levels.content().get(), + room_version, + )? + .invite, + | None => int!(0), + }; + + if sender_power_level < invite_level { + warn!("sender's cannot send invites in this room"); + return Ok(false); + } + + debug!("m.room.third_party_invite event was allowed"); + return Ok(true); + } + + // If the event type's required power level is greater than the sender's power + // level, reject If the event has a state_key that starts with an @ and does + // not match the sender, reject. + if !can_send_event(incoming_event, power_levels_event.as_ref(), sender_power_level) { + warn!("user cannot send event"); + return Ok(false); + } + + // If type is m.room.power_levels + if *incoming_event.event_type() == TimelineEventType::RoomPowerLevels { + debug!("starting m.room.power_levels check"); + + if let Some(required_pwr_lvl) = check_power_levels( + room_version, + incoming_event, + power_levels_event.as_ref(), + sender_power_level, + ) { + if !required_pwr_lvl { + warn!("m.room.power_levels was not allowed"); + return Ok(false); + } + } else { + warn!("m.room.power_levels was not allowed"); + return Ok(false); + } + debug!("m.room.power_levels event allowed"); + } + + // Room version 3: Redaction events are always accepted (provided the event is + // allowed by `events` and `events_default` in the power levels). However, + // servers should not apply or send redaction's to clients until both the + // redaction event and original event have been seen, and are valid. Servers + // should only apply redaction's to events where the sender's domains match, or + // the sender of the redaction has the appropriate permissions per the + // power levels. 
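Editor's aside, not part of the patch: a condensed sketch of the rule the gated block just below applies through `check_redaction` (defined further down). That helper is only consulted when `room_version.extra_redaction_checks` is set; the parameter names here are illustrative only.

use ruma::Int;

// Allow the redaction if the sender meets the `redact` power level, or if the
// redaction shares an origin server with the event it redacts (the legacy
// extra-redaction-checks rule).
fn redaction_allowed(user_level: Int, redact_level: Int, same_origin: bool) -> bool {
    user_level >= redact_level || same_origin
}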
+ + if room_version.extra_redaction_checks + && *incoming_event.event_type() == TimelineEventType::RoomRedaction + { + let redact_level = match power_levels_event { + | Some(pl) => + deserialize_power_levels_content_redact(pl.content().get(), room_version)?.redact, + | None => int!(50), + }; + + if !check_redaction(room_version, incoming_event, sender_power_level, redact_level)? { + return Ok(false); + } + } + + debug!("allowing event passed all checks"); + Ok(true) +} + +// TODO deserializing the member, power, join_rules event contents is done in +// conduit just before this is called. Could they be passed in? +/// Does the user who sent this member event have required power levels to do +/// so. +/// +/// * `user` - Information about the membership event and user making the +/// request. +/// * `auth_events` - The set of auth events that relate to a membership event. +/// +/// This is generated by calling `auth_types_for_event` with the membership +/// event and the current State. +#[allow(clippy::too_many_arguments)] +fn valid_membership_change( + room_version: &RoomVersion, + target_user: &UserId, + target_user_membership_event: Option, + sender: &UserId, + sender_membership_event: Option, + current_event: impl Event, + current_third_party_invite: Option, + power_levels_event: Option, + join_rules_event: Option, + user_for_join_auth: Option<&UserId>, + user_for_join_auth_membership: &MembershipState, + create_room: impl Event, +) -> Result { + #[derive(Deserialize)] + struct GetThirdPartyInvite { + third_party_invite: Option>, + } + let content = current_event.content(); + + let target_membership = from_json_str::(content.get())?.membership; + let third_party_invite = + from_json_str::(content.get())?.third_party_invite; + + let sender_membership = match &sender_membership_event { + | Some(pdu) => from_json_str::(pdu.content().get())?.membership, + | None => MembershipState::Leave, + }; + let sender_is_joined = sender_membership == MembershipState::Join; + + let target_user_current_membership = match &target_user_membership_event { + | Some(pdu) => from_json_str::(pdu.content().get())?.membership, + | None => MembershipState::Leave, + }; + + let power_levels: RoomPowerLevelsEventContent = match &power_levels_event { + | Some(ev) => from_json_str(ev.content().get())?, + | None => RoomPowerLevelsEventContent::default(), + }; + + let sender_power = power_levels + .users + .get(sender) + .or_else(|| sender_is_joined.then_some(&power_levels.users_default)); + + let target_power = power_levels.users.get(target_user).or_else(|| { + (target_membership == MembershipState::Join).then_some(&power_levels.users_default) + }); + + let mut join_rules = JoinRule::Invite; + if let Some(jr) = &join_rules_event { + join_rules = from_json_str::(jr.content().get())?.join_rule; + } + + let power_levels_event_id = power_levels_event.as_ref().map(Event::event_id); + let sender_membership_event_id = sender_membership_event.as_ref().map(Event::event_id); + let target_user_membership_event_id = + target_user_membership_event.as_ref().map(Event::event_id); + + let user_for_join_auth_is_valid = if let Some(user_for_join_auth) = user_for_join_auth { + // Is the authorised user allowed to invite users into this room + let (auth_user_pl, invite_level) = if let Some(pl) = &power_levels_event { + // TODO Refactor all powerlevel parsing + let invite = + deserialize_power_levels_content_invite(pl.content().get(), room_version)?.invite; + + let content = + deserialize_power_levels_content_fields(pl.content().get(), 
room_version)?; + let user_pl = if let Some(level) = content.get_user_power(user_for_join_auth) { + *level + } else { + content.users_default + }; + + (user_pl, invite) + } else { + (int!(0), int!(0)) + }; + (user_for_join_auth_membership == &MembershipState::Join) + && (auth_user_pl >= invite_level) + } else { + // No auth user was given + false + }; + + Ok(match target_membership { + | MembershipState::Join => { + // 1. If the only previous event is an m.room.create and the state_key is the + // creator, + // allow + let mut prev_events = current_event.prev_events(); + + let prev_event_is_create_event = prev_events + .next() + .is_some_and(|event_id| event_id.borrow() == create_room.event_id().borrow()); + let no_more_prev_events = prev_events.next().is_none(); + + if prev_event_is_create_event && no_more_prev_events { + let is_creator = if room_version.use_room_create_sender { + let creator = create_room.sender(); + + creator == sender && creator == target_user + } else { + #[allow(deprecated)] + let creator = from_json_str::(create_room.content().get())? + .creator + .ok_or_else(|| serde_json::Error::missing_field("creator"))?; + + creator == sender && creator == target_user + }; + + if is_creator { + return Ok(true); + } + } + + if sender != target_user { + // If the sender does not match state_key, reject. + warn!("Can't make other user join"); + false + } else if target_user_current_membership == MembershipState::Ban { + // If the sender is banned, reject. + warn!(?target_user_membership_event_id, "Banned user can't join"); + false + } else if (join_rules == JoinRule::Invite + || room_version.allow_knocking && join_rules == JoinRule::Knock) + // If the join_rule is invite then allow if membership state is invite or join + && (target_user_current_membership == MembershipState::Join + || target_user_current_membership == MembershipState::Invite) + { + true + } else if room_version.restricted_join_rules + && matches!(join_rules, JoinRule::Restricted(_)) + || room_version.knock_restricted_join_rule + && matches!(join_rules, JoinRule::KnockRestricted(_)) + { + // If the join_rule is restricted or knock_restricted + if matches!( + target_user_current_membership, + MembershipState::Invite | MembershipState::Join + ) { + // If membership state is join or invite, allow. + true + } else { + // If the join_authorised_via_users_server key in content is not a user with + // sufficient permission to invite other users, reject. + // Otherwise, allow. + user_for_join_auth_is_valid + } + } else { + // If the join_rule is public, allow. + // Otherwise, reject. 
+ join_rules == JoinRule::Public + } + }, + | MembershipState::Invite => { + // If content has third_party_invite key + if let Some(tp_id) = third_party_invite.and_then(|i| i.deserialize().ok()) { + if target_user_current_membership == MembershipState::Ban { + warn!(?target_user_membership_event_id, "Can't invite banned user"); + false + } else { + let allow = verify_third_party_invite( + Some(target_user), + sender, + &tp_id, + current_third_party_invite, + ); + if !allow { + warn!("Third party invite invalid"); + } + allow + } + } else if !sender_is_joined + || target_user_current_membership == MembershipState::Join + || target_user_current_membership == MembershipState::Ban + { + warn!( + ?target_user_membership_event_id, + ?sender_membership_event_id, + "Can't invite user if sender not joined or the user is currently joined or \ + banned", + ); + false + } else { + let allow = sender_power + .filter(|&p| p >= &power_levels.invite) + .is_some(); + if !allow { + warn!( + ?target_user_membership_event_id, + ?power_levels_event_id, + "User does not have enough power to invite", + ); + } + allow + } + }, + | MembershipState::Leave => + if sender == target_user { + let allow = target_user_current_membership == MembershipState::Join + || target_user_current_membership == MembershipState::Invite + || target_user_current_membership == MembershipState::Knock; + if !allow { + warn!( + ?target_user_membership_event_id, + ?target_user_current_membership, + "Can't leave if sender is not already invited, knocked, or joined" + ); + } + allow + } else if !sender_is_joined + || target_user_current_membership == MembershipState::Ban + && sender_power.filter(|&p| p < &power_levels.ban).is_some() + { + warn!( + ?target_user_membership_event_id, + ?sender_membership_event_id, + "Can't kick if sender not joined or user is already banned", + ); + false + } else { + let allow = sender_power.filter(|&p| p >= &power_levels.kick).is_some() + && target_power < sender_power; + if !allow { + warn!( + ?target_user_membership_event_id, + ?power_levels_event_id, + "User does not have enough power to kick", + ); + } + allow + }, + | MembershipState::Ban => + if !sender_is_joined { + warn!(?sender_membership_event_id, "Can't ban user if sender is not joined"); + false + } else { + let allow = sender_power.filter(|&p| p >= &power_levels.ban).is_some() + && target_power < sender_power; + if !allow { + warn!( + ?target_user_membership_event_id, + ?power_levels_event_id, + "User does not have enough power to ban", + ); + } + allow + }, + | MembershipState::Knock if room_version.allow_knocking => { + // 1. If the `join_rule` is anything other than `knock` or `knock_restricted`, + // reject. + if !matches!(join_rules, JoinRule::KnockRestricted(_) | JoinRule::Knock) { + warn!( + "Join rule is not set to knock or knock_restricted, knocking is not allowed" + ); + false + } else if matches!(join_rules, JoinRule::KnockRestricted(_)) + && !room_version.knock_restricted_join_rule + { + // 2. If the `join_rule` is `knock_restricted`, but the room does not support + // `knock_restricted`, reject. + warn!( + "Join rule is set to knock_restricted but room version does not support \ + knock_restricted, knocking is not allowed" + ); + false + } else if sender != target_user { + // 3. If `sender` does not match `state_key`, reject. 
+ warn!( + ?sender, + ?target_user, + "Can't make another user knock, sender did not match target" + ); + false + } else if matches!( + sender_membership, + MembershipState::Ban | MembershipState::Invite | MembershipState::Join + ) { + // 4. If the `sender`'s current membership is not `ban`, `invite`, or `join`, + // allow. + // 5. Otherwise, reject. + warn!( + ?target_user_membership_event_id, + "Knocking with a membership state of ban, invite or join is invalid", + ); + false + } else { + true + } + }, + | _ => { + warn!("Unknown membership transition"); + false + }, + }) +} + +/// Is the user allowed to send a specific event based on the rooms power +/// levels. +/// +/// Does the event have the correct userId as its state_key if it's not the "" +/// state_key. +fn can_send_event(event: impl Event, ple: Option, user_level: Int) -> bool { + let event_type_power_level = get_send_level(event.event_type(), event.state_key(), ple); + + debug!( + required_level = i64::from(event_type_power_level), + user_level = i64::from(user_level), + state_key = ?event.state_key(), + "permissions factors", + ); + + if user_level < event_type_power_level { + return false; + } + + if event.state_key().is_some_and(|k| k.starts_with('@')) + && event.state_key() != Some(event.sender().as_str()) + { + return false; // permission required to post in this room + } + + true +} + +/// Confirm that the event sender has the required power levels. +fn check_power_levels( + room_version: &RoomVersion, + power_event: impl Event, + previous_power_event: Option, + user_level: Int, +) -> Option { + match power_event.state_key() { + | Some("") => {}, + | Some(key) => { + error!(state_key = key, "m.room.power_levels event has non-empty state key"); + return None; + }, + | None => { + error!("check_power_levels requires an m.room.power_levels *state* event argument"); + return None; + }, + } + + // - If any of the keys users_default, events_default, state_default, ban, + // redact, kick, or invite in content are present and not an integer, reject. + // - If either of the keys events or notifications in content are present and + // not a dictionary with values that are integers, reject. + // - If users key in content is not a dictionary with keys that are valid user + // IDs with values that are integers, reject. 
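Editor's aside, not part of the patch: a small, self-contained illustration of the content shape those rules accept, built with ruma's `RoomPowerLevelsEventContent`; the user ID and level values are made up.

use ruma::{events::room::power_levels::RoomPowerLevelsEventContent, int};

fn power_levels_shape_example() -> RoomPowerLevelsEventContent {
    // Integer-valued levels, an `events` map keyed by event type, and a `users`
    // map keyed by valid user IDs, as the checks above require.
    let value = serde_json::json!({
        "ban": 50,
        "events": { "m.room.topic": 50 },
        "events_default": 0,
        "users": { "@alice:example.org": 100 },
        "users_default": 0
    });

    let content: RoomPowerLevelsEventContent =
        serde_json::from_value(value).expect("integer power levels deserialize");
    assert_eq!(content.ban, int!(50));
    content
}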
+ let user_content: RoomPowerLevelsEventContent = + deserialize_power_levels(power_event.content().get(), room_version)?; + + // Validation of users is done in Ruma, synapse for loops validating user_ids + // and integers here + debug!("validation of power event finished"); + + let current_state = match previous_power_event { + | Some(current_state) => current_state, + // If there is no previous m.room.power_levels event in the room, allow + | None => return Some(true), + }; + + let current_content: RoomPowerLevelsEventContent = + deserialize_power_levels(current_state.content().get(), room_version)?; + + let mut user_levels_to_check = BTreeSet::new(); + let old_list = ¤t_content.users; + let user_list = &user_content.users; + for user in old_list.keys().chain(user_list.keys()) { + let user: &UserId = user; + user_levels_to_check.insert(user); + } + + trace!(set = ?user_levels_to_check, "user levels to check"); + + let mut event_levels_to_check = BTreeSet::new(); + let old_list = ¤t_content.events; + let new_list = &user_content.events; + for ev_id in old_list.keys().chain(new_list.keys()) { + event_levels_to_check.insert(ev_id); + } + + trace!(set = ?event_levels_to_check, "event levels to check"); + + let old_state = ¤t_content; + let new_state = &user_content; + + // synapse does not have to split up these checks since we can't combine UserIds + // and EventTypes we do 2 loops + + // UserId loop + for user in user_levels_to_check { + let old_level = old_state.users.get(user); + let new_level = new_state.users.get(user); + if old_level.is_some() && new_level.is_some() && old_level == new_level { + continue; + } + + // If the current value is equal to the sender's current power level, reject + if user != power_event.sender() && old_level == Some(&user_level) { + warn!("m.room.power_level cannot remove ops == to own"); + return Some(false); // cannot remove ops level == to own + } + + // If the current value is higher than the sender's current power level, reject + // If the new value is higher than the sender's current power level, reject + let old_level_too_big = old_level > Some(&user_level); + let new_level_too_big = new_level > Some(&user_level); + if old_level_too_big || new_level_too_big { + warn!("m.room.power_level failed to add ops > than own"); + return Some(false); // cannot add ops greater than own + } + } + + // EventType loop + for ev_type in event_levels_to_check { + let old_level = old_state.events.get(ev_type); + let new_level = new_state.events.get(ev_type); + if old_level.is_some() && new_level.is_some() && old_level == new_level { + continue; + } + + // If the current value is higher than the sender's current power level, reject + // If the new value is higher than the sender's current power level, reject + let old_level_too_big = old_level > Some(&user_level); + let new_level_too_big = new_level > Some(&user_level); + if old_level_too_big || new_level_too_big { + warn!("m.room.power_level failed to add ops > than own"); + return Some(false); // cannot add ops greater than own + } + } + + // Notifications, currently there is only @room + if room_version.limit_notifications_power_levels { + let old_level = old_state.notifications.room; + let new_level = new_state.notifications.room; + if old_level != new_level { + // If the current value is higher than the sender's current power level, reject + // If the new value is higher than the sender's current power level, reject + let old_level_too_big = old_level > user_level; + let new_level_too_big = new_level > user_level; + 
if old_level_too_big || new_level_too_big { + warn!("m.room.power_level failed to add ops > than own"); + return Some(false); // cannot add ops greater than own + } + } + } + + let levels = [ + "users_default", + "events_default", + "state_default", + "ban", + "redact", + "kick", + "invite", + ]; + let old_state = serde_json::to_value(old_state).unwrap(); + let new_state = serde_json::to_value(new_state).unwrap(); + for lvl_name in &levels { + if let Some((old_lvl, new_lvl)) = get_deserialize_levels(&old_state, &new_state, lvl_name) + { + let old_level_too_big = old_lvl > user_level; + let new_level_too_big = new_lvl > user_level; + + if old_level_too_big || new_level_too_big { + warn!("cannot add ops > than own"); + return Some(false); + } + } + } + + Some(true) +} + +fn get_deserialize_levels( + old: &serde_json::Value, + new: &serde_json::Value, + name: &str, +) -> Option<(Int, Int)> { + Some(( + serde_json::from_value(old.get(name)?.clone()).ok()?, + serde_json::from_value(new.get(name)?.clone()).ok()?, + )) +} + +/// Does the event redacting come from a user with enough power to redact the +/// given event. +fn check_redaction( + _room_version: &RoomVersion, + redaction_event: impl Event, + user_level: Int, + redact_level: Int, +) -> Result { + if user_level >= redact_level { + debug!("redaction allowed via power levels"); + return Ok(true); + } + + // If the domain of the event_id of the event being redacted is the same as the + // domain of the event_id of the m.room.redaction, allow + if redaction_event.event_id().borrow().server_name() + == redaction_event + .redacts() + .as_ref() + .and_then(|&id| id.borrow().server_name()) + { + debug!("redaction event allowed via room version 1 rules"); + return Ok(true); + } + + Ok(false) +} + +/// Helper function to fetch the power level needed to send an event of type +/// `e_type` based on the rooms "m.room.power_level" event. +fn get_send_level( + e_type: &TimelineEventType, + state_key: Option<&str>, + power_lvl: Option, +) -> Int { + power_lvl + .and_then(|ple| { + from_json_str::(ple.content().get()) + .map(|content| { + content.events.get(e_type).copied().unwrap_or_else(|| { + if state_key.is_some() { + content.state_default + } else { + content.events_default + } + }) + }) + .ok() + }) + .unwrap_or_else(|| if state_key.is_some() { int!(50) } else { int!(0) }) +} + +fn verify_third_party_invite( + target_user: Option<&UserId>, + sender: &UserId, + tp_id: &ThirdPartyInvite, + current_third_party_invite: Option, +) -> bool { + // 1. Check for user being banned happens before this is called + // checking for mxid and token keys is done by ruma when deserializing + + // The state key must match the invitee + if target_user != Some(&tp_id.signed.mxid) { + return false; + } + + // If there is no m.room.third_party_invite event in the current room state with + // state_key matching token, reject + let current_tpid = match current_third_party_invite { + | Some(id) => id, + | None => return false, + }; + + if current_tpid.state_key() != Some(&tp_id.signed.token) { + return false; + } + + if sender != current_tpid.sender() { + return false; + } + + // If any signature in signed matches any public key in the + // m.room.third_party_invite event, allow + let tpid_ev = + match from_json_str::(current_tpid.content().get()) { + | Ok(ev) => ev, + | Err(_) => return false, + }; + + let decoded_invite_token = match Base64::parse(&tp_id.signed.token) { + | Ok(tok) => tok, + // FIXME: Log a warning? 
+ | Err(_) => return false, + }; + + // A list of public keys in the public_keys field + for key in tpid_ev.public_keys.unwrap_or_default() { + if key.public_key == decoded_invite_token { + return true; + } + } + + // A single public key in the public_key field + tpid_ev.public_key == decoded_invite_token +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use ruma_events::{ + room::{ + join_rules::{ + AllowRule, JoinRule, Restricted, RoomJoinRulesEventContent, RoomMembership, + }, + member::{MembershipState, RoomMemberEventContent}, + }, + StateEventType, TimelineEventType, + }; + use serde_json::value::to_raw_value as to_raw_json_value; + + use crate::{ + event_auth::valid_membership_change, + test_utils::{ + alice, charlie, ella, event_id, member_content_ban, member_content_join, room_id, + to_pdu_event, PduEvent, INITIAL_EVENTS, INITIAL_EVENTS_CREATE_ROOM, + }, + Event, EventTypeExt, RoomVersion, StateMap, + }; + + #[test] + fn test_ban_pass() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let events = INITIAL_EVENTS(); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + alice(), + TimelineEventType::RoomMember, + Some(charlie().as_str()), + member_content_ban(), + &[], + &["IMC"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = charlie(); + let sender = alice(); + + assert!(valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.to_string()), + sender, + fetch_state(StateEventType::RoomMember, sender.to_string()), + &requester, + None::, + fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), + fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None, + &MembershipState::Leave, + fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + ) + .unwrap()); + } + + #[test] + fn test_join_non_creator() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let events = INITIAL_EVENTS_CREATE_ROOM(); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().as_str()), + member_content_join(), + &["CREATE"], + &["CREATE"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = charlie(); + let sender = charlie(); + + assert!(!valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.to_string()), + sender, + fetch_state(StateEventType::RoomMember, sender.to_string()), + &requester, + None::, + fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), + fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None, + &MembershipState::Leave, + fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + ) + .unwrap()); + } + + #[test] + fn test_join_creator() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let events = INITIAL_EVENTS_CREATE_ROOM(); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + alice(), + 
TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_join(), + &["CREATE"], + &["CREATE"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = alice(); + let sender = alice(); + + assert!(valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.to_string()), + sender, + fetch_state(StateEventType::RoomMember, sender.to_string()), + &requester, + None::, + fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), + fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None, + &MembershipState::Leave, + fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + ) + .unwrap()); + } + + #[test] + fn test_ban_fail() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let events = INITIAL_EVENTS(); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + charlie(), + TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_ban(), + &[], + &["IMC"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = alice(); + let sender = charlie(); + + assert!(!valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.to_string()), + sender, + fetch_state(StateEventType::RoomMember, sender.to_string()), + &requester, + None::, + fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), + fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None, + &MembershipState::Leave, + fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + ) + .unwrap()); + } + + #[test] + fn test_restricted_join_rule() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let mut events = INITIAL_EVENTS(); + *events.get_mut(&event_id("IJR")).unwrap() = to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Restricted( + Restricted::new(vec![AllowRule::RoomMembership(RoomMembership::new( + room_id().to_owned(), + ))]), + ))) + .unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["IPOWER"], + ); + + let mut member = RoomMemberEventContent::new(MembershipState::Join); + member.join_authorized_via_users_server = Some(alice().to_owned()); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + ella(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap(), + &["CREATE", "IJR", "IPOWER", "new"], + &["new"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = ella(); + let sender = ella(); + + assert!(valid_membership_change( + &RoomVersion::V9, + target_user, + fetch_state(StateEventType::RoomMember, target_user.to_string()), + sender, + fetch_state(StateEventType::RoomMember, sender.to_string()), + &requester, + None::, + fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), + fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + Some(alice()), + &MembershipState::Join, + fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + ) + .unwrap()); + + 
assert!(!valid_membership_change( + &RoomVersion::V9, + target_user, + fetch_state(StateEventType::RoomMember, target_user.to_string()), + sender, + fetch_state(StateEventType::RoomMember, sender.to_string()), + &requester, + None::, + fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), + fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + Some(ella()), + &MembershipState::Leave, + fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + ) + .unwrap()); + } + + #[test] + fn test_knock() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let mut events = INITIAL_EVENTS(); + *events.get_mut(&event_id("IJR")).unwrap() = to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Knock)).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["IPOWER"], + ); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + ella(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Knock)).unwrap(), + &[], + &["IMC"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = ella(); + let sender = ella(); + + assert!(valid_membership_change( + &RoomVersion::V7, + target_user, + fetch_state(StateEventType::RoomMember, target_user.to_string()), + sender, + fetch_state(StateEventType::RoomMember, sender.to_string()), + &requester, + None::, + fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), + fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None, + &MembershipState::Leave, + fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + ) + .unwrap()); + } +} diff --git a/src/core/state_res/mod.rs b/src/core/state_res/mod.rs new file mode 100644 index 00000000..e4054377 --- /dev/null +++ b/src/core/state_res/mod.rs @@ -0,0 +1,1644 @@ +pub(crate) mod error; +pub mod event_auth; +mod power_levels; +mod room_version; +mod state_event; + +#[cfg(test)] +mod test_utils; + +use std::{ + borrow::Borrow, + cmp::{Ordering, Reverse}, + collections::{BinaryHeap, HashMap, HashSet}, + fmt::Debug, + hash::Hash, +}; + +use futures::{future, stream, Future, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; +use ruma::{ + events::{ + room::member::{MembershipState, RoomMemberEventContent}, + StateEventType, TimelineEventType, + }, + int, EventId, Int, MilliSecondsSinceUnixEpoch, RoomVersionId, +}; +use serde_json::from_str as from_json_str; + +pub(crate) use self::error::Error; +use self::power_levels::PowerLevelsContentFields; +pub use self::{ + event_auth::{auth_check, auth_types_for_event}, + room_version::RoomVersion, + state_event::Event, +}; +use crate::{debug, trace, warn}; + +/// A mapping of event type and state_key to some value `T`, usually an +/// `EventId`. +pub type StateMap = HashMap; +pub type StateMapItem = (TypeStateKey, T); +pub type TypeStateKey = (StateEventType, String); + +type Result = crate::Result; + +/// Resolve sets of state events as they come in. +/// +/// Internally `StateResolution` builds a graph and an auth chain to allow for +/// state conflict resolution. +/// +/// ## Arguments +/// +/// * `state_sets` - The incoming state to resolve. Each `StateMap` represents a +/// possible fork in the state of a room. 
+/// +/// * `auth_chain_sets` - The full recursive set of `auth_events` for each event +/// in the `state_sets`. +/// +/// * `event_fetch` - Any event not found in the `event_map` will defer to this +/// closure to find the event. +/// +/// * `parallel_fetches` - The number of asynchronous fetch requests in-flight +/// for any given operation. +/// +/// ## Invariants +/// +/// The caller of `resolve` must ensure that all the events are from the same +/// room. Although this function takes a `RoomId` it does not check that each +/// event is part of the same room. +//#[tracing::instrument(level = "debug", skip(state_sets, auth_chain_sets, +//#[tracing::instrument(level event_fetch))] +pub async fn resolve<'a, E, SetIter, Fetch, FetchFut, Exists, ExistsFut>( + room_version: &RoomVersionId, + state_sets: impl IntoIterator + Send, + auth_chain_sets: &'a [HashSet], + event_fetch: &Fetch, + event_exists: &Exists, + parallel_fetches: usize, +) -> Result> +where + Fetch: Fn(E::Id) -> FetchFut + Sync, + FetchFut: Future> + Send, + Exists: Fn(E::Id) -> ExistsFut + Sync, + ExistsFut: Future + Send, + SetIter: Iterator> + Clone + Send, + E: Event + Clone + Send + Sync, + E::Id: Borrow + Send + Sync, + for<'b> &'b E: Send, +{ + debug!("State resolution starting"); + + // Split non-conflicting and conflicting state + let (clean, conflicting) = separate(state_sets.into_iter()); + + debug!(count = clean.len(), "non-conflicting events"); + trace!(map = ?clean, "non-conflicting events"); + + if conflicting.is_empty() { + debug!("no conflicting state found"); + return Ok(clean); + } + + debug!(count = conflicting.len(), "conflicting events"); + trace!(map = ?conflicting, "conflicting events"); + + let auth_chain_diff = + get_auth_chain_diff(auth_chain_sets).chain(conflicting.into_values().flatten()); + + // `all_conflicted` contains unique items + // synapse says `full_set = {eid for eid in full_conflicted_set if eid in + // event_map}` + let all_conflicted: HashSet<_> = stream::iter(auth_chain_diff) + // Don't honor events we cannot "verify" + .map(|id| event_exists(id.clone()).map(move |exists| (id, exists))) + .buffer_unordered(parallel_fetches) + .filter_map(|(id, exists)| future::ready(exists.then_some(id))) + .collect() + .boxed() + .await; + + debug!(count = all_conflicted.len(), "full conflicted set"); + trace!(set = ?all_conflicted, "full conflicted set"); + + // We used to check that all events are events from the correct room + // this is now a check the caller of `resolve` must make. + + // Get only the control events with a state_key: "" or ban/kick event (sender != + // state_key) + let control_events: Vec<_> = stream::iter(all_conflicted.iter()) + .map(|id| is_power_event_id(id, &event_fetch).map(move |is| (id, is))) + .buffer_unordered(parallel_fetches) + .filter_map(|(id, is)| future::ready(is.then_some(id.clone()))) + .collect() + .boxed() + .await; + + // Sort the control events based on power_level/clock/event_id and + // outgoing/incoming edges + let sorted_control_levels = reverse_topological_power_sort( + control_events, + &all_conflicted, + &event_fetch, + parallel_fetches, + ) + .boxed() + .await?; + + debug!(count = sorted_control_levels.len(), "power events"); + trace!(list = ?sorted_control_levels, "sorted power events"); + + let room_version = RoomVersion::new(room_version)?; + // Sequentially auth check each control event. 
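Editor's aside, not part of the patch: before the sequential auth check of the sorted power events that follows, here is a rough sketch of how a caller drives `resolve` end to end. It is written as if it sat next to the unit tests at the bottom of this module, so `resolve`, `StateMap`, the module's `Result` alias and the test-only `PduEvent` fixture are in scope; any `Event` implementation with `OwnedEventId` identifiers works the same way, and the concurrency limit of 2 is arbitrary.

use std::collections::{HashMap, HashSet};

use futures::future::ready;
use ruma::{OwnedEventId, RoomVersionId};

async fn resolve_in_memory(
    events: &HashMap<OwnedEventId, PduEvent>,
    state_sets: &[StateMap<OwnedEventId>],
    auth_chain_sets: &[HashSet<OwnedEventId>],
) -> Result<StateMap<OwnedEventId>> {
    // Both closures answer from the in-memory snapshot; a real caller would
    // consult its event store instead.
    let fetch = |id: OwnedEventId| ready(events.get(&id).cloned());
    let exists = |id: OwnedEventId| ready(events.contains_key(&id));

    resolve(&RoomVersionId::V6, state_sets, auth_chain_sets, &fetch, &exists, 2).await
}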
+ let resolved_control = iterative_auth_check( + &room_version, + sorted_control_levels.iter(), + clean.clone(), + &event_fetch, + parallel_fetches, + ) + .boxed() + .await?; + + debug!(count = resolved_control.len(), "resolved power events"); + trace!(map = ?resolved_control, "resolved power events"); + + // At this point the control_events have been resolved we now have to + // sort the remaining events using the mainline of the resolved power level. + let deduped_power_ev = sorted_control_levels.into_iter().collect::>(); + + // This removes the control events that passed auth and more importantly those + // that failed auth + let events_to_resolve = all_conflicted + .iter() + .filter(|&id| !deduped_power_ev.contains(id.borrow())) + .cloned() + .collect::>(); + + debug!(count = events_to_resolve.len(), "events left to resolve"); + trace!(list = ?events_to_resolve, "events left to resolve"); + + // This "epochs" power level event + let power_event = resolved_control.get(&(StateEventType::RoomPowerLevels, String::new())); + + debug!(event_id = ?power_event, "power event"); + + let sorted_left_events = + mainline_sort(&events_to_resolve, power_event.cloned(), &event_fetch, parallel_fetches) + .boxed() + .await?; + + trace!(list = ?sorted_left_events, "events left, sorted"); + + let mut resolved_state = iterative_auth_check( + &room_version, + sorted_left_events.iter(), + resolved_control, // The control events are added to the final resolved state + &event_fetch, + parallel_fetches, + ) + .boxed() + .await?; + + // Add unconflicted state to the resolved state + // We priorities the unconflicting state + resolved_state.extend(clean); + + debug!("state resolution finished"); + + Ok(resolved_state) +} + +/// Split the events that have no conflicts from those that are conflicting. +/// +/// The return tuple looks like `(unconflicted, conflicted)`. +/// +/// State is determined to be conflicting if for the given key (StateEventType, +/// StateKey) there is not exactly one event ID. This includes missing events, +/// if one state_set includes an event that none of the other have this is a +/// conflicting event. +fn separate<'a, Id>( + state_sets_iter: impl Iterator>, +) -> (StateMap, StateMap>) +where + Id: Clone + Eq + Hash + 'a, +{ + let mut state_set_count = 0_usize; + let mut occurrences = HashMap::<_, HashMap<_, _>>::new(); + + let state_sets_iter = state_sets_iter.inspect(|_| state_set_count += 1); + for (k, v) in state_sets_iter.flatten() { + occurrences + .entry(k) + .or_default() + .entry(v) + .and_modify(|x| *x += 1) + .or_insert(1); + } + + let mut unconflicted_state = StateMap::new(); + let mut conflicted_state = StateMap::new(); + + for (k, v) in occurrences { + for (id, occurrence_count) in v { + if occurrence_count == state_set_count { + unconflicted_state.insert((k.0.clone(), k.1.clone()), id.clone()); + } else { + conflicted_state + .entry((k.0.clone(), k.1.clone())) + .and_modify(|x: &mut Vec<_>| x.push(id.clone())) + .or_insert(vec![id.clone()]); + } + } + } + + (unconflicted_state, conflicted_state) +} + +/// Returns a Vec of deduped EventIds that appear in some chains but not others. 
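As a concrete editorial illustration (not from the patch) of that "auth chain difference": events that do not appear in every chain are kept. With string stand-ins for event IDs, chains {A, B, C} and {A, B, D} share only A and B, so the difference is {C, D}.

use std::collections::{HashMap, HashSet};

fn tiny_auth_chain_diff() -> HashSet<&'static str> {
    let sets = [
        HashSet::from(["A", "B", "C"]),
        HashSet::from(["A", "B", "D"]),
    ];

    // Count how many chains each ID occurs in, then keep the IDs that are
    // missing from at least one chain, mirroring the helper that follows.
    let mut counts: HashMap<&str, usize> = HashMap::new();
    for id in sets.iter().flatten() {
        *counts.entry(*id).or_insert(0) += 1;
    }

    counts
        .into_iter()
        .filter(|&(_, count)| count < sets.len())
        .map(|(id, _)| id)
        .collect()
}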
+fn get_auth_chain_diff(auth_chain_sets: &[HashSet]) -> impl Iterator + Send +where + Id: Clone + Eq + Hash + Send, +{ + let num_sets = auth_chain_sets.len(); + let mut id_counts: HashMap = HashMap::new(); + for id in auth_chain_sets.iter().flatten() { + *id_counts.entry(id.clone()).or_default() += 1; + } + + id_counts + .into_iter() + .filter_map(move |(id, count)| (count < num_sets).then_some(id)) +} + +/// Events are sorted from "earliest" to "latest". +/// +/// They are compared using the negative power level (reverse topological +/// ordering), the origin server timestamp and in case of a tie the `EventId`s +/// are compared lexicographically. +/// +/// The power level is negative because a higher power level is equated to an +/// earlier (further back in time) origin server timestamp. +#[tracing::instrument(level = "debug", skip_all)] +async fn reverse_topological_power_sort( + events_to_sort: Vec, + auth_diff: &HashSet, + fetch_event: &F, + parallel_fetches: usize, +) -> Result> +where + F: Fn(E::Id) -> Fut + Sync, + Fut: Future> + Send, + E: Event + Send, + E::Id: Borrow + Send + Sync, +{ + debug!("reverse topological sort of power events"); + + let mut graph = HashMap::new(); + for event_id in events_to_sort { + add_event_and_auth_chain_to_graph(&mut graph, event_id, auth_diff, fetch_event).await; + } + + // This is used in the `key_fn` passed to the lexico_topo_sort fn + let event_to_pl = stream::iter(graph.keys()) + .map(|event_id| { + get_power_level_for_sender(event_id.clone(), fetch_event, parallel_fetches) + .map(move |res| res.map(|pl| (event_id, pl))) + }) + .buffer_unordered(parallel_fetches) + .try_fold(HashMap::new(), |mut event_to_pl, (event_id, pl)| { + debug!( + event_id = event_id.borrow().as_str(), + power_level = i64::from(pl), + "found the power level of an event's sender", + ); + + event_to_pl.insert(event_id.clone(), pl); + future::ok(event_to_pl) + }) + .boxed() + .await?; + + let event_to_pl = &event_to_pl; + let fetcher = |event_id: E::Id| async move { + let pl = *event_to_pl + .get(event_id.borrow()) + .ok_or_else(|| Error::NotFound(String::new()))?; + let ev = fetch_event(event_id) + .await + .ok_or_else(|| Error::NotFound(String::new()))?; + Ok((pl, ev.origin_server_ts())) + }; + + lexicographical_topological_sort(&graph, &fetcher).await +} + +/// Sorts the event graph based on number of outgoing/incoming edges. +/// +/// `key_fn` is used as to obtain the power level and age of an event for +/// breaking ties (together with the event ID). +#[tracing::instrument(level = "debug", skip_all)] +pub async fn lexicographical_topological_sort( + graph: &HashMap>, + key_fn: &F, +) -> Result> +where + F: Fn(Id) -> Fut + Sync, + Fut: Future> + Send, + Id: Borrow + Clone + Eq + Hash + Ord + Send, +{ + #[derive(PartialEq, Eq)] + struct TieBreaker<'a, Id> { + power_level: Int, + origin_server_ts: MilliSecondsSinceUnixEpoch, + event_id: &'a Id, + } + + impl Ord for TieBreaker<'_, Id> + where + Id: Ord, + { + fn cmp(&self, other: &Self) -> Ordering { + // NOTE: the power level comparison is "backwards" intentionally. + // See the "Mainline ordering" section of the Matrix specification + // around where it says the following: + // + // > for events `x` and `y`, `x < y` if [...] 
+ // + // + other + .power_level + .cmp(&self.power_level) + .then(self.origin_server_ts.cmp(&other.origin_server_ts)) + .then(self.event_id.cmp(other.event_id)) + } + } + + impl PartialOrd for TieBreaker<'_, Id> + where + Id: Ord, + { + fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } + } + + debug!("starting lexicographical topological sort"); + + // NOTE: an event that has no incoming edges happened most recently, + // and an event that has no outgoing edges happened least recently. + + // NOTE: this is basically Kahn's algorithm except we look at nodes with no + // outgoing edges, c.f. + // https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm + + // outdegree_map is an event referring to the events before it, the + // more outdegree's the more recent the event. + let mut outdegree_map = graph.clone(); + + // The number of events that depend on the given event (the EventId key) + // How many events reference this event in the DAG as a parent + let mut reverse_graph: HashMap<_, HashSet<_>> = HashMap::new(); + + // Vec of nodes that have zero out degree, least recent events. + let mut zero_outdegree = Vec::new(); + + for (node, edges) in graph { + if edges.is_empty() { + let (power_level, origin_server_ts) = key_fn(node.clone()).await?; + // The `Reverse` is because rusts `BinaryHeap` sorts largest -> smallest we need + // smallest -> largest + zero_outdegree.push(Reverse(TieBreaker { + power_level, + origin_server_ts, + event_id: node, + })); + } + + reverse_graph.entry(node).or_default(); + for edge in edges { + reverse_graph.entry(edge).or_default().insert(node); + } + } + + let mut heap = BinaryHeap::from(zero_outdegree); + + // We remove the oldest node (most incoming edges) and check against all other + let mut sorted = vec![]; + // Destructure the `Reverse` and take the smallest `node` each time + while let Some(Reverse(item)) = heap.pop() { + let node = item.event_id; + + for &parent in reverse_graph + .get(node) + .expect("EventId in heap is also in reverse_graph") + { + // The number of outgoing edges this node has + let out = outdegree_map + .get_mut(parent.borrow()) + .expect("outdegree_map knows of all referenced EventIds"); + + // Only push on the heap once older events have been cleared + out.remove(node.borrow()); + if out.is_empty() { + let (power_level, origin_server_ts) = key_fn(parent.clone()).await?; + heap.push(Reverse(TieBreaker { + power_level, + origin_server_ts, + event_id: parent, + })); + } + } + + // synapse yields we push then return the vec + sorted.push(node.clone()); + } + + Ok(sorted) +} + +/// Find the power level for the sender of `event_id` or return a default value +/// of zero. +/// +/// Do NOT use this any where but topological sort, we find the power level for +/// the eventId at the eventId's generation (we walk backwards to `EventId`s +/// most recent previous power level event). 
+async fn get_power_level_for_sender( + event_id: E::Id, + fetch_event: &F, + parallel_fetches: usize, +) -> serde_json::Result +where + F: Fn(E::Id) -> Fut + Sync, + Fut: Future> + Send, + E: Event + Send, + E::Id: Borrow + Send, +{ + debug!("fetch event ({event_id}) senders power level"); + + let event = fetch_event(event_id.clone()).await; + + let auth_events = event.as_ref().map(Event::auth_events).into_iter().flatten(); + + let pl = stream::iter(auth_events) + .map(|aid| fetch_event(aid.clone())) + .buffer_unordered(parallel_fetches.min(5)) + .filter_map(future::ready) + .collect::>() + .boxed() + .await + .into_iter() + .find(|aev| is_type_and_key(aev, &TimelineEventType::RoomPowerLevels, "")); + + let content: PowerLevelsContentFields = match pl { + | None => return Ok(int!(0)), + | Some(ev) => from_json_str(ev.content().get())?, + }; + + if let Some(ev) = event { + if let Some(&user_level) = content.get_user_power(ev.sender()) { + debug!("found {} at power_level {user_level}", ev.sender()); + return Ok(user_level); + } + } + + Ok(content.users_default) +} + +/// Check the that each event is authenticated based on the events before it. +/// +/// ## Returns +/// +/// The `unconflicted_state` combined with the newly auth'ed events. So any +/// event that fails the `event_auth::auth_check` will be excluded from the +/// returned state map. +/// +/// For each `events_to_check` event we gather the events needed to auth it from +/// the the `fetch_event` closure and verify each event using the +/// `event_auth::auth_check` function. +async fn iterative_auth_check<'a, E, F, Fut, I>( + room_version: &RoomVersion, + events_to_check: I, + unconflicted_state: StateMap, + fetch_event: &F, + parallel_fetches: usize, +) -> Result> +where + F: Fn(E::Id) -> Fut + Sync, + Fut: Future> + Send, + E::Id: Borrow + Clone + Eq + Ord + Send + Sync + 'a, + I: Iterator + Debug + Send + 'a, + E: Event + Clone + Send + Sync, +{ + debug!("starting iterative auth check"); + trace!( + list = ?events_to_check, + "events to check" + ); + + let events_to_check: Vec<_> = stream::iter(events_to_check) + .map(Result::Ok) + .map_ok(|event_id| { + fetch_event(event_id.clone()).map(move |result| { + result.ok_or_else(|| Error::NotFound(format!("Failed to find {event_id}"))) + }) + }) + .try_buffer_unordered(parallel_fetches) + .try_collect() + .boxed() + .await?; + + let auth_event_ids: HashSet = events_to_check + .iter() + .flat_map(|event: &E| event.auth_events().map(Clone::clone)) + .collect(); + + let auth_events: HashMap = stream::iter(auth_event_ids.into_iter()) + .map(fetch_event) + .buffer_unordered(parallel_fetches) + .filter_map(future::ready) + .map(|auth_event| (auth_event.event_id().clone(), auth_event)) + .collect() + .boxed() + .await; + + let auth_events = &auth_events; + let mut resolved_state = unconflicted_state; + for event in &events_to_check { + let event_id = event.event_id(); + let state_key = event + .state_key() + .ok_or_else(|| Error::InvalidPdu("State event had no state key".to_owned()))?; + + let auth_types = auth_types_for_event( + event.event_type(), + event.sender(), + Some(state_key), + event.content(), + )?; + + let mut auth_state = StateMap::new(); + for aid in event.auth_events() { + if let Some(ev) = auth_events.get(aid.borrow()) { + //TODO: synapse checks "rejected_reason" which is most likely related to + // soft-failing + auth_state.insert( + ev.event_type() + .with_state_key(ev.state_key().ok_or_else(|| { + Error::InvalidPdu("State event had no state key".to_owned()) + })?), + 
ev.clone(), + ); + } else { + warn!(event_id = aid.borrow().as_str(), "missing auth event"); + } + } + + stream::iter( + auth_types + .iter() + .filter_map(|key| Some((key, resolved_state.get(key)?))), + ) + .filter_map(|(key, ev_id)| async move { + if let Some(event) = auth_events.get(ev_id.borrow()) { + Some((key, event.clone())) + } else { + Some((key, fetch_event(ev_id.clone()).await?)) + } + }) + .for_each(|(key, event)| { + //TODO: synapse checks "rejected_reason" is None here + auth_state.insert(key.to_owned(), event); + future::ready(()) + }) + .await; + + debug!("event to check {:?}", event.event_id()); + + // The key for this is (eventType + a state_key of the signed token not sender) + // so search for it + let current_third_party = auth_state.iter().find_map(|(_, pdu)| { + (*pdu.event_type() == TimelineEventType::RoomThirdPartyInvite).then_some(pdu) + }); + + let fetch_state = |ty: &StateEventType, key: &str| { + future::ready(auth_state.get(&ty.with_state_key(key))) + }; + + if auth_check(room_version, &event, current_third_party.as_ref(), fetch_state).await? { + // add event to resolved state map + resolved_state.insert(event.event_type().with_state_key(state_key), event_id.clone()); + } else { + // synapse passes here on AuthError. We do not add this event to resolved_state. + warn!("event {event_id} failed the authentication check"); + } + } + + Ok(resolved_state) +} + +/// Returns the sorted `to_sort` list of `EventId`s based on a mainline sort +/// using the depth of `resolved_power_level`, the server timestamp, and the +/// eventId. +/// +/// The depth of the given event is calculated based on the depth of it's +/// closest "parent" power_level event. If there have been two power events the +/// after the most recent are depth 0, the events before (with the first power +/// level as a parent) will be marked as depth 1. depth 1 is "older" than depth +/// 0. +async fn mainline_sort( + to_sort: &[E::Id], + resolved_power_level: Option, + fetch_event: &F, + parallel_fetches: usize, +) -> Result> +where + F: Fn(E::Id) -> Fut + Sync, + Fut: Future> + Send, + E: Event + Clone + Send + Sync, + E::Id: Borrow + Clone + Send + Sync, +{ + debug!("mainline sort of events"); + + // There are no EventId's to sort, bail. 
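Editorial note, placed just ahead of the empty-input early return below and not part of the patch: the ordering this function ultimately produces compares events by mainline position, then `origin_server_ts`, then event ID, all ascending. A toy illustration with made-up IDs, positions and timestamps:

use ruma::{uint, MilliSecondsSinceUnixEpoch};

fn tiny_mainline_order() -> Vec<&'static str> {
    let mut evs = vec![
        ("$b", 1_usize, MilliSecondsSinceUnixEpoch(uint!(10))),
        ("$a", 0_usize, MilliSecondsSinceUnixEpoch(uint!(30))),
        ("$c", 1_usize, MilliSecondsSinceUnixEpoch(uint!(10))),
    ];

    // Same key shape as the `sort_by_key` at the end of this function.
    evs.sort_by_key(|&(id, pos, ts)| (pos, ts, id));

    evs.into_iter().map(|(id, ..)| id).collect() // ["$a", "$b", "$c"]
}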
+ if to_sort.is_empty() { + return Ok(vec![]); + } + + let mut mainline = vec![]; + let mut pl = resolved_power_level; + while let Some(p) = pl { + mainline.push(p.clone()); + + let event = fetch_event(p.clone()) + .await + .ok_or_else(|| Error::NotFound(format!("Failed to find {p}")))?; + pl = None; + for aid in event.auth_events() { + let ev = fetch_event(aid.clone()) + .await + .ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?; + if is_type_and_key(&ev, &TimelineEventType::RoomPowerLevels, "") { + pl = Some(aid.to_owned()); + break; + } + } + } + + let mainline_map = mainline + .iter() + .rev() + .enumerate() + .map(|(idx, eid)| ((*eid).clone(), idx)) + .collect::>(); + + let order_map = stream::iter(to_sort.iter()) + .map(|ev_id| { + fetch_event(ev_id.clone()).map(move |event| event.map(|event| (event, ev_id))) + }) + .buffer_unordered(parallel_fetches) + .filter_map(future::ready) + .map(|(event, ev_id)| { + get_mainline_depth(Some(event.clone()), &mainline_map, fetch_event) + .map_ok(move |depth| (depth, event, ev_id)) + .map(Result::ok) + }) + .buffer_unordered(parallel_fetches) + .filter_map(future::ready) + .fold(HashMap::new(), |mut order_map, (depth, event, ev_id)| { + order_map.insert(ev_id, (depth, event.origin_server_ts(), ev_id)); + future::ready(order_map) + }) + .boxed() + .await; + + // Sort the event_ids by their depth, timestamp and EventId + // unwrap is OK order map and sort_event_ids are from to_sort (the same Vec) + let mut sort_event_ids = order_map.keys().map(|&k| k.clone()).collect::>(); + sort_event_ids.sort_by_key(|sort_id| &order_map[sort_id]); + + Ok(sort_event_ids) +} + +/// Get the mainline depth from the `mainline_map` or finds a power_level event +/// that has an associated mainline depth. +async fn get_mainline_depth( + mut event: Option, + mainline_map: &HashMap, + fetch_event: &F, +) -> Result +where + F: Fn(E::Id) -> Fut + Sync, + Fut: Future> + Send, + E: Event + Send, + E::Id: Borrow + Send, +{ + while let Some(sort_ev) = event { + debug!(event_id = sort_ev.event_id().borrow().as_str(), "mainline"); + let id = sort_ev.event_id(); + if let Some(depth) = mainline_map.get(id.borrow()) { + return Ok(*depth); + } + + event = None; + for aid in sort_ev.auth_events() { + let aev = fetch_event(aid.clone()) + .await + .ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?; + if is_type_and_key(&aev, &TimelineEventType::RoomPowerLevels, "") { + event = Some(aev); + break; + } + } + } + // Did not find a power level event so we default to zero + Ok(0) +} + +async fn add_event_and_auth_chain_to_graph( + graph: &mut HashMap>, + event_id: E::Id, + auth_diff: &HashSet, + fetch_event: &F, +) where + F: Fn(E::Id) -> Fut, + Fut: Future> + Send, + E: Event + Send, + E::Id: Borrow + Clone + Send, +{ + let mut state = vec![event_id]; + while let Some(eid) = state.pop() { + graph.entry(eid.clone()).or_default(); + let event = fetch_event(eid.clone()).await; + let auth_events = event.as_ref().map(Event::auth_events).into_iter().flatten(); + + // Prefer the store to event as the store filters dedups the events + for aid in auth_events { + if auth_diff.contains(aid.borrow()) { + if !graph.contains_key(aid.borrow()) { + state.push(aid.to_owned()); + } + + // We just inserted this at the start of the while loop + graph.get_mut(eid.borrow()).unwrap().insert(aid.to_owned()); + } + } + } +} + +async fn is_power_event_id(event_id: &E::Id, fetch: &F) -> bool +where + F: Fn(E::Id) -> Fut + Sync, + Fut: Future> + Send, + E: Event + Send, + E::Id: Borrow + 
Send, +{ + match fetch(event_id.clone()).await.as_ref() { + | Some(state) => is_power_event(state), + | _ => false, + } +} + +fn is_type_and_key(ev: impl Event, ev_type: &TimelineEventType, state_key: &str) -> bool { + ev.event_type() == ev_type && ev.state_key() == Some(state_key) +} + +fn is_power_event(event: impl Event) -> bool { + match event.event_type() { + | TimelineEventType::RoomPowerLevels + | TimelineEventType::RoomJoinRules + | TimelineEventType::RoomCreate => event.state_key() == Some(""), + | TimelineEventType::RoomMember => { + if let Ok(content) = from_json_str::(event.content().get()) { + if [MembershipState::Leave, MembershipState::Ban].contains(&content.membership) { + return Some(event.sender().as_str()) != event.state_key(); + } + } + + false + }, + | _ => false, + } +} + +/// Convenience trait for adding event type plus state key to state maps. +pub trait EventTypeExt { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String); +} + +impl EventTypeExt for StateEventType { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + (self, state_key.into()) + } +} + +impl EventTypeExt for TimelineEventType { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + (self.to_string().into(), state_key.into()) + } +} + +impl EventTypeExt for &T +where + T: EventTypeExt + Clone, +{ + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + self.to_owned().with_state_key(state_key) + } +} + +#[cfg(test)] +mod tests { + use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + }; + + use maplit::{hashmap, hashset}; + use rand::seq::SliceRandom; + use ruma::{ + events::{ + room::join_rules::{JoinRule, RoomJoinRulesEventContent}, + StateEventType, TimelineEventType, + }, + int, uint, + }; + use ruma_common::{MilliSecondsSinceUnixEpoch, OwnedEventId, RoomVersionId}; + use serde_json::{json, value::to_raw_value as to_raw_json_value}; + use tracing::debug; + + use crate::{ + is_power_event, + room_version::RoomVersion, + test_utils::{ + alice, bob, charlie, do_check, ella, event_id, member_content_ban, + member_content_join, room_id, to_init_pdu_event, to_pdu_event, zara, PduEvent, + TestStore, INITIAL_EVENTS, + }, + Event, EventTypeExt, StateMap, + }; + + async fn test_event_sort() { + use futures::future::ready; + + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let events = INITIAL_EVENTS(); + + let event_map = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) + .collect::>(); + + let auth_chain: HashSet = HashSet::new(); + + let power_events = event_map + .values() + .filter(|&pdu| is_power_event(&**pdu)) + .map(|pdu| pdu.event_id.clone()) + .collect::>(); + + let fetcher = |id| ready(events.get(&id).cloned()); + let sorted_power_events = + crate::reverse_topological_power_sort(power_events, &auth_chain, &fetcher, 1) + .await + .unwrap(); + + let resolved_power = crate::iterative_auth_check( + &RoomVersion::V6, + sorted_power_events.iter(), + HashMap::new(), // unconflicted events + &fetcher, + 1, + ) + .await + .expect("iterative auth check failed on resolved events"); + + // don't remove any events so we know it sorts them all correctly + let mut events_to_sort = events.keys().cloned().collect::>(); + + events_to_sort.shuffle(&mut rand::thread_rng()); + + let power_level = resolved_power + .get(&(StateEventType::RoomPowerLevels, "".to_owned())) + .cloned(); + + let sorted_event_ids 
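+        // NOTE: `mainline_sort` (above) builds the mainline by walking the
+        // resolved power-levels event's auth chain, assigns each event to sort
+        // a depth relative to that mainline, and finally orders by
+        // (mainline depth, origin_server_ts, event id).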
= crate::mainline_sort(&events_to_sort, power_level, &fetcher, 1) + .await + .unwrap(); + + assert_eq!( + vec![ + "$CREATE:foo", + "$IMA:foo", + "$IPOWER:foo", + "$IJR:foo", + "$IMB:foo", + "$IMC:foo", + "$START:foo", + "$END:foo" + ], + sorted_event_ids + .iter() + .map(|id| id.to_string()) + .collect::>() + ); + } + + #[tokio::test] + async fn test_sort() { + for _ in 0..20 { + // since we shuffle the eventIds before we sort them introducing randomness + // seems like we should test this a few times + test_event_sort().await; + } + } + + #[tokio::test] + async fn ban_vs_power_level() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "MA", + alice(), + TimelineEventType::RoomMember, + Some(alice().to_string().as_str()), + member_content_join(), + ), + to_init_pdu_event( + "MB", + alice(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + member_content_ban(), + ), + to_init_pdu_event( + "PB", + bob(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + ]; + + let edges = vec![vec!["END", "MB", "MA", "PA", "START"], vec!["END", "PA", "PB"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["PA", "MA", "MB"] + .into_iter() + .map(event_id) + .collect::>(); + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn topic_basic() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "T1", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "PA1", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "T2", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "PA2", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 0 } })).unwrap(), + ), + to_init_pdu_event( + "PB", + bob(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "T3", + bob(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + ]; + + let edges = + vec![vec!["END", "PA2", "T2", "PA1", "T1", "START"], vec!["END", "T3", "PB", "PA1"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["PA2", "T2"] + .into_iter() + .map(event_id) + .collect::>(); + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn topic_reset() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "T1", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ 
"users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "T2", + bob(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "MB", + alice(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + member_content_ban(), + ), + ]; + + let edges = vec![vec!["END", "MB", "T2", "PA", "T1", "START"], vec!["END", "T1"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["T1", "MB", "PA"] + .into_iter() + .map(event_id) + .collect::>(); + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn join_rule_evasion() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "JR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Private)).unwrap(), + ), + to_init_pdu_event( + "ME", + ella(), + TimelineEventType::RoomMember, + Some(ella().to_string().as_str()), + member_content_join(), + ), + ]; + + let edges = vec![vec!["END", "JR", "START"], vec!["END", "ME", "START"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec![event_id("JR")]; + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn offtopic_power_level() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "PB", + bob(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value( + &json!({ "users": { alice(): 100, bob(): 50, charlie(): 50 } }), + ) + .unwrap(), + ), + to_init_pdu_event( + "PC", + charlie(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50, charlie(): 0 } })) + .unwrap(), + ), + ]; + + let edges = vec![vec!["END", "PC", "PB", "PA", "START"], vec!["END", "PA"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["PC"].into_iter().map(event_id).collect::>(); + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn topic_setting() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "T1", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "PA1", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "T2", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "PA2", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 0 } })).unwrap(), + ), + to_init_pdu_event( + "PB", + bob(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "T3", + bob(), + TimelineEventType::RoomTopic, + Some(""), + 
to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "MZ1", + zara(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "T4", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + ]; + + let edges = vec![vec!["END", "T4", "MZ1", "PA2", "T2", "PA1", "T1", "START"], vec![ + "END", "MZ1", "T3", "PB", "PA1", + ]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["T4", "PA2"] + .into_iter() + .map(event_id) + .collect::>(); + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn test_event_map_none() { + use futures::future::ready; + + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let mut store = TestStore::(hashmap! {}); + + // build up the DAG + let (state_at_bob, state_at_charlie, expected) = store.set_up(); + + let ev_map = store.0.clone(); + let fetcher = |id| ready(ev_map.get(&id).cloned()); + + let exists = |id: ::Id| ready(ev_map.get(&*id).is_some()); + + let state_sets = [state_at_bob, state_at_charlie]; + let auth_chain: Vec<_> = state_sets + .iter() + .map(|map| { + store + .auth_event_ids(room_id(), map.values().cloned().collect()) + .unwrap() + }) + .collect(); + + let resolved = match crate::resolve( + &RoomVersionId::V2, + &state_sets, + &auth_chain, + &fetcher, + &exists, + 1, + ) + .await + { + | Ok(state) => state, + | Err(e) => panic!("{e}"), + }; + + assert_eq!(expected, resolved); + } + + #[tokio::test] + async fn test_lexicographical_sort() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let graph = hashmap! 
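+        // NOTE: every node gets the same power level and timestamp from the
+        // closure below, so the expected order is "o" first (no outgoing
+        // edges), then each remaining node as soon as its dependencies have
+        // been emitted, with ties broken lexicographically by event id.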
{ + event_id("l") => hashset![event_id("o")], + event_id("m") => hashset![event_id("n"), event_id("o")], + event_id("n") => hashset![event_id("o")], + event_id("o") => hashset![], // "o" has zero outgoing edges but 4 incoming edges + event_id("p") => hashset![event_id("o")], + }; + + let res = crate::lexicographical_topological_sort(&graph, &|_id| async { + Ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) + }) + .await + .unwrap(); + + assert_eq!( + vec!["o", "l", "n", "m", "p"], + res.iter() + .map(ToString::to_string) + .map(|s| s.replace('$', "").replace(":foo", "")) + .collect::>() + ); + } + + #[tokio::test] + async fn ban_with_auth_chains() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let ban = BAN_STATE_SET(); + + let edges = vec![vec!["END", "MB", "PA", "START"], vec!["END", "IME", "MB"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["PA", "MB"] + .into_iter() + .map(event_id) + .collect::>(); + + do_check(&ban.values().cloned().collect::>(), edges, expected_state_ids).await; + } + + #[tokio::test] + async fn ban_with_auth_chains2() { + use futures::future::ready; + + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let init = INITIAL_EVENTS(); + let ban = BAN_STATE_SET(); + + let mut inner = init.clone(); + inner.extend(ban); + let store = TestStore(inner.clone()); + + let state_set_a = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("MB")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id.clone())) + .collect::>(); + + let state_set_b = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("IME")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id.clone())) + .collect::>(); + + let ev_map = &store.0; + let state_sets = [state_set_a, state_set_b]; + let auth_chain: Vec<_> = state_sets + .iter() + .map(|map| { + store + .auth_event_ids(room_id(), map.values().cloned().collect()) + .unwrap() + }) + .collect(); + + let fetcher = |id: ::Id| ready(ev_map.get(&id).cloned()); + let exists = |id: ::Id| ready(ev_map.get(&id).is_some()); + let resolved = match crate::resolve( + &RoomVersionId::V6, + &state_sets, + &auth_chain, + &fetcher, + &exists, + 1, + ) + .await + { + | Ok(state) => state, + | Err(e) => panic!("{e}"), + }; + + debug!( + resolved = ?resolved + .iter() + .map(|((ty, key), id)| format!("(({ty}{key:?}), {id})")) + .collect::>(), + "resolved state", + ); + + let expected = [ + "$CREATE:foo", + "$IJR:foo", + "$PA:foo", + "$IMA:foo", + "$IMB:foo", + "$IMC:foo", + "$MB:foo", + ]; + + for id in expected.iter().map(|i| event_id(i)) { + // make sure our resolved events are equal to the expected list + assert!(resolved.values().any(|eid| eid == &id) || init.contains_key(&id), "{id}"); + } + assert_eq!(expected.len(), resolved.len()); + } + + #[tokio::test] + async fn join_rule_with_auth_chain() { + let join_rule = JOIN_RULE(); + + let edges = 
vec![vec!["END", "JR", "START"], vec!["END", "IMZ", "START"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["JR"].into_iter().map(event_id).collect::>(); + + do_check(&join_rule.values().cloned().collect::>(), edges, expected_state_ids) + .await; + } + + #[allow(non_snake_case)] + fn BAN_STATE_SET() -> HashMap> { + vec![ + to_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], // auth_events + &["START"], // prev_events + ), + to_pdu_event( + "PB", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["END"], + ), + to_pdu_event( + "MB", + alice(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_ban(), + &["CREATE", "IMA", "PB"], + &["PA"], + ), + to_pdu_event( + "IME", + ella(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_join(), + &["CREATE", "IJR", "PA"], + &["MB"], + ), + ] + .into_iter() + .map(|ev| (ev.event_id.clone(), ev)) + .collect() + } + + #[allow(non_snake_case)] + fn JOIN_RULE() -> HashMap> { + vec![ + to_pdu_event( + "JR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&json!({ "join_rule": "invite" })).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["START"], + ), + to_pdu_event( + "IMZ", + zara(), + TimelineEventType::RoomPowerLevels, + Some(zara().as_str()), + member_content_join(), + &["CREATE", "JR", "IPOWER"], + &["START"], + ), + ] + .into_iter() + .map(|ev| (ev.event_id.clone(), ev)) + .collect() + } + + macro_rules! state_set { + ($($kind:expr => $key:expr => $id:expr),* $(,)?) 
=> {{ + #[allow(unused_mut)] + let mut x = StateMap::new(); + $( + x.insert(($kind, $key.to_owned()), $id); + )* + x + }}; + } + + #[test] + fn separate_unique_conflicted() { + let (unconflicted, conflicted) = super::separate( + [ + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + state_set![StateEventType::RoomMember => "@b:hs1" => 1], + state_set![StateEventType::RoomMember => "@c:hs1" => 2], + ] + .iter(), + ); + + assert_eq!(unconflicted, StateMap::new()); + assert_eq!(conflicted, state_set![ + StateEventType::RoomMember => "@a:hs1" => vec![0], + StateEventType::RoomMember => "@b:hs1" => vec![1], + StateEventType::RoomMember => "@c:hs1" => vec![2], + ],); + } + + #[test] + fn separate_conflicted() { + let (unconflicted, mut conflicted) = super::separate( + [ + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + state_set![StateEventType::RoomMember => "@a:hs1" => 1], + state_set![StateEventType::RoomMember => "@a:hs1" => 2], + ] + .iter(), + ); + + // HashMap iteration order is random, so sort this before asserting on it + for v in conflicted.values_mut() { + v.sort_unstable(); + } + + assert_eq!(unconflicted, StateMap::new()); + assert_eq!(conflicted, state_set![ + StateEventType::RoomMember => "@a:hs1" => vec![0, 1, 2], + ],); + } + + #[test] + fn separate_unconflicted() { + let (unconflicted, conflicted) = super::separate( + [ + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + ] + .iter(), + ); + + assert_eq!(unconflicted, state_set![ + StateEventType::RoomMember => "@a:hs1" => 0, + ],); + assert_eq!(conflicted, StateMap::new()); + } + + #[test] + fn separate_mixed() { + let (unconflicted, conflicted) = super::separate( + [ + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + state_set![ + StateEventType::RoomMember => "@a:hs1" => 0, + StateEventType::RoomMember => "@b:hs1" => 1, + ], + state_set![ + StateEventType::RoomMember => "@a:hs1" => 0, + StateEventType::RoomMember => "@c:hs1" => 2, + ], + ] + .iter(), + ); + + assert_eq!(unconflicted, state_set![ + StateEventType::RoomMember => "@a:hs1" => 0, + ],); + assert_eq!(conflicted, state_set![ + StateEventType::RoomMember => "@b:hs1" => vec![1], + StateEventType::RoomMember => "@c:hs1" => vec![2], + ],); + } +} diff --git a/src/core/state_res/outcomes.txt b/src/core/state_res/outcomes.txt new file mode 100644 index 00000000..0fa1c734 --- /dev/null +++ b/src/core/state_res/outcomes.txt @@ -0,0 +1,104 @@ +11/29/2020 BRANCH: timo-spec-comp REV: d2a85669cc6056679ce6ca0fde4658a879ad2b08 +lexicographical topological sort + time: [1.7123 us 1.7157 us 1.7199 us] + change: [-1.7584% -1.5433% -1.3205%] (p = 0.00 < 0.05) + Performance has improved. +Found 8 outliers among 100 measurements (8.00%) + 2 (2.00%) low mild + 5 (5.00%) high mild + 1 (1.00%) high severe + +resolve state of 5 events one fork + time: [10.981 us 10.998 us 11.020 us] +Found 3 outliers among 100 measurements (3.00%) + 3 (3.00%) high mild + +resolve state of 10 events 3 conflicting + time: [26.858 us 26.946 us 27.037 us] + +11/29/2020 BRANCH: event-trait REV: f0eb1310efd49d722979f57f20bd1ac3592b0479 +lexicographical topological sort + time: [1.7686 us 1.7738 us 1.7810 us] + change: [-3.2752% -2.4634% -1.7635%] (p = 0.00 < 0.05) + Performance has improved. 
+Found 1 outliers among 100 measurements (1.00%) + 1 (1.00%) high severe + +resolve state of 5 events one fork + time: [10.643 us 10.656 us 10.669 us] + change: [-4.9990% -3.8078% -2.8319%] (p = 0.00 < 0.05) + Performance has improved. +Found 1 outliers among 100 measurements (1.00%) + 1 (1.00%) high severe + +resolve state of 10 events 3 conflicting + time: [29.149 us 29.252 us 29.375 us] + change: [-0.8433% -0.3270% +0.2656%] (p = 0.25 > 0.05) + No change in performance detected. +Found 1 outliers among 100 measurements (1.00%) + 1 (1.00%) high mild + +4/26/2020 BRANCH: fix-test-serde REV: +lexicographical topological sort + time: [1.6793 us 1.6823 us 1.6857 us] +Found 9 outliers among 100 measurements (9.00%) + 1 (1.00%) low mild + 4 (4.00%) high mild + 4 (4.00%) high severe + +resolve state of 5 events one fork + time: [9.9993 us 10.062 us 10.159 us] +Found 9 outliers among 100 measurements (9.00%) + 7 (7.00%) high mild + 2 (2.00%) high severe + +resolve state of 10 events 3 conflicting + time: [26.004 us 26.092 us 26.195 us] +Found 16 outliers among 100 measurements (16.00%) + 11 (11.00%) high mild + 5 (5.00%) high severe + +6/30/2021 BRANCH: state-closure REV: 174c3e2a72232ad75b3fb14b3551f5f746f4fe84 +lexicographical topological sort + time: [1.5496 us 1.5536 us 1.5586 us] +Found 9 outliers among 100 measurements (9.00%) + 1 (1.00%) low mild + 1 (1.00%) high mild + 7 (7.00%) high severe + +resolve state of 5 events one fork + time: [10.319 us 10.333 us 10.347 us] +Found 2 outliers among 100 measurements (2.00%) + 2 (2.00%) high severe + +resolve state of 10 events 3 conflicting + time: [25.770 us 25.805 us 25.839 us] +Found 7 outliers among 100 measurements (7.00%) + 5 (5.00%) high mild + 2 (2.00%) high severe + +7/20/2021 BRANCH stateres-result REV: +This marks the switch to HashSet/Map +lexicographical topological sort + time: [1.8122 us 1.8177 us 1.8233 us] + change: [+15.205% +15.919% +16.502%] (p = 0.00 < 0.05) + Performance has regressed. +Found 7 outliers among 100 measurements (7.00%) + 5 (5.00%) high mild + 2 (2.00%) high severe + +resolve state of 5 events one fork + time: [11.966 us 12.010 us 12.059 us] + change: [+16.089% +16.730% +17.469%] (p = 0.00 < 0.05) + Performance has regressed. +Found 7 outliers among 100 measurements (7.00%) + 3 (3.00%) high mild + 4 (4.00%) high severe + +resolve state of 10 events 3 conflicting + time: [29.092 us 29.201 us 29.311 us] + change: [+12.447% +12.847% +13.280%] (p = 0.00 < 0.05) + Performance has regressed. 
+Found 9 outliers among 100 measurements (9.00%) + 6 (6.00%) high mild + 3 (3.00%) high severe diff --git a/src/core/state_res/power_levels.rs b/src/core/state_res/power_levels.rs new file mode 100644 index 00000000..e1768574 --- /dev/null +++ b/src/core/state_res/power_levels.rs @@ -0,0 +1,256 @@ +use std::collections::BTreeMap; + +use ruma::{ + events::{room::power_levels::RoomPowerLevelsEventContent, TimelineEventType}, + power_levels::{default_power_level, NotificationPowerLevels}, + serde::{ + deserialize_v1_powerlevel, vec_deserialize_int_powerlevel_values, + vec_deserialize_v1_powerlevel_values, + }, + Int, OwnedUserId, UserId, +}; +use serde::Deserialize; +use serde_json::{from_str as from_json_str, Error}; +use tracing::error; + +use super::{Result, RoomVersion}; + +#[derive(Deserialize)] +struct IntRoomPowerLevelsEventContent { + #[serde(default = "default_power_level")] + ban: Int, + + #[serde(default)] + events: BTreeMap, + + #[serde(default)] + events_default: Int, + + #[serde(default)] + invite: Int, + + #[serde(default = "default_power_level")] + kick: Int, + + #[serde(default = "default_power_level")] + redact: Int, + + #[serde(default = "default_power_level")] + state_default: Int, + + #[serde(default)] + users: BTreeMap, + + #[serde(default)] + users_default: Int, + + #[serde(default)] + notifications: IntNotificationPowerLevels, +} + +impl From for RoomPowerLevelsEventContent { + fn from(int_pl: IntRoomPowerLevelsEventContent) -> Self { + let IntRoomPowerLevelsEventContent { + ban, + events, + events_default, + invite, + kick, + redact, + state_default, + users, + users_default, + notifications, + } = int_pl; + + let mut pl = Self::new(); + pl.ban = ban; + pl.events = events; + pl.events_default = events_default; + pl.invite = invite; + pl.kick = kick; + pl.redact = redact; + pl.state_default = state_default; + pl.users = users; + pl.users_default = users_default; + pl.notifications = notifications.into(); + + pl + } +} + +#[derive(Deserialize)] +struct IntNotificationPowerLevels { + #[serde(default = "default_power_level")] + room: Int, +} + +impl Default for IntNotificationPowerLevels { + fn default() -> Self { Self { room: default_power_level() } } +} + +impl From for NotificationPowerLevels { + fn from(int_notif: IntNotificationPowerLevels) -> Self { + let mut notif = Self::new(); + notif.room = int_notif.room; + + notif + } +} + +#[inline] +pub(crate) fn deserialize_power_levels( + content: &str, + room_version: &RoomVersion, +) -> Option { + if room_version.integer_power_levels { + deserialize_integer_power_levels(content) + } else { + deserialize_legacy_power_levels(content) + } +} + +fn deserialize_integer_power_levels(content: &str) -> Option { + match from_json_str::(content) { + | Ok(content) => Some(content.into()), + | Err(_) => { + error!("m.room.power_levels event is not valid with integer values"); + None + }, + } +} + +fn deserialize_legacy_power_levels(content: &str) -> Option { + match from_json_str(content) { + | Ok(content) => Some(content), + | Err(_) => { + error!( + "m.room.power_levels event is not valid with integer or string integer values" + ); + None + }, + } +} + +#[derive(Deserialize)] +pub(crate) struct PowerLevelsContentFields { + #[serde(default, deserialize_with = "vec_deserialize_v1_powerlevel_values")] + pub(crate) users: Vec<(OwnedUserId, Int)>, + + #[serde(default, deserialize_with = "deserialize_v1_powerlevel")] + pub(crate) users_default: Int, +} + +impl PowerLevelsContentFields { + pub(crate) fn get_user_power(&self, user_id: 
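+    // NOTE: the lookup below relies on `binary_search_by`, which assumes the
+    // `users` vec is sorted by user id (presumably guaranteed by the
+    // `vec_deserialize_v1_powerlevel_values` deserializer).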
&UserId) -> Option<&Int> { + let comparator = |item: &(OwnedUserId, Int)| { + let item: &UserId = &item.0; + item.cmp(user_id) + }; + + self.users + .binary_search_by(comparator) + .ok() + .and_then(|idx| self.users.get(idx).map(|item| &item.1)) + } +} + +#[derive(Deserialize)] +struct IntPowerLevelsContentFields { + #[serde(default, deserialize_with = "vec_deserialize_int_powerlevel_values")] + users: Vec<(OwnedUserId, Int)>, + + #[serde(default)] + users_default: Int, +} + +impl From for PowerLevelsContentFields { + fn from(pl: IntPowerLevelsContentFields) -> Self { + let IntPowerLevelsContentFields { users, users_default } = pl; + Self { users, users_default } + } +} + +#[inline] +pub(crate) fn deserialize_power_levels_content_fields( + content: &str, + room_version: &RoomVersion, +) -> Result { + if room_version.integer_power_levels { + deserialize_integer_power_levels_content_fields(content) + } else { + deserialize_legacy_power_levels_content_fields(content) + } +} + +fn deserialize_integer_power_levels_content_fields( + content: &str, +) -> Result { + from_json_str::(content).map(Into::into) +} + +fn deserialize_legacy_power_levels_content_fields( + content: &str, +) -> Result { + from_json_str(content) +} + +#[derive(Deserialize)] +pub(crate) struct PowerLevelsContentInvite { + #[serde(default, deserialize_with = "deserialize_v1_powerlevel")] + pub(crate) invite: Int, +} + +#[derive(Deserialize)] +struct IntPowerLevelsContentInvite { + #[serde(default)] + invite: Int, +} + +impl From for PowerLevelsContentInvite { + fn from(pl: IntPowerLevelsContentInvite) -> Self { + let IntPowerLevelsContentInvite { invite } = pl; + Self { invite } + } +} + +pub(crate) fn deserialize_power_levels_content_invite( + content: &str, + room_version: &RoomVersion, +) -> Result { + if room_version.integer_power_levels { + from_json_str::(content).map(Into::into) + } else { + from_json_str(content) + } +} + +#[derive(Deserialize)] +pub(crate) struct PowerLevelsContentRedact { + #[serde(default = "default_power_level", deserialize_with = "deserialize_v1_powerlevel")] + pub(crate) redact: Int, +} + +#[derive(Deserialize)] +pub(crate) struct IntPowerLevelsContentRedact { + #[serde(default = "default_power_level")] + redact: Int, +} + +impl From for PowerLevelsContentRedact { + fn from(pl: IntPowerLevelsContentRedact) -> Self { + let IntPowerLevelsContentRedact { redact } = pl; + Self { redact } + } +} + +pub(crate) fn deserialize_power_levels_content_redact( + content: &str, + room_version: &RoomVersion, +) -> Result { + if room_version.integer_power_levels { + from_json_str::(content).map(Into::into) + } else { + from_json_str(content) + } +} diff --git a/src/core/state_res/room_version.rs b/src/core/state_res/room_version.rs new file mode 100644 index 00000000..e1b0afe1 --- /dev/null +++ b/src/core/state_res/room_version.rs @@ -0,0 +1,149 @@ +use ruma::RoomVersionId; + +use super::{Error, Result}; + +#[derive(Debug)] +#[allow(clippy::exhaustive_enums)] +pub enum RoomDisposition { + /// A room version that has a stable specification. + Stable, + /// A room version that is not yet fully specified. 
+ Unstable, +} + +#[derive(Debug)] +#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] +pub enum EventFormatVersion { + /// $id:server event id format + V1, + /// MSC1659-style $hash event id format: introduced for room v3 + V2, + /// MSC1884-style $hash format: introduced for room v4 + V3, +} + +#[derive(Debug)] +#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] +pub enum StateResolutionVersion { + /// State resolution for rooms at version 1. + V1, + /// State resolution for room at version 2 or later. + V2, +} + +#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] +pub struct RoomVersion { + /// The stability of this room. + pub disposition: RoomDisposition, + /// The format of the EventId. + pub event_format: EventFormatVersion, + /// Which state resolution algorithm is used. + pub state_res: StateResolutionVersion, + // FIXME: not sure what this one means? + pub enforce_key_validity: bool, + + /// `m.room.aliases` had special auth rules and redaction rules + /// before room version 6. + /// + /// before MSC2261/MSC2432, + pub special_case_aliases_auth: bool, + /// Strictly enforce canonical json, do not allow: + /// * Integers outside the range of [-2 ^ 53 + 1, 2 ^ 53 - 1] + /// * Floats + /// * NaN, Infinity, -Infinity + pub strict_canonicaljson: bool, + /// Verify notifications key while checking m.room.power_levels. + /// + /// bool: MSC2209: Check 'notifications' + pub limit_notifications_power_levels: bool, + /// Extra rules when verifying redaction events. + pub extra_redaction_checks: bool, + /// Allow knocking in event authentication. + /// + /// See [room v7 specification](https://spec.matrix.org/latest/rooms/v7/) for more information. + pub allow_knocking: bool, + /// Adds support for the restricted join rule. + /// + /// See: [MSC3289](https://github.com/matrix-org/matrix-spec-proposals/pull/3289) for more information. + pub restricted_join_rules: bool, + /// Adds support for the knock_restricted join rule. + /// + /// See: [MSC3787](https://github.com/matrix-org/matrix-spec-proposals/pull/3787) for more information. + pub knock_restricted_join_rule: bool, + /// Enforces integer power levels. + /// + /// See: [MSC3667](https://github.com/matrix-org/matrix-spec-proposals/pull/3667) for more information. + pub integer_power_levels: bool, + /// Determine the room creator using the `m.room.create` event's `sender`, + /// instead of the event content's `creator` field. + /// + /// See: [MSC2175](https://github.com/matrix-org/matrix-spec-proposals/pull/2175) for more information. 
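+    // NOTE: of the version constants below, only `V11` enables this.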
+ pub use_room_create_sender: bool, +} + +impl RoomVersion { + pub const V1: Self = Self { + disposition: RoomDisposition::Stable, + event_format: EventFormatVersion::V1, + state_res: StateResolutionVersion::V1, + enforce_key_validity: false, + special_case_aliases_auth: true, + strict_canonicaljson: false, + limit_notifications_power_levels: false, + extra_redaction_checks: true, + allow_knocking: false, + restricted_join_rules: false, + knock_restricted_join_rule: false, + integer_power_levels: false, + use_room_create_sender: false, + }; + pub const V10: Self = Self { + knock_restricted_join_rule: true, + integer_power_levels: true, + ..Self::V9 + }; + pub const V11: Self = Self { + use_room_create_sender: true, + ..Self::V10 + }; + pub const V2: Self = Self { + state_res: StateResolutionVersion::V2, + ..Self::V1 + }; + pub const V3: Self = Self { + event_format: EventFormatVersion::V2, + extra_redaction_checks: false, + ..Self::V2 + }; + pub const V4: Self = Self { + event_format: EventFormatVersion::V3, + ..Self::V3 + }; + pub const V5: Self = Self { enforce_key_validity: true, ..Self::V4 }; + pub const V6: Self = Self { + special_case_aliases_auth: false, + strict_canonicaljson: true, + limit_notifications_power_levels: true, + ..Self::V5 + }; + pub const V7: Self = Self { allow_knocking: true, ..Self::V6 }; + pub const V8: Self = Self { restricted_join_rules: true, ..Self::V7 }; + pub const V9: Self = Self::V8; + + pub fn new(version: &RoomVersionId) -> Result { + Ok(match version { + | RoomVersionId::V1 => Self::V1, + | RoomVersionId::V2 => Self::V2, + | RoomVersionId::V3 => Self::V3, + | RoomVersionId::V4 => Self::V4, + | RoomVersionId::V5 => Self::V5, + | RoomVersionId::V6 => Self::V6, + | RoomVersionId::V7 => Self::V7, + | RoomVersionId::V8 => Self::V8, + | RoomVersionId::V9 => Self::V9, + | RoomVersionId::V10 => Self::V10, + | RoomVersionId::V11 => Self::V11, + | ver => return Err(Error::Unsupported(format!("found version `{ver}`"))), + }) + } +} diff --git a/src/core/state_res/state_event.rs b/src/core/state_res/state_event.rs new file mode 100644 index 00000000..2c038cfe --- /dev/null +++ b/src/core/state_res/state_event.rs @@ -0,0 +1,102 @@ +use std::{ + borrow::Borrow, + fmt::{Debug, Display}, + hash::Hash, + sync::Arc, +}; + +use ruma::{events::TimelineEventType, EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId}; +use serde_json::value::RawValue as RawJsonValue; + +/// Abstraction of a PDU so users can have their own PDU types. +pub trait Event { + type Id: Clone + Debug + Display + Eq + Ord + Hash + Send + Borrow; + + /// The `EventId` of this event. + fn event_id(&self) -> &Self::Id; + + /// The `RoomId` of this event. + fn room_id(&self) -> &RoomId; + + /// The `UserId` of this event. + fn sender(&self) -> &UserId; + + /// The time of creation on the originating server. + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch; + + /// The event type. + fn event_type(&self) -> &TimelineEventType; + + /// The event's content. + fn content(&self) -> &RawJsonValue; + + /// The state key for this event. + fn state_key(&self) -> Option<&str>; + + /// The events before this event. + // Requires GATs to avoid boxing (and TAIT for making it convenient). + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_; + + /// All the authenticating events for this event. + // Requires GATs to avoid boxing (and TAIT for making it convenient). 
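+    // NOTE: the `impl Event for &T` and `impl Event for Arc<T>` blocks below
+    // simply forward every method, so resolution can run over borrowed or
+    // reference-counted PDUs alike.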
+ fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_; + + /// If this event is a redaction event this is the event it redacts. + fn redacts(&self) -> Option<&Self::Id>; +} + +impl Event for &T { + type Id = T::Id; + + fn event_id(&self) -> &Self::Id { (*self).event_id() } + + fn room_id(&self) -> &RoomId { (*self).room_id() } + + fn sender(&self) -> &UserId { (*self).sender() } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { (*self).origin_server_ts() } + + fn event_type(&self) -> &TimelineEventType { (*self).event_type() } + + fn content(&self) -> &RawJsonValue { (*self).content() } + + fn state_key(&self) -> Option<&str> { (*self).state_key() } + + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { + (*self).prev_events() + } + + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { + (*self).auth_events() + } + + fn redacts(&self) -> Option<&Self::Id> { (*self).redacts() } +} + +impl Event for Arc { + type Id = T::Id; + + fn event_id(&self) -> &Self::Id { (**self).event_id() } + + fn room_id(&self) -> &RoomId { (**self).room_id() } + + fn sender(&self) -> &UserId { (**self).sender() } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { (**self).origin_server_ts() } + + fn event_type(&self) -> &TimelineEventType { (**self).event_type() } + + fn content(&self) -> &RawJsonValue { (**self).content() } + + fn state_key(&self) -> Option<&str> { (**self).state_key() } + + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { + (**self).prev_events() + } + + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { + (**self).auth_events() + } + + fn redacts(&self) -> Option<&Self::Id> { (**self).redacts() } +} diff --git a/src/core/state_res/state_res_bench.rs b/src/core/state_res/state_res_bench.rs new file mode 100644 index 00000000..a2bd2c23 --- /dev/null +++ b/src/core/state_res/state_res_bench.rs @@ -0,0 +1,648 @@ +// Because of criterion `cargo bench` works, +// but if you use `cargo bench -- --save-baseline ` +// or pass any other args to it, it fails with the error +// `cargo bench unknown option --save-baseline`. +// To pass args to criterion, use this form +// `cargo bench --bench -- --save-baseline `. + +#![allow(clippy::exhaustive_structs)] + +use std::{ + borrow::Borrow, + collections::{HashMap, HashSet}, + sync::{ + atomic::{AtomicU64, Ordering::SeqCst}, + Arc, + }, +}; + +use criterion::{criterion_group, criterion_main, Criterion}; +use event::PduEvent; +use futures::{future, future::ready}; +use ruma::{int, uint}; +use maplit::{btreemap, hashmap, hashset}; +use ruma::{ + room_id, user_id, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, + Signatures, UserId, +}; +use ruma::events::{ + pdu::{EventHash, Pdu, RoomV3Pdu}, + room::{ + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + }, + StateEventType, TimelineEventType, +}; +use conduwuit::state_res::{self as state_res, Error, Event, Result, StateMap}; +use serde_json::{ + json, + value::{to_raw_value as to_raw_json_value, RawValue as RawJsonValue}, +}; + +static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); + +fn lexico_topo_sort(c: &mut Criterion) { + c.bench_function("lexicographical topological sort", |b| { + let graph = hashmap! 
{ + event_id("l") => hashset![event_id("o")], + event_id("m") => hashset![event_id("n"), event_id("o")], + event_id("n") => hashset![event_id("o")], + event_id("o") => hashset![], // "o" has zero outgoing edges but 4 incoming edges + event_id("p") => hashset![event_id("o")], + }; + b.iter(|| { + let _ = state_res::lexicographical_topological_sort(&graph, &|_| { + future::ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) + }); + }); + }); +} + +fn resolution_shallow_auth_chain(c: &mut Criterion) { + c.bench_function("resolve state of 5 events one fork", |b| { + let mut store = TestStore(hashmap! {}); + + // build up the DAG + let (state_at_bob, state_at_charlie, _) = store.set_up(); + + b.iter(|| async { + let ev_map = store.0.clone(); + let state_sets = [&state_at_bob, &state_at_charlie]; + let fetch = |id: OwnedEventId| ready(ev_map.get(&id).map(Arc::clone)); + let exists = |id: OwnedEventId| ready(ev_map.get(&id).is_some()); + let auth_chain_sets = state_sets + .iter() + .map(|map| { + store.auth_event_ids(room_id(), map.values().cloned().collect()).unwrap() + }) + .collect(); + + let _ = match state_res::resolve( + &RoomVersionId::V6, + state_sets.into_iter(), + &auth_chain_sets, + &fetch, + &exists, + ) + .await + { + Ok(state) => state, + Err(e) => panic!("{e}"), + }; + }); + }); +} + +fn resolve_deeper_event_set(c: &mut Criterion) { + c.bench_function("resolve state of 10 events 3 conflicting", |b| { + let mut inner = INITIAL_EVENTS(); + let ban = BAN_STATE_SET(); + + inner.extend(ban); + let store = TestStore(inner.clone()); + + let state_set_a = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("MB")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| { + (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id().to_owned()) + }) + .collect::>(); + + let state_set_b = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("IME")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| { + (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id().to_owned()) + }) + .collect::>(); + + b.iter(|| async { + let state_sets = [&state_set_a, &state_set_b]; + let auth_chain_sets = state_sets + .iter() + .map(|map| { + store.auth_event_ids(room_id(), map.values().cloned().collect()).unwrap() + }) + .collect(); + + let fetch = |id: OwnedEventId| ready(inner.get(&id).map(Arc::clone)); + let exists = |id: OwnedEventId| ready(inner.get(&id).is_some()); + let _ = match state_res::resolve( + &RoomVersionId::V6, + state_sets.into_iter(), + &auth_chain_sets, + &fetch, + &exists, + ) + .await + { + Ok(state) => state, + Err(_) => panic!("resolution failed during benchmarking"), + }; + }); + }); +} + +criterion_group!( + benches, + lexico_topo_sort, + resolution_shallow_auth_chain, + resolve_deeper_event_set +); + +criterion_main!(benches); + +//*///////////////////////////////////////////////////////////////////// +// +// IMPLEMENTATION DETAILS AHEAD +// +/////////////////////////////////////////////////////////////////////*/ +struct TestStore(HashMap>); + +#[allow(unused)] +impl TestStore { + fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> Result> { + self.0 + 
.get(event_id) + .map(Arc::clone) + .ok_or_else(|| Error::NotFound(format!("{} not found", event_id))) + } + + /// Returns the events that correspond to the `event_ids` sorted in the same order. + fn get_events(&self, room_id: &RoomId, event_ids: &[OwnedEventId]) -> Result>> { + let mut events = vec![]; + for id in event_ids { + events.push(self.get_event(room_id, id)?); + } + Ok(events) + } + + /// Returns a Vec of the related auth events to the given `event`. + fn auth_event_ids(&self, room_id: &RoomId, event_ids: Vec) -> Result> { + let mut result = HashSet::new(); + let mut stack = event_ids; + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if result.contains(&ev_id) { + continue; + } + + result.insert(ev_id.clone()); + + let event = self.get_event(room_id, ev_id.borrow())?; + + stack.extend(event.auth_events().map(ToOwned::to_owned)); + } + + Ok(result) + } + + /// Returns a vector representing the difference in auth chains of the given `events`. + fn auth_chain_diff(&self, room_id: &RoomId, event_ids: Vec>) -> Result> { + let mut auth_chain_sets = vec![]; + for ids in event_ids { + // TODO state store `auth_event_ids` returns self in the event ids list + // when an event returns `auth_event_ids` self is not contained + let chain = self.auth_event_ids(room_id, ids)?.into_iter().collect::>(); + auth_chain_sets.push(chain); + } + + if let Some(first) = auth_chain_sets.first().cloned() { + let common = auth_chain_sets + .iter() + .skip(1) + .fold(first, |a, b| a.intersection(b).cloned().collect::>()); + + Ok(auth_chain_sets + .into_iter() + .flatten() + .filter(|id| !common.contains(id.borrow())) + .collect()) + } else { + Ok(vec![]) + } + } +} + +impl TestStore { + #[allow(clippy::type_complexity)] + fn set_up( + &mut self, + ) -> (StateMap, StateMap, StateMap) { + let create_event = to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + ); + let cre = create_event.event_id().to_owned(); + self.0.insert(cre.clone(), Arc::clone(&create_event)); + + let alice_mem = to_pdu_event( + "IMA", + alice(), + TimelineEventType::RoomMember, + Some(alice().to_string().as_str()), + member_content_join(), + &[cre.clone()], + &[cre.clone()], + ); + self.0.insert(alice_mem.event_id().to_owned(), Arc::clone(&alice_mem)); + + let join_rules = to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &[cre.clone(), alice_mem.event_id().to_owned()], + &[alice_mem.event_id().to_owned()], + ); + self.0.insert(join_rules.event_id().to_owned(), join_rules.clone()); + + // Bob and Charlie join at the same time, so there is a fork + // this will be represented in the state_sets when we resolve + let bob_mem = to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + member_content_join(), + &[cre.clone(), join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0.insert(bob_mem.event_id().to_owned(), bob_mem.clone()); + + let charlie_mem = to_pdu_event( + "IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().to_string().as_str()), + member_content_join(), + &[cre, join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0.insert(charlie_mem.event_id().to_owned(), charlie_mem.clone()); + + let state_at_bob = [&create_event, 
&alice_mem, &join_rules, &bob_mem] + .iter() + .map(|e| { + (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) + }) + .collect::>(); + + let state_at_charlie = [&create_event, &alice_mem, &join_rules, &charlie_mem] + .iter() + .map(|e| { + (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) + }) + .collect::>(); + + let expected = [&create_event, &alice_mem, &join_rules, &bob_mem, &charlie_mem] + .iter() + .map(|e| { + (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) + }) + .collect::>(); + + (state_at_bob, state_at_charlie, expected) + } +} + +fn event_id(id: &str) -> OwnedEventId { + if id.contains('$') { + return id.try_into().unwrap(); + } + format!("${}:foo", id).try_into().unwrap() +} + +fn alice() -> &'static UserId { + user_id!("@alice:foo") +} + +fn bob() -> &'static UserId { + user_id!("@bob:foo") +} + +fn charlie() -> &'static UserId { + user_id!("@charlie:foo") +} + +fn ella() -> &'static UserId { + user_id!("@ella:foo") +} + +fn room_id() -> &'static RoomId { + room_id!("!test:foo") +} + +fn member_content_ban() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Ban)).unwrap() +} + +fn member_content_join() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap() +} + +fn to_pdu_event( + id: &str, + sender: &UserId, + ev_type: TimelineEventType, + state_key: Option<&str>, + content: Box, + auth_events: &[S], + prev_events: &[S], +) -> Arc +where + S: AsRef, +{ + // We don't care if the addition happens in order just that it is atomic + // (each event has its own value) + let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); + let id = if id.contains('$') { id.to_owned() } else { format!("${}:foo", id) }; + let auth_events = auth_events.iter().map(AsRef::as_ref).map(event_id).collect::>(); + let prev_events = prev_events.iter().map(AsRef::as_ref).map(event_id).collect::>(); + + let state_key = state_key.map(ToOwned::to_owned); + Arc::new(PduEvent { + event_id: id.try_into().unwrap(), + rest: Pdu::RoomV3Pdu(RoomV3Pdu { + room_id: room_id().to_owned(), + sender: sender.to_owned(), + origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), + state_key, + kind: ev_type, + content, + redacts: None, + unsigned: btreemap! 
{}, + auth_events, + prev_events, + depth: uint!(0), + hashes: EventHash::new(String::new()), + signatures: Signatures::new(), + }), + }) +} + +// all graphs start with these input events +#[allow(non_snake_case)] +fn INITIAL_EVENTS() -> HashMap> { + vec![ + to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + ), + to_pdu_event( + "IMA", + alice(), + TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_join(), + &["CREATE"], + &["CREATE"], + ), + to_pdu_event( + "IPOWER", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100 } })).unwrap(), + &["CREATE", "IMA"], + &["IMA"], + ), + to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["IPOWER"], + ), + to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + member_content_join(), + &["CREATE", "IJR", "IPOWER"], + &["IJR"], + ), + to_pdu_event( + "IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().to_string().as_str()), + member_content_join(), + &["CREATE", "IJR", "IPOWER"], + &["IMB"], + ), + to_pdu_event::<&EventId>( + "START", + charlie(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + to_pdu_event::<&EventId>( + "END", + charlie(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + ] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +// all graphs start with these input events +#[allow(non_snake_case)] +fn BAN_STATE_SET() -> HashMap> { + vec![ + to_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], // auth_events + &["START"], // prev_events + ), + to_pdu_event( + "PB", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["END"], + ), + to_pdu_event( + "MB", + alice(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_ban(), + &["CREATE", "IMA", "PB"], + &["PA"], + ), + to_pdu_event( + "IME", + ella(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_join(), + &["CREATE", "IJR", "PA"], + &["MB"], + ), + ] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +/// Convenience trait for adding event type plus state key to state maps. 
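+// NOTE: this appears to duplicate the crate's public `EventTypeExt` so the
+// benchmark can be compiled as a standalone target; usage is the same, e.g.
+// `ev.event_type().with_state_key(ev.state_key().unwrap())` yields a
+// `(StateEventType, String)` key.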
+trait EventTypeExt { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String); +} + +impl EventTypeExt for &TimelineEventType { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + (self.to_string().into(), state_key.into()) + } +} + +mod event { + use ruma_common::{MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId}; + use ruma_events::{pdu::Pdu, TimelineEventType}; + use ruma_state_res::Event; + use serde::{Deserialize, Serialize}; + use serde_json::value::RawValue as RawJsonValue; + + impl Event for PduEvent { + type Id = OwnedEventId; + + fn event_id(&self) -> &Self::Id { + &self.event_id + } + + fn room_id(&self) -> &RoomId { + match &self.rest { + Pdu::RoomV1Pdu(ev) => &ev.room_id, + Pdu::RoomV3Pdu(ev) => &ev.room_id, + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn sender(&self) -> &UserId { + match &self.rest { + Pdu::RoomV1Pdu(ev) => &ev.sender, + Pdu::RoomV3Pdu(ev) => &ev.sender, + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn event_type(&self) -> &TimelineEventType { + match &self.rest { + Pdu::RoomV1Pdu(ev) => &ev.kind, + Pdu::RoomV3Pdu(ev) => &ev.kind, + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn content(&self) -> &RawJsonValue { + match &self.rest { + Pdu::RoomV1Pdu(ev) => &ev.content, + Pdu::RoomV3Pdu(ev) => &ev.content, + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { + match &self.rest { + Pdu::RoomV1Pdu(ev) => ev.origin_server_ts, + Pdu::RoomV3Pdu(ev) => ev.origin_server_ts, + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn state_key(&self) -> Option<&str> { + match &self.rest { + Pdu::RoomV1Pdu(ev) => ev.state_key.as_deref(), + Pdu::RoomV3Pdu(ev) => ev.state_key.as_deref(), + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn prev_events(&self) -> Box + Send + '_> { + match &self.rest { + Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)), + Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()), + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn auth_events(&self) -> Box + Send + '_> { + match &self.rest { + Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)), + Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()), + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn redacts(&self) -> Option<&Self::Id> { + match &self.rest { + Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(), + Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(), + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + } + + #[derive(Clone, Debug, Deserialize, Serialize)] + pub(crate) struct PduEvent { + pub(crate) event_id: OwnedEventId, + #[serde(flatten)] + pub(crate) rest: Pdu, + } +} diff --git a/src/core/state_res/test_utils.rs b/src/core/state_res/test_utils.rs new file mode 100644 index 00000000..7954b28d --- /dev/null +++ b/src/core/state_res/test_utils.rs @@ -0,0 +1,688 @@ +use std::{ + borrow::Borrow, + collections::{BTreeMap, HashMap, HashSet}, + sync::{ + atomic::{AtomicU64, Ordering::SeqCst}, + Arc, + }, +}; + +use futures_util::future::ready; +use 
js_int::{int, uint}; +use ruma_common::{ + event_id, room_id, user_id, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, + RoomVersionId, ServerSignatures, UserId, +}; +use ruma_events::{ + pdu::{EventHash, Pdu, RoomV3Pdu}, + room::{ + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + }, + TimelineEventType, +}; +use serde_json::{ + json, + value::{to_raw_value as to_raw_json_value, RawValue as RawJsonValue}, +}; +use tracing::info; + +pub(crate) use self::event::PduEvent; +use crate::{auth_types_for_event, Error, Event, EventTypeExt, Result, StateMap}; + +static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); + +pub(crate) async fn do_check( + events: &[Arc], + edges: Vec>, + expected_state_ids: Vec, +) { + // To activate logging use `RUST_LOG=debug cargo t` + + let init_events = INITIAL_EVENTS(); + + let mut store = TestStore( + init_events + .values() + .chain(events) + .map(|ev| (ev.event_id().to_owned(), ev.clone())) + .collect(), + ); + + // This will be lexi_topo_sorted for resolution + let mut graph = HashMap::new(); + // This is the same as in `resolve` event_id -> OriginalStateEvent + let mut fake_event_map = HashMap::new(); + + // Create the DB of events that led up to this point + // TODO maybe clean up some of these clones it is just tests but... + for ev in init_events.values().chain(events) { + graph.insert(ev.event_id().to_owned(), HashSet::new()); + fake_event_map.insert(ev.event_id().to_owned(), ev.clone()); + } + + for pair in INITIAL_EDGES().windows(2) { + if let [a, b] = &pair { + graph + .entry(a.to_owned()) + .or_insert_with(HashSet::new) + .insert(b.clone()); + } + } + + for edge_list in edges { + for pair in edge_list.windows(2) { + if let [a, b] = &pair { + graph + .entry(a.to_owned()) + .or_insert_with(HashSet::new) + .insert(b.clone()); + } + } + } + + // event_id -> PduEvent + let mut event_map: HashMap> = HashMap::new(); + // event_id -> StateMap + let mut state_at_event: HashMap> = HashMap::new(); + + // Resolve the current state and add it to the state_at_event map then continue + // on in "time" + for node in crate::lexicographical_topological_sort(&graph, &|_id| async { + Ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) + }) + .await + .unwrap() + { + let fake_event = fake_event_map.get(&node).unwrap(); + let event_id = fake_event.event_id().to_owned(); + + let prev_events = graph.get(&node).unwrap(); + + let state_before: StateMap = if prev_events.is_empty() { + HashMap::new() + } else if prev_events.len() == 1 { + state_at_event + .get(prev_events.iter().next().unwrap()) + .unwrap() + .clone() + } else { + let state_sets = prev_events + .iter() + .filter_map(|k| state_at_event.get(k)) + .collect::>(); + + info!( + "{:#?}", + state_sets + .iter() + .map(|map| map + .iter() + .map(|((ty, key), id)| format!("(({ty}{key:?}), {id})")) + .collect::>()) + .collect::>() + ); + + let auth_chain_sets: Vec<_> = state_sets + .iter() + .map(|map| { + store + .auth_event_ids(room_id(), map.values().cloned().collect()) + .unwrap() + }) + .collect(); + + let event_map = &event_map; + let fetch = |id: ::Id| ready(event_map.get(&id).cloned()); + let exists = |id: ::Id| ready(event_map.get(&id).is_some()); + let resolved = crate::resolve( + &RoomVersionId::V6, + state_sets, + &auth_chain_sets, + &fetch, + &exists, + 1, + ) + .await; + + match resolved { + | Ok(state) => state, + | Err(e) => panic!("resolution for {node} failed: {e}"), + } + }; + + let mut state_after = state_before.clone(); + + let ty = 
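+        // NOTE: record this node's event in the state it resolved against, so
+        // that later nodes in the sorted graph see it as part of their prior
+        // state.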
fake_event.event_type(); + let key = fake_event.state_key().unwrap(); + state_after.insert(ty.with_state_key(key), event_id.to_owned()); + + let auth_types = auth_types_for_event( + fake_event.event_type(), + fake_event.sender(), + fake_event.state_key(), + fake_event.content(), + ) + .unwrap(); + + let mut auth_events = vec![]; + for key in auth_types { + if state_before.contains_key(&key) { + auth_events.push(state_before[&key].clone()); + } + } + + // TODO The event is just remade, adding the auth_events and prev_events here + // the `to_pdu_event` was split into `init` and the fn below, could be better + let e = fake_event; + let ev_id = e.event_id(); + let event = to_pdu_event( + e.event_id().as_str(), + e.sender(), + e.event_type().clone(), + e.state_key(), + e.content().to_owned(), + &auth_events, + &prev_events.iter().cloned().collect::>(), + ); + + // We have to update our store, an actual user of this lib would + // be giving us state from a DB. + store.0.insert(ev_id.to_owned(), event.clone()); + + state_at_event.insert(node, state_after); + event_map.insert(event_id.to_owned(), Arc::clone(store.0.get(ev_id).unwrap())); + } + + let mut expected_state = StateMap::new(); + for node in expected_state_ids { + let ev = event_map.get(&node).unwrap_or_else(|| { + panic!( + "{node} not found in {:?}", + event_map + .keys() + .map(ToString::to_string) + .collect::>() + ) + }); + + let key = ev.event_type().with_state_key(ev.state_key().unwrap()); + + expected_state.insert(key, node); + } + + let start_state = state_at_event.get(event_id!("$START:foo")).unwrap(); + + let end_state = state_at_event + .get(event_id!("$END:foo")) + .unwrap() + .iter() + .filter(|(k, v)| { + expected_state.contains_key(k) + || start_state.get(k) != Some(*v) + // Filter out the dummy messages events. + // These act as points in time where there should be a known state to + // test against. + && **k != ("m.room.message".into(), "dummy".to_owned()) + }) + .map(|(k, v)| (k.clone(), v.clone())) + .collect::>(); + + assert_eq!(expected_state, end_state); +} + +#[allow(clippy::exhaustive_structs)] +pub(crate) struct TestStore(pub(crate) HashMap>); + +impl TestStore { + pub(crate) fn get_event(&self, _: &RoomId, event_id: &EventId) -> Result> { + self.0 + .get(event_id) + .cloned() + .ok_or_else(|| Error::NotFound(format!("{event_id} not found"))) + } + + /// Returns a Vec of the related auth events to the given `event`. 
+ pub(crate) fn auth_event_ids( + &self, + room_id: &RoomId, + event_ids: Vec, + ) -> Result> { + let mut result = HashSet::new(); + let mut stack = event_ids; + + // DFS for auth event chain + while let Some(ev_id) = stack.pop() { + if result.contains(&ev_id) { + continue; + } + + result.insert(ev_id.clone()); + + let event = self.get_event(room_id, ev_id.borrow())?; + + stack.extend(event.auth_events().map(ToOwned::to_owned)); + } + + Ok(result) + } +} + +// A StateStore implementation for testing +#[allow(clippy::type_complexity)] +impl TestStore { + pub(crate) fn set_up( + &mut self, + ) -> (StateMap, StateMap, StateMap) { + let create_event = to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + ); + let cre = create_event.event_id().to_owned(); + self.0.insert(cre.clone(), Arc::clone(&create_event)); + + let alice_mem = to_pdu_event( + "IMA", + alice(), + TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_join(), + &[cre.clone()], + &[cre.clone()], + ); + self.0 + .insert(alice_mem.event_id().to_owned(), Arc::clone(&alice_mem)); + + let join_rules = to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &[cre.clone(), alice_mem.event_id().to_owned()], + &[alice_mem.event_id().to_owned()], + ); + self.0 + .insert(join_rules.event_id().to_owned(), join_rules.clone()); + + // Bob and Charlie join at the same time, so there is a fork + // this will be represented in the state_sets when we resolve + let bob_mem = to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().as_str()), + member_content_join(), + &[cre.clone(), join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0 + .insert(bob_mem.event_id().to_owned(), bob_mem.clone()); + + let charlie_mem = to_pdu_event( + "IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().as_str()), + member_content_join(), + &[cre, join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0 + .insert(charlie_mem.event_id().to_owned(), charlie_mem.clone()); + + let state_at_bob = [&create_event, &alice_mem, &join_rules, &bob_mem] + .iter() + .map(|e| { + (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) + }) + .collect::>(); + + let state_at_charlie = [&create_event, &alice_mem, &join_rules, &charlie_mem] + .iter() + .map(|e| { + (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) + }) + .collect::>(); + + let expected = [&create_event, &alice_mem, &join_rules, &bob_mem, &charlie_mem] + .iter() + .map(|e| { + (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) + }) + .collect::>(); + + (state_at_bob, state_at_charlie, expected) + } +} + +pub(crate) fn event_id(id: &str) -> OwnedEventId { + if id.contains('$') { + return id.try_into().unwrap(); + } + + format!("${id}:foo").try_into().unwrap() +} + +pub(crate) fn alice() -> &'static UserId { user_id!("@alice:foo") } + +pub(crate) fn bob() -> &'static UserId { user_id!("@bob:foo") } + +pub(crate) fn charlie() -> &'static UserId { user_id!("@charlie:foo") } + +pub(crate) fn ella() -> &'static UserId { user_id!("@ella:foo") } + +pub(crate) fn zara() -> &'static UserId { user_id!("@zara:foo") } + +pub(crate) fn room_id() -> &'static RoomId { room_id!("!test:foo") } + +pub(crate) 
fn member_content_ban() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Ban)).unwrap() +} + +pub(crate) fn member_content_join() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap() +} + +pub(crate) fn to_init_pdu_event( + id: &str, + sender: &UserId, + ev_type: TimelineEventType, + state_key: Option<&str>, + content: Box, +) -> Arc { + let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); + let id = if id.contains('$') { + id.to_owned() + } else { + format!("${id}:foo") + }; + + let state_key = state_key.map(ToOwned::to_owned); + Arc::new(PduEvent { + event_id: id.try_into().unwrap(), + rest: Pdu::RoomV3Pdu(RoomV3Pdu { + room_id: room_id().to_owned(), + sender: sender.to_owned(), + origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), + state_key, + kind: ev_type, + content, + redacts: None, + unsigned: BTreeMap::new(), + auth_events: vec![], + prev_events: vec![], + depth: uint!(0), + hashes: EventHash::new("".to_owned()), + signatures: ServerSignatures::default(), + }), + }) +} + +pub(crate) fn to_pdu_event( + id: &str, + sender: &UserId, + ev_type: TimelineEventType, + state_key: Option<&str>, + content: Box, + auth_events: &[S], + prev_events: &[S], +) -> Arc +where + S: AsRef, +{ + let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); + let id = if id.contains('$') { + id.to_owned() + } else { + format!("${id}:foo") + }; + let auth_events = auth_events + .iter() + .map(AsRef::as_ref) + .map(event_id) + .collect::>(); + let prev_events = prev_events + .iter() + .map(AsRef::as_ref) + .map(event_id) + .collect::>(); + + let state_key = state_key.map(ToOwned::to_owned); + Arc::new(PduEvent { + event_id: id.try_into().unwrap(), + rest: Pdu::RoomV3Pdu(RoomV3Pdu { + room_id: room_id().to_owned(), + sender: sender.to_owned(), + origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), + state_key, + kind: ev_type, + content, + redacts: None, + unsigned: BTreeMap::new(), + auth_events, + prev_events, + depth: uint!(0), + hashes: EventHash::new("".to_owned()), + signatures: ServerSignatures::default(), + }), + }) +} + +// all graphs start with these input events +#[allow(non_snake_case)] +pub(crate) fn INITIAL_EVENTS() -> HashMap> { + vec![ + to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + ), + to_pdu_event( + "IMA", + alice(), + TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_join(), + &["CREATE"], + &["CREATE"], + ), + to_pdu_event( + "IPOWER", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100 } })).unwrap(), + &["CREATE", "IMA"], + &["IMA"], + ), + to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["IPOWER"], + ), + to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().as_str()), + member_content_join(), + &["CREATE", "IJR", "IPOWER"], + &["IJR"], + ), + to_pdu_event( + "IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().as_str()), + member_content_join(), + &["CREATE", "IJR", "IPOWER"], + &["IMB"], + ), + to_pdu_event::<&EventId>( + "START", + charlie(), + TimelineEventType::RoomMessage, + Some("dummy"), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + to_pdu_event::<&EventId>( + "END", + charlie(), 
+ TimelineEventType::RoomMessage, + Some("dummy"), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + ] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +// all graphs start with these input events +#[allow(non_snake_case)] +pub(crate) fn INITIAL_EVENTS_CREATE_ROOM() -> HashMap> { + vec![to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + )] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +#[allow(non_snake_case)] +pub(crate) fn INITIAL_EDGES() -> Vec { + vec!["START", "IMC", "IMB", "IJR", "IPOWER", "IMA", "CREATE"] + .into_iter() + .map(event_id) + .collect::>() +} + +pub(crate) mod event { + use ruma_common::{MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId}; + use ruma_events::{pdu::Pdu, TimelineEventType}; + use serde::{Deserialize, Serialize}; + use serde_json::value::RawValue as RawJsonValue; + + use crate::Event; + + impl Event for PduEvent { + type Id = OwnedEventId; + + fn event_id(&self) -> &Self::Id { &self.event_id } + + fn room_id(&self) -> &RoomId { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.room_id, + | Pdu::RoomV3Pdu(ev) => &ev.room_id, + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn sender(&self) -> &UserId { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.sender, + | Pdu::RoomV3Pdu(ev) => &ev.sender, + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn event_type(&self) -> &TimelineEventType { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.kind, + | Pdu::RoomV3Pdu(ev) => &ev.kind, + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn content(&self) -> &RawJsonValue { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.content, + | Pdu::RoomV3Pdu(ev) => &ev.content, + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.origin_server_ts, + | Pdu::RoomV3Pdu(ev) => ev.origin_server_ts, + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn state_key(&self) -> Option<&str> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.state_key.as_deref(), + | Pdu::RoomV3Pdu(ev) => ev.state_key.as_deref(), + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + #[allow(refining_impl_trait)] + fn prev_events(&self) -> Box + Send + '_> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()), + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + #[allow(refining_impl_trait)] + fn auth_events(&self) -> Box + Send + '_> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()), + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn redacts(&self) -> Option<&Self::Id> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(), + | Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(), + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + } + + #[derive(Clone, Debug, Deserialize, Serialize)] + #[allow(clippy::exhaustive_structs)] + pub(crate) struct PduEvent { + pub(crate) 
event_id: OwnedEventId, + #[serde(flatten)] + pub(crate) rest: Pdu, + } +} diff --git a/src/service/rooms/event_handler/fetch_prev.rs b/src/service/rooms/event_handler/fetch_prev.rs index aea70739..5a38f7fe 100644 --- a/src/service/rooms/event_handler/fetch_prev.rs +++ b/src/service/rooms/event_handler/fetch_prev.rs @@ -3,12 +3,15 @@ use std::{ sync::Arc, }; -use conduwuit::{debug_warn, err, implement, PduEvent, Result}; +use conduwuit::{ + debug_warn, err, implement, + state_res::{self}, + PduEvent, Result, +}; use futures::{future, FutureExt}; use ruma::{ - int, - state_res::{self}, - uint, CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, ServerName, UInt, + int, uint, CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, ServerName, + UInt, }; use super::check_room_id; diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index b7c38313..3cc15fc4 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -3,10 +3,12 @@ use std::{ sync::Arc, }; -use conduwuit::{debug, debug_info, err, implement, trace, warn, Err, Error, PduEvent, Result}; +use conduwuit::{ + debug, debug_info, err, implement, state_res, trace, warn, Err, Error, PduEvent, Result, +}; use futures::{future::ready, TryFutureExt}; use ruma::{ - api::client::error::ErrorKind, events::StateEventType, state_res, CanonicalJsonObject, + api::client::error::ErrorKind, events::StateEventType, CanonicalJsonObject, CanonicalJsonValue, EventId, RoomId, ServerName, }; diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 8bcbc48b..5960c734 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -19,12 +19,12 @@ use std::{ use conduwuit::{ utils::{MutexMap, TryFutureExtExt}, - Err, PduEvent, Result, Server, + Err, PduEvent, Result, RoomVersion, Server, }; use futures::TryFutureExt; use ruma::{ - events::room::create::RoomCreateEventContent, state_res::RoomVersion, OwnedEventId, - OwnedRoomId, RoomId, RoomVersionId, + events::room::create::RoomCreateEventContent, OwnedEventId, OwnedRoomId, RoomId, + RoomVersionId, }; use crate::{globals, rooms, sending, server_keys, Dep}; diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index eb9ca01f..28011a1b 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -5,15 +5,14 @@ use std::{ }; use conduwuit::{ - err, implement, trace, + err, implement, + state_res::{self, StateMap}, + trace, utils::stream::{automatic_width, IterStream, ReadyExt, TryWidebandExt, WidebandExt}, Error, Result, }; use futures::{future::try_join, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; -use ruma::{ - state_res::{self, StateMap}, - OwnedEventId, RoomId, RoomVersionId, -}; +use ruma::{OwnedEventId, RoomId, RoomVersionId}; use crate::rooms::state_compressor::CompressedState; diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 7bf3b8f8..843b2af9 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -8,10 +8,10 @@ use std::{ use conduwuit::{ debug, err, implement, trace, utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryWidebandExt}, - PduEvent, Result, + PduEvent, 
Result, StateMap, }; use futures::{future::try_join, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; -use ruma::{state_res::StateMap, OwnedEventId, RoomId, RoomVersionId}; +use ruma::{OwnedEventId, RoomId, RoomVersionId}; use crate::rooms::short::ShortStateHash; diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index b33b0388..f319ba48 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -1,16 +1,12 @@ use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::Instant}; use conduwuit::{ - debug, debug_info, err, implement, trace, + debug, debug_info, err, implement, state_res, trace, utils::stream::{BroadbandExt, ReadyExt}, - warn, Err, PduEvent, Result, + warn, Err, EventTypeExt, PduEvent, Result, }; use futures::{future::ready, FutureExt, StreamExt}; -use ruma::{ - events::StateEventType, - state_res::{self, EventTypeExt}, - CanonicalJsonValue, RoomId, ServerName, -}; +use ruma::{events::StateEventType, CanonicalJsonValue, RoomId, ServerName}; use super::{get_room_version_id, to_room_version}; use crate::rooms::{ diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index de90a89c..d538de3c 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -3,6 +3,7 @@ use std::{collections::HashMap, fmt::Write, iter::once, sync::Arc}; use conduwuit::{ err, result::FlatOk, + state_res::{self, StateMap}, utils::{ calculate_hash, stream::{BroadbandExt, TryIgnore}, @@ -20,7 +21,6 @@ use ruma::{ AnyStrippedStateEvent, StateEventType, TimelineEventType, }, serde::Raw, - state_res::{self, StateMap}, EventId, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, UserId, }; diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index a7edd4a4..d6154121 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -12,6 +12,7 @@ use std::{ use conduwuit::{ at, debug, debug_warn, err, error, implement, info, pdu::{gen_event_id, EventHash, PduBuilder, PduCount, PduEvent}, + state_res::{self, Event, RoomVersion}, utils::{ self, future::TryExtExt, stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt, }, @@ -36,7 +37,6 @@ use ruma::{ GlobalAccountDataEventType, StateEventType, TimelineEventType, }, push::{Action, Ruleset, Tweak}, - state_res::{self, Event, RoomVersion}, uint, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, RoomVersionId, ServerName, UserId, }; From f2ca670c3b0858675312be60dcfb971384ce1244 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 8 Feb 2025 01:58:13 +0000 Subject: [PATCH 178/328] optimize further into state-res with SmallString triage and de-lints for state-res. 
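State keys are short (usually "" or a target user ID), so StateMap now keys on the inline StateKey type instead of String. A minimal sketch of the resulting call shape, assuming the conduwuit::{EventTypeExt, StateKey} re-exports used in the hunks below; the user ID is illustrative only:

use conduwuit::{EventTypeExt, StateKey};
use ruma::events::StateEventType;

// Sketch only: build the (StateEventType, StateKey) pairs the resolver's
// StateMap now expects; typical state keys are "" or a user ID.
fn example_keys() -> [(StateEventType, StateKey); 2] {
    [
        // The common case: an empty state key.
        StateEventType::RoomPowerLevels.with_state_key(""),
        // Membership events key on the target user ID.
        StateEventType::RoomMember.with_state_key("@alice:example.org"),
    ]
}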
Signed-off-by: Jason Volk --- Cargo.lock | 1 + Cargo.toml | 4 + src/api/client/membership.rs | 8 +- src/api/client/sync/v4.rs | 15 +- src/api/client/sync/v5.rs | 15 +- src/core/Cargo.toml | 3 + src/core/state_res/event_auth.rs | 142 ++++++++++-------- src/core/state_res/mod.rs | 89 ++++++----- src/core/state_res/room_version.rs | 1 + src/core/state_res/test_utils.rs | 43 +++--- .../rooms/event_handler/handle_outlier_pdu.rs | 2 +- .../rooms/event_handler/resolve_state.rs | 1 - .../rooms/event_handler/state_at_incoming.rs | 1 - .../event_handler/upgrade_outlier_pdu.rs | 10 +- src/service/rooms/timeline/mod.rs | 2 +- 15 files changed, 192 insertions(+), 145 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5981a2a6..4441779e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -810,6 +810,7 @@ dependencies = [ "libc", "libloading", "log", + "maplit", "nix", "num-traits", "rand", diff --git a/Cargo.toml b/Cargo.toml index d8f34544..a17aa4d6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -379,6 +379,7 @@ features = [ "unstable-msc4203", # sending to-device events to appservices "unstable-msc4210", # remove legacy mentions "unstable-extensible-events", + "unstable-pdu", ] [workspace.dependencies.rust-rocksdb] @@ -527,6 +528,9 @@ features = ["std"] version = "0.3.2" features = ["std"] +[workspace.dependencies.maplit] +version = "1.0.2" + # # Patches # diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 1045b014..6c970665 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -14,7 +14,7 @@ use conduwuit::{ result::FlatOk, state_res, trace, utils::{self, shuffle, IterStream, ReadyExt}, - warn, Err, PduEvent, Result, + warn, Err, PduEvent, Result, StateKey, }; use futures::{join, FutureExt, StreamExt, TryFutureExt}; use ruma::{ @@ -1151,8 +1151,8 @@ async fn join_room_by_id_helper_remote( debug!("Running send_join auth check"); let fetch_state = &state; - let state_fetch = |k: &'static StateEventType, s: String| async move { - let shortstatekey = services.rooms.short.get_shortstatekey(k, &s).await.ok()?; + let state_fetch = |k: StateEventType, s: StateKey| async move { + let shortstatekey = services.rooms.short.get_shortstatekey(&k, &s).await.ok()?; let event_id = fetch_state.get(&shortstatekey)?; services.rooms.timeline.get_pdu(event_id).await.ok() @@ -1162,7 +1162,7 @@ async fn join_room_by_id_helper_remote( &state_res::RoomVersion::new(&room_version_id)?, &parsed_join_pdu, None, // TODO: third party invite - |k, s| state_fetch(k, s.to_owned()), + |k, s| state_fetch(k.clone(), s.into()), ) .await .map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?; diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 4e474ef3..13f832b2 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -395,9 +395,12 @@ pub(crate) async fn sync_events_v4_route( .map_or(10, usize_from_u64_truncated) .min(100); - todo_room - .0 - .extend(list.room_details.required_state.iter().cloned()); + todo_room.0.extend( + list.room_details + .required_state + .iter() + .map(|(ty, sk)| (ty.clone(), sk.as_str().into())), + ); todo_room.1 = todo_room.1.max(limit); // 0 means unknown because it got out of date @@ -449,7 +452,11 @@ pub(crate) async fn sync_events_v4_route( .map_or(10, usize_from_u64_truncated) .min(100); - todo_room.0.extend(room.required_state.iter().cloned()); + todo_room.0.extend( + room.required_state + .iter() + .map(|(ty, sk)| (ty.clone(), sk.as_str().into())), + ); todo_room.1 = todo_room.1.max(limit); // 0 means 
unknown because it got out of date todo_room.2 = todo_room.2.min( diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index 63731688..cda6c041 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -223,7 +223,11 @@ async fn fetch_subscriptions( let limit: UInt = room.timeline_limit; - todo_room.0.extend(room.required_state.iter().cloned()); + todo_room.0.extend( + room.required_state + .iter() + .map(|(ty, sk)| (ty.clone(), sk.as_str().into())), + ); todo_room.1 = todo_room.1.max(usize_from_ruma(limit)); // 0 means unknown because it got out of date todo_room.2 = todo_room.2.min( @@ -303,9 +307,12 @@ async fn handle_lists<'a>( let limit: usize = usize_from_ruma(list.room_details.timeline_limit).min(100); - todo_room - .0 - .extend(list.room_details.required_state.iter().cloned()); + todo_room.0.extend( + list.room_details + .required_state + .iter() + .map(|(ty, sk)| (ty.clone(), sk.as_str().into())), + ); todo_room.1 = todo_room.1.max(limit); // 0 means unknown because it got out of date diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index d4b0c83b..b40dd3ad 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -116,5 +116,8 @@ nix.workspace = true hardened_malloc-rs.workspace = true hardened_malloc-rs.optional = true +[dev-dependencies] +maplit.workspace = true + [lints] workspace = true diff --git a/src/core/state_res/event_auth.rs b/src/core/state_res/event_auth.rs index 72a0216c..df2f8b36 100644 --- a/src/core/state_res/event_auth.rs +++ b/src/core/state_res/event_auth.rs @@ -21,7 +21,6 @@ use serde::{ Deserialize, }; use serde_json::{from_str as from_json_str, value::RawValue as RawJsonValue}; -use tracing::{debug, error, instrument, trace, warn}; use super::{ power_levels::{ @@ -29,8 +28,9 @@ use super::{ deserialize_power_levels_content_invite, deserialize_power_levels_content_redact, }, room_version::RoomVersion, - Error, Event, Result, StateEventType, TimelineEventType, + Error, Event, Result, StateEventType, StateKey, TimelineEventType, }; +use crate::{debug, error, trace, warn}; // FIXME: field extracting could be bundled for `content` #[derive(Deserialize)] @@ -56,15 +56,15 @@ pub fn auth_types_for_event( sender: &UserId, state_key: Option<&str>, content: &RawJsonValue, -) -> serde_json::Result> { +) -> serde_json::Result> { if kind == &TimelineEventType::RoomCreate { return Ok(vec![]); } let mut auth_types = vec![ - (StateEventType::RoomPowerLevels, String::new()), - (StateEventType::RoomMember, sender.to_string()), - (StateEventType::RoomCreate, String::new()), + (StateEventType::RoomPowerLevels, StateKey::new()), + (StateEventType::RoomMember, sender.as_str().into()), + (StateEventType::RoomCreate, StateKey::new()), ]; if kind == &TimelineEventType::RoomMember { @@ -82,7 +82,7 @@ pub fn auth_types_for_event( if [MembershipState::Join, MembershipState::Invite, MembershipState::Knock] .contains(&membership) { - let key = (StateEventType::RoomJoinRules, String::new()); + let key = (StateEventType::RoomJoinRules, StateKey::new()); if !auth_types.contains(&key) { auth_types.push(key); } @@ -91,21 +91,22 @@ pub fn auth_types_for_event( .join_authorised_via_users_server .map(|m| m.deserialize()) { - let key = (StateEventType::RoomMember, u.to_string()); + let key = (StateEventType::RoomMember, u.as_str().into()); if !auth_types.contains(&key) { auth_types.push(key); } } } - let key = (StateEventType::RoomMember, state_key.to_owned()); + let key = (StateEventType::RoomMember, state_key.into()); if !auth_types.contains(&key) { 
auth_types.push(key); } if membership == MembershipState::Invite { if let Some(Ok(t_id)) = content.third_party_invite.map(|t| t.deserialize()) { - let key = (StateEventType::RoomThirdPartyInvite, t_id.signed.token); + let key = + (StateEventType::RoomThirdPartyInvite, t_id.signed.token.into()); if !auth_types.contains(&key) { auth_types.push(key); } @@ -128,7 +129,13 @@ pub fn auth_types_for_event( /// The `fetch_state` closure should gather state from a state snapshot. We need /// to know if the event passes auth against some state not a recursive /// collection of auth_events fields. -#[instrument(level = "debug", skip_all, fields(event_id = incoming_event.event_id().borrow().as_str()))] +#[tracing::instrument( + level = "debug", + skip_all, + fields( + event_id = incoming_event.event_id().borrow().as_str() + ) +)] pub async fn auth_check( room_version: &RoomVersion, incoming_event: &Incoming, @@ -136,10 +143,10 @@ pub async fn auth_check( fetch_state: F, ) -> Result where - F: Fn(&'static StateEventType, &str) -> Fut, + F: Fn(&StateEventType, &str) -> Fut + Send, Fut: Future> + Send, Fetched: Event + Send, - Incoming: Event + Send, + Incoming: Event + Send + Sync, { debug!( "auth_check beginning for {} ({})", @@ -262,6 +269,7 @@ where // sender domain of the event does not match the sender domain of the create // event, reject. #[derive(Deserialize)] + #[allow(clippy::items_after_statements)] struct RoomCreateContentFederate { #[serde(rename = "m.federate", default = "ruma::serde::default_true")] federate: bool, @@ -354,7 +362,7 @@ where join_rules_event.as_ref(), user_for_join_auth.as_deref(), &user_for_join_auth_membership, - room_create_event, + &room_create_event, )? { return Ok(false); } @@ -364,6 +372,7 @@ where } // If the sender's current membership state is not join, reject + #[allow(clippy::manual_let_else)] let sender_member_event = match sender_member_event { | Some(mem) => mem, | None => { @@ -498,19 +507,20 @@ where /// This is generated by calling `auth_types_for_event` with the membership /// event and the current State. 
#[allow(clippy::too_many_arguments)] +#[allow(clippy::cognitive_complexity)] fn valid_membership_change( room_version: &RoomVersion, target_user: &UserId, - target_user_membership_event: Option, + target_user_membership_event: Option<&impl Event>, sender: &UserId, - sender_membership_event: Option, + sender_membership_event: Option<&impl Event>, current_event: impl Event, - current_third_party_invite: Option, - power_levels_event: Option, - join_rules_event: Option, + current_third_party_invite: Option<&impl Event>, + power_levels_event: Option<&impl Event>, + join_rules_event: Option<&impl Event>, user_for_join_auth: Option<&UserId>, user_for_join_auth_membership: &MembershipState, - create_room: impl Event, + create_room: &impl Event, ) -> Result { #[derive(Deserialize)] struct GetThirdPartyInvite { @@ -856,6 +866,7 @@ fn check_power_levels( // and integers here debug!("validation of power event finished"); + #[allow(clippy::manual_let_else)] let current_state = match previous_power_event { | Some(current_state) => current_state, // If there is no previous m.room.power_levels event in the room, allow @@ -1054,6 +1065,7 @@ fn verify_third_party_invite( // If there is no m.room.third_party_invite event in the current room state with // state_key matching token, reject + #[allow(clippy::manual_let_else)] let current_tpid = match current_third_party_invite { | Some(id) => id, | None => return false, @@ -1069,12 +1081,14 @@ fn verify_third_party_invite( // If any signature in signed matches any public key in the // m.room.third_party_invite event, allow + #[allow(clippy::manual_let_else)] let tpid_ev = match from_json_str::(current_tpid.content().get()) { | Ok(ev) => ev, | Err(_) => return false, }; + #[allow(clippy::manual_let_else)] let decoded_invite_token = match Base64::parse(&tp_id.signed.token) { | Ok(tok) => tok, // FIXME: Log a warning? 
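Reviewer note: a minimal sketch of the reworked auth_types_for_event call shape, matching the signature in the hunk above. The conduwuit::state_res re-export path and the example IDs are assumptions, not taken from this patch:

use conduwuit::state_res::auth_types_for_event;
use ruma::{events::TimelineEventType, user_id};
use serde_json::{json, value::to_raw_value};

// Sketch: the auth state keys needed for a membership event, now returned as
// (StateEventType, StateKey) pairs instead of (StateEventType, String).
fn member_auth_keys() -> serde_json::Result<()> {
    let content = to_raw_value(&json!({ "membership": "join" }))?;
    let auth_types = auth_types_for_event(
        &TimelineEventType::RoomMember,
        user_id!("@alice:example.org"), // sender
        Some("@bob:example.org"),       // target user's state key
        &content,
    )?;
    // Expected: power levels, create, join rules, sender and target membership.
    assert_eq!(auth_types.len(), 5);
    Ok(())
}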
@@ -1096,7 +1110,7 @@ fn verify_third_party_invite( mod tests { use std::sync::Arc; - use ruma_events::{ + use ruma::events::{ room::{ join_rules::{ AllowRule, JoinRule, Restricted, RoomJoinRulesEventContent, RoomMembership, @@ -1107,7 +1121,7 @@ mod tests { }; use serde_json::value::to_raw_value as to_raw_json_value; - use crate::{ + use crate::state_res::{ event_auth::valid_membership_change, test_utils::{ alice, charlie, ella, event_id, member_content_ban, member_content_join, room_id, @@ -1145,16 +1159,16 @@ mod tests { assert!(valid_membership_change( &RoomVersion::V6, target_user, - fetch_state(StateEventType::RoomMember, target_user.to_string()), + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), sender, - fetch_state(StateEventType::RoomMember, sender.to_string()), + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), &requester, - None::, - fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), - fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), None, &MembershipState::Leave, - fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), ) .unwrap()); } @@ -1188,16 +1202,16 @@ mod tests { assert!(!valid_membership_change( &RoomVersion::V6, target_user, - fetch_state(StateEventType::RoomMember, target_user.to_string()), + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), sender, - fetch_state(StateEventType::RoomMember, sender.to_string()), + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), &requester, - None::, - fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), - fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), None, &MembershipState::Leave, - fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), ) .unwrap()); } @@ -1231,16 +1245,16 @@ mod tests { assert!(valid_membership_change( &RoomVersion::V6, target_user, - fetch_state(StateEventType::RoomMember, target_user.to_string()), + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), sender, - fetch_state(StateEventType::RoomMember, sender.to_string()), + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), &requester, - None::, - fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), - fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), None, &MembershipState::Leave, - fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), ) .unwrap()); } @@ -1274,16 +1288,16 @@ mod tests { assert!(!valid_membership_change( &RoomVersion::V6, target_user, - fetch_state(StateEventType::RoomMember, target_user.to_string()), + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), sender, - fetch_state(StateEventType::RoomMember, sender.to_string()), + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), &requester, - None::, - 
fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), - fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), None, &MembershipState::Leave, - fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), ) .unwrap()); } @@ -1334,32 +1348,32 @@ mod tests { assert!(valid_membership_change( &RoomVersion::V9, target_user, - fetch_state(StateEventType::RoomMember, target_user.to_string()), + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), sender, - fetch_state(StateEventType::RoomMember, sender.to_string()), + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), &requester, - None::, - fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), - fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), Some(alice()), &MembershipState::Join, - fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), ) .unwrap()); assert!(!valid_membership_change( &RoomVersion::V9, target_user, - fetch_state(StateEventType::RoomMember, target_user.to_string()), + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), sender, - fetch_state(StateEventType::RoomMember, sender.to_string()), + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), &requester, - None::, - fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), - fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), Some(ella()), &MembershipState::Leave, - fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), ) .unwrap()); } @@ -1402,16 +1416,16 @@ mod tests { assert!(valid_membership_change( &RoomVersion::V7, target_user, - fetch_state(StateEventType::RoomMember, target_user.to_string()), + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), sender, - fetch_state(StateEventType::RoomMember, sender.to_string()), + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), &requester, - None::, - fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), - fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), None, &MembershipState::Leave, - fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), ) .unwrap()); } diff --git a/src/core/state_res/mod.rs b/src/core/state_res/mod.rs index e4054377..19ea3cc0 100644 --- a/src/core/state_res/mod.rs +++ b/src/core/state_res/mod.rs @@ -1,3 +1,5 @@ +#![cfg_attr(test, allow(warnings))] + pub(crate) mod error; pub mod event_auth; mod power_levels; @@ -12,7 +14,7 @@ use std::{ cmp::{Ordering, Reverse}, collections::{BinaryHeap, HashMap, HashSet}, fmt::Debug, - hash::Hash, + hash::{BuildHasher, Hash}, }; use futures::{future, stream, Future, FutureExt, StreamExt, 
TryFutureExt, TryStreamExt}; @@ -32,13 +34,13 @@ pub use self::{ room_version::RoomVersion, state_event::Event, }; -use crate::{debug, trace, warn}; +use crate::{debug, pdu::StateKey, trace, warn}; /// A mapping of event type and state_key to some value `T`, usually an /// `EventId`. pub type StateMap = HashMap; pub type StateMapItem = (TypeStateKey, T); -pub type TypeStateKey = (StateEventType, String); +pub type TypeStateKey = (StateEventType, StateKey); type Result = crate::Result; @@ -68,10 +70,10 @@ type Result = crate::Result; /// event is part of the same room. //#[tracing::instrument(level = "debug", skip(state_sets, auth_chain_sets, //#[tracing::instrument(level event_fetch))] -pub async fn resolve<'a, E, SetIter, Fetch, FetchFut, Exists, ExistsFut>( +pub async fn resolve<'a, E, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, ExistsFut>( room_version: &RoomVersionId, - state_sets: impl IntoIterator + Send, - auth_chain_sets: &'a [HashSet], + state_sets: Sets, + auth_chain_sets: &'a [HashSet], event_fetch: &Fetch, event_exists: &Exists, parallel_fetches: usize, @@ -81,7 +83,9 @@ where FetchFut: Future> + Send, Exists: Fn(E::Id) -> ExistsFut + Sync, ExistsFut: Future + Send, + Sets: IntoIterator + Send, SetIter: Iterator> + Clone + Send, + Hasher: BuildHasher + Send + Sync, E: Event + Clone + Send + Sync, E::Id: Borrow + Send + Sync, for<'b> &'b E: Send, @@ -178,7 +182,7 @@ where trace!(list = ?events_to_resolve, "events left to resolve"); // This "epochs" power level event - let power_event = resolved_control.get(&(StateEventType::RoomPowerLevels, String::new())); + let power_event = resolved_control.get(&(StateEventType::RoomPowerLevels, StateKey::new())); debug!(event_id = ?power_event, "power event"); @@ -222,16 +226,17 @@ fn separate<'a, Id>( where Id: Clone + Eq + Hash + 'a, { - let mut state_set_count = 0_usize; + let mut state_set_count: usize = 0; let mut occurrences = HashMap::<_, HashMap<_, _>>::new(); - let state_sets_iter = state_sets_iter.inspect(|_| state_set_count += 1); + let state_sets_iter = + state_sets_iter.inspect(|_| state_set_count = state_set_count.saturating_add(1)); for (k, v) in state_sets_iter.flatten() { occurrences .entry(k) .or_default() .entry(v) - .and_modify(|x| *x += 1) + .and_modify(|x: &mut usize| *x = x.saturating_add(1)) .or_insert(1); } @@ -246,7 +251,7 @@ where conflicted_state .entry((k.0.clone(), k.1.clone())) .and_modify(|x: &mut Vec<_>| x.push(id.clone())) - .or_insert(vec![id.clone()]); + .or_insert_with(|| vec![id.clone()]); } } } @@ -255,9 +260,13 @@ where } /// Returns a Vec of deduped EventIds that appear in some chains but not others. -fn get_auth_chain_diff(auth_chain_sets: &[HashSet]) -> impl Iterator + Send +#[allow(clippy::arithmetic_side_effects)] +fn get_auth_chain_diff( + auth_chain_sets: &[HashSet], +) -> impl Iterator + Send where Id: Clone + Eq + Hash + Send, + Hasher: BuildHasher + Send + Sync, { let num_sets = auth_chain_sets.len(); let mut id_counts: HashMap = HashMap::new(); @@ -288,7 +297,7 @@ async fn reverse_topological_power_sort( where F: Fn(E::Id) -> Fut + Sync, Fut: Future> + Send, - E: Event + Send, + E: Event + Send + Sync, E::Id: Borrow + Send + Sync, { debug!("reverse topological sort of power events"); @@ -337,14 +346,15 @@ where /// `key_fn` is used as to obtain the power level and age of an event for /// breaking ties (together with the event ID). 
#[tracing::instrument(level = "debug", skip_all)] -pub async fn lexicographical_topological_sort( - graph: &HashMap>, +pub async fn lexicographical_topological_sort( + graph: &HashMap>, key_fn: &F, ) -> Result> where F: Fn(Id) -> Fut + Sync, Fut: Future> + Send, - Id: Borrow + Clone + Eq + Hash + Ord + Send, + Id: Borrow + Clone + Eq + Hash + Ord + Send + Sync, + Hasher: BuildHasher + Default + Clone + Send + Sync, { #[derive(PartialEq, Eq)] struct TieBreaker<'a, Id> { @@ -395,7 +405,7 @@ where // The number of events that depend on the given event (the EventId key) // How many events reference this event in the DAG as a parent - let mut reverse_graph: HashMap<_, HashSet<_>> = HashMap::new(); + let mut reverse_graph: HashMap<_, HashSet<_, Hasher>> = HashMap::new(); // Vec of nodes that have zero out degree, least recent events. let mut zero_outdegree = Vec::new(); @@ -727,8 +737,8 @@ async fn get_mainline_depth( where F: Fn(E::Id) -> Fut + Sync, Fut: Future> + Send, - E: Event + Send, - E::Id: Borrow + Send, + E: Event + Send + Sync, + E::Id: Borrow + Send + Sync, { while let Some(sort_ev) = event { debug!(event_id = sort_ev.event_id().borrow().as_str(), "mainline"); @@ -758,10 +768,10 @@ async fn add_event_and_auth_chain_to_graph( auth_diff: &HashSet, fetch_event: &F, ) where - F: Fn(E::Id) -> Fut, + F: Fn(E::Id) -> Fut + Sync, Fut: Future> + Send, - E: Event + Send, - E::Id: Borrow + Clone + Send, + E: Event + Send + Sync, + E::Id: Borrow + Clone + Send + Sync, { let mut state = vec![event_id]; while let Some(eid) = state.pop() { @@ -788,7 +798,7 @@ where F: Fn(E::Id) -> Fut + Sync, Fut: Future> + Send, E: Event + Send, - E::Id: Borrow + Send, + E::Id: Borrow + Send + Sync, { match fetch(event_id.clone()).await.as_ref() { | Some(state) => is_power_event(state), @@ -820,18 +830,18 @@ fn is_power_event(event: impl Event) -> bool { /// Convenience trait for adding event type plus state key to state maps. 
pub trait EventTypeExt { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String); + fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey); } impl EventTypeExt for StateEventType { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey) { (self, state_key.into()) } } impl EventTypeExt for TimelineEventType { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { - (self.to_string().into(), state_key.into()) + fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey) { + (self.into(), state_key.into()) } } @@ -839,7 +849,7 @@ impl EventTypeExt for &T where T: EventTypeExt + Clone, { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey) { self.to_owned().with_state_key(state_key) } } @@ -858,13 +868,11 @@ mod tests { room::join_rules::{JoinRule, RoomJoinRulesEventContent}, StateEventType, TimelineEventType, }, - int, uint, + int, uint, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomVersionId, }; - use ruma_common::{MilliSecondsSinceUnixEpoch, OwnedEventId, RoomVersionId}; use serde_json::{json, value::to_raw_value as to_raw_json_value}; - use tracing::debug; - use crate::{ + use super::{ is_power_event, room_version::RoomVersion, test_utils::{ @@ -874,6 +882,7 @@ mod tests { }, Event, EventTypeExt, StateMap, }; + use crate::debug; async fn test_event_sort() { use futures::future::ready; @@ -898,11 +907,11 @@ mod tests { let fetcher = |id| ready(events.get(&id).cloned()); let sorted_power_events = - crate::reverse_topological_power_sort(power_events, &auth_chain, &fetcher, 1) + super::reverse_topological_power_sort(power_events, &auth_chain, &fetcher, 1) .await .unwrap(); - let resolved_power = crate::iterative_auth_check( + let resolved_power = super::iterative_auth_check( &RoomVersion::V6, sorted_power_events.iter(), HashMap::new(), // unconflicted events @@ -918,10 +927,10 @@ mod tests { events_to_sort.shuffle(&mut rand::thread_rng()); let power_level = resolved_power - .get(&(StateEventType::RoomPowerLevels, "".to_owned())) + .get(&(StateEventType::RoomPowerLevels, "".into())) .cloned(); - let sorted_event_ids = crate::mainline_sort(&events_to_sort, power_level, &fetcher, 1) + let sorted_event_ids = super::mainline_sort(&events_to_sort, power_level, &fetcher, 1) .await .unwrap(); @@ -1302,7 +1311,7 @@ mod tests { }) .collect(); - let resolved = match crate::resolve( + let resolved = match super::resolve( &RoomVersionId::V2, &state_sets, &auth_chain, @@ -1333,7 +1342,7 @@ mod tests { event_id("p") => hashset![event_id("o")], }; - let res = crate::lexicographical_topological_sort(&graph, &|_id| async { + let res = super::lexicographical_topological_sort(&graph, &|_id| async { Ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) }) .await @@ -1421,7 +1430,7 @@ mod tests { let fetcher = |id: ::Id| ready(ev_map.get(&id).cloned()); let exists = |id: ::Id| ready(ev_map.get(&id).is_some()); - let resolved = match crate::resolve( + let resolved = match super::resolve( &RoomVersionId::V6, &state_sets, &auth_chain, @@ -1552,7 +1561,7 @@ mod tests { #[allow(unused_mut)] let mut x = StateMap::new(); $( - x.insert(($kind, $key.to_owned()), $id); + x.insert(($kind, $key.into()), $id); )* x }}; diff --git a/src/core/state_res/room_version.rs b/src/core/state_res/room_version.rs index e1b0afe1..8dfd6cde 100644 --- 
a/src/core/state_res/room_version.rs +++ b/src/core/state_res/room_version.rs @@ -32,6 +32,7 @@ pub enum StateResolutionVersion { } #[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] +#[allow(clippy::struct_excessive_bools)] pub struct RoomVersion { /// The stability of this room. pub disposition: RoomDisposition, diff --git a/src/core/state_res/test_utils.rs b/src/core/state_res/test_utils.rs index 7954b28d..9c2b151f 100644 --- a/src/core/state_res/test_utils.rs +++ b/src/core/state_res/test_utils.rs @@ -7,28 +7,28 @@ use std::{ }, }; -use futures_util::future::ready; -use js_int::{int, uint}; -use ruma_common::{ - event_id, room_id, user_id, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, - RoomVersionId, ServerSignatures, UserId, -}; -use ruma_events::{ - pdu::{EventHash, Pdu, RoomV3Pdu}, - room::{ - join_rules::{JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, +use futures::future::ready; +use ruma::{ + event_id, + events::{ + pdu::{EventHash, Pdu, RoomV3Pdu}, + room::{ + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + }, + TimelineEventType, }, - TimelineEventType, + int, room_id, uint, user_id, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, + RoomVersionId, ServerSignatures, UserId, }; use serde_json::{ json, value::{to_raw_value as to_raw_json_value, RawValue as RawJsonValue}, }; -use tracing::info; pub(crate) use self::event::PduEvent; -use crate::{auth_types_for_event, Error, Event, EventTypeExt, Result, StateMap}; +use super::auth_types_for_event; +use crate::{info, Event, EventTypeExt, Result, StateMap}; static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); @@ -88,7 +88,7 @@ pub(crate) async fn do_check( // Resolve the current state and add it to the state_at_event map then continue // on in "time" - for node in crate::lexicographical_topological_sort(&graph, &|_id| async { + for node in super::lexicographical_topological_sort(&graph, &|_id| async { Ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) }) .await @@ -135,7 +135,7 @@ pub(crate) async fn do_check( let event_map = &event_map; let fetch = |id: ::Id| ready(event_map.get(&id).cloned()); let exists = |id: ::Id| ready(event_map.get(&id).is_some()); - let resolved = crate::resolve( + let resolved = super::resolve( &RoomVersionId::V6, state_sets, &auth_chain_sets, @@ -223,7 +223,7 @@ pub(crate) async fn do_check( // Filter out the dummy messages events. // These act as points in time where there should be a known state to // test against. - && **k != ("m.room.message".into(), "dummy".to_owned()) + && **k != ("m.room.message".into(), "dummy".into()) }) .map(|(k, v)| (k.clone(), v.clone())) .collect::>(); @@ -239,7 +239,8 @@ impl TestStore { self.0 .get(event_id) .cloned() - .ok_or_else(|| Error::NotFound(format!("{event_id} not found"))) + .ok_or_else(|| super::Error::NotFound(format!("{event_id} not found"))) + .map_err(Into::into) } /// Returns a Vec of the related auth events to the given `event`. 
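Reviewer note: the test harness above drives the sort with a constant tie-break key. A minimal standalone sketch of that call, assuming lexicographical_topological_sort remains reachable under conduwuit::state_res as in the hunks above:

use std::collections::{HashMap, HashSet};

use conduwuit::state_res::lexicographical_topological_sort;
use ruma::{int, uint, MilliSecondsSinceUnixEpoch, OwnedEventId};

// Sketch: sort a two-event dependency graph the way do_check does, with a
// constant (power level, origin_server_ts) key so ordering falls back to IDs.
async fn sort_pair(newer: OwnedEventId, older: OwnedEventId) -> Vec<OwnedEventId> {
    let mut graph: HashMap<OwnedEventId, HashSet<OwnedEventId>> = HashMap::new();
    graph.entry(newer.clone()).or_default().insert(older.clone());
    graph.entry(older).or_default();

    lexicographical_topological_sort(&graph, &|_id| async {
        Ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0))))
    })
    .await
    .expect("graph is acyclic")
}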
@@ -582,8 +583,10 @@ pub(crate) fn INITIAL_EDGES() -> Vec { } pub(crate) mod event { - use ruma_common::{MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId}; - use ruma_events::{pdu::Pdu, TimelineEventType}; + use ruma::{ + events::{pdu::Pdu, TimelineEventType}, + MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, + }; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue as RawJsonValue; diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index 3cc15fc4..e628c77a 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -133,7 +133,7 @@ pub(super) async fn handle_outlier_pdu<'a>( )); } - let state_fetch = |ty: &'static StateEventType, sk: &str| { + let state_fetch = |ty: &StateEventType, sk: &str| { let key = (ty.to_owned(), sk.into()); ready(auth_events.get(&key)) }; diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 28011a1b..37d47d47 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -63,7 +63,6 @@ pub async fn resolve_state( .multi_get_statekey_from_short(shortstatekeys) .zip(event_ids) .ready_filter_map(|(ty_sk, id)| Some((ty_sk.ok()?, id))) - .map(|((ty, sk), id)| ((ty, sk.as_str().to_owned()), id)) .collect() }) .map(Ok::<_, Error>) diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 843b2af9..2eb6013a 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -172,7 +172,6 @@ async fn state_at_incoming_fork( .short .get_statekey_from_short(*k) .map_ok(|(ty, sk)| ((ty, sk), id.clone())) - .map_ok(|((ty, sk), id)| ((ty, sk.as_str().to_owned()), id)) }) .ready_filter_map(Result::ok) .collect() diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index f319ba48..385d2142 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -3,7 +3,7 @@ use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::In use conduwuit::{ debug, debug_info, err, implement, state_res, trace, utils::stream::{BroadbandExt, ReadyExt}, - warn, Err, EventTypeExt, PduEvent, Result, + warn, Err, EventTypeExt, PduEvent, Result, StateKey, }; use futures::{future::ready, FutureExt, StreamExt}; use ruma::{events::StateEventType, CanonicalJsonValue, RoomId, ServerName}; @@ -71,8 +71,8 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( debug!("Performing auth check"); // 11. 
Check the auth of the event passes based on the state of the event let state_fetch_state = &state_at_incoming_event; - let state_fetch = |k: &'static StateEventType, s: String| async move { - let shortstatekey = self.services.short.get_shortstatekey(k, &s).await.ok()?; + let state_fetch = |k: StateEventType, s: StateKey| async move { + let shortstatekey = self.services.short.get_shortstatekey(&k, &s).await.ok()?; let event_id = state_fetch_state.get(&shortstatekey)?; self.services.timeline.get_pdu(event_id).await.ok() @@ -82,7 +82,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( &room_version, &incoming_pdu, None, // TODO: third party invite - |k, s| state_fetch(k, s.to_owned()), + |ty, sk| state_fetch(ty.clone(), sk.into()), ) .await .map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?; @@ -104,7 +104,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( ) .await?; - let state_fetch = |k: &'static StateEventType, s: &str| { + let state_fetch = |k: &StateEventType, s: &str| { let key = k.with_state_key(s); ready(auth_events.get(&key).cloned()) }; diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index d6154121..9d6ee982 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -747,7 +747,7 @@ impl Service { }; let auth_fetch = |k: &StateEventType, s: &str| { - let key = (k.clone(), s.to_owned()); + let key = (k.clone(), s.into()); ready(auth_events.get(&key)) }; From 4de0dafdf11acb71d28e6891c9b740b66d448934 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 8 Feb 2025 06:24:25 +0000 Subject: [PATCH 179/328] bump ruma Signed-off-by: Jason Volk --- Cargo.lock | 360 +++++++++++++++++++++++++++-------------------------- Cargo.toml | 2 +- 2 files changed, 183 insertions(+), 179 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4441779e..efba2e07 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -79,7 +79,7 @@ checksum = "0ae92a5119aa49cdbcf6b9f893fe4e1d98b04ccbf82ee0584ad948a44a734dea" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -161,18 +161,18 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] name = "async-trait" -version = "0.1.85" +version = "0.1.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" +checksum = "644dd749086bf3771a2fbc5f256fdb982d53f011c7d5d560304eafeecebce79d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -221,9 +221,9 @@ dependencies = [ [[package]] name = "aws-lc-rs" -version = "1.12.1" +version = "1.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ea835662a0af02443aa1396d39be523bbf8f11ee6fad20329607c480bea48c3" +checksum = "4c2b7ddaa2c56a367ad27a094ad8ef4faacf8a617c2575acb2ba88949df999ca" dependencies = [ "aws-lc-sys", "paste", @@ -232,9 +232,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.25.0" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71b2ddd3ada61a305e1d8bb6c005d1eaa7d14d903681edfc400406d523a9b491" +checksum = "54ac4f13dad353b209b34cbec082338202cbc01c8f00336b55c750c13ac91f8f" dependencies = [ "bindgen", "cc", @@ -427,7 +427,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.96", + "syn 2.0.98", "which", ] @@ -495,9 +495,9 @@ dependencies = [ 
[[package]] name = "brotli-decompressor" -version = "4.0.1" +version = "4.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a45bd2e4095a8b518033b128020dd4a55aab1c0a381ba4404a472630f4bc362" +checksum = "74fa05ad7d803d413eb8380983b092cbbaf9a85f151b871360e7b00cd7060b37" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -511,9 +511,9 @@ checksum = "c360505aed52b7ec96a3636c3f039d99103c37d1d9b4f7a8c743d3ea9ffcd03b" [[package]] name = "bumpalo" -version = "3.16.0" +version = "3.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" +checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" [[package]] name = "bytemuck" @@ -535,9 +535,9 @@ checksum = "8f1fe948ff07f4bd06c30984e69f5b4899c516a3ef74f34df92a2df2ab535495" [[package]] name = "bytes" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" +checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" [[package]] name = "bytesize" @@ -568,9 +568,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.10" +version = "1.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13208fcbb66eaeffe09b99fffbe1af420f00a7b35aa99ad683dfc1aa76145229" +checksum = "755717a7de9ec452bf7f3f1a3099085deabd7f2962b861dae91ecd7a365903d2" dependencies = [ "jobserver", "libc", @@ -639,9 +639,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.26" +version = "4.5.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8eb5e908ef3a6efbe1ed62520fb7287959888c88485abe072543190ecc66783" +checksum = "3e77c3243bd94243c03672cb5154667347c457ca271254724f9f393aee1c05ff" dependencies = [ "clap_builder", "clap_derive", @@ -649,9 +649,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.26" +version = "4.5.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b01801b5fc6a0a232407abc821660c9c6d25a1cafc0d4f85f29fb8d9afc121" +checksum = "1b26884eb4b57140e4d2d93652abfa49498b938b3c9179f9fc487b0acc3edad7" dependencies = [ "anstyle", "clap_lex", @@ -659,14 +659,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.24" +version = "4.5.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" +checksum = "bf4ced95c6f4a675af3da73304b9ac4ed991640c36374e4b46795c49e17cf1ed" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -677,9 +677,9 @@ checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "cmake" -version = "0.1.52" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c682c223677e0e5b6b7f63a64b9351844c3f1b1678a68b7ee617e30fb082620e" +checksum = "e24a03c8b52922d68a1589ad61032f2c1aa5a8158d2aa0d93c6e9534944bbad6" dependencies = [ "cc", ] @@ -863,7 +863,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1030,9 +1030,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" +checksum = 
"59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" dependencies = [ "libc", ] @@ -1177,7 +1177,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" dependencies = [ "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1204,7 +1204,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1273,7 +1273,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1325,7 +1325,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1526,7 +1526,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1578,10 +1578,22 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "wasm-bindgen", ] +[[package]] +name = "getrandom" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.13.3+wasi-0.2.2", + "windows-targets 0.52.6", +] + [[package]] name = "gif" version = "0.13.1" @@ -1616,7 +1628,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.7.0", + "indexmap 2.7.1", "slab", "tokio", "tokio-util", @@ -1708,9 +1720,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hickory-proto" -version = "0.24.2" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447afdcdb8afb9d0a852af6dc65d9b285ce720ed7a59e42a8bf2e931c67bc1b5" +checksum = "2ad3d6d98c648ed628df039541a5577bee1a7c83e9e16fe3dbedeea4cdfeb971" dependencies = [ "async-trait", "cfg-if", @@ -1732,9 +1744,9 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.2" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a2e2aba9c389ce5267d31cf1e4dace82390ae276b0b364ea55630b1fa1b44b4" +checksum = "dcf287bde7b776e85d7188e6e5db7cf410a2f9531fe82817eb87feed034c8d14" dependencies = [ "cfg-if", "futures-util", @@ -1802,7 +1814,7 @@ dependencies = [ "markup5ever", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1850,9 +1862,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.5" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" +checksum = "f2d708df4e7140240a16cd6ab0ab65c972d7433ab77819ea693fde9c43811e2a" [[package]] name = "httpdate" @@ -1868,9 +1880,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "1.5.2" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "256fb8d4bd6413123cc9d91832d78325c48ff41677595be797d90f42969beae0" +checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" dependencies = [ "bytes", "futures-channel", @@ -2054,7 +2066,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] 
[[package]] @@ -2129,9 +2141,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.7.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" +checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" dependencies = [ "equivalent", "hashbrown 0.15.2", @@ -2158,7 +2170,7 @@ checksum = "c34819042dc3d3971c46c2190835914dfbe0c3c13f61449b2997f4e9722dfa60" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2189,9 +2201,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.10.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "itertools" @@ -2300,7 +2312,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2528,7 +2540,7 @@ checksum = "bd2209fff77f705b00c737016a48e73733d7fbccb8b007194db148f03561fb70" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2574,7 +2586,7 @@ checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ "libc", "log", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.52.0", ] @@ -2675,7 +2687,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2739,15 +2751,15 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.20.2" +version = "1.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" +checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e" [[package]] name = "openssl-probe" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "opentelemetry" @@ -2757,7 +2769,7 @@ checksum = "1e32339a5dc40459130b3bd269e9892439f55b33e772d2a9d402a789baaf4e8a" dependencies = [ "futures-core", "futures-sink", - "indexmap 2.7.0", + "indexmap 2.7.1", "js-sys", "once_cell", "pin-project-lite", @@ -2913,7 +2925,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2928,7 +2940,7 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" dependencies = [ - "phf_shared 0.11.3", + "phf_shared", ] [[package]] @@ -2937,18 +2949,8 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a" dependencies = [ - "phf_generator 0.11.3", - "phf_shared 0.11.3", -] - -[[package]] -name = "phf_generator" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d5285893bb5eb82e6aaf5d59ee909a06a16737a8970984dd7746ba9283498d6" -dependencies = [ - "phf_shared 0.10.0", - "rand", + "phf_generator", + "phf_shared", ] [[package]] @@ -2957,46 +2959,37 @@ version = "0.11.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ - "phf_shared 0.11.3", + "phf_shared", "rand", ] -[[package]] -name = "phf_shared" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096" -dependencies = [ - "siphasher 0.3.11", -] - [[package]] name = "phf_shared" version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" dependencies = [ - "siphasher 1.0.1", + "siphasher", ] [[package]] name = "pin-project" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" +checksum = "dfe2e71e1471fe07709406bf725f710b02927c9c54b2b5b2ec0e8087d97c327d" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" +checksum = "f6e859e6e5bd50440ab63c47e3ebabc90f26251f7c73c3d3e837b74a1cc3fa67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -3068,7 +3061,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" dependencies = [ "proc-macro2", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -3097,7 +3090,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "version_check", "yansi", ] @@ -3118,7 +3111,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a65f2e60fbf1063868558d69c6beacf412dc755f9fc020f514b7955fc914fe30" dependencies = [ "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -3141,7 +3134,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -3202,7 +3195,7 @@ dependencies = [ "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "rustls", "socket2", "thiserror 2.0.11", @@ -3217,10 +3210,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ "bytes", - "getrandom", + "getrandom 0.2.15", "rand", "ring", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "rustls", "rustls-pki-types", "slab", @@ -3280,7 +3273,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.15", ] [[package]] @@ -3479,7 +3472,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if", - "getrandom", + "getrandom 0.2.15", "libc", "spin", "untrusted", @@ -3489,7 +3482,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" 
dependencies = [ "assign", "js_int", @@ -3511,7 +3504,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "js_int", "ruma-common", @@ -3523,7 +3516,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "as_variant", "assign", @@ -3546,14 +3539,14 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "as_variant", "base64 0.22.1", "bytes", "form_urlencoded", "http", - "indexmap 2.7.0", + "indexmap 2.7.1", "js_int", "konst", "percent-encoding", @@ -3577,10 +3570,10 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "as_variant", - "indexmap 2.7.0", + "indexmap 2.7.1", "js_int", "js_option", "percent-encoding", @@ -3602,7 +3595,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "bytes", "http", @@ -3620,7 +3613,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3629,7 +3622,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "js_int", "ruma-common", @@ -3639,7 +3632,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = 
[ "cfg-if", "proc-macro-crate", @@ -3647,14 +3640,14 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn 2.0.96", + "syn 2.0.98", "toml", ] [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "js_int", "ruma-common", @@ -3666,7 +3659,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "headers", "http", @@ -3679,7 +3672,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3695,7 +3688,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "futures-util", "js_int", @@ -3755,9 +3748,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" [[package]] name = "rustc_version" @@ -3770,9 +3763,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.43" +version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ "bitflags 2.8.0", "errno", @@ -3783,9 +3776,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.21" +version = "0.23.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f287924602bf649d949c63dc8ac8b235fa5387d394020705b80c4eb597ce5b8" +checksum = "9fb9263ab4eb695e42321db096e3b8fbd715a59b154d5c88d82db2175b681ba7" dependencies = [ "aws-lc-rs", "log", @@ -3820,9 +3813,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.10.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2bf47e6ff922db3825eb750c4e2ff784c6ff8fb9e13046ef6a1d1c5401b0b37" +checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" dependencies = [ "web-time 1.1.0", ] @@ -3862,9 +3855,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" +checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" [[package]] name = "sanitize-filename" @@ -3892,9 +3885,9 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sd-notify" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "561e6b346a5e59e0b8a07894004897d7160567e3352d2ebd6c3741d4e086b6f5" +checksum = "b943eadf71d8b69e661330cb0e2656e31040acf21ee7708e2c238a0ec6af2bf4" dependencies = [ "libc", ] @@ -3924,9 +3917,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.24" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" +checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" [[package]] name = "sentry" @@ -4080,7 +4073,7 @@ checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4090,7 +4083,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4" dependencies = [ "form_urlencoded", - "indexmap 2.7.0", + "indexmap 2.7.1", "itoa", "ryu", "serde", @@ -4098,9 +4091,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.135" +version = "1.0.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b0d7ba2887406110130a978386c4e1befb98c674b4fba677954e4db976630d9" +checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949" dependencies = [ "itoa", "memchr", @@ -4155,7 +4148,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.7.1", "itoa", "ryu", "serde", @@ -4253,12 +4246,6 @@ dependencies = [ "quote", ] -[[package]] -name = "siphasher" -version = "0.3.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" - [[package]] name = "siphasher" version = "1.0.1" @@ -4333,26 +4320,25 @@ checksum = "f42444fea5b87a39db4218d9422087e66a85d0e7a0963a439b07bcdf91804006" [[package]] name = "string_cache" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b" +checksum = "938d512196766101d333398efde81bc1f37b00cb42c2f8350e5df639f040bbbe" dependencies = [ "new_debug_unreachable", - "once_cell", "parking_lot", - "phf_shared 0.10.0", + "phf_shared", "precomputed-hash", "serde", ] [[package]] name = "string_cache_codegen" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bb30289b722be4ff74a408c3cc27edeaad656e06cb1fe8fa9231fa59c728988" +checksum = "244292f3441c89febe5b5bdfbb6863aeaf4f64da810ea3050fd927b27b8d92ce" dependencies = [ - "phf_generator 0.10.0", - "phf_shared 0.10.0", + "phf_generator", + "phf_shared", "proc-macro2", "quote", ] @@ -4385,9 +4371,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.96" +version = "2.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" +checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" dependencies = [ "proc-macro2", "quote", @@ -4411,7 +4397,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4446,9 +4432,9 @@ dependencies = [ [[package]] name = "termimad" -version = "0.31.1" +version = "0.31.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a5d4cf55d9f1cb04fcda48f725772d0733ae34e030dfc4dd36e738a5965f4" +checksum = "a8e19c6dbf107bec01d0e216bb8219485795b7d75328e4fa5ef2756c1be4f8dc" dependencies = [ "coolor", "crokey", @@ -4496,7 +4482,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4507,7 +4493,7 @@ checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4663,7 +4649,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4726,9 +4712,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" +checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148" dependencies = [ "serde", "serde_spanned", @@ -4747,11 +4733,11 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.22" +version = "0.22.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" +checksum = "02a8b472d1a3d7c18e2d61a489aee3453fd9031c33e4f55bd533f4a7adca1bee" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.7.1", "serde", "serde_spanned", "toml_datetime", @@ -4876,7 +4862,7 @@ source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4997,9 +4983,9 @@ checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.14" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" +checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" [[package]] name = "unicode-segmentation" @@ -5084,11 +5070,11 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "uuid" -version = "1.12.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744018581f9a3454a9e15beb8a33b017183f1e7c0cd170232a2d1453b23a51c4" +checksum = "ced87ca4be083373936a67f8de945faa23b6b42384bd5b64434850802c6dccd0" dependencies = [ - "getrandom", + "getrandom 0.3.1", "serde", ] @@ -5142,6 +5128,15 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasi" +version = "0.13.3+wasi-0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +dependencies = [ + "wit-bindgen-rt", +] + [[package]] name = "wasm-bindgen" version = "0.2.100" @@ -5164,7 +5159,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "wasm-bindgen-shared", ] @@ -5199,7 +5194,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5257,9 +5252,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.7" +version = "0.26.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d642ff16b7e79272ae451b7322067cdc17cadf68c23264be9d94a32319efe7e" +checksum = "2210b291f7ea53617fbafcc4939f10914214ec15aace5ba62293a668f322c5c9" dependencies = [ "rustls-pki-types", ] @@ -5515,9 +5510,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.24" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8d71a593cc5c42ad7876e2c1fda56f314f3754c084128833e64f1345ff8a03a" +checksum = "86e376c75f4f43f44db463cf729e0d3acbf954d13e22c51e26e4c264b4ab545f" dependencies = [ "memchr", ] @@ -5532,6 +5527,15 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "wit-bindgen-rt" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +dependencies = [ + "bitflags 2.8.0", +] + [[package]] name = "write16" version = "1.0.0" @@ -5581,7 +5585,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "synstructure", ] @@ -5603,7 +5607,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -5623,7 +5627,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "synstructure", ] @@ -5652,7 +5656,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index a17aa4d6..12556e00 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +rev = "e7a793b720e58bbe6858fecb86db97191dbfe7aa" features = [ "compat", "rand", From 6113803038f15a9f0206b31fc0216ebc315d7761 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 8 Feb 2025 10:09:57 +0000 Subject: [PATCH 180/328] better error logging on send_join response failure Signed-off-by: Jason Volk --- src/api/client/membership.rs | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 6c970665..26736fb5 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -9,7 +9,7 @@ use std::{ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - at, debug, debug_info, debug_warn, err, info, + at, debug, debug_info, debug_warn, err, error, info, pdu::{gen_event_id_canonical_json, 
PduBuilder}, result::FlatOk, state_res, trace, @@ -1011,10 +1011,17 @@ async fn join_room_by_id_helper_remote( .await, }; - let send_join_response = services + let send_join_response = match services .sending .send_synapse_request(&remote_server, send_join_request) - .await?; + .await + { + | Ok(response) => response, + | Err(e) => { + error!("send_join failed: {e}"); + return Err(e); + }, + }; info!("send_join finished"); From f47677c995e2847b6ad39c877c3ab5b9bd5b1152 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 7 Feb 2025 07:09:45 +0000 Subject: [PATCH 181/328] refactor spaces Signed-off-by: Jason Volk --- src/api/client/space.rs | 228 +++++---- src/api/server/hierarchy.rs | 90 ++-- src/service/rooms/spaces/mod.rs | 774 +++++++++++++++--------------- src/service/rooms/spaces/tests.rs | 27 +- 4 files changed, 568 insertions(+), 551 deletions(-) diff --git a/src/api/client/space.rs b/src/api/client/space.rs index 8f54de2a..7efd7817 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -1,18 +1,25 @@ -use std::{collections::VecDeque, str::FromStr}; +use std::{ + collections::{BTreeSet, VecDeque}, + str::FromStr, +}; use axum::extract::State; -use conduwuit::{checked, pdu::ShortRoomId, utils::stream::IterStream}; -use futures::{StreamExt, TryFutureExt}; +use conduwuit::{ + utils::{future::TryExtExt, stream::IterStream}, + Err, Result, +}; +use futures::{future::OptionFuture, StreamExt, TryFutureExt}; use ruma::{ - api::client::{error::ErrorKind, space::get_hierarchy}, - OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, + api::client::space::get_hierarchy, OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, }; use service::{ - rooms::spaces::{get_parent_children_via, summary_to_chunk, SummaryAccessibility}, + rooms::spaces::{ + get_parent_children_via, summary_to_chunk, PaginationToken, SummaryAccessibility, + }, Services, }; -use crate::{service::rooms::spaces::PaginationToken, Error, Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy` /// @@ -40,10 +47,9 @@ pub(crate) async fn get_hierarchy_route( // Should prevent unexpeded behaviour in (bad) clients if let Some(ref token) = key { if token.suggested_only != body.suggested_only || token.max_depth != max_depth { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "suggested_only and max_depth cannot change on paginated requests", - )); + return Err!(Request(InvalidParam( + "suggested_only and max_depth cannot change on paginated requests" + ))); } } @@ -52,58 +58,70 @@ pub(crate) async fn get_hierarchy_route( body.sender_user(), &body.room_id, limit.try_into().unwrap_or(10), - key.map_or(vec![], |token| token.short_room_ids), - max_depth.into(), + max_depth.try_into().unwrap_or(usize::MAX), body.suggested_only, + key.as_ref() + .into_iter() + .flat_map(|t| t.short_room_ids.iter()), ) .await } -async fn get_client_hierarchy( +async fn get_client_hierarchy<'a, ShortRoomIds>( services: &Services, sender_user: &UserId, room_id: &RoomId, limit: usize, - short_room_ids: Vec, - max_depth: u64, + max_depth: usize, suggested_only: bool, -) -> Result { - let mut parents = VecDeque::new(); + short_room_ids: ShortRoomIds, +) -> Result +where + ShortRoomIds: Iterator + Clone + Send + Sync + 'a, +{ + type Via = Vec; + type Entry = (OwnedRoomId, Via); + type Rooms = VecDeque; - // Don't start populating the results if we have to start at a specific room. 
- let mut populate_results = short_room_ids.is_empty(); + let mut queue: Rooms = [( + room_id.to_owned(), + room_id + .server_name() + .map(ToOwned::to_owned) + .into_iter() + .collect(), + )] + .into(); - let mut stack = vec![vec![(room_id.to_owned(), match room_id.server_name() { - | Some(server_name) => vec![server_name.into()], - | None => vec![], - })]]; + let mut rooms = Vec::with_capacity(limit); + let mut parents = BTreeSet::new(); + while let Some((current_room, via)) = queue.pop_front() { + let summary = services + .rooms + .spaces + .get_summary_and_children_client(¤t_room, suggested_only, sender_user, &via) + .await?; - let mut results = Vec::with_capacity(limit); - - while let Some((current_room, via)) = { next_room_to_traverse(&mut stack, &mut parents) } { - if results.len() >= limit { - break; - } - - match ( - services - .rooms - .spaces - .get_summary_and_children_client(¤t_room, suggested_only, sender_user, &via) - .await?, - current_room == room_id, - ) { + match (summary, current_room == room_id) { + | (None | Some(SummaryAccessibility::Inaccessible), false) => { + // Just ignore other unavailable rooms + }, + | (None, true) => { + return Err!(Request(Forbidden("The requested room was not found"))); + }, + | (Some(SummaryAccessibility::Inaccessible), true) => { + return Err!(Request(Forbidden("The requested room is inaccessible"))); + }, | (Some(SummaryAccessibility::Accessible(summary)), _) => { - let mut children: Vec<(OwnedRoomId, Vec)> = - get_parent_children_via(&summary, suggested_only) - .into_iter() - .filter(|(room, _)| parents.iter().all(|parent| parent != room)) - .rev() - .collect(); + let populate = parents.len() >= short_room_ids.clone().count(); - if populate_results { - results.push(summary_to_chunk(*summary.clone())); - } else { + let mut children: Vec = get_parent_children_via(&summary, suggested_only) + .filter(|(room, _)| !parents.contains(room)) + .rev() + .map(|(key, val)| (key, val.collect())) + .collect(); + + if !populate { children = children .iter() .rev() @@ -113,97 +131,69 @@ async fn get_client_hierarchy( .rooms .short .get_shortroomid(room) - .map_ok(|short| Some(&short) != short_room_ids.get(parents.len())) + .map_ok(|short| { + Some(&short) != short_room_ids.clone().nth(parents.len()) + }) .unwrap_or_else(|_| false) }) .map(Clone::clone) - .collect::)>>() + .collect::>() .await .into_iter() .rev() .collect(); - - if children.is_empty() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Room IDs in token were not found.", - )); - } - - // We have reached the room after where we last left off - let parents_len = parents.len(); - if checked!(parents_len + 1)? == short_room_ids.len() { - populate_results = true; - } } - let parents_len: u64 = parents.len().try_into()?; - if !children.is_empty() && parents_len < max_depth { - parents.push_back(current_room.clone()); - stack.push(children); + if populate { + rooms.push(summary_to_chunk(summary.clone())); + } else if queue.is_empty() && children.is_empty() { + return Err!(Request(InvalidParam("Room IDs in token were not found."))); } - // Root room in the space hierarchy, we return an error - // if this one fails. 
+ + parents.insert(current_room.clone()); + if rooms.len() >= limit { + break; + } + + if children.is_empty() { + break; + } + + if parents.len() >= max_depth { + continue; + } + + queue.extend(children); }, - | (Some(SummaryAccessibility::Inaccessible), true) => { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "The requested room is inaccessible", - )); - }, - | (None, true) => { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "The requested room was not found", - )); - }, - // Just ignore other unavailable rooms - | (None | Some(SummaryAccessibility::Inaccessible), false) => (), } } - Ok(get_hierarchy::v1::Response { - next_batch: if let Some((room, _)) = next_room_to_traverse(&mut stack, &mut parents) { - parents.pop_front(); - parents.push_back(room); + let next_batch: OptionFuture<_> = queue + .pop_front() + .map(|(room, _)| async move { + parents.insert(room); let next_short_room_ids: Vec<_> = parents .iter() .stream() - .filter_map(|room_id| async move { - services.rooms.short.get_shortroomid(room_id).await.ok() - }) + .filter_map(|room_id| services.rooms.short.get_shortroomid(room_id).ok()) .collect() .await; - (next_short_room_ids != short_room_ids && !next_short_room_ids.is_empty()).then( - || { - PaginationToken { - short_room_ids: next_short_room_ids, - limit: UInt::new(max_depth) - .expect("When sent in request it must have been valid UInt"), - max_depth: UInt::new(max_depth) - .expect("When sent in request it must have been valid UInt"), - suggested_only, - } - .to_string() - }, - ) - } else { - None - }, - rooms: results, + (next_short_room_ids.iter().ne(short_room_ids) && !next_short_room_ids.is_empty()) + .then_some(PaginationToken { + short_room_ids: next_short_room_ids, + limit: max_depth.try_into().ok()?, + max_depth: max_depth.try_into().ok()?, + suggested_only, + }) + .as_ref() + .map(PaginationToken::to_string) + }) + .into(); + + Ok(get_hierarchy::v1::Response { + next_batch: next_batch.await.flatten(), + rooms, }) } - -fn next_room_to_traverse( - stack: &mut Vec)>>, - parents: &mut VecDeque, -) -> Option<(OwnedRoomId, Vec)> { - while stack.last().is_some_and(Vec::is_empty) { - stack.pop(); - parents.pop_back(); - } - - stack.last_mut().and_then(Vec::pop) -} diff --git a/src/api/server/hierarchy.rs b/src/api/server/hierarchy.rs index bcf2f7bc..f7bc43ab 100644 --- a/src/api/server/hierarchy.rs +++ b/src/api/server/hierarchy.rs @@ -1,10 +1,11 @@ use axum::extract::State; -use conduwuit::{Err, Result}; -use ruma::{api::federation::space::get_hierarchy, RoomId, ServerName}; -use service::{ - rooms::spaces::{get_parent_children_via, Identifier, SummaryAccessibility}, - Services, +use conduwuit::{ + utils::stream::{BroadbandExt, IterStream}, + Err, Result, }; +use futures::{FutureExt, StreamExt}; +use ruma::api::federation::space::get_hierarchy; +use service::rooms::spaces::{get_parent_children_via, Identifier, SummaryAccessibility}; use crate::Ruma; @@ -20,54 +21,51 @@ pub(crate) async fn get_hierarchy_route( return Err!(Request(NotFound("Room does not exist."))); } - get_hierarchy(&services, &body.room_id, body.origin(), body.suggested_only).await -} - -/// Gets the response for the space hierarchy over federation request -/// -/// Errors if the room does not exist, so a check if the room exists should -/// be done -async fn get_hierarchy( - services: &Services, - room_id: &RoomId, - server_name: &ServerName, - suggested_only: bool, -) -> Result { + let room_id = &body.room_id; + let suggested_only = body.suggested_only; + let ref identifier = 
Identifier::ServerName(body.origin()); match services .rooms .spaces - .get_summary_and_children_local(&room_id.to_owned(), Identifier::ServerName(server_name)) + .get_summary_and_children_local(room_id, identifier) .await? { - | Some(SummaryAccessibility::Accessible(room)) => { - let mut children = Vec::new(); - let mut inaccessible_children = Vec::new(); + | None => Err!(Request(NotFound("The requested room was not found"))), - for (child, _via) in get_parent_children_via(&room, suggested_only) { - match services - .rooms - .spaces - .get_summary_and_children_local(&child, Identifier::ServerName(server_name)) - .await? - { - | Some(SummaryAccessibility::Accessible(summary)) => { - children.push((*summary).into()); - }, - | Some(SummaryAccessibility::Inaccessible) => { - inaccessible_children.push(child); - }, - | None => (), - } - } - - Ok(get_hierarchy::v1::Response { - room: *room, - children, - inaccessible_children, - }) - }, | Some(SummaryAccessibility::Inaccessible) => Err!(Request(NotFound("The requested room is inaccessible"))), - | None => Err!(Request(NotFound("The requested room was not found"))), + + | Some(SummaryAccessibility::Accessible(room)) => { + let (children, inaccessible_children) = + get_parent_children_via(&room, suggested_only) + .stream() + .broad_filter_map(|(child, _via)| async move { + match services + .rooms + .spaces + .get_summary_and_children_local(&child, identifier) + .await + .ok()? + { + | None => None, + + | Some(SummaryAccessibility::Inaccessible) => + Some((None, Some(child))), + + | Some(SummaryAccessibility::Accessible(summary)) => + Some((Some(summary), None)), + } + }) + .unzip() + .map(|(children, inaccessible_children): (Vec<_>, Vec<_>)| { + ( + children.into_iter().flatten().map(Into::into).collect(), + inaccessible_children.into_iter().flatten().collect(), + ) + }) + .await; + + Ok(get_hierarchy::v1::Response { room, children, inaccessible_children }) + }, } } diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 1e2b0a9f..268d6dfe 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -1,14 +1,24 @@ mod pagination_token; +#[cfg(test)] mod tests; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; -use conduwuit::{debug_info, err, utils::math::usize_from_f64, Error, Result}; -use futures::StreamExt; +use conduwuit::{ + implement, + utils::{ + future::BoolExt, + math::usize_from_f64, + stream::{BroadbandExt, ReadyExt}, + IterStream, + }, + Err, Error, Result, +}; +use futures::{pin_mut, stream::FuturesUnordered, FutureExt, Stream, StreamExt, TryFutureExt}; use lru_cache::LruCache; use ruma::{ api::{ - client::{error::ErrorKind, space::SpaceHierarchyRoomsChunk}, + client::space::SpaceHierarchyRoomsChunk, federation::{ self, space::{SpaceHierarchyChildSummary, SpaceHierarchyParentSummary}, @@ -21,46 +31,46 @@ use ruma::{ }, serde::Raw, space::SpaceRoomJoinRule, - OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, + OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, }; -use tokio::sync::Mutex; +use tokio::sync::{Mutex, MutexGuard}; pub use self::pagination_token::PaginationToken; -use crate::{rooms, sending, Dep}; - -pub struct CachedSpaceHierarchySummary { - summary: SpaceHierarchyParentSummary, -} - -pub enum SummaryAccessibility { - Accessible(Box), - Inaccessible, -} - -/// Identifier used to check if rooms are accessible -/// -/// None is used if you want to return the room, no matter if accessible or not -pub enum Identifier<'a> { - 
UserId(&'a UserId), - ServerName(&'a ServerName), -} +use crate::{conduwuit::utils::TryFutureExtExt, rooms, sending, Dep}; pub struct Service { services: Services, - pub roomid_spacehierarchy_cache: - Mutex>>, + pub roomid_spacehierarchy_cache: Mutex, } struct Services { state_accessor: Dep, state_cache: Dep, state: Dep, - short: Dep, event_handler: Dep, timeline: Dep, sending: Dep, } +pub struct CachedSpaceHierarchySummary { + summary: SpaceHierarchyParentSummary, +} + +#[allow(clippy::large_enum_variant)] +pub enum SummaryAccessibility { + Accessible(SpaceHierarchyParentSummary), + Inaccessible, +} + +/// Identifier used to check if rooms are accessible. None is used if you want +/// to return the room, no matter if accessible or not +pub enum Identifier<'a> { + UserId(&'a UserId), + ServerName(&'a ServerName), +} + +type Cache = LruCache>; + impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let config = &args.server.config; @@ -72,7 +82,6 @@ impl crate::Service for Service { .depend::("rooms::state_accessor"), state_cache: args.depend::("rooms::state_cache"), state: args.depend::("rooms::state"), - short: args.depend::("rooms::short"), event_handler: args .depend::("rooms::event_handler"), timeline: args.depend::("rooms::timeline"), @@ -85,370 +94,407 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -impl Service { - /// Gets the summary of a space using solely local information - pub async fn get_summary_and_children_local( - &self, - current_room: &OwnedRoomId, - identifier: Identifier<'_>, - ) -> Result> { - if let Some(cached) = self - .roomid_spacehierarchy_cache - .lock() - .await - .get_mut(¤t_room.to_owned()) - .as_ref() - { - return Ok(if let Some(cached) = cached { +/// Gets the summary of a space using solely local information +#[implement(Service)] +pub async fn get_summary_and_children_local( + &self, + current_room: &RoomId, + identifier: &Identifier<'_>, +) -> Result> { + match self + .roomid_spacehierarchy_cache + .lock() + .await + .get_mut(current_room) + .as_ref() + { + | None => (), // cache miss + | Some(None) => return Ok(None), + | Some(Some(cached)) => + return Ok(Some( if self .is_accessible_child( current_room, &cached.summary.join_rule, - &identifier, + identifier, &cached.summary.allowed_room_ids, ) .await { - Some(SummaryAccessibility::Accessible(Box::new(cached.summary.clone()))) + SummaryAccessibility::Accessible(cached.summary.clone()) } else { - Some(SummaryAccessibility::Inaccessible) - } - } else { - None - }); - } + SummaryAccessibility::Inaccessible + }, + )), + }; - if let Some(children_pdus) = self.get_stripped_space_child_events(current_room).await? 
{ - let summary = self - .get_room_summary(current_room, children_pdus, &identifier) - .await; - if let Ok(summary) = summary { - self.roomid_spacehierarchy_cache.lock().await.insert( - current_room.clone(), - Some(CachedSpaceHierarchySummary { summary: summary.clone() }), - ); + let children_pdus: Vec<_> = self + .get_stripped_space_child_events(current_room) + .collect() + .await; - Ok(Some(SummaryAccessibility::Accessible(Box::new(summary)))) - } else { - Ok(None) - } - } else { - Ok(None) - } - } + let summary = self + .get_room_summary(current_room, children_pdus, identifier) + .boxed() + .await; - /// Gets the summary of a space using solely federation - #[tracing::instrument(level = "debug", skip(self))] - async fn get_summary_and_children_federation( - &self, - current_room: &OwnedRoomId, - suggested_only: bool, - user_id: &UserId, - via: &[OwnedServerName], - ) -> Result> { - for server in via { - debug_info!("Asking {server} for /hierarchy"); - let Ok(response) = self - .services + let Ok(summary) = summary else { + return Ok(None); + }; + + self.roomid_spacehierarchy_cache.lock().await.insert( + current_room.to_owned(), + Some(CachedSpaceHierarchySummary { summary: summary.clone() }), + ); + + Ok(Some(SummaryAccessibility::Accessible(summary))) +} + +/// Gets the summary of a space using solely federation +#[implement(Service)] +#[tracing::instrument(level = "debug", skip(self))] +async fn get_summary_and_children_federation( + &self, + current_room: &RoomId, + suggested_only: bool, + user_id: &UserId, + via: &[OwnedServerName], +) -> Result> { + let request = federation::space::get_hierarchy::v1::Request { + room_id: current_room.to_owned(), + suggested_only, + }; + + let mut requests: FuturesUnordered<_> = via + .iter() + .map(|server| { + self.services .sending - .send_federation_request(server, federation::space::get_hierarchy::v1::Request { - room_id: current_room.to_owned(), - suggested_only, - }) - .await - else { - continue; - }; - - debug_info!("Got response from {server} for /hierarchy\n{response:?}"); - let summary = response.room.clone(); - - self.roomid_spacehierarchy_cache.lock().await.insert( - current_room.clone(), - Some(CachedSpaceHierarchySummary { summary: summary.clone() }), - ); - - for child in response.children { - let mut guard = self.roomid_spacehierarchy_cache.lock().await; - if !guard.contains_key(current_room) { - guard.insert( - current_room.clone(), - Some(CachedSpaceHierarchySummary { - summary: { - let SpaceHierarchyChildSummary { - canonical_alias, - name, - num_joined_members, - room_id, - topic, - world_readable, - guest_can_join, - avatar_url, - join_rule, - room_type, - allowed_room_ids, - } = child; - - SpaceHierarchyParentSummary { - canonical_alias, - name, - num_joined_members, - room_id: room_id.clone(), - topic, - world_readable, - guest_can_join, - avatar_url, - join_rule, - room_type, - children_state: self - .get_stripped_space_child_events(&room_id) - .await? 
- .unwrap(), - allowed_room_ids, - } - }, - }), - ); - } - } - if self - .is_accessible_child( - current_room, - &response.room.join_rule, - &Identifier::UserId(user_id), - &response.room.allowed_room_ids, - ) - .await - { - return Ok(Some(SummaryAccessibility::Accessible(Box::new(summary.clone())))); - } - - return Ok(Some(SummaryAccessibility::Inaccessible)); - } + .send_federation_request(server, request.clone()) + }) + .collect(); + let Some(Ok(response)) = requests.next().await else { self.roomid_spacehierarchy_cache .lock() .await - .insert(current_room.clone(), None); + .insert(current_room.to_owned(), None); - Ok(None) - } + return Ok(None); + }; - /// Gets the summary of a space using either local or remote (federation) - /// sources - pub async fn get_summary_and_children_client( - &self, - current_room: &OwnedRoomId, - suggested_only: bool, - user_id: &UserId, - via: &[OwnedServerName], - ) -> Result> { - if let Ok(Some(response)) = self - .get_summary_and_children_local(current_room, Identifier::UserId(user_id)) - .await - { - Ok(Some(response)) - } else { - self.get_summary_and_children_federation(current_room, suggested_only, user_id, via) - .await - } - } + let summary = response.room; + self.roomid_spacehierarchy_cache.lock().await.insert( + current_room.to_owned(), + Some(CachedSpaceHierarchySummary { summary: summary.clone() }), + ); - async fn get_room_summary( - &self, - current_room: &OwnedRoomId, - children_state: Vec>, - identifier: &Identifier<'_>, - ) -> Result { - let room_id: &RoomId = current_room; - - let join_rule = self - .services - .state_accessor - .room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") - .await - .map_or(JoinRule::Invite, |c: RoomJoinRulesEventContent| c.join_rule); - - let allowed_room_ids = self - .services - .state_accessor - .allowed_room_ids(join_rule.clone()); - - if !self - .is_accessible_child( - current_room, - &join_rule.clone().into(), - identifier, - &allowed_room_ids, - ) - .await - { - debug_info!("User is not allowed to see room {room_id}"); - // This error will be caught later - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "User is not allowed to see the room", - )); - } - - Ok(SpaceHierarchyParentSummary { - canonical_alias: self - .services - .state_accessor - .get_canonical_alias(room_id) - .await - .ok(), - name: self.services.state_accessor.get_name(room_id).await.ok(), - num_joined_members: self - .services - .state_cache - .room_joined_count(room_id) - .await - .unwrap_or(0) - .try_into() - .expect("user count should not be that big"), - room_id: room_id.to_owned(), - topic: self - .services - .state_accessor - .get_room_topic(room_id) - .await - .ok(), - world_readable: self - .services - .state_accessor - .is_world_readable(room_id) - .await, - guest_can_join: self.services.state_accessor.guest_can_join(room_id).await, - avatar_url: self - .services - .state_accessor - .get_avatar(room_id) - .await - .into_option() - .unwrap_or_default() - .url, - join_rule: join_rule.into(), - room_type: self - .services - .state_accessor - .get_room_type(room_id) - .await - .ok(), - children_state, - allowed_room_ids, + response + .children + .into_iter() + .stream() + .then(|child| { + self.roomid_spacehierarchy_cache + .lock() + .map(|lock| (child, lock)) }) + .ready_filter_map(|(child, mut cache)| { + (!cache.contains_key(current_room)).then_some((child, cache)) + }) + .for_each(|(child, cache)| self.cache_insert(cache, current_room, child)) + .await; + + let identifier = Identifier::UserId(user_id); 
+ let is_accessible_child = self + .is_accessible_child( + current_room, + &summary.join_rule, + &identifier, + &summary.allowed_room_ids, + ) + .await; + + if is_accessible_child { + return Ok(Some(SummaryAccessibility::Accessible(summary))); } - /// Simply returns the stripped m.space.child events of a room - async fn get_stripped_space_child_events( - &self, - room_id: &RoomId, - ) -> Result>>, Error> { - let Ok(current_shortstatehash) = - self.services.state.get_room_shortstatehash(room_id).await - else { - return Ok(None); - }; - - let state: HashMap<_, Arc<_>> = self - .services - .state_accessor - .state_full_ids(current_shortstatehash) - .collect() - .await; - - let mut children_pdus = Vec::with_capacity(state.len()); - for (key, id) in state { - let (event_type, state_key) = - self.services.short.get_statekey_from_short(key).await?; - - if event_type != StateEventType::SpaceChild { - continue; - } - - let pdu = - self.services.timeline.get_pdu(&id).await.map_err(|e| { - err!(Database("Event {id:?} in space state not found: {e:?}")) - })?; + Ok(Some(SummaryAccessibility::Inaccessible)) +} +/// Simply returns the stripped m.space.child events of a room +#[implement(Service)] +fn get_stripped_space_child_events<'a>( + &'a self, + room_id: &'a RoomId, +) -> impl Stream> + 'a { + self.services + .state + .get_room_shortstatehash(room_id) + .map_ok(|current_shortstatehash| { + self.services + .state_accessor + .state_keys_with_ids(current_shortstatehash, &StateEventType::SpaceChild) + }) + .map(Result::into_iter) + .map(IterStream::stream) + .map(StreamExt::flatten) + .flatten_stream() + .broad_filter_map(move |(state_key, event_id): (_, OwnedEventId)| async move { + self.services + .timeline + .get_pdu(&event_id) + .map_ok(move |pdu| (state_key, pdu)) + .await + .ok() + }) + .ready_filter_map(move |(state_key, pdu)| { if let Ok(content) = pdu.get_content::() { if content.via.is_empty() { - continue; + return None; } } - if OwnedRoomId::try_from(state_key).is_ok() { - children_pdus.push(pdu.to_stripped_spacechild_state_event()); + if RoomId::parse(&state_key).is_ok() { + return Some(pdu.to_stripped_spacechild_state_event()); } - } - Ok(Some(children_pdus)) + None + }) +} + +/// Gets the summary of a space using either local or remote (federation) +/// sources +#[implement(Service)] +pub async fn get_summary_and_children_client( + &self, + current_room: &OwnedRoomId, + suggested_only: bool, + user_id: &UserId, + via: &[OwnedServerName], +) -> Result> { + let identifier = Identifier::UserId(user_id); + + if let Ok(Some(response)) = self + .get_summary_and_children_local(current_room, &identifier) + .await + { + return Ok(Some(response)); } - /// With the given identifier, checks if a room is accessable - async fn is_accessible_child( - &self, - current_room: &OwnedRoomId, - join_rule: &SpaceRoomJoinRule, - identifier: &Identifier<'_>, - allowed_room_ids: &Vec, - ) -> bool { - match identifier { - | Identifier::ServerName(server_name) => { - // Checks if ACLs allow for the server to participate - if self - .services - .event_handler - .acl_check(server_name, current_room) - .await - .is_err() - { - return false; - } - }, - | Identifier::UserId(user_id) => { - if self - .services - .state_cache - .is_joined(user_id, current_room) - .await || self - .services - .state_cache - .is_invited(user_id, current_room) - .await - { - return true; - } - }, + self.get_summary_and_children_federation(current_room, suggested_only, user_id, via) + .await +} + +#[implement(Service)] +async fn 
get_room_summary( + &self, + room_id: &RoomId, + children_state: Vec>, + identifier: &Identifier<'_>, +) -> Result { + let join_rule = self + .services + .state_accessor + .room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") + .await + .map_or(JoinRule::Invite, |c: RoomJoinRulesEventContent| c.join_rule); + + let allowed_room_ids = self + .services + .state_accessor + .allowed_room_ids(join_rule.clone()); + + let join_rule = join_rule.clone().into(); + let is_accessible_child = self + .is_accessible_child(room_id, &join_rule, identifier, &allowed_room_ids) + .await; + + if !is_accessible_child { + return Err!(Request(Forbidden("User is not allowed to see the room",))); + } + + let name = self.services.state_accessor.get_name(room_id).ok(); + + let topic = self.services.state_accessor.get_room_topic(room_id).ok(); + + let room_type = self.services.state_accessor.get_room_type(room_id).ok(); + + let world_readable = self.services.state_accessor.is_world_readable(room_id); + + let guest_can_join = self.services.state_accessor.guest_can_join(room_id); + + let num_joined_members = self + .services + .state_cache + .room_joined_count(room_id) + .unwrap_or(0); + + let canonical_alias = self + .services + .state_accessor + .get_canonical_alias(room_id) + .ok(); + + let avatar_url = self + .services + .state_accessor + .get_avatar(room_id) + .map(|res| res.into_option().unwrap_or_default().url); + + let ( + canonical_alias, + name, + num_joined_members, + topic, + world_readable, + guest_can_join, + avatar_url, + room_type, + ) = futures::join!( + canonical_alias, + name, + num_joined_members, + topic, + world_readable, + guest_can_join, + avatar_url, + room_type + ); + + Ok(SpaceHierarchyParentSummary { + canonical_alias, + name, + topic, + world_readable, + guest_can_join, + avatar_url, + room_type, + children_state, + allowed_room_ids, + join_rule, + room_id: room_id.to_owned(), + num_joined_members: num_joined_members + .try_into() + .expect("user count should not be that big"), + }) +} + +/// With the given identifier, checks if a room is accessable +#[implement(Service)] +async fn is_accessible_child( + &self, + current_room: &RoomId, + join_rule: &SpaceRoomJoinRule, + identifier: &Identifier<'_>, + allowed_room_ids: &[OwnedRoomId], +) -> bool { + if let Identifier::ServerName(server_name) = identifier { + // Checks if ACLs allow for the server to participate + if self + .services + .event_handler + .acl_check(server_name, current_room) + .await + .is_err() + { + return false; } - match &join_rule { - | SpaceRoomJoinRule::Public - | SpaceRoomJoinRule::Knock - | SpaceRoomJoinRule::KnockRestricted => true, - | SpaceRoomJoinRule::Restricted => { - for room in allowed_room_ids { + } + + if let Identifier::UserId(user_id) = identifier { + let is_joined = self.services.state_cache.is_joined(user_id, current_room); + + let is_invited = self.services.state_cache.is_invited(user_id, current_room); + + pin_mut!(is_joined, is_invited); + if is_joined.or(is_invited).await { + return true; + } + } + + match join_rule { + | SpaceRoomJoinRule::Public + | SpaceRoomJoinRule::Knock + | SpaceRoomJoinRule::KnockRestricted => true, + | SpaceRoomJoinRule::Restricted => + allowed_room_ids + .iter() + .stream() + .any(|room| async { match identifier { - | Identifier::UserId(user) => { - if self.services.state_cache.is_joined(user, room).await { - return true; - } - }, - | Identifier::ServerName(server) => { - if self.services.state_cache.server_in_room(server, room).await { - return true; - } - }, + 
| Identifier::UserId(user) => + self.services.state_cache.is_joined(user, room).await, + | Identifier::ServerName(server) => + self.services.state_cache.server_in_room(server, room).await, } - } - false - }, - // Invite only, Private, or Custom join rule - | _ => false, - } + }) + .await, + + // Invite only, Private, or Custom join rule + | _ => false, } } +/// Returns the children of a SpaceHierarchyParentSummary, making use of the +/// children_state field +pub fn get_parent_children_via( + parent: &SpaceHierarchyParentSummary, + suggested_only: bool, +) -> impl DoubleEndedIterator)> + Send + '_ +{ + parent + .children_state + .iter() + .map(Raw::deserialize) + .filter_map(Result::ok) + .filter_map(move |ce| { + (!suggested_only || ce.content.suggested) + .then_some((ce.state_key, ce.content.via.into_iter())) + }) +} + +#[implement(Service)] +async fn cache_insert( + &self, + mut cache: MutexGuard<'_, Cache>, + current_room: &RoomId, + child: SpaceHierarchyChildSummary, +) { + let SpaceHierarchyChildSummary { + canonical_alias, + name, + num_joined_members, + room_id, + topic, + world_readable, + guest_can_join, + avatar_url, + join_rule, + room_type, + allowed_room_ids, + } = child; + + let summary = SpaceHierarchyParentSummary { + canonical_alias, + name, + num_joined_members, + topic, + world_readable, + guest_can_join, + avatar_url, + join_rule, + room_type, + allowed_room_ids, + room_id: room_id.clone(), + children_state: self + .get_stripped_space_child_events(&room_id) + .collect() + .await, + }; + + cache.insert(current_room.to_owned(), Some(CachedSpaceHierarchySummary { summary })); +} + // Here because cannot implement `From` across ruma-federation-api and // ruma-client-api types impl From for SpaceHierarchyRoomsChunk { @@ -517,25 +563,3 @@ pub fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyR children_state, } } - -/// Returns the children of a SpaceHierarchyParentSummary, making use of the -/// children_state field -#[must_use] -pub fn get_parent_children_via( - parent: &SpaceHierarchyParentSummary, - suggested_only: bool, -) -> Vec<(OwnedRoomId, Vec)> { - parent - .children_state - .iter() - .filter_map(|raw_ce| { - raw_ce.deserialize().map_or(None, |ce| { - if suggested_only && !ce.content.suggested { - None - } else { - Some((ce.state_key, ce.content.via)) - } - }) - }) - .collect() -} diff --git a/src/service/rooms/spaces/tests.rs b/src/service/rooms/spaces/tests.rs index b4c387d7..dd6c2f35 100644 --- a/src/service/rooms/spaces/tests.rs +++ b/src/service/rooms/spaces/tests.rs @@ -1,5 +1,3 @@ -#![cfg(test)] - use std::str::FromStr; use ruma::{ @@ -69,15 +67,22 @@ fn get_summary_children() { } .into(); - assert_eq!(get_parent_children_via(&summary, false), vec![ - (owned_room_id!("!foo:example.org"), vec![owned_server_name!("example.org")]), - (owned_room_id!("!bar:example.org"), vec![owned_server_name!("example.org")]), - (owned_room_id!("!baz:example.org"), vec![owned_server_name!("example.org")]) - ]); - assert_eq!(get_parent_children_via(&summary, true), vec![( - owned_room_id!("!bar:example.org"), - vec![owned_server_name!("example.org")] - )]); + assert_eq!( + get_parent_children_via(&summary, false) + .map(|(k, v)| (k, v.collect::>())) + .collect::>(), + vec![ + (owned_room_id!("!foo:example.org"), vec![owned_server_name!("example.org")]), + (owned_room_id!("!bar:example.org"), vec![owned_server_name!("example.org")]), + (owned_room_id!("!baz:example.org"), vec![owned_server_name!("example.org")]) + ] + ); + assert_eq!( + 
get_parent_children_via(&summary, true) + .map(|(k, v)| (k, v.collect::>())) + .collect::>(), + vec![(owned_room_id!("!bar:example.org"), vec![owned_server_name!("example.org")])] + ); } #[test] From c614d5bf44b477a39a6b819ab4f31fa9f2c626f1 Mon Sep 17 00:00:00 2001 From: strawberry Date: Mon, 17 Feb 2025 17:35:03 -0500 Subject: [PATCH 182/328] bump ruwuma Signed-off-by: strawberry --- Cargo.lock | 27 ++++++++++++------------- Cargo.toml | 2 +- flake.lock | 6 +++--- src/api/client/unstable.rs | 40 +++++++++++++++++++++++--------------- 4 files changed, 42 insertions(+), 33 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index efba2e07..be2c6720 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3482,7 +3482,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "assign", "js_int", @@ -3504,7 +3504,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "js_int", "ruma-common", @@ -3516,7 +3516,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "as_variant", "assign", @@ -3539,12 +3539,13 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "as_variant", "base64 0.22.1", "bytes", "form_urlencoded", + "getrandom 0.2.15", "http", "indexmap 2.7.1", "js_int", @@ -3570,7 +3571,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3595,7 +3596,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "bytes", "http", @@ -3613,7 +3614,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3622,7 +3623,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "js_int", "ruma-common", @@ -3632,7 +3633,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3647,7 +3648,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "js_int", "ruma-common", @@ -3659,7 +3660,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "headers", "http", @@ -3672,7 +3673,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3688,7 +3689,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 12556e00..bea306f6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "e7a793b720e58bbe6858fecb86db97191dbfe7aa" +rev = "4b3a92568310bef42078783e0172b188c5a92b3d" features = [ "compat", "rand", diff --git a/flake.lock b/flake.lock index 5af6ec43..15040a42 100644 --- a/flake.lock +++ b/flake.lock @@ -567,11 +567,11 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1737828695, - "narHash": "sha256-8Ev6zzhNPU798JNvU27a7gj5X+6SDG3jBweUkQ59DbA=", + "lastModified": 1739735789, + "narHash": "sha256-BIzuZS0TV4gRnciP4ieW5J3Hql986iedM5dHQfK6z68=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "a4d9230dcc9d03be428b9a728133f8f646c0065c", + "rev": 
"34e401fd4392dd3268e042f1e40dffd064b9a7ff", "type": "github" }, "original": { diff --git a/src/api/client/unstable.rs b/src/api/client/unstable.rs index 66cb31d5..67c7df75 100644 --- a/src/api/client/unstable.rs +++ b/src/api/client/unstable.rs @@ -272,7 +272,7 @@ pub(crate) async fn set_profile_key_route( ))); } - let Some(profile_key_value) = body.kv_pair.get(&body.key) else { + let Some(profile_key_value) = body.kv_pair.get(&body.key_name) else { return Err!(Request(BadJson( "The key does not match the URL field key, or JSON body is empty (use DELETE)" ))); @@ -290,7 +290,7 @@ pub(crate) async fn set_profile_key_route( return Err!(Request(BadJson("Key names cannot be longer than 128 bytes"))); } - if body.key == "displayname" { + if body.key_name == "displayname" { let all_joined_rooms: Vec = services .rooms .state_cache @@ -306,7 +306,7 @@ pub(crate) async fn set_profile_key_route( &all_joined_rooms, ) .await; - } else if body.key == "avatar_url" { + } else if body.key_name == "avatar_url" { let mxc = ruma::OwnedMxcUri::from(profile_key_value.to_string()); let all_joined_rooms: Vec = services @@ -319,9 +319,11 @@ pub(crate) async fn set_profile_key_route( update_avatar_url(&services, &body.user_id, Some(mxc), None, &all_joined_rooms).await; } else { - services - .users - .set_profile_key(&body.user_id, &body.key, Some(profile_key_value.clone())); + services.users.set_profile_key( + &body.user_id, + &body.key_name, + Some(profile_key_value.clone()), + ); } if services.globals.allow_local_presence() { @@ -357,7 +359,7 @@ pub(crate) async fn delete_profile_key_route( ))); } - if body.key == "displayname" { + if body.key_name == "displayname" { let all_joined_rooms: Vec = services .rooms .state_cache @@ -367,7 +369,7 @@ pub(crate) async fn delete_profile_key_route( .await; update_displayname(&services, &body.user_id, None, &all_joined_rooms).await; - } else if body.key == "avatar_url" { + } else if body.key_name == "avatar_url" { let all_joined_rooms: Vec = services .rooms .state_cache @@ -380,7 +382,7 @@ pub(crate) async fn delete_profile_key_route( } else { services .users - .set_profile_key(&body.user_id, &body.key, None); + .set_profile_key(&body.user_id, &body.key_name, None); } if services.globals.allow_local_presence() { @@ -497,11 +499,13 @@ pub(crate) async fn get_profile_key_route( .users .set_timezone(&body.user_id, response.tz.clone()); - if let Some(value) = response.custom_profile_fields.get(&body.key) { - profile_key_value.insert(body.key.clone(), value.clone()); - services - .users - .set_profile_key(&body.user_id, &body.key, Some(value.clone())); + if let Some(value) = response.custom_profile_fields.get(&body.key_name) { + profile_key_value.insert(body.key_name.clone(), value.clone()); + services.users.set_profile_key( + &body.user_id, + &body.key_name, + Some(value.clone()), + ); } else { return Err!(Request(NotFound("The requested profile key does not exist."))); } @@ -520,8 +524,12 @@ pub(crate) async fn get_profile_key_route( return Err!(Request(NotFound("Profile was not found."))); } - if let Ok(value) = services.users.profile_key(&body.user_id, &body.key).await { - profile_key_value.insert(body.key.clone(), value); + if let Ok(value) = services + .users + .profile_key(&body.user_id, &body.key_name) + .await + { + profile_key_value.insert(body.key_name.clone(), value); } else { return Err!(Request(NotFound("The requested profile key does not exist."))); } From 01155fa649ef401d3ca9653439c0a7adf8a83f71 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 21 Feb 2025 
17:47:44 +0000 Subject: [PATCH 183/328] fix unsafe precondition violation Signed-off-by: Jason Volk --- src/api/client/sync/mod.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index 1967f4a2..46540881 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -76,11 +76,13 @@ async fn share_encrypted_room( .state_cache .get_shared_rooms(sender_user, user_id) .ready_filter(|&room_id| Some(room_id) != ignore_room) - .broad_any(|other_room_id| { + .map(ToOwned::to_owned) + .broad_any(|other_room_id| async move { services .rooms .state_accessor - .is_encrypted_room(other_room_id) + .is_encrypted_room(&other_room_id) + .await }) .await } From 1061f68f0e14ee166a14d631540d322492988627 Mon Sep 17 00:00:00 2001 From: morguldir Date: Fri, 21 Feb 2025 21:13:06 +0100 Subject: [PATCH 184/328] bump ruwuma --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index be2c6720..7e84437c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3482,7 +3482,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "assign", "js_int", @@ -3504,7 +3504,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "js_int", "ruma-common", @@ -3516,7 +3516,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "as_variant", "assign", @@ -3539,7 +3539,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "as_variant", "base64 0.22.1", @@ -3571,7 +3571,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3596,7 +3596,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "bytes", "http", @@ -3614,7 +3614,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3623,7 +3623,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "js_int", "ruma-common", @@ -3633,7 +3633,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3648,7 +3648,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "js_int", "ruma-common", @@ -3660,7 +3660,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "headers", "http", @@ -3673,7 +3673,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3689,7 +3689,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index bea306f6..ed7e6ac3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "4b3a92568310bef42078783e0172b188c5a92b3d" +rev = "5dc3e0f81d614ed9dc96b50f646b2e4385291c55" features = [ "compat", "rand", From 8085a1c064afeb61d8136963a671e6bbc15a8f98 Mon Sep 17 
00:00:00 2001 From: morguldir Date: Sat, 22 Feb 2025 16:46:06 +0100 Subject: [PATCH 185/328] Implement MSC3967, also fixes error when uploading keys in element Co-authored-by: Aiden McClelland Signed-off-by: morguldir --- bin/complement | 2 +- src/api/client/keys.rs | 120 +++++++++++++++++++++++++++++++++------ src/api/server/send.rs | 14 ++--- src/service/users/mod.rs | 60 +++++++++++--------- 4 files changed, 143 insertions(+), 53 deletions(-) diff --git a/bin/complement b/bin/complement index a4c62856..ffd7a938 100755 --- a/bin/complement +++ b/bin/complement @@ -45,7 +45,7 @@ set +o pipefail env \ -C "$COMPLEMENT_SRC" \ COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \ - go test -tags="conduwuit_blacklist" "$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests | tee "$LOG_FILE" + go test -tags="conduwuit_blacklist" "$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests ./tests/msc3967 | tee "$LOG_FILE" set -o pipefail # Post-process the results into an easy-to-compare format, sorted by Test name for reproducible results diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 7bf0a5da..801ae32b 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -1,7 +1,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use axum::extract::State; -use conduwuit::{err, utils, Error, Result}; +use conduwuit::{debug, err, info, result::NotFound, utils, Err, Error, Result}; use futures::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::{ @@ -15,6 +15,7 @@ use ruma::{ }, federation, }, + encryption::CrossSigningKey, serde::Raw, OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, }; @@ -125,7 +126,24 @@ pub(crate) async fn upload_signing_keys_route( auth_error: None, }; - if let Some(auth) = &body.auth { + if let Ok(exists) = check_for_new_keys( + services, + sender_user, + body.self_signing_key.as_ref(), + body.user_signing_key.as_ref(), + body.master_key.as_ref(), + ) + .await + .inspect_err(|e| info!(?e)) + { + if let Some(result) = exists { + // No-op, they tried to reupload the same set of keys + // (lost connection for example) + return Ok(result); + } + debug!("Skipping UIA in accordance with MSC3967, the user didn't have any existing keys"); + // Some of the keys weren't found, so we let them upload + } else if let Some(auth) = &body.auth { let (worked, uiaainfo) = services .uiaa .try_auth(sender_user, sender_device, auth, &uiaainfo) @@ -134,7 +152,7 @@ pub(crate) async fn upload_signing_keys_route( if !worked { return Err(Error::Uiaa(uiaainfo)); } - // Success! + // Success! 
} else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services @@ -146,22 +164,90 @@ pub(crate) async fn upload_signing_keys_route( return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } - if let Some(master_key) = &body.master_key { - services - .users - .add_cross_signing_keys( - sender_user, - master_key, - &body.self_signing_key, - &body.user_signing_key, - true, // notify so that other users see the new keys - ) - .await?; - } + services + .users + .add_cross_signing_keys( + sender_user, + &body.master_key, + &body.self_signing_key, + &body.user_signing_key, + true, // notify so that other users see the new keys + ) + .await?; Ok(upload_signing_keys::v3::Response {}) } +async fn check_for_new_keys( + services: crate::State, + user_id: &UserId, + self_signing_key: Option<&Raw>, + user_signing_key: Option<&Raw>, + master_signing_key: Option<&Raw>, +) -> Result> { + debug!("checking for existing keys"); + let mut empty = false; + if let Some(master_signing_key) = master_signing_key { + let (key, value) = parse_master_key(user_id, master_signing_key)?; + let result = services + .users + .get_master_key(None, user_id, &|_| true) + .await; + if result.is_not_found() { + empty = true; + } else { + let existing_master_key = result?; + let (existing_key, existing_value) = parse_master_key(user_id, &existing_master_key)?; + if existing_key != key || existing_value != value { + return Err!(Request(Forbidden( + "Tried to change an existing master key, UIA required" + ))); + } + } + } + if let Some(user_signing_key) = user_signing_key { + let key = services.users.get_user_signing_key(user_id).await; + if key.is_not_found() && !empty { + return Err!(Request(Forbidden( + "Tried to update an existing user signing key, UIA required" + ))); + } + if !key.is_not_found() { + let existing_signing_key = key?.deserialize()?; + if existing_signing_key != user_signing_key.deserialize()? { + return Err!(Request(Forbidden( + "Tried to change an existing user signing key, UIA required" + ))); + } + } + } + if let Some(self_signing_key) = self_signing_key { + let key = services + .users + .get_self_signing_key(None, user_id, &|_| true) + .await; + if key.is_not_found() && !empty { + debug!(?key); + return Err!(Request(Forbidden( + "Tried to add a new signing key independently from the master key" + ))); + } + if !key.is_not_found() { + let existing_signing_key = key?.deserialize()?; + if existing_signing_key != self_signing_key.deserialize()? { + return Err!(Request(Forbidden( + "Tried to update an existing self signing key, UIA required" + ))); + } + } + } + if empty { + return Ok(None); + } + + Ok(Some(upload_signing_keys::v3::Response {})) +} + /// # `POST /_matrix/client/r0/keys/signatures/upload` /// /// Uploads end-to-end key signatures from the sender user. 
@@ -407,7 +493,9 @@ where * resulting in an endless loop */ ) .await?; - master_keys.insert(user.clone(), raw); + if let Some(raw) = raw { + master_keys.insert(user.clone(), raw); + } } self_signing_keys.extend(response.self_signing_keys); diff --git a/src/api/server/send.rs b/src/api/server/send.rs index 2e615a0c..bc18377e 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -585,12 +585,10 @@ async fn handle_edu_signing_key_update( return; } - if let Some(master_key) = master_key { - services - .users - .add_cross_signing_keys(&user_id, &master_key, &self_signing_key, &None, true) - .await - .log_err() - .ok(); - } + services + .users + .add_cross_signing_keys(&user_id, &master_key, &self_signing_key, &None, true) + .await + .log_err() + .ok(); } diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 68b87541..f0389a4a 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -514,7 +514,7 @@ impl Service { pub async fn add_cross_signing_keys( &self, user_id: &UserId, - master_key: &Raw, + master_key: &Option>, self_signing_key: &Option>, user_signing_key: &Option>, notify: bool, @@ -523,15 +523,17 @@ impl Service { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xFF); - let (master_key_key, _) = parse_master_key(user_id, master_key)?; + if let Some(master_key) = master_key { + let (master_key_key, _) = parse_master_key(user_id, master_key)?; - self.db - .keyid_key - .insert(&master_key_key, master_key.json().get().as_bytes()); + self.db + .keyid_key + .insert(&master_key_key, master_key.json().get().as_bytes()); - self.db - .userid_masterkeyid - .insert(user_id.as_bytes(), &master_key_key); + self.db + .userid_masterkeyid + .insert(user_id.as_bytes(), &master_key_key); + } // Self-signing key if let Some(self_signing_key) = self_signing_key { @@ -567,32 +569,16 @@ impl Service { // User-signing key if let Some(user_signing_key) = user_signing_key { - let mut user_signing_key_ids = user_signing_key - .deserialize() - .map_err(|_| err!(Request(InvalidParam("Invalid user signing key"))))? - .keys - .into_values(); - - let user_signing_key_id = user_signing_key_ids - .next() - .ok_or(err!(Request(InvalidParam("User signing key contained no key."))))?; - - if user_signing_key_ids.next().is_some() { - return Err!(Request(InvalidParam( - "User signing key contained more than one key." - ))); - } - - let mut user_signing_key_key = prefix; - user_signing_key_key.extend_from_slice(user_signing_key_id.as_bytes()); + let user_signing_key_id = parse_user_signing_key(user_signing_key)?; + let user_signing_key_key = (user_id, &user_signing_key_id); self.db .keyid_key - .insert(&user_signing_key_key, user_signing_key.json().get().as_bytes()); + .put_raw(user_signing_key_key, user_signing_key.json().get().as_bytes()); self.db .userid_usersigningkeyid - .insert(user_id.as_bytes(), &user_signing_key_key); + .put(user_id, user_signing_key_key); } if notify { @@ -1079,6 +1065,24 @@ pub fn parse_master_key( Ok((master_key_key, master_key)) } +pub fn parse_user_signing_key(user_signing_key: &Raw) -> Result { + let mut user_signing_key_ids = user_signing_key + .deserialize() + .map_err(|_| err!(Request(InvalidParam("Invalid user signing key"))))? 
+ .keys + .into_values(); + + let user_signing_key_id = user_signing_key_ids + .next() + .ok_or(err!(Request(InvalidParam("User signing key contained no key."))))?; + + if user_signing_key_ids.next().is_some() { + return Err!(Request(InvalidParam("User signing key contained more than one key."))); + } + + Ok(user_signing_key_id) +} + /// Ensure that a user only sees signatures from themselves and the target user fn clean_signatures( mut cross_signing_key: serde_json::Value, From bec19df275f100f15fa58dc8654a2ec41958eacc Mon Sep 17 00:00:00 2001 From: morguldir Date: Sat, 22 Feb 2025 17:12:31 +0100 Subject: [PATCH 186/328] increase compression levels for some column families again --- src/database/engine/descriptor.rs | 4 ++-- src/database/maps.rs | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index 934ef831..816555d2 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -83,7 +83,7 @@ pub(crate) static RANDOM: Descriptor = Descriptor { write_size: 1024 * 1024 * 32, cache_shards: 128, compression_level: -3, - bottommost_level: Some(-1), + bottommost_level: Some(2), compressed_index: true, ..BASE }; @@ -95,7 +95,7 @@ pub(crate) static SEQUENTIAL: Descriptor = Descriptor { file_size: 1024 * 1024 * 2, cache_shards: 128, compression_level: -2, - bottommost_level: Some(-1), + bottommost_level: Some(2), compression_shape: [0, 0, 1, 1, 1, 1, 1], compressed_index: false, ..BASE diff --git a/src/database/maps.rs b/src/database/maps.rs index 9ae5ab44..fc216ee0 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -171,6 +171,8 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "roomsynctoken_shortstatehash", val_size_hint: Some(8), block_size: 512, + compression_level: 3, + bottommost_level: Some(6), ..descriptor::SEQUENTIAL }, Descriptor { From e97952b7f6d310d5954a0d9e6b8979d25b090387 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 23 Feb 2025 01:17:30 -0500 Subject: [PATCH 187/328] bump nix lock, update to rust 2024 and 1.85.0 toolchain Signed-off-by: June Clementine Strawberry --- Cargo.toml | 7 +++++-- flake.lock | 36 ++++++++++++++++++------------------ flake.nix | 2 +- rust-toolchain.toml | 2 +- rustfmt.toml | 2 +- 5 files changed, 26 insertions(+), 23 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ed7e6ac3..76de2212 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,14 +13,14 @@ authors = [ ] categories = ["network-programming"] description = "a very cool Matrix chat homeserver written in Rust" -edition = "2021" +edition = "2024" homepage = "https://conduwuit.puppyirl.gay/" keywords = ["chat", "matrix", "networking", "server", "uwu"] license = "Apache-2.0" # See also `rust-toolchain.toml` readme = "README.md" repository = "https://github.com/girlbossceo/conduwuit" -rust-version = "1.84.0" +rust-version = "1.85.0" version = "0.5.0" [workspace.metadata.crane] @@ -975,3 +975,6 @@ suspicious = { level = "warn", priority = -1 } ## some sadness let_underscore_future = { level = "allow", priority = 1 } + +# rust doesnt understand conduwuit's custom log macros +literal_string_with_formatting_args = { level = "allow", priority = 1 } diff --git a/flake.lock b/flake.lock index 15040a42..9bf6ac55 100644 --- a/flake.lock +++ b/flake.lock @@ -10,11 +10,11 @@ "nixpkgs-stable": "nixpkgs-stable" }, "locked": { - "lastModified": 1731270564, - "narHash": "sha256-6KMC/NH/VWP5Eb+hA56hz0urel3jP6Y6cF2PX6xaTkk=", + "lastModified": 1738524606, + "narHash": 
"sha256-hPYEJ4juK3ph7kbjbvv7PlU1D9pAkkhl+pwx8fZY53U=", "owner": "zhaofengli", "repo": "attic", - "rev": "47752427561f1c34debb16728a210d378f0ece36", + "rev": "ff8a897d1f4408ebbf4d45fa9049c06b3e1e3f4e", "type": "github" }, "original": { @@ -117,11 +117,11 @@ }, "crane_2": { "locked": { - "lastModified": 1737689766, - "narHash": "sha256-ivVXYaYlShxYoKfSo5+y5930qMKKJ8CLcAoIBPQfJ6s=", + "lastModified": 1739936662, + "narHash": "sha256-x4syUjNUuRblR07nDPeLDP7DpphaBVbUaSoeZkFbGSk=", "owner": "ipetkov", "repo": "crane", - "rev": "6fe74265bbb6d016d663b1091f015e2976c4a527", + "rev": "19de14aaeb869287647d9461cbd389187d8ecdb7", "type": "github" }, "original": { @@ -170,11 +170,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1737786656, - "narHash": "sha256-ubCW9Jy7ZUOF354bWxTgLDpVnTvIpNr6qR4H/j7I0oo=", + "lastModified": 1740206139, + "narHash": "sha256-wWSv4KYhPKggKuJLzghfBs99pS3Kli9UBlyXVBzuIzc=", "owner": "nix-community", "repo": "fenix", - "rev": "2f721f527886f801403f389a9cabafda8f1e3b7f", + "rev": "133a9eb59fb4ddac443ebe5ab2449d3940396533", "type": "github" }, "original": { @@ -364,11 +364,11 @@ "liburing": { "flake": false, "locked": { - "lastModified": 1737600516, - "narHash": "sha256-EKyLQ3pbcjoU5jH5atge59F4fzuhTsb6yalUj6Ve2t8=", + "lastModified": 1740063075, + "narHash": "sha256-AfrCMPiXwgB0yxociq4no4NjCqGf/nRVhC3CLRoKqhA=", "owner": "axboe", "repo": "liburing", - "rev": "6c509e2b0c881a13b83b259a221bf15fc9b3f681", + "rev": "5c788d514b9ed6d1a3624150de8aa6db403c1c65", "type": "github" }, "original": { @@ -550,11 +550,11 @@ }, "nixpkgs_5": { "locked": { - "lastModified": 1737717945, - "narHash": "sha256-ET91TMkab3PmOZnqiJQYOtSGvSTvGeHoegAv4zcTefM=", + "lastModified": 1740019556, + "narHash": "sha256-vn285HxnnlHLWnv59Og7muqECNMS33mWLM14soFIv2g=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "ecd26a469ac56357fd333946a99086e992452b6a", + "rev": "dad564433178067be1fbdfcce23b546254b6d641", "type": "github" }, "original": { @@ -599,11 +599,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1737728869, - "narHash": "sha256-U4pl3Hi0lT6GP4ecN3q9wdD2sdaKMbmD/5NJ1NdJ9AM=", + "lastModified": 1740077634, + "narHash": "sha256-KlYdDhon/hy91NutuBeN8e3qTKf3FXgsudWsjnHud68=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "6e4c29f7ce18cea7d3d31237a4661ab932eab636", + "rev": "88fbdcd510e79ef3bcd81d6d9d4f07bdce84be8c", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 3cef1af5..04dee681 100644 --- a/flake.nix +++ b/flake.nix @@ -26,7 +26,7 @@ file = ./rust-toolchain.toml; # See also `rust-toolchain.toml` - sha256 = "sha256-lMLAupxng4Fd9F1oDw8gx+qA0RuF7ou7xhNU8wgs0PU="; + sha256 = "sha256-AJ6LX/Q/Er9kS15bn9iflkUwcgYqRQxiOIL2ToVAXaU="; }; mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: { diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 97e33c91..00fb6cee 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -9,7 +9,7 @@ # If you're having trouble making the relevant changes, bug a maintainer. 
[toolchain] -channel = "1.84.0" +channel = "1.85.0" profile = "minimal" components = [ # For rust-analyzer diff --git a/rustfmt.toml b/rustfmt.toml index 635ec8f8..89041b04 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -2,7 +2,7 @@ array_width = 80 chain_width = 60 comment_width = 80 condense_wildcard_suffixes = true -edition = "2024" +style_edition = "2024" fn_call_width = 80 fn_single_line = true format_code_in_doc_comments = true From a1e1f40deda8f974d61b0095fc41356cc3fda43f Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 23 Feb 2025 01:17:45 -0500 Subject: [PATCH 188/328] run cargo fix for rust 2024 changes and rustfmt Signed-off-by: June Clementine Strawberry --- src/admin/appservice/commands.rs | 2 +- src/admin/command.rs | 4 +- src/admin/debug/commands.rs | 37 +- src/admin/debug/tester.rs | 2 +- src/admin/federation/commands.rs | 2 +- src/admin/media/commands.rs | 157 +++---- src/admin/processor.rs | 14 +- src/admin/query/account_data.rs | 2 +- src/admin/query/raw.rs | 5 +- src/admin/query/resolver.rs | 4 +- src/admin/query/room_state_cache.rs | 2 +- src/admin/query/room_timeline.rs | 4 +- src/admin/query/sending.rs | 2 +- src/admin/query/short.rs | 2 +- src/admin/query/users.rs | 2 +- src/admin/room/alias.rs | 4 +- src/admin/room/commands.rs | 4 +- src/admin/room/directory.rs | 4 +- src/admin/room/info.rs | 4 +- src/admin/room/moderation.rs | 198 ++++----- src/admin/server/commands.rs | 2 +- src/admin/user/commands.rs | 8 +- src/admin/utils.rs | 2 +- src/api/client/account.rs | 201 +++++---- src/api/client/account_data.rs | 6 +- src/api/client/alias.rs | 30 +- src/api/client/appservice.rs | 2 +- src/api/client/backup.rs | 4 +- src/api/client/capabilities.rs | 2 +- src/api/client/context.rs | 11 +- src/api/client/device.rs | 86 ++-- src/api/client/directory.rs | 41 +- src/api/client/keys.rs | 130 +++--- src/api/client/media.rs | 7 +- src/api/client/media_legacy.rs | 252 ++++++------ src/api/client/membership.rs | 157 +++---- src/api/client/message.rs | 13 +- src/api/client/presence.rs | 57 +-- src/api/client/profile.rs | 9 +- src/api/client/push.rs | 6 +- src/api/client/read_marker.rs | 6 +- src/api/client/redact.rs | 2 +- src/api/client/relations.rs | 13 +- src/api/client/report.rs | 10 +- src/api/client/room/create.rs | 25 +- src/api/client/room/event.rs | 6 +- src/api/client/room/initial_sync.rs | 5 +- src/api/client/room/upgrade.rs | 7 +- src/api/client/search.rs | 11 +- src/api/client/send.rs | 4 +- src/api/client/session.rs | 68 ++-- src/api/client/space.rs | 12 +- src/api/client/state.rs | 6 +- src/api/client/sync/mod.rs | 14 +- src/api/client/sync/v3.rs | 65 +-- src/api/client/sync/v4.rs | 25 +- src/api/client/sync/v5.rs | 24 +- src/api/client/tag.rs | 2 +- src/api/client/threads.rs | 2 +- src/api/client/typing.rs | 65 +-- src/api/client/unstable.rs | 34 +- src/api/client/unversioned.rs | 2 +- src/api/client/user_directory.rs | 4 +- src/api/client/voip.rs | 6 +- src/api/client/well_known.rs | 2 +- src/api/mod.rs | 2 +- src/api/router.rs | 6 +- src/api/router/args.rs | 8 +- src/api/router/auth.rs | 24 +- src/api/router/handler.rs | 4 +- src/api/router/request.rs | 4 +- src/api/router/response.rs | 4 +- src/api/server/backfill.rs | 4 +- src/api/server/event.rs | 4 +- src/api/server/event_auth.rs | 4 +- src/api/server/get_missing_events.rs | 2 +- src/api/server/hierarchy.rs | 4 +- src/api/server/invite.rs | 6 +- src/api/server/key.rs | 8 +- src/api/server/make_join.rs | 8 +- src/api/server/make_knock.rs | 8 +- src/api/server/make_leave.rs | 2 +- 
src/api/server/media.rs | 8 +- src/api/server/query.rs | 4 +- src/api/server/send.rs | 12 +- src/api/server/send_join.rs | 16 +- src/api/server/send_knock.rs | 14 +- src/api/server/send_leave.rs | 8 +- src/api/server/state.rs | 4 +- src/api/server/state_ids.rs | 4 +- src/api/server/user.rs | 2 +- src/api/server/utils.rs | 4 +- src/core/alloc/je.rs | 25 +- src/core/config/check.rs | 2 +- src/core/config/manager.rs | 4 +- src/core/config/mod.rs | 10 +- src/core/debug.rs | 2 +- src/core/error/err.rs | 4 +- src/core/error/mod.rs | 4 +- src/core/error/panic.rs | 2 +- src/core/error/response.rs | 2 +- src/core/info/room_version.rs | 2 +- src/core/log/capture/data.rs | 4 +- src/core/log/capture/util.rs | 2 +- src/core/log/console.rs | 6 +- src/core/log/fmt.rs | 2 +- src/core/log/mod.rs | 4 +- src/core/log/reload.rs | 4 +- src/core/mods/module.rs | 4 +- src/core/mods/new.rs | 2 +- src/core/pdu/builder.rs | 4 +- src/core/pdu/content.rs | 2 +- src/core/pdu/count.rs | 2 +- src/core/pdu/event.rs | 2 +- src/core/pdu/event_id.rs | 2 +- src/core/pdu/mod.rs | 6 +- src/core/pdu/raw_id.rs | 4 +- src/core/pdu/redact.rs | 31 +- src/core/pdu/strip.rs | 2 +- src/core/pdu/unsigned.rs | 4 +- src/core/server.rs | 4 +- src/core/state_res/event_auth.rs | 383 +++++++++--------- src/core/state_res/mod.rs | 25 +- src/core/state_res/power_levels.rs | 8 +- src/core/state_res/state_event.rs | 2 +- src/core/state_res/test_utils.rs | 16 +- src/core/utils/bytes.rs | 2 +- src/core/utils/defer.rs | 4 +- src/core/utils/future/bool_ext.rs | 2 +- src/core/utils/future/ext_ext.rs | 2 +- src/core/utils/future/mod.rs | 2 +- src/core/utils/future/option_ext.rs | 2 +- src/core/utils/future/try_ext_ext.rs | 3 +- src/core/utils/hash/argon.rs | 6 +- src/core/utils/json.rs | 2 +- src/core/utils/math.rs | 2 +- src/core/utils/math/tried.rs | 2 +- src/core/utils/mod.rs | 8 +- src/core/utils/mutex_map.rs | 2 +- src/core/utils/rand.rs | 2 +- src/core/utils/stream/broadband.rs | 4 +- src/core/utils/stream/cloned.rs | 2 +- src/core/utils/stream/ignore.rs | 2 +- src/core/utils/stream/iter_stream.rs | 3 +- src/core/utils/stream/mod.rs | 4 +- src/core/utils/stream/ready.rs | 2 +- src/core/utils/stream/try_parallel.rs | 4 +- src/core/utils/stream/try_ready.rs | 2 +- src/core/utils/stream/try_tools.rs | 2 +- src/core/utils/stream/wideband.rs | 4 +- src/core/utils/string.rs | 2 +- src/core/utils/string/unquoted.rs | 4 +- src/core/utils/sys.rs | 4 +- src/core/utils/sys/compute.rs | 6 +- src/core/utils/sys/storage.rs | 4 +- src/core/utils/tests.rs | 2 +- src/core/utils/time.rs | 2 +- src/database/de.rs | 5 +- src/database/engine.rs | 6 +- src/database/engine/backup.rs | 2 +- src/database/engine/cf_opts.rs | 4 +- src/database/engine/context.rs | 2 +- src/database/engine/db_opts.rs | 4 +- src/database/engine/files.rs | 4 +- src/database/engine/memory_usage.rs | 2 +- src/database/engine/open.rs | 8 +- src/database/engine/repair.rs | 2 +- src/database/handle.rs | 2 +- src/database/keyval.rs | 2 +- src/database/map.rs | 2 +- src/database/map/compact.rs | 2 +- src/database/map/contains.rs | 16 +- src/database/map/count.rs | 7 +- src/database/map/get.rs | 11 +- src/database/map/get_batch.rs | 11 +- src/database/map/keys.rs | 2 +- src/database/map/keys_from.rs | 18 +- src/database/map/keys_prefix.rs | 10 +- src/database/map/qry.rs | 13 +- src/database/map/qry_batch.rs | 7 +- src/database/map/rev_keys.rs | 2 +- src/database/map/rev_keys_from.rs | 12 +- src/database/map/rev_keys_prefix.rs | 10 +- src/database/map/rev_stream.rs | 2 +- 
src/database/map/rev_stream_from.rs | 12 +- src/database/map/rev_stream_prefix.rs | 10 +- src/database/map/stream.rs | 2 +- src/database/map/stream_from.rs | 12 +- src/database/map/stream_prefix.rs | 10 +- src/database/maps.rs | 2 +- src/database/mod.rs | 10 +- src/database/pool.rs | 9 +- src/database/pool/configure.rs | 3 +- src/database/ser.rs | 4 +- src/database/stream.rs | 4 +- src/database/stream/items.rs | 4 +- src/database/stream/items_rev.rs | 4 +- src/database/stream/keys.rs | 4 +- src/database/stream/keys_rev.rs | 4 +- src/database/tests.rs | 7 +- src/database/watchers.rs | 2 +- src/macros/admin.rs | 6 +- src/macros/cargo.rs | 2 +- src/macros/config.rs | 8 +- src/macros/implement.rs | 2 +- src/macros/mod.rs | 3 +- src/macros/refutable.rs | 4 +- src/macros/utils.rs | 2 +- src/main/clap.rs | 2 +- src/main/logging.rs | 6 +- src/main/main.rs | 4 +- src/main/mods.rs | 4 +- src/main/runtime.rs | 5 +- src/main/sentry.rs | 8 +- src/main/server.rs | 2 +- src/router/layers.rs | 18 +- src/router/request.rs | 4 +- src/router/router.rs | 2 +- src/router/run.rs | 4 +- src/router/serve/mod.rs | 2 +- src/router/serve/plain.rs | 6 +- src/router/serve/tls.rs | 4 +- src/router/serve/unix.rs | 10 +- src/service/account_data/mod.rs | 9 +- src/service/admin/console.rs | 8 +- src/service/admin/create.rs | 4 +- src/service/admin/execute.rs | 4 +- src/service/admin/grant.rs | 6 +- src/service/admin/mod.rs | 6 +- src/service/appservice/mod.rs | 6 +- src/service/appservice/registration_info.rs | 2 +- src/service/client/mod.rs | 9 +- src/service/config/mod.rs | 5 +- src/service/emergency/mod.rs | 6 +- src/service/federation/execute.rs | 12 +- src/service/federation/mod.rs | 2 +- src/service/globals/data.rs | 2 +- src/service/globals/mod.rs | 2 +- src/service/key_backups/mod.rs | 7 +- src/service/manager.rs | 4 +- src/service/media/blurhash.rs | 2 +- src/service/media/data.rs | 7 +- src/service/media/migrations.rs | 6 +- src/service/media/mod.rs | 70 ++-- src/service/media/preview.rs | 2 +- src/service/media/remote.rs | 10 +- src/service/media/tests.rs | 2 +- src/service/media/thumbnail.rs | 18 +- src/service/migrations.rs | 12 +- src/service/mod.rs | 2 +- src/service/presence/data.rs | 9 +- src/service/presence/mod.rs | 8 +- src/service/presence/presence.rs | 4 +- src/service/pusher/mod.rs | 17 +- src/service/resolver/actual.rs | 26 +- src/service/resolver/cache.rs | 6 +- src/service/resolver/dns.rs | 4 +- src/service/resolver/mod.rs | 4 +- src/service/resolver/tests.rs | 2 +- src/service/rooms/alias/mod.rs | 15 +- src/service/rooms/alias/remote.rs | 4 +- src/service/rooms/auth_chain/data.rs | 2 +- src/service/rooms/auth_chain/mod.rs | 8 +- src/service/rooms/directory/mod.rs | 4 +- src/service/rooms/event_handler/acl_check.rs | 4 +- .../fetch_and_handle_outliers.rs | 23 +- src/service/rooms/event_handler/fetch_prev.rs | 90 ++-- .../rooms/event_handler/fetch_state.rs | 8 +- .../event_handler/handle_incoming_pdu.rs | 8 +- .../rooms/event_handler/handle_outlier_pdu.rs | 10 +- .../rooms/event_handler/handle_prev_pdu.rs | 4 +- src/service/rooms/event_handler/mod.rs | 8 +- .../rooms/event_handler/parse_incoming_pdu.rs | 2 +- .../rooms/event_handler/resolve_state.rs | 13 +- .../rooms/event_handler/state_at_incoming.rs | 5 +- .../event_handler/upgrade_outlier_pdu.rs | 9 +- src/service/rooms/lazy_loading/mod.rs | 9 +- src/service/rooms/metadata/mod.rs | 4 +- src/service/rooms/outlier/mod.rs | 2 +- src/service/rooms/pdu_metadata/data.rs | 10 +- src/service/rooms/pdu_metadata/mod.rs | 8 +- 
src/service/rooms/read_receipt/data.rs | 8 +- src/service/rooms/read_receipt/mod.rs | 24 +- src/service/rooms/search/mod.rs | 16 +- src/service/rooms/short/mod.rs | 6 +- src/service/rooms/spaces/mod.rs | 18 +- src/service/rooms/spaces/pagination_token.rs | 2 +- src/service/rooms/spaces/tests.rs | 4 +- src/service/rooms/state/mod.rs | 132 +++--- src/service/rooms/state_accessor/mod.rs | 13 +- .../rooms/state_accessor/room_state.rs | 4 +- .../rooms/state_accessor/server_can.rs | 8 +- src/service/rooms/state_accessor/state.rs | 15 +- src/service/rooms/state_accessor/user_can.rs | 64 +-- src/service/rooms/state_cache/mod.rs | 18 +- src/service/rooms/state_compressor/mod.rs | 5 +- src/service/rooms/threads/mod.rs | 20 +- src/service/rooms/timeline/data.rs | 9 +- src/service/rooms/timeline/mod.rs | 23 +- src/service/rooms/typing/mod.rs | 9 +- src/service/rooms/user/mod.rs | 4 +- src/service/sending/appservice.rs | 4 +- src/service/sending/data.rs | 11 +- src/service/sending/mod.rs | 12 +- src/service/sending/sender.rs | 30 +- src/service/server_keys/acquire.rs | 8 +- src/service/server_keys/get.rs | 8 +- src/service/server_keys/keypair.rs | 2 +- src/service/server_keys/mod.rs | 11 +- src/service/server_keys/request.rs | 14 +- src/service/server_keys/sign.rs | 2 +- src/service/server_keys/verify.rs | 4 +- src/service/service.rs | 2 +- src/service/services.rs | 2 +- src/service/sync/mod.rs | 4 +- src/service/sync/watch.rs | 4 +- src/service/transaction_ids/mod.rs | 2 +- src/service/uiaa/mod.rs | 10 +- src/service/updates/mod.rs | 6 +- src/service/users/mod.rs | 24 +- 320 files changed, 2212 insertions(+), 2039 deletions(-) diff --git a/src/admin/appservice/commands.rs b/src/admin/appservice/commands.rs index 4f02531a..88f28431 100644 --- a/src/admin/appservice/commands.rs +++ b/src/admin/appservice/commands.rs @@ -1,6 +1,6 @@ use ruma::{api::appservice::Registration, events::room::message::RoomMessageEventContent}; -use crate::{admin_command, Result}; +use crate::{Result, admin_command}; #[admin_command] pub(super) async fn register(&self) -> Result { diff --git a/src/admin/command.rs b/src/admin/command.rs index 5ad9e581..5df980d6 100644 --- a/src/admin/command.rs +++ b/src/admin/command.rs @@ -3,9 +3,9 @@ use std::{fmt, time::SystemTime}; use conduwuit::Result; use conduwuit_service::Services; use futures::{ + Future, FutureExt, io::{AsyncWriteExt, BufWriter}, lock::Mutex, - Future, FutureExt, }; use ruma::EventId; @@ -21,7 +21,7 @@ impl Command<'_> { pub(crate) fn write_fmt( &self, arguments: fmt::Arguments<'_>, - ) -> impl Future + Send + '_ { + ) -> impl Future + Send + '_ + use<'_> { let buf = format!("{arguments}"); self.output.lock().then(|mut output| async move { output.write_all(buf.as_bytes()).await.map_err(Into::into) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index dcf9879c..c6f6a170 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -6,19 +6,19 @@ use std::{ }; use conduwuit::{ - debug_error, err, info, trace, utils, + Error, PduEvent, PduId, RawPduId, Result, debug_error, err, info, trace, utils, utils::{ stream::{IterStream, ReadyExt}, string::EMPTY, }, - warn, Error, PduEvent, PduId, RawPduId, Result, + warn, }; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ - api::{client::error::ErrorKind, federation::event::get_room_state}, - events::room::message::RoomMessageEventContent, CanonicalJsonObject, EventId, OwnedEventId, OwnedRoomOrAliasId, RoomId, RoomVersionId, ServerName, + api::{client::error::ErrorKind, 
federation::event::get_room_state}, + events::room::message::RoomMessageEventContent, }; use service::rooms::{ short::{ShortEventId, ShortRoomId}, @@ -209,18 +209,21 @@ pub(super) async fn get_remote_pdu_list( for pdu in list { if force { - if let Err(e) = self.get_remote_pdu(Box::from(pdu), server.clone()).await { - failed_count = failed_count.saturating_add(1); - self.services - .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "Failed to get remote PDU, ignoring error: {e}" - ))) - .await - .ok(); - warn!("Failed to get remote PDU, ignoring error: {e}"); - } else { - success_count = success_count.saturating_add(1); + match self.get_remote_pdu(Box::from(pdu), server.clone()).await { + | Err(e) => { + failed_count = failed_count.saturating_add(1); + self.services + .admin + .send_message(RoomMessageEventContent::text_plain(format!( + "Failed to get remote PDU, ignoring error: {e}" + ))) + .await + .ok(); + warn!("Failed to get remote PDU, ignoring error: {e}"); + }, + | _ => { + success_count = success_count.saturating_add(1); + }, } } else { self.get_remote_pdu(Box::from(pdu), server.clone()).await?; @@ -957,7 +960,7 @@ pub(super) async fn database_stats( self.services .db .iter() - .filter(|(&name, _)| map_name.is_empty() || map_name == name) + .filter(|&(&name, _)| map_name.is_empty() || map_name == name) .try_stream() .try_for_each(|(&name, map)| { let res = map.property(&property).expect("invalid property"); diff --git a/src/admin/debug/tester.rs b/src/admin/debug/tester.rs index 5200fa0d..005ee775 100644 --- a/src/admin/debug/tester.rs +++ b/src/admin/debug/tester.rs @@ -1,7 +1,7 @@ use conduwuit::Err; use ruma::events::room::message::RoomMessageEventContent; -use crate::{admin_command, admin_command_dispatch, Result}; +use crate::{Result, admin_command, admin_command_dispatch}; #[admin_command_dispatch] #[derive(Debug, clap::Subcommand)] diff --git a/src/admin/federation/commands.rs b/src/admin/federation/commands.rs index 13bc8da4..240ffa6a 100644 --- a/src/admin/federation/commands.rs +++ b/src/admin/federation/commands.rs @@ -3,7 +3,7 @@ use std::fmt::Write; use conduwuit::Result; use futures::StreamExt; use ruma::{ - events::room::message::RoomMessageEventContent, OwnedRoomId, RoomId, ServerName, UserId, + OwnedRoomId, RoomId, ServerName, UserId, events::room::message::RoomMessageEventContent, }; use crate::{admin_command, get_room_info}; diff --git a/src/admin/media/commands.rs b/src/admin/media/commands.rs index 3d0a9473..aeefa9f2 100644 --- a/src/admin/media/commands.rs +++ b/src/admin/media/commands.rs @@ -1,12 +1,12 @@ use std::time::Duration; use conduwuit::{ - debug, debug_info, debug_warn, error, info, trace, utils::time::parse_timepoint_ago, Result, + Result, debug, debug_info, debug_warn, error, info, trace, utils::time::parse_timepoint_ago, }; use conduwuit_service::media::Dim; use ruma::{ - events::room::message::RoomMessageEventContent, EventId, Mxc, MxcUri, OwnedMxcUri, - OwnedServerName, ServerName, + EventId, Mxc, MxcUri, OwnedMxcUri, OwnedServerName, ServerName, + events::room::message::RoomMessageEventContent, }; use crate::{admin_command, utils::parse_local_user_id}; @@ -41,103 +41,106 @@ pub(super) async fn delete( let mut mxc_urls = Vec::with_capacity(4); // parsing the PDU for any MXC URLs begins here - if let Ok(event_json) = self.services.rooms.timeline.get_pdu_json(&event_id).await { - if let Some(content_key) = event_json.get("content") { - debug!("Event ID has \"content\"."); - let content_obj = content_key.as_object(); + match 
self.services.rooms.timeline.get_pdu_json(&event_id).await { + | Ok(event_json) => { + if let Some(content_key) = event_json.get("content") { + debug!("Event ID has \"content\"."); + let content_obj = content_key.as_object(); - if let Some(content) = content_obj { - // 1. attempts to parse the "url" key - debug!("Attempting to go into \"url\" key for main media file"); - if let Some(url) = content.get("url") { - debug!("Got a URL in the event ID {event_id}: {url}"); + if let Some(content) = content_obj { + // 1. attempts to parse the "url" key + debug!("Attempting to go into \"url\" key for main media file"); + if let Some(url) = content.get("url") { + debug!("Got a URL in the event ID {event_id}: {url}"); - if url.to_string().starts_with("\"mxc://") { - debug!("Pushing URL {url} to list of MXCs to delete"); - let final_url = url.to_string().replace('"', ""); - mxc_urls.push(final_url); - } else { - info!( - "Found a URL in the event ID {event_id} but did not start with \ - mxc://, ignoring" - ); - } - } - - // 2. attempts to parse the "info" key - debug!("Attempting to go into \"info\" key for thumbnails"); - if let Some(info_key) = content.get("info") { - debug!("Event ID has \"info\"."); - let info_obj = info_key.as_object(); - - if let Some(info) = info_obj { - if let Some(thumbnail_url) = info.get("thumbnail_url") { - debug!("Found a thumbnail_url in info key: {thumbnail_url}"); - - if thumbnail_url.to_string().starts_with("\"mxc://") { - debug!( - "Pushing thumbnail URL {thumbnail_url} to list of MXCs \ - to delete" - ); - let final_thumbnail_url = - thumbnail_url.to_string().replace('"', ""); - mxc_urls.push(final_thumbnail_url); - } else { - info!( - "Found a thumbnail URL in the event ID {event_id} but \ - did not start with mxc://, ignoring" - ); - } + if url.to_string().starts_with("\"mxc://") { + debug!("Pushing URL {url} to list of MXCs to delete"); + let final_url = url.to_string().replace('"', ""); + mxc_urls.push(final_url); } else { info!( - "No \"thumbnail_url\" key in \"info\" key, assuming no \ - thumbnails." + "Found a URL in the event ID {event_id} but did not start \ + with mxc://, ignoring" ); } } - } - // 3. attempts to parse the "file" key - debug!("Attempting to go into \"file\" key"); - if let Some(file_key) = content.get("file") { - debug!("Event ID has \"file\"."); - let file_obj = file_key.as_object(); + // 2. 
attempts to parse the "info" key + debug!("Attempting to go into \"info\" key for thumbnails"); + if let Some(info_key) = content.get("info") { + debug!("Event ID has \"info\"."); + let info_obj = info_key.as_object(); - if let Some(file) = file_obj { - if let Some(url) = file.get("url") { - debug!("Found url in file key: {url}"); + if let Some(info) = info_obj { + if let Some(thumbnail_url) = info.get("thumbnail_url") { + debug!("Found a thumbnail_url in info key: {thumbnail_url}"); - if url.to_string().starts_with("\"mxc://") { - debug!("Pushing URL {url} to list of MXCs to delete"); - let final_url = url.to_string().replace('"', ""); - mxc_urls.push(final_url); + if thumbnail_url.to_string().starts_with("\"mxc://") { + debug!( + "Pushing thumbnail URL {thumbnail_url} to list of \ + MXCs to delete" + ); + let final_thumbnail_url = + thumbnail_url.to_string().replace('"', ""); + mxc_urls.push(final_thumbnail_url); + } else { + info!( + "Found a thumbnail URL in the event ID {event_id} \ + but did not start with mxc://, ignoring" + ); + } } else { info!( - "Found a URL in the event ID {event_id} but did not \ - start with mxc://, ignoring" + "No \"thumbnail_url\" key in \"info\" key, assuming no \ + thumbnails." ); } - } else { - info!("No \"url\" key in \"file\" key."); } } + + // 3. attempts to parse the "file" key + debug!("Attempting to go into \"file\" key"); + if let Some(file_key) = content.get("file") { + debug!("Event ID has \"file\"."); + let file_obj = file_key.as_object(); + + if let Some(file) = file_obj { + if let Some(url) = file.get("url") { + debug!("Found url in file key: {url}"); + + if url.to_string().starts_with("\"mxc://") { + debug!("Pushing URL {url} to list of MXCs to delete"); + let final_url = url.to_string().replace('"', ""); + mxc_urls.push(final_url); + } else { + info!( + "Found a URL in the event ID {event_id} but did not \ + start with mxc://, ignoring" + ); + } + } else { + info!("No \"url\" key in \"file\" key."); + } + } + } + } else { + return Ok(RoomMessageEventContent::text_plain( + "Event ID does not have a \"content\" key or failed parsing the \ + event ID JSON.", + )); } } else { return Ok(RoomMessageEventContent::text_plain( - "Event ID does not have a \"content\" key or failed parsing the event \ - ID JSON.", + "Event ID does not have a \"content\" key, this is not a message or an \ + event type that contains media.", )); } - } else { + }, + | _ => { return Ok(RoomMessageEventContent::text_plain( - "Event ID does not have a \"content\" key, this is not a message or an \ - event type that contains media.", + "Event ID does not exist or is not known to us.", )); - } - } else { - return Ok(RoomMessageEventContent::text_plain( - "Event ID does not exist or is not known to us.", - )); + }, } if mxc_urls.is_empty() { diff --git a/src/admin/processor.rs b/src/admin/processor.rs index eefcdcd6..77a60959 100644 --- a/src/admin/processor.rs +++ b/src/admin/processor.rs @@ -8,7 +8,7 @@ use std::{ use clap::{CommandFactory, Parser}; use conduwuit::{ - debug, error, + Error, Result, debug, error, log::{ capture, capture::Capture, @@ -16,24 +16,24 @@ use conduwuit::{ }, trace, utils::string::{collect_stream, common_prefix}, - warn, Error, Result, + warn, }; -use futures::{future::FutureExt, io::BufWriter, AsyncWriteExt}; +use futures::{AsyncWriteExt, future::FutureExt, io::BufWriter}; use ruma::{ + EventId, events::{ relation::InReplyTo, room::message::{Relation::Reply, RoomMessageEventContent}, }, - EventId, }; use service::{ - admin::{CommandInput, 
CommandOutput, ProcessorFuture, ProcessorResult}, Services, + admin::{CommandInput, CommandOutput, ProcessorFuture, ProcessorResult}, }; use tracing::Level; -use tracing_subscriber::{filter::LevelFilter, EnvFilter}; +use tracing_subscriber::{EnvFilter, filter::LevelFilter}; -use crate::{admin, admin::AdminCommand, Command}; +use crate::{Command, admin, admin::AdminCommand}; #[must_use] pub(super) fn complete(line: &str) -> String { complete_command(AdminCommand::command(), line) } diff --git a/src/admin/query/account_data.rs b/src/admin/query/account_data.rs index bb8ddeff..b2bf5e6d 100644 --- a/src/admin/query/account_data.rs +++ b/src/admin/query/account_data.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, RoomId, UserId}; +use ruma::{RoomId, UserId, events::room::message::RoomMessageEventContent}; use crate::{admin_command, admin_command_dispatch}; diff --git a/src/admin/query/raw.rs b/src/admin/query/raw.rs index 5a6006ec..23f11cc8 100644 --- a/src/admin/query/raw.rs +++ b/src/admin/query/raw.rs @@ -2,13 +2,12 @@ use std::{borrow::Cow, collections::BTreeMap, ops::Deref}; use clap::Subcommand; use conduwuit::{ - apply, at, is_zero, + Err, Result, apply, at, is_zero, utils::{ + IterStream, stream::{ReadyExt, TryIgnore, TryParallelExt}, string::EMPTY, - IterStream, }, - Err, Result, }; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::events::room::message::RoomMessageEventContent; diff --git a/src/admin/query/resolver.rs b/src/admin/query/resolver.rs index 08b5d171..10748d88 100644 --- a/src/admin/query/resolver.rs +++ b/src/admin/query/resolver.rs @@ -1,7 +1,7 @@ use clap::Subcommand; -use conduwuit::{utils::time, Result}; +use conduwuit::{Result, utils::time}; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, OwnedServerName}; +use ruma::{OwnedServerName, events::room::message::RoomMessageEventContent}; use crate::{admin_command, admin_command_dispatch}; diff --git a/src/admin/query/room_state_cache.rs b/src/admin/query/room_state_cache.rs index 71dadc99..1de5c02d 100644 --- a/src/admin/query/room_state_cache.rs +++ b/src/admin/query/room_state_cache.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::{Error, Result}; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, RoomId, ServerName, UserId}; +use ruma::{RoomId, ServerName, UserId, events::room::message::RoomMessageEventContent}; use crate::Command; diff --git a/src/admin/query/room_timeline.rs b/src/admin/query/room_timeline.rs index 3fe653e3..6f08aee9 100644 --- a/src/admin/query/room_timeline.rs +++ b/src/admin/query/room_timeline.rs @@ -1,7 +1,7 @@ use clap::Subcommand; -use conduwuit::{utils::stream::TryTools, PduCount, Result}; +use conduwuit::{PduCount, Result, utils::stream::TryTools}; use futures::TryStreamExt; -use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomOrAliasId}; +use ruma::{OwnedRoomOrAliasId, events::room::message::RoomMessageEventContent}; use crate::{admin_command, admin_command_dispatch}; diff --git a/src/admin/query/sending.rs b/src/admin/query/sending.rs index 8c6fb25f..a148f718 100644 --- a/src/admin/query/sending.rs +++ b/src/admin/query/sending.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, ServerName, UserId}; +use ruma::{ServerName, UserId, events::room::message::RoomMessageEventContent}; use 
service::sending::Destination; use crate::Command; diff --git a/src/admin/query/short.rs b/src/admin/query/short.rs index 7f0f3449..0957c15e 100644 --- a/src/admin/query/short.rs +++ b/src/admin/query/short.rs @@ -1,6 +1,6 @@ use clap::Subcommand; use conduwuit::Result; -use ruma::{events::room::message::RoomMessageEventContent, OwnedEventId, OwnedRoomOrAliasId}; +use ruma::{OwnedEventId, OwnedRoomOrAliasId, events::room::message::RoomMessageEventContent}; use crate::{admin_command, admin_command_dispatch}; diff --git a/src/admin/query/users.rs b/src/admin/query/users.rs index c517d9dd..5995bc62 100644 --- a/src/admin/query/users.rs +++ b/src/admin/query/users.rs @@ -2,7 +2,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::stream::StreamExt; use ruma::{ - events::room::message::RoomMessageEventContent, OwnedDeviceId, OwnedRoomId, OwnedUserId, + OwnedDeviceId, OwnedRoomId, OwnedUserId, events::room::message::RoomMessageEventContent, }; use crate::{admin_command, admin_command_dispatch}; diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index d3b956e1..6262f33e 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -4,10 +4,10 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; use ruma::{ - events::room::message::RoomMessageEventContent, OwnedRoomAliasId, OwnedRoomId, RoomId, + OwnedRoomAliasId, OwnedRoomId, RoomId, events::room::message::RoomMessageEventContent, }; -use crate::{escape_html, Command}; +use crate::{Command, escape_html}; #[derive(Debug, Subcommand)] pub(crate) enum RoomAliasCommand { diff --git a/src/admin/room/commands.rs b/src/admin/room/commands.rs index b58d04c5..b5c303c8 100644 --- a/src/admin/room/commands.rs +++ b/src/admin/room/commands.rs @@ -1,8 +1,8 @@ use conduwuit::Result; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId}; +use ruma::{OwnedRoomId, events::room::message::RoomMessageEventContent}; -use crate::{admin_command, get_room_info, PAGE_SIZE}; +use crate::{PAGE_SIZE, admin_command, get_room_info}; #[admin_command] pub(super) async fn list_rooms( diff --git a/src/admin/room/directory.rs b/src/admin/room/directory.rs index 791b9204..e9c23a1d 100644 --- a/src/admin/room/directory.rs +++ b/src/admin/room/directory.rs @@ -1,9 +1,9 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, RoomId}; +use ruma::{RoomId, events::room::message::RoomMessageEventContent}; -use crate::{get_room_info, Command, PAGE_SIZE}; +use crate::{Command, PAGE_SIZE, get_room_info}; #[derive(Debug, Subcommand)] pub(crate) enum RoomDirectoryCommand { diff --git a/src/admin/room/info.rs b/src/admin/room/info.rs index 34abf8a9..a39728fe 100644 --- a/src/admin/room/info.rs +++ b/src/admin/room/info.rs @@ -1,7 +1,7 @@ use clap::Subcommand; -use conduwuit::{utils::ReadyExt, Result}; +use conduwuit::{Result, utils::ReadyExt}; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, RoomId}; +use ruma::{RoomId, events::room::message::RoomMessageEventContent}; use crate::{admin_command, admin_command_dispatch}; diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index bf54505e..ee132590 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -1,14 +1,14 @@ use api::client::leave_room; use clap::Subcommand; use conduwuit::{ - debug, error, info, + Result, debug, error, info, utils::{IterStream, ReadyExt}, - warn, Result, + warn, }; use 
futures::StreamExt; use ruma::{ - events::room::message::RoomMessageEventContent, OwnedRoomId, RoomAliasId, RoomId, - RoomOrAliasId, + OwnedRoomId, RoomAliasId, RoomId, RoomOrAliasId, + events::room::message::RoomMessageEventContent, }; use crate::{admin_command, admin_command_dispatch, get_room_info}; @@ -124,41 +124,42 @@ async fn ban_room( locally, if not using get_alias_helper to fetch room ID remotely" ); - let room_id = if let Ok(room_id) = self + let room_id = match self .services .rooms .alias .resolve_local_alias(room_alias) .await { - room_id - } else { - debug!( - "We don't have this room alias to a room ID locally, attempting to fetch room \ - ID over federation" - ); + | Ok(room_id) => room_id, + | _ => { + debug!( + "We don't have this room alias to a room ID locally, attempting to fetch \ + room ID over federation" + ); - match self - .services - .rooms - .alias - .resolve_alias(room_alias, None) - .await - { - | Ok((room_id, servers)) => { - debug!( - ?room_id, - ?servers, - "Got federation response fetching room ID for {room_id}" - ); - room_id - }, - | Err(e) => { - return Ok(RoomMessageEventContent::notice_plain(format!( - "Failed to resolve room alias {room_alias} to a room ID: {e}" - ))); - }, - } + match self + .services + .rooms + .alias + .resolve_alias(room_alias, None) + .await + { + | Ok((room_id, servers)) => { + debug!( + ?room_id, + ?servers, + "Got federation response fetching room ID for {room_id}" + ); + room_id + }, + | Err(e) => { + return Ok(RoomMessageEventContent::notice_plain(format!( + "Failed to resolve room alias {room_alias} to a room ID: {e}" + ))); + }, + } + }, }; self.services.rooms.metadata.ban_room(&room_id, true); @@ -321,51 +322,55 @@ async fn ban_list_of_rooms( if room_alias_or_id.is_room_alias_id() { match RoomAliasId::parse(room_alias_or_id) { | Ok(room_alias) => { - let room_id = if let Ok(room_id) = self + let room_id = match self .services .rooms .alias .resolve_local_alias(room_alias) .await { - room_id - } else { - debug!( - "We don't have this room alias to a room ID locally, \ - attempting to fetch room ID over federation" - ); + | Ok(room_id) => room_id, + | _ => { + debug!( + "We don't have this room alias to a room ID locally, \ + attempting to fetch room ID over federation" + ); - match self - .services - .rooms - .alias - .resolve_alias(room_alias, None) - .await - { - | Ok((room_id, servers)) => { - debug!( - ?room_id, - ?servers, - "Got federation response fetching room ID for {room}", - ); - room_id - }, - | Err(e) => { - // don't fail if force blocking - if force { - warn!( - "Failed to resolve room alias {room} to a room \ - ID: {e}" + match self + .services + .rooms + .alias + .resolve_alias(room_alias, None) + .await + { + | Ok((room_id, servers)) => { + debug!( + ?room_id, + ?servers, + "Got federation response fetching room ID for \ + {room}", ); - continue; - } + room_id + }, + | Err(e) => { + // don't fail if force blocking + if force { + warn!( + "Failed to resolve room alias {room} to a \ + room ID: {e}" + ); + continue; + } - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to resolve room alias {room} to a room ID: \ - {e}" - ))); - }, - } + return Ok(RoomMessageEventContent::text_plain( + format!( + "Failed to resolve room alias {room} to a \ + room ID: {e}" + ), + )); + }, + } + }, }; room_ids.push(room_id); @@ -537,41 +542,42 @@ async fn unban_room( locally, if not using get_alias_helper to fetch room ID remotely" ); - let room_id = if let Ok(room_id) = self + let room_id = match self 
.services .rooms .alias .resolve_local_alias(room_alias) .await { - room_id - } else { - debug!( - "We don't have this room alias to a room ID locally, attempting to fetch room \ - ID over federation" - ); + | Ok(room_id) => room_id, + | _ => { + debug!( + "We don't have this room alias to a room ID locally, attempting to fetch \ + room ID over federation" + ); - match self - .services - .rooms - .alias - .resolve_alias(room_alias, None) - .await - { - | Ok((room_id, servers)) => { - debug!( - ?room_id, - ?servers, - "Got federation response fetching room ID for room {room}" - ); - room_id - }, - | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to resolve room alias {room} to a room ID: {e}" - ))); - }, - } + match self + .services + .rooms + .alias + .resolve_alias(room_alias, None) + .await + { + | Ok((room_id, servers)) => { + debug!( + ?room_id, + ?servers, + "Got federation response fetching room ID for room {room}" + ); + room_id + }, + | Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "Failed to resolve room alias {room} to a room ID: {e}" + ))); + }, + } + }, }; self.services.rooms.metadata.ban_room(&room_id, false); diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index d4cfa7d5..17bf9ec0 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -1,6 +1,6 @@ use std::{fmt::Write, path::PathBuf, sync::Arc}; -use conduwuit::{info, utils::time, warn, Err, Result}; +use conduwuit::{Err, Result, info, utils::time, warn}; use ruma::events::room::message::RoomMessageEventContent; use crate::admin_command; diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 64767a36..8cb8edc3 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -2,23 +2,23 @@ use std::{collections::BTreeMap, fmt::Write as _}; use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room}; use conduwuit::{ - debug_warn, error, info, is_equal_to, + PduBuilder, Result, debug_warn, error, info, is_equal_to, utils::{self, ReadyExt}, - warn, PduBuilder, Result, + warn, }; use conduwuit_api::client::{leave_all_rooms, update_avatar_url, update_displayname}; use futures::StreamExt; use ruma::{ + EventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId, UserId, events::{ + RoomAccountDataEventType, StateEventType, room::{ message::RoomMessageEventContent, power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, redaction::RoomRedactionEventContent, }, tag::{TagEvent, TagEventContent, TagInfo}, - RoomAccountDataEventType, StateEventType, }, - EventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId, UserId, }; use crate::{ diff --git a/src/admin/utils.rs b/src/admin/utils.rs index eba33fba..a2696c50 100644 --- a/src/admin/utils.rs +++ b/src/admin/utils.rs @@ -1,4 +1,4 @@ -use conduwuit_core::{err, Err, Result}; +use conduwuit_core::{Err, Result, err}; use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; use service::Services; diff --git a/src/api/client/account.rs b/src/api/client/account.rs index cb25b276..cb49a6db 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -3,34 +3,35 @@ use std::fmt::Write; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - debug_info, error, info, is_equal_to, utils, utils::ReadyExt, warn, Error, PduBuilder, Result, + Error, PduBuilder, Result, debug_info, error, info, is_equal_to, utils, utils::ReadyExt, warn, }; use futures::{FutureExt, StreamExt}; use 
register::RegistrationKind; use ruma::{ + OwnedRoomId, UserId, api::client::{ account::{ - change_password, check_registration_token_validity, deactivate, get_3pids, - get_username_availability, + ThirdPartyIdRemovalStatus, change_password, check_registration_token_validity, + deactivate, get_3pids, get_username_availability, register::{self, LoginType}, request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn, - whoami, ThirdPartyIdRemovalStatus, + whoami, }, error::ErrorKind, uiaa::{AuthFlow, AuthType, UiaaInfo}, }, events::{ + GlobalAccountDataEventType, StateEventType, room::{ message::RoomMessageEventContent, power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, }, - GlobalAccountDataEventType, StateEventType, }, - push, OwnedRoomId, UserId, + push, }; use service::Services; -use super::{join_room_by_id_helper, DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; +use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH, join_room_by_id_helper}; use crate::Ruma; const RANDOM_USER_ID_LENGTH: usize = 10; @@ -218,12 +219,20 @@ pub(crate) async fn register_route( }; if body.body.login_type == Some(LoginType::ApplicationService) { - if let Some(ref info) = body.appservice_info { - if !info.is_user_match(&user_id) { - return Err(Error::BadRequest(ErrorKind::Exclusive, "User is not in namespace.")); - } - } else { - return Err(Error::BadRequest(ErrorKind::MissingToken, "Missing appservice token.")); + match body.appservice_info { + | Some(ref info) => + if !info.is_user_match(&user_id) { + return Err(Error::BadRequest( + ErrorKind::Exclusive, + "User is not in namespace.", + )); + }, + | _ => { + return Err(Error::BadRequest( + ErrorKind::MissingToken, + "Missing appservice token.", + )); + }, } } else if services.appservice.is_exclusive_user_id(&user_id).await { return Err(Error::BadRequest(ErrorKind::Exclusive, "User ID reserved by appservice.")); @@ -256,33 +265,39 @@ pub(crate) async fn register_route( }; if !skip_auth { - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth( - &UserId::parse_with_server_name("", services.globals.server_name()) - .expect("we know this is valid"), - "".into(), - auth, - &uiaainfo, - ) - .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services.uiaa.create( - &UserId::parse_with_server_name("", services.globals.server_name()) - .expect("we know this is valid"), - "".into(), - &uiaainfo, - &json, - ); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth( + &UserId::parse_with_server_name("", services.globals.server_name()) + .expect("we know this is valid"), + "".into(), + auth, + &uiaainfo, + ) + .await?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! 
+ }, + | _ => match body.json_body { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services.uiaa.create( + &UserId::parse_with_server_name("", services.globals.server_name()) + .expect("we know this is valid"), + "".into(), + &uiaainfo, + &json, + ); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + }, + }, } } @@ -463,7 +478,7 @@ pub(crate) async fn register_route( } if let Some(room_server_name) = room.server_name() { - if let Err(e) = join_room_by_id_helper( + match join_room_by_id_helper( &services, &user_id, &room_id, @@ -475,10 +490,15 @@ pub(crate) async fn register_route( .boxed() .await { - // don't return this error so we don't fail registrations - error!("Failed to automatically join room {room} for user {user_id}: {e}"); - } else { - info!("Automatically joined room {room} for user {user_id}"); + | Err(e) => { + // don't return this error so we don't fail registrations + error!( + "Failed to automatically join room {room} for user {user_id}: {e}" + ); + }, + | _ => { + info!("Automatically joined room {room} for user {user_id}"); + }, }; } } @@ -532,26 +552,32 @@ pub(crate) async fn change_password_route( auth_error: None, }; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + // Success! + }, + | _ => match body.json_body { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + }, + }, } services @@ -636,25 +662,31 @@ pub(crate) async fn deactivate_route( auth_error: None, }; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! 
+ }, + | _ => match body.json_body { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + }, + }, } // Remove profile pictures and display name @@ -809,7 +841,7 @@ pub async fn full_user_deactivate( power_levels_content.users.remove(user_id); // ignore errors so deactivation doesn't fail - if let Err(e) = services + match services .rooms .timeline .build_and_append_pdu( @@ -820,9 +852,12 @@ pub async fn full_user_deactivate( ) .await { - warn!(%room_id, %user_id, "Failed to demote user's own power level: {e}"); - } else { - info!("Demoted {user_id} in {room_id} as part of account deactivation"); + | Err(e) => { + warn!(%room_id, %user_id, "Failed to demote user's own power level: {e}"); + }, + | _ => { + info!("Demoted {user_id} in {room_id} as part of account deactivation"); + }, } } } diff --git a/src/api/client/account_data.rs b/src/api/client/account_data.rs index 9f84f227..60c18b37 100644 --- a/src/api/client/account_data.rs +++ b/src/api/client/account_data.rs @@ -1,6 +1,7 @@ use axum::extract::State; -use conduwuit::{err, Err}; +use conduwuit::{Err, err}; use ruma::{ + RoomId, UserId, api::client::config::{ get_global_account_data, get_room_account_data, set_global_account_data, set_room_account_data, @@ -10,12 +11,11 @@ use ruma::{ GlobalAccountDataEventType, RoomAccountDataEventType, }, serde::Raw, - RoomId, UserId, }; use serde::Deserialize; use serde_json::{json, value::RawValue as RawJsonValue}; -use crate::{service::Services, Result, Ruma}; +use crate::{Result, Ruma, service::Services}; /// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}` /// diff --git a/src/api/client/alias.rs b/src/api/client/alias.rs index e1af416e..319e5141 100644 --- a/src/api/client/alias.rs +++ b/src/api/client/alias.rs @@ -1,10 +1,10 @@ use axum::extract::State; -use conduwuit::{debug, Err, Result}; +use conduwuit::{Err, Result, debug}; use futures::StreamExt; use rand::seq::SliceRandom; use ruma::{ - api::client::alias::{create_alias, delete_alias, get_alias}, OwnedServerName, RoomAliasId, RoomId, + api::client::alias::{create_alias, delete_alias, get_alias}, }; use service::Services; @@ -128,18 +128,26 @@ async fn room_available_servers( // insert our server as the very first choice if in list, else check if we can // prefer the room alias server first - if let Some(server_index) = servers + match servers .iter() .position(|server_name| services.globals.server_is_ours(server_name)) { - servers.swap_remove(server_index); - servers.insert(0, services.globals.server_name().to_owned()); - } else if let Some(alias_server_index) = servers - .iter() - .position(|server| server == room_alias.server_name()) - { - servers.swap_remove(alias_server_index); - servers.insert(0, room_alias.server_name().into()); + | Some(server_index) => { + servers.swap_remove(server_index); + servers.insert(0, services.globals.server_name().to_owned()); + }, + | _ => { + match servers + .iter() + .position(|server| server == room_alias.server_name()) + { + | Some(alias_server_index) => { + servers.swap_remove(alias_server_index); + servers.insert(0, room_alias.server_name().into()); + }, + | _ => {}, + } + }, } servers diff --git a/src/api/client/appservice.rs 
b/src/api/client/appservice.rs index e4071ab0..84955309 100644 --- a/src/api/client/appservice.rs +++ b/src/api/client/appservice.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduwuit::{err, Err, Result}; +use conduwuit::{Err, Result, err}; use ruma::api::{appservice::ping, client::appservice::request_ping}; use crate::Ruma; diff --git a/src/api/client/backup.rs b/src/api/client/backup.rs index d330952d..714e3f86 100644 --- a/src/api/client/backup.rs +++ b/src/api/client/backup.rs @@ -1,6 +1,7 @@ use axum::extract::State; -use conduwuit::{err, Err}; +use conduwuit::{Err, err}; use ruma::{ + UInt, api::client::backup::{ add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, create_backup_version, delete_backup_keys, delete_backup_keys_for_room, @@ -8,7 +9,6 @@ use ruma::{ get_backup_keys_for_room, get_backup_keys_for_session, get_latest_backup_info, update_backup_version, }, - UInt, }; use crate::{Result, Ruma}; diff --git a/src/api/client/capabilities.rs b/src/api/client/capabilities.rs index 7188aa23..e20af21b 100644 --- a/src/api/client/capabilities.rs +++ b/src/api/client/capabilities.rs @@ -3,11 +3,11 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{Result, Server}; use ruma::{ + RoomVersionId, api::client::discovery::get_capabilities::{ self, Capabilities, GetLoginTokenCapability, RoomVersionStability, RoomVersionsCapability, ThirdPartyIdChangesCapability, }, - RoomVersionId, }; use serde_json::json; diff --git a/src/api/client/context.rs b/src/api/client/context.rs index 7256683f..3f16c850 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -1,23 +1,22 @@ use axum::extract::State; use conduwuit::{ - at, err, ref_at, + Err, PduEvent, Result, at, err, ref_at, utils::{ + IterStream, future::TryExtExt, stream::{BroadbandExt, ReadyExt, TryIgnore, WidebandExt}, - IterStream, }, - Err, PduEvent, Result, }; use futures::{ - future::{join, join3, try_join3, OptionFuture}, FutureExt, StreamExt, TryFutureExt, TryStreamExt, + future::{OptionFuture, join, join3, try_join3}, }; -use ruma::{api::client::context::get_context, events::StateEventType, OwnedEventId, UserId}; +use ruma::{OwnedEventId, UserId, api::client::context::get_context, events::StateEventType}; use service::rooms::{lazy_loading, lazy_loading::Options, short::ShortStateKey}; use crate::{ - client::message::{event_filter, ignored_filter, lazy_loading_witness, visibility_filter}, Ruma, + client::message::{event_filter, ignored_filter, lazy_loading_witness, visibility_filter}, }; const LIMIT_MAX: usize = 100; diff --git a/src/api/client/device.rs b/src/api/client/device.rs index bb0773dd..6a845aed 100644 --- a/src/api/client/device.rs +++ b/src/api/client/device.rs @@ -1,18 +1,18 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{err, Err}; +use conduwuit::{Err, err}; use futures::StreamExt; use ruma::{ + MilliSecondsSinceUnixEpoch, api::client::{ device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, error::ErrorKind, uiaa::{AuthFlow, AuthType, UiaaInfo}, }, - MilliSecondsSinceUnixEpoch, }; use super::SESSION_ID_LENGTH; -use crate::{utils, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, utils}; /// # `GET /_matrix/client/r0/devices` /// @@ -107,25 +107,31 @@ pub(crate) async fn delete_device_route( auth_error: None, }; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + match &body.auth { + | 
Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err!(Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + if !worked { + return Err!(Uiaa(uiaainfo)); + } + // Success! + }, + | _ => match body.json_body { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json); - return Err!(Uiaa(uiaainfo)); - } else { - return Err!(Request(NotJson("Not json."))); + return Err!(Uiaa(uiaainfo)); + }, + | _ => { + return Err!(Request(NotJson("Not json."))); + }, + }, } services @@ -164,25 +170,31 @@ pub(crate) async fn delete_devices_route( auth_error: None, }; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! + }, + | _ => match body.json_body { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + }, + }, } for device_id in &body.devices { diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 9166eed9..136c5961 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -1,8 +1,9 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{info, warn, Err, Error, Result}; +use conduwuit::{Err, Error, Result, info, warn}; use futures::{StreamExt, TryFutureExt}; use ruma::{ + OwnedRoomId, RoomId, ServerName, UInt, UserId, api::{ client::{ directory::{ @@ -16,13 +17,13 @@ use ruma::{ }, directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork}, events::{ + StateEventType, room::{ join_rules::{JoinRule, RoomJoinRulesEventContent}, power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, }, - StateEventType, }, - uint, OwnedRoomId, RoomId, ServerName, UInt, UserId, + uint, }; use service::Services; @@ -365,30 +366,34 @@ async fn user_can_publish_room( user_id: &UserId, room_id: &RoomId, ) -> Result { - if let Ok(event) = services + match services .rooms .state_accessor .room_state_get(room_id, &StateEventType::RoomPowerLevels, "") .await { - serde_json::from_str(event.content.get()) + | Ok(event) => serde_json::from_str(event.content.get()) .map_err(|_| Error::bad_database("Invalid event content for m.room.power_levels")) .map(|content: RoomPowerLevelsEventContent| { RoomPowerLevels::from(content) .user_can_send_state(user_id, StateEventType::RoomHistoryVisibility) - }) - } else if let Ok(event) = services - .rooms - .state_accessor - 
.room_state_get(room_id, &StateEventType::RoomCreate, "") - .await - { - Ok(event.sender == user_id) - } else { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "User is not allowed to publish this room", - )); + }), + | _ => { + match services + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomCreate, "") + .await + { + | Ok(event) => Ok(event.sender == user_id), + | _ => { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "User is not allowed to publish this room", + )); + }, + } + }, } } diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 801ae32b..6f20153b 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -1,9 +1,10 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use axum::extract::State; -use conduwuit::{debug, err, info, result::NotFound, utils, Err, Error, Result}; -use futures::{stream::FuturesUnordered, StreamExt}; +use conduwuit::{Err, Error, Result, debug, err, info, result::NotFound, utils}; +use futures::{StreamExt, stream::FuturesUnordered}; use ruma::{ + OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, api::{ client::{ error::ErrorKind, @@ -17,14 +18,13 @@ use ruma::{ }, encryption::CrossSigningKey, serde::Raw, - OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, }; use serde_json::json; use super::SESSION_ID_LENGTH; use crate::{ - service::{users::parse_master_key, Services}, Ruma, + service::{Services, users::parse_master_key}, }; /// # `POST /_matrix/client/r0/keys/upload` @@ -126,7 +126,7 @@ pub(crate) async fn upload_signing_keys_route( auth_error: None, }; - if let Ok(exists) = check_for_new_keys( + match check_for_new_keys( services, sender_user, body.self_signing_key.as_ref(), @@ -136,32 +136,45 @@ pub(crate) async fn upload_signing_keys_route( .await .inspect_err(|e| info!(?e)) { - if let Some(result) = exists { - // No-op, they tried to reupload the same set of keys - // (lost connection for example) - return Ok(result); - } - debug!("Skipping UIA in accordance with MSC3967, the user didn't have any existing keys"); - // Some of the keys weren't found, so we let them upload - } else if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + | Ok(exists) => { + if let Some(result) = exists { + // No-op, they tried to reupload the same set of keys + // (lost connection for example) + return Ok(result); + } + debug!( + "Skipping UIA in accordance with MSC3967, the user didn't have any existing keys" + ); + // Some of the keys weren't found, so we let them upload + }, + | _ => { + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! 
+ }, + | _ => match body.json_body { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + }, + }, + } + }, } services @@ -471,37 +484,40 @@ where .collect(); while let Some((server, response)) = futures.next().await { - if let Ok(response) = response { - for (user, master_key) in response.master_keys { - let (master_key_id, mut master_key) = parse_master_key(&user, &master_key)?; + match response { + | Ok(response) => { + for (user, master_key) in response.master_keys { + let (master_key_id, mut master_key) = parse_master_key(&user, &master_key)?; - if let Ok(our_master_key) = services - .users - .get_key(&master_key_id, sender_user, &user, &allowed_signatures) - .await - { - let (_, mut our_master_key) = parse_master_key(&user, &our_master_key)?; - master_key.signatures.append(&mut our_master_key.signatures); + if let Ok(our_master_key) = services + .users + .get_key(&master_key_id, sender_user, &user, &allowed_signatures) + .await + { + let (_, mut our_master_key) = parse_master_key(&user, &our_master_key)?; + master_key.signatures.append(&mut our_master_key.signatures); + } + let json = serde_json::to_value(master_key).expect("to_value always works"); + let raw = serde_json::from_value(json).expect("Raw::from_value always works"); + services + .users + .add_cross_signing_keys( + &user, &raw, &None, &None, + false, /* Dont notify. A notification would trigger another key + * request resulting in an endless loop */ + ) + .await?; + if let Some(raw) = raw { + master_keys.insert(user.clone(), raw); + } } - let json = serde_json::to_value(master_key).expect("to_value always works"); - let raw = serde_json::from_value(json).expect("Raw::from_value always works"); - services - .users - .add_cross_signing_keys( - &user, &raw, &None, &None, - false, /* Dont notify. 
A notification would trigger another key request - * resulting in an endless loop */ - ) - .await?; - if let Some(raw) = raw { - master_keys.insert(user.clone(), raw); - } - } - self_signing_keys.extend(response.self_signing_keys); - device_keys.extend(response.device_keys); - } else { - failures.insert(server.to_string(), json!({})); + self_signing_keys.extend(response.self_signing_keys); + device_keys.extend(response.device_keys); + }, + | _ => { + failures.insert(server.to_string(), json!({})); + }, } } diff --git a/src/api/client/media.rs b/src/api/client/media.rs index 0cff8185..94572413 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -3,16 +3,16 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - err, + Err, Result, err, utils::{self, content_disposition::make_content_disposition, math::ruma_from_usize}, - Err, Result, }; use conduwuit_service::{ - media::{Dim, FileMeta, CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, MXC_LENGTH}, Services, + media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta, MXC_LENGTH}, }; use reqwest::Url; use ruma::{ + Mxc, UserId, api::client::{ authenticated_media::{ get_content, get_content_as_filename, get_content_thumbnail, get_media_config, @@ -20,7 +20,6 @@ use ruma::{ }, media::create_content, }, - Mxc, UserId, }; use crate::Ruma; diff --git a/src/api/client/media_legacy.rs b/src/api/client/media_legacy.rs index 4fa0b52e..d9f24f77 100644 --- a/src/api/client/media_legacy.rs +++ b/src/api/client/media_legacy.rs @@ -3,21 +3,20 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - err, + Err, Result, err, utils::{content_disposition::make_content_disposition, math::ruma_from_usize}, - Err, Result, }; -use conduwuit_service::media::{Dim, FileMeta, CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN}; +use conduwuit_service::media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta}; use reqwest::Url; use ruma::{ + Mxc, api::client::media::{ create_content, get_content, get_content_as_filename, get_content_thumbnail, get_media_config, get_media_preview, }, - Mxc, }; -use crate::{client::create_content_route, Ruma, RumaResponse}; +use crate::{Ruma, RumaResponse, client::create_content_route}; /// # `GET /_matrix/media/v3/config` /// @@ -142,46 +141,52 @@ pub(crate) async fn get_content_legacy_route( media_id: &body.media_id, }; - if let Some(FileMeta { - content, - content_type, - content_disposition, - }) = services.media.get(&mxc).await? - { - let content_disposition = - make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None); + match services.media.get(&mxc).await? 
{ + | Some(FileMeta { + content, + content_type, + content_disposition, + }) => { + let content_disposition = make_content_disposition( + content_disposition.as_ref(), + content_type.as_deref(), + None, + ); - Ok(get_content::v3::Response { - file: content.expect("entire file contents"), - content_type: content_type.map(Into::into), - content_disposition: Some(content_disposition), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - } else if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { - let response = services - .media - .fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms) - .await - .map_err(|e| { - err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) - })?; + Ok(get_content::v3::Response { + file: content.expect("entire file contents"), + content_type: content_type.map(Into::into), + content_disposition: Some(content_disposition), + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + }) + }, + | _ => + if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { + let response = services + .media + .fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms) + .await + .map_err(|e| { + err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) + })?; - let content_disposition = make_content_disposition( - response.content_disposition.as_ref(), - response.content_type.as_deref(), - None, - ); + let content_disposition = make_content_disposition( + response.content_disposition.as_ref(), + response.content_type.as_deref(), + None, + ); - Ok(get_content::v3::Response { - file: response.file, - content_type: response.content_type, - content_disposition: Some(content_disposition), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - } else { - Err!(Request(NotFound("Media not found."))) + Ok(get_content::v3::Response { + file: response.file, + content_type: response.content_type, + content_disposition: Some(content_disposition), + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + }) + } else { + Err!(Request(NotFound("Media not found."))) + }, } } @@ -227,49 +232,52 @@ pub(crate) async fn get_content_as_filename_legacy_route( media_id: &body.media_id, }; - if let Some(FileMeta { - content, - content_type, - content_disposition, - }) = services.media.get(&mxc).await? - { - let content_disposition = make_content_disposition( - content_disposition.as_ref(), - content_type.as_deref(), - Some(&body.filename), - ); + match services.media.get(&mxc).await? 
{ + | Some(FileMeta { + content, + content_type, + content_disposition, + }) => { + let content_disposition = make_content_disposition( + content_disposition.as_ref(), + content_type.as_deref(), + Some(&body.filename), + ); - Ok(get_content_as_filename::v3::Response { - file: content.expect("entire file contents"), - content_type: content_type.map(Into::into), - content_disposition: Some(content_disposition), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - } else if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { - let response = services - .media - .fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms) - .await - .map_err(|e| { - err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) - })?; + Ok(get_content_as_filename::v3::Response { + file: content.expect("entire file contents"), + content_type: content_type.map(Into::into), + content_disposition: Some(content_disposition), + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + }) + }, + | _ => + if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { + let response = services + .media + .fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms) + .await + .map_err(|e| { + err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) + })?; - let content_disposition = make_content_disposition( - response.content_disposition.as_ref(), - response.content_type.as_deref(), - None, - ); + let content_disposition = make_content_disposition( + response.content_disposition.as_ref(), + response.content_type.as_deref(), + None, + ); - Ok(get_content_as_filename::v3::Response { - content_disposition: Some(content_disposition), - content_type: response.content_type, - file: response.file, - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - } else { - Err!(Request(NotFound("Media not found."))) + Ok(get_content_as_filename::v3::Response { + content_disposition: Some(content_disposition), + content_type: response.content_type, + file: response.file, + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + }) + } else { + Err!(Request(NotFound("Media not found."))) + }, } } @@ -315,46 +323,52 @@ pub(crate) async fn get_content_thumbnail_legacy_route( }; let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?; - if let Some(FileMeta { - content, - content_type, - content_disposition, - }) = services.media.get_thumbnail(&mxc, &dim).await? - { - let content_disposition = - make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None); + match services.media.get_thumbnail(&mxc, &dim).await? 
{ + | Some(FileMeta { + content, + content_type, + content_disposition, + }) => { + let content_disposition = make_content_disposition( + content_disposition.as_ref(), + content_type.as_deref(), + None, + ); - Ok(get_content_thumbnail::v3::Response { - file: content.expect("entire file contents"), - content_type: content_type.map(Into::into), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - content_disposition: Some(content_disposition), - }) - } else if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { - let response = services - .media - .fetch_remote_thumbnail_legacy(&body) - .await - .map_err(|e| { - err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) - })?; + Ok(get_content_thumbnail::v3::Response { + file: content.expect("entire file contents"), + content_type: content_type.map(Into::into), + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + content_disposition: Some(content_disposition), + }) + }, + | _ => + if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { + let response = services + .media + .fetch_remote_thumbnail_legacy(&body) + .await + .map_err(|e| { + err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) + })?; - let content_disposition = make_content_disposition( - response.content_disposition.as_ref(), - response.content_type.as_deref(), - None, - ); + let content_disposition = make_content_disposition( + response.content_disposition.as_ref(), + response.content_type.as_deref(), + None, + ); - Ok(get_content_thumbnail::v3::Response { - file: response.file, - content_type: response.content_type, - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - content_disposition: Some(content_disposition), - }) - } else { - Err!(Request(NotFound("Media not found."))) + Ok(get_content_thumbnail::v3::Response { + file: response.file, + content_type: response.content_type, + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + content_disposition: Some(content_disposition), + }) + } else { + Err!(Request(NotFound("Media not found."))) + }, } } diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 26736fb5..9c2693dc 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -9,51 +9,51 @@ use std::{ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - at, debug, debug_info, debug_warn, err, error, info, - pdu::{gen_event_id_canonical_json, PduBuilder}, + Err, PduEvent, Result, StateKey, at, debug, debug_info, debug_warn, err, error, info, + pdu::{PduBuilder, gen_event_id_canonical_json}, result::FlatOk, state_res, trace, - utils::{self, shuffle, IterStream, ReadyExt}, - warn, Err, PduEvent, Result, StateKey, + utils::{self, IterStream, ReadyExt, shuffle}, + warn, }; -use futures::{join, FutureExt, StreamExt, TryFutureExt}; +use futures::{FutureExt, StreamExt, TryFutureExt, join}; use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, + OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, api::{ client::{ error::ErrorKind, knock::knock_room, membership::{ - ban_user, forget_room, get_member_events, invite_user, join_room_by_id, - join_room_by_id_or_alias, + ThirdPartySigned, ban_user, forget_room, 
get_member_events, invite_user, + join_room_by_id, join_room_by_id_or_alias, joined_members::{self, v3::RoomMember}, - joined_rooms, kick_user, leave_room, unban_user, ThirdPartySigned, + joined_rooms, kick_user, leave_room, unban_user, }, }, federation::{self, membership::create_invite}, }, canonical_json::to_canonical_value, events::{ + StateEventType, room::{ join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, message::RoomMessageEventContent, }, - StateEventType, }, - CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, - OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, }; use service::{ + Services, appservice::RegistrationInfo, pdu::gen_event_id, rooms::{ state::RoomMutexGuard, state_compressor::{CompressedState, HashSetCompressStateEvent}, }, - Services, }; -use crate::{client::full_user_deactivate, Ruma}; +use crate::{Ruma, client::full_user_deactivate}; /// Checks if the room is banned in any way possible and the sender user is not /// an admin. @@ -507,43 +507,54 @@ pub(crate) async fn invite_user_route( ) .await?; - if let invite_user::v3::InvitationRecipient::UserId { user_id } = &body.recipient { - let sender_ignored_recipient = services.users.user_is_ignored(sender_user, user_id); - let recipient_ignored_by_sender = services.users.user_is_ignored(user_id, sender_user); + match &body.recipient { + | invite_user::v3::InvitationRecipient::UserId { user_id } => { + let sender_ignored_recipient = services.users.user_is_ignored(sender_user, user_id); + let recipient_ignored_by_sender = + services.users.user_is_ignored(user_id, sender_user); - let (sender_ignored_recipient, recipient_ignored_by_sender) = - join!(sender_ignored_recipient, recipient_ignored_by_sender); + let (sender_ignored_recipient, recipient_ignored_by_sender) = + join!(sender_ignored_recipient, recipient_ignored_by_sender); - if sender_ignored_recipient { - return Err!(Request(Forbidden( - "You cannot invite users you have ignored to rooms." - ))); - } - - if let Ok(target_user_membership) = services - .rooms - .state_accessor - .get_member(&body.room_id, user_id) - .await - { - if target_user_membership.membership == MembershipState::Ban { - return Err!(Request(Forbidden("User is banned from this room."))); + if sender_ignored_recipient { + return Err!(Request(Forbidden( + "You cannot invite users you have ignored to rooms." 
+ ))); } - } - if recipient_ignored_by_sender { - // silently drop the invite to the recipient if they've been ignored by the - // sender, pretend it worked - return Ok(invite_user::v3::Response {}); - } + if let Ok(target_user_membership) = services + .rooms + .state_accessor + .get_member(&body.room_id, user_id) + .await + { + if target_user_membership.membership == MembershipState::Ban { + return Err!(Request(Forbidden("User is banned from this room."))); + } + } - invite_helper(&services, sender_user, user_id, &body.room_id, body.reason.clone(), false) + if recipient_ignored_by_sender { + // silently drop the invite to the recipient if they've been ignored by the + // sender, pretend it worked + return Ok(invite_user::v3::Response {}); + } + + invite_helper( + &services, + sender_user, + user_id, + &body.room_id, + body.reason.clone(), + false, + ) .boxed() .await?; - Ok(invite_user::v3::Response {}) - } else { - Err!(Request(NotFound("User not found."))) + Ok(invite_user::v3::Response {}) + }, + | _ => { + Err!(Request(NotFound("User not found."))) + }, } } @@ -1830,38 +1841,46 @@ async fn remote_leave_room( .collect() .await; - if let Ok(invite_state) = services + match services .rooms .state_cache .invite_state(user_id, room_id) .await { - servers.extend( - invite_state - .iter() - .filter_map(|event| event.get_field("sender").ok().flatten()) - .filter_map(|sender: &str| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()), - ); - } else if let Ok(knock_state) = services - .rooms - .state_cache - .knock_state(user_id, room_id) - .await - { - servers.extend( - knock_state - .iter() - .filter_map(|event| event.get_field("sender").ok().flatten()) - .filter_map(|sender: &str| UserId::parse(sender).ok()) - .filter_map(|sender| { - if !services.globals.user_is_local(sender) { - Some(sender.server_name().to_owned()) - } else { - None - } - }), - ); + | Ok(invite_state) => { + servers.extend( + invite_state + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); + }, + | _ => { + match services + .rooms + .state_cache + .knock_state(user_id, room_id) + .await + { + | Ok(knock_state) => { + servers.extend( + knock_state + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .filter_map(|sender| { + if !services.globals.user_is_local(sender) { + Some(sender.server_name().to_owned()) + } else { + None + } + }), + ); + }, + | _ => {}, + } + }, } if let Some(room_id_server_name) = room_id.server_name() { diff --git a/src/api/client/message.rs b/src/api/client/message.rs index bb4e72dd..571a238a 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,30 +1,29 @@ use axum::extract::State; use conduwuit::{ - at, + Event, PduCount, PduEvent, Result, at, utils::{ + IterStream, ReadyExt, result::{FlatOk, LogErr}, stream::{BroadbandExt, TryIgnore, WidebandExt}, - IterStream, ReadyExt, }, - Event, PduCount, PduEvent, Result, }; -use futures::{future::OptionFuture, pin_mut, FutureExt, StreamExt, TryFutureExt}; +use futures::{FutureExt, StreamExt, TryFutureExt, future::OptionFuture, pin_mut}; use ruma::{ + RoomId, UserId, api::{ - client::{filter::RoomEventFilter, message::get_message_events}, Direction, + client::{filter::RoomEventFilter, message::get_message_events}, }, events::{AnyStateEvent, StateEventType, TimelineEventType, TimelineEventType::*}, serde::Raw, 
- RoomId, UserId, }; use service::{ + Services, rooms::{ lazy_loading, lazy_loading::{Options, Witness}, timeline::PdusIterItem, }, - Services, }; use crate::Ruma; diff --git a/src/api/client/presence.rs b/src/api/client/presence.rs index d19e6ae1..9b41a721 100644 --- a/src/api/client/presence.rs +++ b/src/api/client/presence.rs @@ -70,37 +70,38 @@ pub(crate) async fn get_presence_route( } } - if let Some(presence) = presence_event { - let status_msg = if presence - .content - .status_msg - .as_ref() - .is_some_and(String::is_empty) - { - None - } else { - presence.content.status_msg - }; - - let last_active_ago = match presence.content.currently_active { - | Some(true) => None, - | _ => presence + match presence_event { + | Some(presence) => { + let status_msg = if presence .content - .last_active_ago - .map(|millis| Duration::from_millis(millis.into())), - }; + .status_msg + .as_ref() + .is_some_and(String::is_empty) + { + None + } else { + presence.content.status_msg + }; - Ok(get_presence::v3::Response { - // TODO: Should ruma just use the presenceeventcontent type here? - status_msg, - currently_active: presence.content.currently_active, - last_active_ago, - presence: presence.content.presence, - }) - } else { - Err(Error::BadRequest( + let last_active_ago = match presence.content.currently_active { + | Some(true) => None, + | _ => presence + .content + .last_active_ago + .map(|millis| Duration::from_millis(millis.into())), + }; + + Ok(get_presence::v3::Response { + // TODO: Should ruma just use the presenceeventcontent type here? + status_msg, + currently_active: presence.content.currently_active, + last_active_ago, + presence: presence.content.presence, + }) + }, + | _ => Err(Error::BadRequest( ErrorKind::NotFound, "Presence state for this user was not found", - )) + )), } } diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index 584adfc1..12e5ebcc 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -2,12 +2,14 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ + Err, Error, Result, pdu::PduBuilder, - utils::{stream::TryIgnore, IterStream}, - warn, Err, Error, Result, + utils::{IterStream, stream::TryIgnore}, + warn, }; -use futures::{future::join3, StreamExt, TryStreamExt}; +use futures::{StreamExt, TryStreamExt, future::join3}; use ruma::{ + OwnedMxcUri, OwnedRoomId, UserId, api::{ client::{ error::ErrorKind, @@ -19,7 +21,6 @@ use ruma::{ }, events::room::member::{MembershipState, RoomMemberEventContent}, presence::PresenceState, - OwnedMxcUri, OwnedRoomId, UserId, }; use service::Services; diff --git a/src/api/client/push.rs b/src/api/client/push.rs index ed7371e4..384b9dbc 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -1,6 +1,7 @@ use axum::extract::State; -use conduwuit::{err, Err}; +use conduwuit::{Err, err}; use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, api::client::{ error::ErrorKind, push::{ @@ -10,14 +11,13 @@ use ruma::{ }, }, events::{ - push_rules::{PushRulesEvent, PushRulesEventContent}, GlobalAccountDataEventType, + push_rules::{PushRulesEvent, PushRulesEventContent}, }, push::{ InsertPushRuleError, PredefinedContentRuleId, PredefinedOverrideRuleId, RemovePushRuleError, Ruleset, }, - CanonicalJsonObject, CanonicalJsonValue, }; use service::Services; diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs index 89fe003a..d01327f6 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -1,14 +1,14 @@ use 
std::collections::BTreeMap; use axum::extract::State; -use conduwuit::{err, Err, PduCount}; +use conduwuit::{Err, PduCount, err}; use ruma::{ + MilliSecondsSinceUnixEpoch, api::client::{read_marker::set_read_marker, receipt::create_receipt}, events::{ - receipt::{ReceiptThread, ReceiptType}, RoomAccountDataEventType, + receipt::{ReceiptThread, ReceiptType}, }, - MilliSecondsSinceUnixEpoch, }; use crate::{Result, Ruma}; diff --git a/src/api/client/redact.rs b/src/api/client/redact.rs index ba59a010..7b512d06 100644 --- a/src/api/client/redact.rs +++ b/src/api/client/redact.rs @@ -3,7 +3,7 @@ use ruma::{ api::client::redact::redact_event, events::room::redaction::RoomRedactionEventContent, }; -use crate::{service::pdu::PduBuilder, Result, Ruma}; +use crate::{Result, Ruma, service::pdu::PduBuilder}; /// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}` /// diff --git a/src/api/client/relations.rs b/src/api/client/relations.rs index 87fb1eac..7ed40f14 100644 --- a/src/api/client/relations.rs +++ b/src/api/client/relations.rs @@ -1,22 +1,21 @@ use axum::extract::State; use conduwuit::{ - at, - utils::{result::FlatOk, stream::WidebandExt, IterStream, ReadyExt}, - PduCount, Result, + PduCount, Result, at, + utils::{IterStream, ReadyExt, result::FlatOk, stream::WidebandExt}, }; use futures::StreamExt; use ruma::{ + EventId, RoomId, UInt, UserId, api::{ + Direction, client::relations::{ get_relating_events, get_relating_events_with_rel_type, get_relating_events_with_rel_type_and_event_type, }, - Direction, }, - events::{relation::RelationType, TimelineEventType}, - EventId, RoomId, UInt, UserId, + events::{TimelineEventType, relation::RelationType}, }; -use service::{rooms::timeline::PdusIterItem, Services}; +use service::{Services, rooms::timeline::PdusIterItem}; use crate::Ruma; diff --git a/src/api/client/report.rs b/src/api/client/report.rs index 57de3f12..db085721 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -2,22 +2,22 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{info, utils::ReadyExt, Err}; +use conduwuit::{Err, info, utils::ReadyExt}; use rand::Rng; use ruma::{ + EventId, RoomId, UserId, api::client::{ error::ErrorKind, room::{report_content, report_room}, }, events::room::message, - int, EventId, RoomId, UserId, + int, }; use tokio::time::sleep; use crate::{ - debug_info, - service::{pdu::PduEvent, Services}, - Error, Result, Ruma, + Error, Result, Ruma, debug_info, + service::{Services, pdu::PduEvent}, }; /// # `POST /_matrix/client/v3/rooms/{roomId}/report` diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index e362b3b3..1b8294a5 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -2,15 +2,17 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ - debug_info, debug_warn, err, error, info, pdu::PduBuilder, warn, Err, Error, Result, StateKey, + Err, Error, Result, StateKey, debug_info, debug_warn, err, error, info, pdu::PduBuilder, warn, }; use futures::FutureExt; use ruma::{ + CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId, api::client::{ error::ErrorKind, room::{self, create_room}, }, events::{ + TimelineEventType, room::{ canonical_alias::RoomCanonicalAliasEventContent, create::RoomCreateEventContent, @@ -22,16 +24,14 @@ use ruma::{ power_levels::RoomPowerLevelsEventContent, topic::RoomTopicEventContent, }, - TimelineEventType, }, int, serde::{JsonObject, 
Raw}, - CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId, }; use serde_json::{json, value::to_raw_value}; -use service::{appservice::RegistrationInfo, Services}; +use service::{Services, appservice::RegistrationInfo}; -use crate::{client::invite_helper, Ruma}; +use crate::{Ruma, client::invite_helper}; /// # `POST /_matrix/client/v3/createRoom` /// @@ -68,10 +68,9 @@ pub(crate) async fn create_room_route( )); } - let room_id: OwnedRoomId = if let Some(custom_room_id) = &body.room_id { - custom_room_id_check(&services, custom_room_id)? - } else { - RoomId::new(&services.server.name) + let room_id: OwnedRoomId = match &body.room_id { + | Some(custom_room_id) => custom_room_id_check(&services, custom_room_id)?, + | _ => RoomId::new(&services.server.name), }; // check if room ID doesn't already exist instead of erroring on auth check @@ -114,10 +113,10 @@ pub(crate) async fn create_room_route( .await; let state_lock = services.rooms.state.mutex.lock(&room_id).await; - let alias: Option = if let Some(alias) = body.room_alias_name.as_ref() { - Some(room_alias_check(&services, alias, body.appservice_info.as_ref()).await?) - } else { - None + let alias: Option = match body.room_alias_name.as_ref() { + | Some(alias) => + Some(room_alias_check(&services, alias, body.appservice_info.as_ref()).await?), + | _ => None, }; let room_version = match body.room_version.clone() { diff --git a/src/api/client/room/event.rs b/src/api/client/room/event.rs index f0ae64dd..84b591cd 100644 --- a/src/api/client/room/event.rs +++ b/src/api/client/room/event.rs @@ -1,9 +1,9 @@ use axum::extract::State; -use conduwuit::{err, Err, Event, Result}; -use futures::{future::try_join, FutureExt, TryFutureExt}; +use conduwuit::{Err, Event, Result, err}; +use futures::{FutureExt, TryFutureExt, future::try_join}; use ruma::api::client::room::get_room_event; -use crate::{client::is_ignored_pdu, Ruma}; +use crate::{Ruma, client::is_ignored_pdu}; /// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}` /// diff --git a/src/api/client/room/initial_sync.rs b/src/api/client/room/initial_sync.rs index 233d180f..e4c76ae0 100644 --- a/src/api/client/room/initial_sync.rs +++ b/src/api/client/room/initial_sync.rs @@ -1,8 +1,7 @@ use axum::extract::State; use conduwuit::{ - at, - utils::{stream::TryTools, BoolExt}, - Err, PduEvent, Result, + Err, PduEvent, Result, at, + utils::{BoolExt, stream::TryTools}, }; use futures::TryStreamExt; use ruma::api::client::room::initial_sync::v3::{PaginationChunk, Request, Response}; diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index a624f95f..4ac341a9 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -1,19 +1,20 @@ use std::cmp::max; use axum::extract::State; -use conduwuit::{err, info, pdu::PduBuilder, Error, Result, StateKey}; +use conduwuit::{Error, Result, StateKey, err, info, pdu::PduBuilder}; use futures::StreamExt; use ruma::{ + CanonicalJsonObject, RoomId, RoomVersionId, api::client::{error::ErrorKind, room::upgrade_room}, events::{ + StateEventType, TimelineEventType, room::{ member::{MembershipState, RoomMemberEventContent}, power_levels::RoomPowerLevelsEventContent, tombstone::RoomTombstoneEventContent, }, - StateEventType, TimelineEventType, }, - int, CanonicalJsonObject, RoomId, RoomVersionId, + int, }; use serde_json::{json, value::to_raw_value}; diff --git a/src/api/client/search.rs b/src/api/client/search.rs index 898dfc7f..f3366843 100644 --- a/src/api/client/search.rs +++ 
b/src/api/client/search.rs @@ -2,23 +2,22 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ - at, is_true, + Err, PduEvent, Result, at, is_true, result::FlatOk, - utils::{stream::ReadyExt, IterStream}, - Err, PduEvent, Result, + utils::{IterStream, stream::ReadyExt}, }; -use futures::{future::OptionFuture, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::OptionFuture}; use ruma::{ + OwnedRoomId, RoomId, UInt, UserId, api::client::search::search_events::{ self, v3::{Criteria, EventContextResult, ResultCategories, ResultRoomEvents, SearchResult}, }, events::AnyStateEvent, serde::Raw, - OwnedRoomId, RoomId, UInt, UserId, }; use search_events::v3::{Request, Response}; -use service::{rooms::search::RoomQuery, Services}; +use service::{Services, rooms::search::RoomQuery}; use crate::Ruma; diff --git a/src/api/client/send.rs b/src/api/client/send.rs index 39340070..b01d1ed6 100644 --- a/src/api/client/send.rs +++ b/src/api/client/send.rs @@ -1,11 +1,11 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduwuit::{err, Err}; +use conduwuit::{Err, err}; use ruma::{api::client::message::send_message_event, events::MessageLikeEventType}; use serde_json::from_str; -use crate::{service::pdu::PduBuilder, utils, Result, Ruma}; +use crate::{Result, Ruma, service::pdu::PduBuilder, utils}; /// # `PUT /_matrix/client/v3/rooms/{roomId}/send/{eventType}/{txnId}` /// diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 7155351c..5c0ab47d 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -2,9 +2,10 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{debug, err, info, utils::ReadyExt, warn, Err}; +use conduwuit::{Err, debug, err, info, utils::ReadyExt, warn}; use futures::StreamExt; use ruma::{ + OwnedUserId, UserId, api::client::{ error::ErrorKind, session::{ @@ -21,12 +22,11 @@ use ruma::{ }, uiaa, }, - OwnedUserId, UserId, }; use service::uiaa::SESSION_ID_LENGTH; use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; -use crate::{utils, utils::hash, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, utils, utils::hash}; /// # `GET /_matrix/client/v3/login` /// @@ -139,18 +139,20 @@ pub(crate) async fn login_route( Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") })?; - if let Some(ref info) = body.appservice_info { - if !info.is_user_match(&user_id) { + match body.appservice_info { + | Some(ref info) => + if !info.is_user_match(&user_id) { + return Err(Error::BadRequest( + ErrorKind::Exclusive, + "User is not in namespace.", + )); + }, + | _ => { return Err(Error::BadRequest( - ErrorKind::Exclusive, - "User is not in namespace.", + ErrorKind::MissingToken, + "Missing appservice token.", )); - } - } else { - return Err(Error::BadRequest( - ErrorKind::MissingToken, - "Missing appservice token.", - )); + }, } user_id @@ -259,26 +261,32 @@ pub(crate) async fn login_token_route( auth_error: None, }; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } - // Success! 
- } else if let Some(json) = body.json_body.as_ref() { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, json); + // Success! + }, + | _ => match body.json_body.as_ref() { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, json); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err!(Request(NotJson("No JSON body was sent when required."))); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err!(Request(NotJson("No JSON body was sent when required."))); + }, + }, } let login_token = utils::random_string(TOKEN_LENGTH); diff --git a/src/api/client/space.rs b/src/api/client/space.rs index 7efd7817..a667f852 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -5,18 +5,18 @@ use std::{ use axum::extract::State; use conduwuit::{ - utils::{future::TryExtExt, stream::IterStream}, Err, Result, + utils::{future::TryExtExt, stream::IterStream}, }; -use futures::{future::OptionFuture, StreamExt, TryFutureExt}; +use futures::{StreamExt, TryFutureExt, future::OptionFuture}; use ruma::{ - api::client::space::get_hierarchy, OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, + OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, api::client::space::get_hierarchy, }; use service::{ - rooms::spaces::{ - get_parent_children_via, summary_to_chunk, PaginationToken, SummaryAccessibility, - }, Services, + rooms::spaces::{ + PaginationToken, SummaryAccessibility, get_parent_children_via, summary_to_chunk, + }, }; use crate::Ruma; diff --git a/src/api/client/state.rs b/src/api/client/state.rs index f73ffa46..6353fe1c 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -1,19 +1,19 @@ use axum::extract::State; -use conduwuit::{err, pdu::PduBuilder, utils::BoolExt, Err, PduEvent, Result}; +use conduwuit::{Err, PduEvent, Result, err, pdu::PduBuilder, utils::BoolExt}; use futures::TryStreamExt; use ruma::{ + OwnedEventId, RoomId, UserId, api::client::state::{get_state_events, get_state_events_for_key, send_state_event}, events::{ + AnyStateEventContent, StateEventType, room::{ canonical_alias::RoomCanonicalAliasEventContent, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, }, - AnyStateEventContent, StateEventType, }, serde::Raw, - OwnedEventId, RoomId, UserId, }; use service::Services; diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index 46540881..3eab76cc 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -3,25 +3,25 @@ mod v4; mod v5; use conduwuit::{ - utils::{ - stream::{BroadbandExt, ReadyExt, TryIgnore}, - IterStream, - }, PduCount, + utils::{ + IterStream, + stream::{BroadbandExt, ReadyExt, TryIgnore}, + }, }; -use futures::{pin_mut, StreamExt}; +use futures::{StreamExt, pin_mut}; use ruma::{ + RoomId, UserId, directory::RoomTypeFilter, events::TimelineEventType::{ self, Beacon, CallInvite, PollStart, RoomEncrypted, RoomMessage, Sticker, }, - RoomId, UserId, }; pub(crate) use self::{ v3::sync_events_route, v4::sync_events_v4_route, v5::sync_events_v5_route, }; -use crate::{service::Services, Error, PduEvent, Result}; +use crate::{Error, PduEvent, Result, service::Services}; pub(crate) const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] = &[CallInvite, PollStart, Beacon, RoomEncrypted, RoomMessage, 
Sticker]; diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index f9dcd5ec..fb59837b 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -6,57 +6,55 @@ use std::{ use axum::extract::State; use conduwuit::{ - at, err, error, extract_variant, is_equal_to, pair_of, + PduCount, PduEvent, Result, at, err, error, extract_variant, is_equal_to, pair_of, pdu::{Event, EventHash}, ref_at, result::FlatOk, utils::{ - self, + self, BoolExt, IterStream, ReadyExt, TryFutureExtExt, math::ruma_from_u64, stream::{BroadbandExt, Tools, TryExpect, WidebandExt}, - BoolExt, IterStream, ReadyExt, TryFutureExtExt, }, - PduCount, PduEvent, Result, }; use conduwuit_service::{ + Services, rooms::{ lazy_loading, lazy_loading::{Options, Witness}, short::ShortStateHash, }, - Services, }; use futures::{ - future::{join, join3, join4, join5, try_join, try_join4, OptionFuture}, FutureExt, StreamExt, TryFutureExt, TryStreamExt, + future::{OptionFuture, join, join3, join4, join5, try_join, try_join4}, }; use ruma::{ + DeviceId, EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, api::client::{ filter::FilterDefinition, sync::sync_events::{ - self, + self, DeviceLists, UnreadNotificationsCount, v3::{ Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, KnockState, KnockedRoom, LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State as RoomState, Timeline, ToDevice, }, - DeviceLists, UnreadNotificationsCount, }, uiaa::UiaaResponse, }, events::{ - presence::{PresenceEvent, PresenceEventContent}, - room::member::{MembershipState, RoomMemberEventContent}, AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType::*, + presence::{PresenceEvent, PresenceEventContent}, + room::member::{MembershipState, RoomMemberEventContent}, }, serde::Raw, - uint, DeviceId, EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, + uint, }; use service::rooms::short::{ShortEventId, ShortStateKey}; use super::{load_timeline, share_encrypted_room}; -use crate::{client::ignored_filter, Ruma, RumaResponse}; +use crate::{Ruma, RumaResponse, client::ignored_filter}; #[derive(Default)] struct StateChanges { @@ -168,8 +166,8 @@ pub(crate) async fn build_sync_events( let full_state = body.body.full_state; let filter = match body.body.filter.as_ref() { | None => FilterDefinition::default(), - | Some(Filter::FilterDefinition(ref filter)) => filter.clone(), - | Some(Filter::FilterId(ref filter_id)) => services + | Some(Filter::FilterDefinition(filter)) => filter.clone(), + | Some(Filter::FilterId(filter_id)) => services .users .get_filter(sender_user, filter_id) .await @@ -1016,34 +1014,37 @@ async fn calculate_state_incremental<'a>( let lazy_state_ids: OptionFuture<_> = witness .filter(|_| !full_state && !encrypted_room) .map(|witness| { - witness - .iter() - .stream() - .broad_filter_map(|user_id| state_get_shorteventid(user_id)) - .into_future() + StreamExt::into_future( + witness + .iter() + .stream() + .broad_filter_map(|user_id| state_get_shorteventid(user_id)), + ) }) .into(); let state_diff: OptionFuture<_> = (!full_state && state_changed) .then(|| { - services - .rooms - .state_accessor - .state_added((since_shortstatehash, current_shortstatehash)) - .boxed() - .into_future() + StreamExt::into_future( + services + .rooms + .state_accessor + .state_added((since_shortstatehash, current_shortstatehash)) + .boxed(), + ) }) .into(); let current_state_ids: OptionFuture<_> = full_state .then(|| { - services - .rooms - .state_accessor - 
.state_full_shortids(current_shortstatehash) - .expect_ok() - .boxed() - .into_future() + StreamExt::into_future( + services + .rooms + .state_accessor + .state_full_shortids(current_shortstatehash) + .expect_ok() + .boxed(), + ) }) .into(); diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 13f832b2..5fdcbab8 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -6,37 +6,37 @@ use std::{ use axum::extract::State; use conduwuit::{ - debug, error, extract_variant, + Error, PduCount, Result, debug, error, extract_variant, utils::{ - math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, BoolExt, IterStream, ReadyExt, TryFutureExtExt, + math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, }, - warn, Error, PduCount, Result, + warn, }; use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ + MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, api::client::{ error::ErrorKind, sync::sync_events::{ - self, + self, DeviceLists, UnreadNotificationsCount, v4::{SlidingOp, SlidingSyncRoomHero}, - DeviceLists, UnreadNotificationsCount, }, }, events::{ - room::member::{MembershipState, RoomMemberEventContent}, AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType::*, + room::member::{MembershipState, RoomMemberEventContent}, }, serde::Raw, - uint, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, + uint, }; use service::rooms::read_receipt::pack_receipts; use super::{load_timeline, share_encrypted_room}; use crate::{ - client::{filter_rooms, ignored_filter, sync::v5::TodoRooms, DEFAULT_BUMP_TYPES}, Ruma, + client::{DEFAULT_BUMP_TYPES, filter_rooms, ignored_filter, sync::v5::TodoRooms}, }; pub(crate) const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync"; @@ -700,14 +700,13 @@ pub(crate) async fn sync_events_v4_route( .await .ok() .or(name), - avatar: if let Some(heroes_avatar) = heroes_avatar { - ruma::JsOption::Some(heroes_avatar) - } else { - match services.rooms.state_accessor.get_avatar(room_id).await { + avatar: match heroes_avatar { + | Some(heroes_avatar) => ruma::JsOption::Some(heroes_avatar), + | _ => match services.rooms.state_accessor.get_avatar(room_id).await { | ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url), | ruma::JsOption::Null => ruma::JsOption::Null, | ruma::JsOption::Undefined => ruma::JsOption::Undefined, - } + }, }, initial: Some(roomsince == &0), is_dm: None, diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index cda6c041..b4c1b815 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -6,32 +6,33 @@ use std::{ use axum::extract::State; use conduwuit::{ - debug, error, extract_variant, trace, + Error, Result, TypeStateKey, debug, error, extract_variant, trace, utils::{ - math::{ruma_from_usize, usize_from_ruma}, BoolExt, IterStream, ReadyExt, TryFutureExtExt, + math::{ruma_from_usize, usize_from_ruma}, }, - warn, Error, Result, TypeStateKey, + warn, }; use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ + DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, api::client::{ error::ErrorKind, sync::sync_events::{self, DeviceLists, UnreadNotificationsCount}, }, events::{ - room::member::{MembershipState, RoomMemberEventContent}, AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType, + room::member::{MembershipState, RoomMemberEventContent}, }, serde::Raw, - uint, DeviceId, OwnedEventId, OwnedRoomId, RoomId, 
UInt, UserId, + uint, }; -use service::{rooms::read_receipt::pack_receipts, PduCount}; +use service::{PduCount, rooms::read_receipt::pack_receipts}; use super::{filter_rooms, share_encrypted_room}; use crate::{ - client::{ignored_filter, sync::load_timeline, DEFAULT_BUMP_TYPES}, Ruma, + client::{DEFAULT_BUMP_TYPES, ignored_filter, sync::load_timeline}, }; type SyncInfo<'a> = (&'a UserId, &'a DeviceId, u64, &'a sync_events::v5::Request); @@ -572,14 +573,13 @@ async fn process_rooms( .await .ok() .or(name), - avatar: if let Some(heroes_avatar) = heroes_avatar { - ruma::JsOption::Some(heroes_avatar) - } else { - match services.rooms.state_accessor.get_avatar(room_id).await { + avatar: match heroes_avatar { + | Some(heroes_avatar) => ruma::JsOption::Some(heroes_avatar), + | _ => match services.rooms.state_accessor.get_avatar(room_id).await { | ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url), | ruma::JsOption::Null => ruma::JsOption::Null, | ruma::JsOption::Undefined => ruma::JsOption::Undefined, - } + }, }, initial: Some(roomsince == &0), is_dm: None, diff --git a/src/api/client/tag.rs b/src/api/client/tag.rs index 820ee4a1..3b3b40d4 100644 --- a/src/api/client/tag.rs +++ b/src/api/client/tag.rs @@ -4,8 +4,8 @@ use axum::extract::State; use ruma::{ api::client::tag::{create_tag, delete_tag, get_tags}, events::{ - tag::{TagEvent, TagEventContent}, RoomAccountDataEventType, + tag::{TagEvent, TagEventContent}, }, }; diff --git a/src/api/client/threads.rs b/src/api/client/threads.rs index f0cbf467..d25e52c0 100644 --- a/src/api/client/threads.rs +++ b/src/api/client/threads.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduwuit::{at, PduCount, PduEvent}; +use conduwuit::{PduCount, PduEvent, at}; use futures::StreamExt; use ruma::{api::client::threads::get_threads, uint}; diff --git a/src/api/client/typing.rs b/src/api/client/typing.rs index b311295b..ccfa7340 100644 --- a/src/api/client/typing.rs +++ b/src/api/client/typing.rs @@ -1,8 +1,8 @@ use axum::extract::State; -use conduwuit::{utils::math::Tried, Err}; +use conduwuit::{Err, utils::math::Tried}; use ruma::api::client::typing::create_typing_event; -use crate::{utils, Result, Ruma}; +use crate::{Result, Ruma, utils}; /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` /// @@ -27,37 +27,40 @@ pub(crate) async fn create_typing_event_route( return Err!(Request(Forbidden("You are not in this room."))); } - if let Typing::Yes(duration) = body.state { - let duration = utils::clamp( - duration.as_millis().try_into().unwrap_or(u64::MAX), + match body.state { + | Typing::Yes(duration) => { + let duration = utils::clamp( + duration.as_millis().try_into().unwrap_or(u64::MAX), + services + .server + .config + .typing_client_timeout_min_s + .try_mul(1000)?, + services + .server + .config + .typing_client_timeout_max_s + .try_mul(1000)?, + ); services - .server - .config - .typing_client_timeout_min_s - .try_mul(1000)?, + .rooms + .typing + .typing_add( + sender_user, + &body.room_id, + utils::millis_since_unix_epoch() + .checked_add(duration) + .expect("user typing timeout should not get this high"), + ) + .await?; + }, + | _ => { services - .server - .config - .typing_client_timeout_max_s - .try_mul(1000)?, - ); - services - .rooms - .typing - .typing_add( - sender_user, - &body.room_id, - utils::millis_since_unix_epoch() - .checked_add(duration) - .expect("user typing timeout should not get this high"), - ) - .await?; - } else { - services - .rooms - .typing - .typing_remove(sender_user, &body.room_id) - .await?; + 
.rooms + .typing + .typing_remove(sender_user, &body.room_id) + .await?; + }, } // ping presence diff --git a/src/api/client/unstable.rs b/src/api/client/unstable.rs index 67c7df75..08da5a37 100644 --- a/src/api/client/unstable.rs +++ b/src/api/client/unstable.rs @@ -5,6 +5,7 @@ use axum_client_ip::InsecureClientIp; use conduwuit::Err; use futures::StreamExt; use ruma::{ + OwnedRoomId, api::{ client::{ error::ErrorKind, @@ -19,7 +20,6 @@ use ruma::{ }, events::room::member::MembershipState, presence::PresenceState, - OwnedRoomId, }; use super::{update_avatar_url, update_displayname}; @@ -499,15 +499,18 @@ pub(crate) async fn get_profile_key_route( .users .set_timezone(&body.user_id, response.tz.clone()); - if let Some(value) = response.custom_profile_fields.get(&body.key_name) { - profile_key_value.insert(body.key_name.clone(), value.clone()); - services.users.set_profile_key( - &body.user_id, - &body.key_name, - Some(value.clone()), - ); - } else { - return Err!(Request(NotFound("The requested profile key does not exist."))); + match response.custom_profile_fields.get(&body.key_name) { + | Some(value) => { + profile_key_value.insert(body.key_name.clone(), value.clone()); + services.users.set_profile_key( + &body.user_id, + &body.key_name, + Some(value.clone()), + ); + }, + | _ => { + return Err!(Request(NotFound("The requested profile key does not exist."))); + }, } if profile_key_value.is_empty() { @@ -524,14 +527,17 @@ pub(crate) async fn get_profile_key_route( return Err!(Request(NotFound("Profile was not found."))); } - if let Ok(value) = services + match services .users .profile_key(&body.user_id, &body.key_name) .await { - profile_key_value.insert(body.key_name.clone(), value); - } else { - return Err!(Request(NotFound("The requested profile key does not exist."))); + | Ok(value) => { + profile_key_value.insert(body.key_name.clone(), value); + }, + | _ => { + return Err!(Request(NotFound("The requested profile key does not exist."))); + }, } if profile_key_value.is_empty() { diff --git a/src/api/client/unversioned.rs b/src/api/client/unversioned.rs index 904f1d2f..4e2b7d9d 100644 --- a/src/api/client/unversioned.rs +++ b/src/api/client/unversioned.rs @@ -1,6 +1,6 @@ use std::collections::BTreeMap; -use axum::{extract::State, response::IntoResponse, Json}; +use axum::{Json, extract::State, response::IntoResponse}; use futures::StreamExt; use ruma::api::client::discovery::get_supported_versions; diff --git a/src/api/client/user_directory.rs b/src/api/client/user_directory.rs index 182e30db..c5d79a56 100644 --- a/src/api/client/user_directory.rs +++ b/src/api/client/user_directory.rs @@ -1,11 +1,11 @@ use axum::extract::State; use conduwuit::utils::TryFutureExtExt; -use futures::{pin_mut, StreamExt}; +use futures::{StreamExt, pin_mut}; use ruma::{ api::client::user_directory::search_users, events::{ - room::join_rules::{JoinRule, RoomJoinRulesEventContent}, StateEventType, + room::join_rules::{JoinRule, RoomJoinRulesEventContent}, }, }; diff --git a/src/api/client/voip.rs b/src/api/client/voip.rs index 70ad4913..37e67984 100644 --- a/src/api/client/voip.rs +++ b/src/api/client/voip.rs @@ -1,10 +1,10 @@ use std::time::{Duration, SystemTime}; use axum::extract::State; -use base64::{engine::general_purpose, Engine as _}; -use conduwuit::{utils, Err}; +use base64::{Engine as _, engine::general_purpose}; +use conduwuit::{Err, utils}; use hmac::{Hmac, Mac}; -use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch, UserId}; +use ruma::{SecondsSinceUnixEpoch, UserId, 
api::client::voip::get_turn_server_info}; use sha1::Sha1; use crate::{Result, Ruma}; diff --git a/src/api/client/well_known.rs b/src/api/client/well_known.rs index 5c53d013..abda61b0 100644 --- a/src/api/client/well_known.rs +++ b/src/api/client/well_known.rs @@ -1,4 +1,4 @@ -use axum::{extract::State, response::IntoResponse, Json}; +use axum::{Json, extract::State, response::IntoResponse}; use ruma::api::client::{ discovery::{ discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo}, diff --git a/src/api/mod.rs b/src/api/mod.rs index 80e34f10..8df17a59 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -7,7 +7,7 @@ pub mod server; extern crate conduwuit_core as conduwuit; extern crate conduwuit_service as service; -pub(crate) use conduwuit::{debug_info, pdu::PduEvent, utils, Error, Result}; +pub(crate) use conduwuit::{Error, Result, debug_info, pdu::PduEvent, utils}; pub(crate) use self::router::{Ruma, RumaResponse, State}; diff --git a/src/api/router.rs b/src/api/router.rs index 7855ddfa..3fbef275 100644 --- a/src/api/router.rs +++ b/src/api/router.rs @@ -8,12 +8,12 @@ pub mod state; use std::str::FromStr; use axum::{ + Router, response::{IntoResponse, Redirect}, routing::{any, get, post}, - Router, }; -use conduwuit::{err, Server}; -use http::{uri, Uri}; +use conduwuit::{Server, err}; +use http::{Uri, uri}; use self::handler::RouterExt; pub(super) use self::{args::Args as Ruma, response::RumaResponse, state::State}; diff --git a/src/api/router/args.rs b/src/api/router/args.rs index 582f0c56..65a68fa4 100644 --- a/src/api/router/args.rs +++ b/src/api/router/args.rs @@ -2,15 +2,15 @@ use std::{mem, ops::Deref}; use axum::{async_trait, body::Body, extract::FromRequest}; use bytes::{BufMut, Bytes, BytesMut}; -use conduwuit::{debug, debug_warn, err, trace, utils::string::EMPTY, Error, Result}; +use conduwuit::{Error, Result, debug, debug_warn, err, trace, utils::string::EMPTY}; use ruma::{ - api::IncomingRequest, CanonicalJsonObject, CanonicalJsonValue, DeviceId, OwnedDeviceId, - OwnedServerName, OwnedUserId, ServerName, UserId, + CanonicalJsonObject, CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedServerName, + OwnedUserId, ServerName, UserId, api::IncomingRequest, }; use service::Services; use super::{auth, auth::Auth, request, request::Request}; -use crate::{service::appservice::RegistrationInfo, State}; +use crate::{State, service::appservice::RegistrationInfo}; /// Extractor for Ruma request structs pub(crate) struct Args { diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index ecea305b..56256683 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -1,12 +1,14 @@ use axum::RequestPartsExt; use axum_extra::{ - headers::{authorization::Bearer, Authorization}, - typed_header::TypedHeaderRejectionReason, TypedHeader, + headers::{Authorization, authorization::Bearer}, + typed_header::TypedHeaderRejectionReason, }; -use conduwuit::{debug_error, err, warn, Err, Error, Result}; +use conduwuit::{Err, Error, Result, debug_error, err, warn}; use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId, api::{ + AuthScheme, IncomingRequest, Metadata, client::{ directory::get_public_rooms, error::ErrorKind, @@ -16,14 +18,12 @@ use ruma::{ voip::get_turn_server_info, }, federation::openid::get_openid_userinfo, - AuthScheme, IncomingRequest, Metadata, }, server_util::authorization::XMatrix, - CanonicalJsonObject, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId, }; use service::{ - 
server_keys::{PubKeyMap, PubKeys}, Services, + server_keys::{PubKeyMap, PubKeys}, }; use super::request::Request; @@ -56,12 +56,12 @@ pub(super) async fn auth( }; let token = if let Some(token) = token { - if let Some(reg_info) = services.appservice.find_from_token(token).await { - Token::Appservice(Box::new(reg_info)) - } else if let Ok((user_id, device_id)) = services.users.find_from_token(token).await { - Token::User((user_id, device_id)) - } else { - Token::Invalid + match services.appservice.find_from_token(token).await { + | Some(reg_info) => Token::Appservice(Box::new(reg_info)), + | _ => match services.users.find_from_token(token).await { + | Ok((user_id, device_id)) => Token::User((user_id, device_id)), + | _ => Token::Invalid, + }, } } else { Token::None diff --git a/src/api/router/handler.rs b/src/api/router/handler.rs index cfb8fb6e..ab013945 100644 --- a/src/api/router/handler.rs +++ b/src/api/router/handler.rs @@ -1,8 +1,8 @@ use axum::{ + Router, extract::FromRequestParts, response::IntoResponse, - routing::{on, MethodFilter}, - Router, + routing::{MethodFilter, on}, }; use conduwuit::Result; use futures::{Future, TryFutureExt}; diff --git a/src/api/router/request.rs b/src/api/router/request.rs index 615a8bff..3cdc452b 100644 --- a/src/api/router/request.rs +++ b/src/api/router/request.rs @@ -1,8 +1,8 @@ use std::str; -use axum::{extract::Path, RequestExt, RequestPartsExt}; +use axum::{RequestExt, RequestPartsExt, extract::Path}; use bytes::Bytes; -use conduwuit::{err, Result}; +use conduwuit::{Result, err}; use http::request::Parts; use serde::Deserialize; use service::Services; diff --git a/src/api/router/response.rs b/src/api/router/response.rs index a10560f1..03c9060e 100644 --- a/src/api/router/response.rs +++ b/src/api/router/response.rs @@ -1,9 +1,9 @@ use axum::response::{IntoResponse, Response}; use bytes::BytesMut; -use conduwuit::{error, Error}; +use conduwuit::{Error, error}; use http::StatusCode; use http_body_util::Full; -use ruma::api::{client::uiaa::UiaaResponse, OutgoingResponse}; +use ruma::api::{OutgoingResponse, client::uiaa::UiaaResponse}; pub(crate) struct RumaResponse(pub(crate) T) where diff --git a/src/api/server/backfill.rs b/src/api/server/backfill.rs index b44db67c..5c875807 100644 --- a/src/api/server/backfill.rs +++ b/src/api/server/backfill.rs @@ -2,11 +2,11 @@ use std::cmp; use axum::extract::State; use conduwuit::{ - utils::{stream::TryTools, IterStream, ReadyExt}, PduCount, Result, + utils::{IterStream, ReadyExt, stream::TryTools}, }; use futures::{FutureExt, StreamExt, TryStreamExt}; -use ruma::{api::federation::backfill::get_backfill, uint, MilliSecondsSinceUnixEpoch}; +use ruma::{MilliSecondsSinceUnixEpoch, api::federation::backfill::get_backfill, uint}; use super::AccessCheck; use crate::Ruma; diff --git a/src/api/server/event.rs b/src/api/server/event.rs index 629dd6a2..5846c6d7 100644 --- a/src/api/server/event.rs +++ b/src/api/server/event.rs @@ -1,6 +1,6 @@ use axum::extract::State; -use conduwuit::{err, Result}; -use ruma::{api::federation::event::get_event, MilliSecondsSinceUnixEpoch, RoomId}; +use conduwuit::{Result, err}; +use ruma::{MilliSecondsSinceUnixEpoch, RoomId, api::federation::event::get_event}; use super::AccessCheck; use crate::Ruma; diff --git a/src/api/server/event_auth.rs b/src/api/server/event_auth.rs index 49dcd718..c9e210f5 100644 --- a/src/api/server/event_auth.rs +++ b/src/api/server/event_auth.rs @@ -1,11 +1,11 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; -use 
conduwuit::{utils::stream::ReadyExt, Error, Result}; +use conduwuit::{Error, Result, utils::stream::ReadyExt}; use futures::StreamExt; use ruma::{ - api::{client::error::ErrorKind, federation::authorization::get_event_authorization}, RoomId, + api::{client::error::ErrorKind, federation::authorization::get_event_authorization}, }; use super::AccessCheck; diff --git a/src/api/server/get_missing_events.rs b/src/api/server/get_missing_events.rs index ea06015a..3d0bbb07 100644 --- a/src/api/server/get_missing_events.rs +++ b/src/api/server/get_missing_events.rs @@ -1,8 +1,8 @@ use axum::extract::State; use conduwuit::{Error, Result}; use ruma::{ - api::{client::error::ErrorKind, federation::event::get_missing_events}, CanonicalJsonValue, EventId, RoomId, + api::{client::error::ErrorKind, federation::event::get_missing_events}, }; use super::AccessCheck; diff --git a/src/api/server/hierarchy.rs b/src/api/server/hierarchy.rs index f7bc43ab..41eaedd0 100644 --- a/src/api/server/hierarchy.rs +++ b/src/api/server/hierarchy.rs @@ -1,11 +1,11 @@ use axum::extract::State; use conduwuit::{ - utils::stream::{BroadbandExt, IterStream}, Err, Result, + utils::stream::{BroadbandExt, IterStream}, }; use futures::{FutureExt, StreamExt}; use ruma::api::federation::space::get_hierarchy; -use service::rooms::spaces::{get_parent_children_via, Identifier, SummaryAccessibility}; +use service::rooms::spaces::{Identifier, SummaryAccessibility, get_parent_children_via}; use crate::Ruma; diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index 27a4485c..463cb9ab 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -1,12 +1,12 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use base64::{engine::general_purpose, Engine as _}; -use conduwuit::{err, utils, utils::hash::sha256, warn, Err, Error, PduEvent, Result}; +use base64::{Engine as _, engine::general_purpose}; +use conduwuit::{Err, Error, PduEvent, Result, err, utils, utils::hash::sha256, warn}; use ruma::{ + CanonicalJsonValue, OwnedUserId, UserId, api::{client::error::ErrorKind, federation::membership::create_invite}, events::room::member::{MembershipState, RoomMemberEventContent}, serde::JsonObject, - CanonicalJsonValue, OwnedUserId, UserId, }; use service::pdu::gen_event_id; diff --git a/src/api/server/key.rs b/src/api/server/key.rs index 75801a7a..f9bd0926 100644 --- a/src/api/server/key.rs +++ b/src/api/server/key.rs @@ -3,15 +3,15 @@ use std::{ time::{Duration, SystemTime}, }; -use axum::{extract::State, response::IntoResponse, Json}; -use conduwuit::{utils::timepoint_from_now, Result}; +use axum::{Json, extract::State, response::IntoResponse}; +use conduwuit::{Result, utils::timepoint_from_now}; use ruma::{ + MilliSecondsSinceUnixEpoch, Signatures, api::{ - federation::discovery::{get_server_keys, OldVerifyKey, ServerSigningKeys}, OutgoingResponse, + federation::discovery::{OldVerifyKey, ServerSigningKeys, get_server_keys}, }, serde::Raw, - MilliSecondsSinceUnixEpoch, Signatures, }; /// # `GET /_matrix/key/v2/server` diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index b753346c..f18d1304 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -1,22 +1,22 @@ use axum::extract::State; -use conduwuit::{debug_info, utils::IterStream, warn, Err}; +use conduwuit::{Err, debug_info, utils::IterStream, warn}; use futures::StreamExt; use ruma::{ + CanonicalJsonObject, OwnedUserId, RoomId, RoomVersionId, UserId, api::{client::error::ErrorKind, 
federation::membership::prepare_join_event}, events::{ + StateEventType, room::{ join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, }, - StateEventType, }, - CanonicalJsonObject, OwnedUserId, RoomId, RoomVersionId, UserId, }; use serde_json::value::to_raw_value; use crate::{ - service::{pdu::PduBuilder, Services}, Error, Result, Ruma, + service::{Services, pdu::PduBuilder}, }; /// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` diff --git a/src/api/server/make_knock.rs b/src/api/server/make_knock.rs index 423e202d..71536439 100644 --- a/src/api/server/make_knock.rs +++ b/src/api/server/make_knock.rs @@ -1,15 +1,15 @@ +use RoomVersionId::*; use axum::extract::State; -use conduwuit::{debug_warn, Err}; +use conduwuit::{Err, debug_warn}; use ruma::{ + RoomVersionId, api::{client::error::ErrorKind, federation::knock::create_knock_event_template}, events::room::member::{MembershipState, RoomMemberEventContent}, - RoomVersionId, }; use serde_json::value::to_raw_value; use tracing::warn; -use RoomVersionId::*; -use crate::{service::pdu::PduBuilder, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, service::pdu::PduBuilder}; /// # `GET /_matrix/federation/v1/make_knock/{roomId}/{userId}` /// diff --git a/src/api/server/make_leave.rs b/src/api/server/make_leave.rs index 936e0fbb..1ed02785 100644 --- a/src/api/server/make_leave.rs +++ b/src/api/server/make_leave.rs @@ -7,7 +7,7 @@ use ruma::{ use serde_json::value::to_raw_value; use super::make_join::maybe_strip_event_id; -use crate::{service::pdu::PduBuilder, Ruma}; +use crate::{Ruma, service::pdu::PduBuilder}; /// # `GET /_matrix/federation/v1/make_leave/{roomId}/{eventId}` /// diff --git a/src/api/server/media.rs b/src/api/server/media.rs index e56f5b9d..cbe8595b 100644 --- a/src/api/server/media.rs +++ b/src/api/server/media.rs @@ -1,12 +1,12 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{utils::content_disposition::make_content_disposition, Err, Result}; +use conduwuit::{Err, Result, utils::content_disposition::make_content_disposition}; use conduwuit_service::media::{Dim, FileMeta}; use ruma::{ - api::federation::authenticated_media::{ - get_content, get_content_thumbnail, Content, ContentMetadata, FileOrLocation, - }, Mxc, + api::federation::authenticated_media::{ + Content, ContentMetadata, FileOrLocation, get_content, get_content_thumbnail, + }, }; use crate::Ruma; diff --git a/src/api/server/query.rs b/src/api/server/query.rs index 69f62e94..9d4fcf73 100644 --- a/src/api/server/query.rs +++ b/src/api/server/query.rs @@ -1,16 +1,16 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduwuit::{err, Error, Result}; +use conduwuit::{Error, Result, err}; use futures::StreamExt; use get_profile_information::v1::ProfileField; use rand::seq::SliceRandom; use ruma::{ + OwnedServerName, api::{ client::error::ErrorKind, federation::query::{get_profile_information, get_room_information}, }, - OwnedServerName, }; use crate::Ruma; diff --git a/src/api/server/send.rs b/src/api/server/send.rs index bc18377e..1f467dac 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -3,20 +3,21 @@ use std::{collections::BTreeMap, net::IpAddr, time::Instant}; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - debug, + Err, Error, Result, debug, debug::INFO_SPAN_LEVEL, debug_warn, err, error, result::LogErr, trace, utils::{ - stream::{automatic_width, BroadbandExt, TryBroadbandExt}, IterStream, 
ReadyExt, + stream::{BroadbandExt, TryBroadbandExt, automatic_width}, }, - warn, Err, Error, Result, + warn, }; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use itertools::Itertools; use ruma::{ + CanonicalJsonObject, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, ServerName, UserId, api::{ client::error::ErrorKind, federation::transactions::{ @@ -31,17 +32,16 @@ use ruma::{ events::receipt::{ReceiptEvent, ReceiptEventContent, ReceiptType}, serde::Raw, to_device::DeviceIdOrAllDevices, - CanonicalJsonObject, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, ServerName, UserId, }; use service::{ - sending::{EDU_LIMIT, PDU_LIMIT}, Services, + sending::{EDU_LIMIT, PDU_LIMIT}, }; use utils::millis_since_unix_epoch; use crate::{ - utils::{self}, Ruma, + utils::{self}, }; type ResolvedMap = BTreeMap; diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index e81d7672..08fa3835 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -4,22 +4,22 @@ use std::borrow::Borrow; use axum::extract::State; use conduwuit::{ - at, err, + Err, Result, at, err, pdu::gen_event_id_canonical_json, utils::stream::{IterStream, TryBroadbandExt}, - warn, Err, Result, + warn, }; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ - api::federation::membership::create_join_event, - events::{ - room::member::{MembershipState, RoomMemberEventContent}, - StateEventType, - }, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, + api::federation::membership::create_join_event, + events::{ + StateEventType, + room::member::{MembershipState, RoomMemberEventContent}, + }, }; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; use service::Services; use crate::Ruma; diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index b07620af..1d4c2a6c 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -1,15 +1,15 @@ use axum::extract::State; -use conduwuit::{err, pdu::gen_event_id_canonical_json, warn, Err, PduEvent, Result}; +use conduwuit::{Err, PduEvent, Result, err, pdu::gen_event_id_canonical_json, warn}; use futures::FutureExt; use ruma::{ - api::federation::knock::send_knock, - events::{ - room::member::{MembershipState, RoomMemberEventContent}, - StateEventType, - }, - serde::JsonObject, OwnedServerName, OwnedUserId, RoomVersionId::*, + api::federation::knock::send_knock, + events::{ + StateEventType, + room::member::{MembershipState, RoomMemberEventContent}, + }, + serde::JsonObject, }; use crate::Ruma; diff --git a/src/api/server/send_leave.rs b/src/api/server/send_leave.rs index e955a267..71516553 100644 --- a/src/api/server/send_leave.rs +++ b/src/api/server/send_leave.rs @@ -1,21 +1,21 @@ #![allow(deprecated)] use axum::extract::State; -use conduwuit::{err, Err, Result}; +use conduwuit::{Err, Result, err}; use futures::FutureExt; use ruma::{ + OwnedRoomId, OwnedUserId, RoomId, ServerName, api::federation::membership::create_leave_event, events::{ - room::member::{MembershipState, RoomMemberEventContent}, StateEventType, + room::member::{MembershipState, RoomMemberEventContent}, }, - OwnedRoomId, OwnedUserId, RoomId, ServerName, }; use serde_json::value::RawValue as RawJsonValue; use crate::{ - service::{pdu::gen_event_id_canonical_json, Services}, Ruma, + service::{Services, pdu::gen_event_id_canonical_json}, }; /// # `PUT /_matrix/federation/v1/send_leave/{roomId}/{eventId}` 
diff --git a/src/api/server/state.rs b/src/api/server/state.rs index b16e61a0..8c786815 100644 --- a/src/api/server/state.rs +++ b/src/api/server/state.rs @@ -1,9 +1,9 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; -use conduwuit::{at, err, utils::IterStream, Result}; +use conduwuit::{Result, at, err, utils::IterStream}; use futures::{FutureExt, StreamExt, TryStreamExt}; -use ruma::{api::federation::event::get_room_state, OwnedEventId}; +use ruma::{OwnedEventId, api::federation::event::get_room_state}; use super::AccessCheck; use crate::Ruma; diff --git a/src/api/server/state_ids.rs b/src/api/server/state_ids.rs index 7d0440bf..648d4575 100644 --- a/src/api/server/state_ids.rs +++ b/src/api/server/state_ids.rs @@ -1,9 +1,9 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; -use conduwuit::{at, err, Result}; +use conduwuit::{Result, at, err}; use futures::{StreamExt, TryStreamExt}; -use ruma::{api::federation::event::get_room_state_ids, OwnedEventId}; +use ruma::{OwnedEventId, api::federation::event::get_room_state_ids}; use super::AccessCheck; use crate::Ruma; diff --git a/src/api/server/user.rs b/src/api/server/user.rs index 321d0b66..80c353ab 100644 --- a/src/api/server/user.rs +++ b/src/api/server/user.rs @@ -10,8 +10,8 @@ use ruma::api::{ }; use crate::{ - client::{claim_keys_helper, get_keys_helper}, Ruma, + client::{claim_keys_helper, get_keys_helper}, }; /// # `GET /_matrix/federation/v1/user/devices/{userId}` diff --git a/src/api/server/utils.rs b/src/api/server/utils.rs index 4f3fa245..5696e44b 100644 --- a/src/api/server/utils.rs +++ b/src/api/server/utils.rs @@ -1,6 +1,6 @@ -use conduwuit::{implement, is_false, Err, Result}; +use conduwuit::{Err, Result, implement, is_false}; use conduwuit_service::Services; -use futures::{future::OptionFuture, join, FutureExt, StreamExt}; +use futures::{FutureExt, StreamExt, future::OptionFuture, join}; use ruma::{EventId, RoomId, ServerName}; pub(super) struct AccessCheck<'a> { diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 57143e85..6870c1c0 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -2,7 +2,7 @@ use std::{ cell::OnceCell, - ffi::{c_char, c_void, CStr}, + ffi::{CStr, c_char, c_void}, fmt::Debug, sync::RwLock, }; @@ -14,9 +14,8 @@ use tikv_jemalloc_sys as ffi; use tikv_jemallocator as jemalloc; use crate::{ - err, is_equal_to, is_nonzero, + Result, err, is_equal_to, is_nonzero, utils::{math, math::Tried}, - Result, }; #[cfg(feature = "jemalloc_conf")] @@ -128,7 +127,7 @@ unsafe extern "C" fn malloc_stats_cb(opaque: *mut c_void, msg: *const c_char) { } macro_rules! mallctl { - ($name:expr) => {{ + ($name:expr_2021) => {{ thread_local! { static KEY: OnceCell = OnceCell::default(); }; @@ -141,7 +140,7 @@ macro_rules! mallctl { } pub mod this_thread { - use super::{is_nonzero, key, math, Debug, Key, OnceCell, Result}; + use super::{Debug, Key, OnceCell, Result, is_nonzero, key, math}; thread_local! 
{ static ALLOCATED_BYTES: OnceCell<&'static u64> = const { OnceCell::new() }; @@ -261,18 +260,18 @@ pub fn decay>>(arena: I) -> Result { } pub fn set_muzzy_decay>>(arena: I, decay_ms: isize) -> Result { - if let Some(arena) = arena.into() { - set_by_arena(Some(arena), mallctl!("arena.4096.muzzy_decay_ms"), decay_ms) - } else { - set(&mallctl!("arenas.muzzy_decay_ms"), decay_ms) + match arena.into() { + | Some(arena) => + set_by_arena(Some(arena), mallctl!("arena.4096.muzzy_decay_ms"), decay_ms), + | _ => set(&mallctl!("arenas.muzzy_decay_ms"), decay_ms), } } pub fn set_dirty_decay>>(arena: I, decay_ms: isize) -> Result { - if let Some(arena) = arena.into() { - set_by_arena(Some(arena), mallctl!("arena.4096.dirty_decay_ms"), decay_ms) - } else { - set(&mallctl!("arenas.dirty_decay_ms"), decay_ms) + match arena.into() { + | Some(arena) => + set_by_arena(Some(arena), mallctl!("arena.4096.dirty_decay_ms"), decay_ms), + | _ => set(&mallctl!("arenas.dirty_decay_ms"), decay_ms), } } diff --git a/src/core/config/check.rs b/src/core/config/check.rs index 5532c5a2..488f7f94 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -4,7 +4,7 @@ use either::Either; use figment::Figment; use super::DEPRECATED_KEYS; -use crate::{debug, debug_info, debug_warn, error, warn, Config, Err, Result, Server}; +use crate::{Config, Err, Result, Server, debug, debug_info, debug_warn, error, warn}; /// Performs check() with additional checks specific to reloading old config /// with new config. diff --git a/src/core/config/manager.rs b/src/core/config/manager.rs index 0c95ca15..e55916ba 100644 --- a/src/core/config/manager.rs +++ b/src/core/config/manager.rs @@ -4,13 +4,13 @@ use std::{ ptr, ptr::null_mut, sync::{ - atomic::{AtomicPtr, Ordering}, Arc, + atomic::{AtomicPtr, Ordering}, }, }; use super::Config; -use crate::{implement, Result}; +use crate::{Result, implement}; /// The configuration manager is an indirection to reload the configuration for /// the server while it is running. In order to not burden or clutter the many diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index e66532ee..67c3b95c 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -14,18 +14,18 @@ use either::{ Either::{Left, Right}, }; use figment::providers::{Env, Format, Toml}; -pub use figment::{value::Value as FigmentValue, Figment}; +pub use figment::{Figment, value::Value as FigmentValue}; use regex::RegexSet; use ruma::{ - api::client::discovery::discover_support::ContactRole, OwnedRoomOrAliasId, OwnedServerName, - OwnedUserId, RoomVersionId, + OwnedRoomOrAliasId, OwnedServerName, OwnedUserId, RoomVersionId, + api::client::discovery::discover_support::ContactRole, }; -use serde::{de::IgnoredAny, Deserialize}; +use serde::{Deserialize, de::IgnoredAny}; use url::Url; use self::proxy::ProxyConfig; pub use self::{check::check, manager::Manager}; -use crate::{err, error::Error, utils::sys, Result}; +use crate::{Result, err, error::Error, utils::sys}; /// All the config options for conduwuit. #[allow(clippy::struct_excessive_bools)] diff --git a/src/core/debug.rs b/src/core/debug.rs index 8a5eccfd..b9a53038 100644 --- a/src/core/debug.rs +++ b/src/core/debug.rs @@ -13,7 +13,7 @@ pub use crate::{result::DebugInspect, utils::debug::*}; /// In release-mode it becomes DEBUG level, and possibly subject to elision. #[macro_export] macro_rules! 
debug_event { - ( $level:expr, $($x:tt)+ ) => { + ( $level:expr_2021, $($x:tt)+ ) => { if $crate::debug::logging() { ::tracing::event!( $level, _debug = true, $($x)+ ) } else { diff --git a/src/core/error/err.rs b/src/core/error/err.rs index 60fa5bff..0962c4ee 100644 --- a/src/core/error/err.rs +++ b/src/core/error/err.rs @@ -165,10 +165,10 @@ macro_rules! err_lev { use std::{fmt, fmt::Write}; use tracing::{ - level_enabled, Callsite, Event, __macro_support, __tracing_log, + __macro_support, __tracing_log, Callsite, Event, Level, callsite::DefaultCallsite, field::{Field, ValueSet, Visit}, - Level, + level_enabled, }; struct Visitor<'a>(&'a mut String); diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index 16613b7e..02ab6fa3 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -152,8 +152,8 @@ impl Error { /// Generate the error message string. pub fn message(&self) -> String { match self { - | Self::Federation(ref origin, ref error) => format!("Answer from {origin}: {error}"), - | Self::Ruma(ref error) => response::ruma_error_message(error), + | Self::Federation(origin, error) => format!("Answer from {origin}: {error}"), + | Self::Ruma(error) => response::ruma_error_message(error), | _ => format!("{self}"), } } diff --git a/src/core/error/panic.rs b/src/core/error/panic.rs index c6a83ae0..2e63105b 100644 --- a/src/core/error/panic.rs +++ b/src/core/error/panic.rs @@ -1,6 +1,6 @@ use std::{ any::Any, - panic::{panic_any, RefUnwindSafe, UnwindSafe}, + panic::{RefUnwindSafe, UnwindSafe, panic_any}, }; use super::Error; diff --git a/src/core/error/response.rs b/src/core/error/response.rs index 75e4050d..00ade5ae 100644 --- a/src/core/error/response.rs +++ b/src/core/error/response.rs @@ -2,11 +2,11 @@ use bytes::BytesMut; use http::StatusCode; use http_body_util::Full; use ruma::api::{ + OutgoingResponse, client::{ error::{ErrorBody, ErrorKind}, uiaa::UiaaResponse, }, - OutgoingResponse, }; use super::Error; diff --git a/src/core/info/room_version.rs b/src/core/info/room_version.rs index b33a8562..51d5d3c6 100644 --- a/src/core/info/room_version.rs +++ b/src/core/info/room_version.rs @@ -2,7 +2,7 @@ use std::iter::once; -use ruma::{api::client::discovery::get_capabilities::RoomVersionStability, RoomVersionId}; +use ruma::{RoomVersionId, api::client::discovery::get_capabilities::RoomVersionStability}; use crate::{at, is_equal_to}; diff --git a/src/core/log/capture/data.rs b/src/core/log/capture/data.rs index 0ad7a6c2..a4a1225b 100644 --- a/src/core/log/capture/data.rs +++ b/src/core/log/capture/data.rs @@ -1,7 +1,7 @@ use tracing::Level; -use tracing_core::{span::Current, Event}; +use tracing_core::{Event, span::Current}; -use super::{layer::Value, Layer}; +use super::{Layer, layer::Value}; use crate::{info, utils::string::EMPTY}; pub struct Data<'a> { diff --git a/src/core/log/capture/util.rs b/src/core/log/capture/util.rs index 8bad4ba0..65524be5 100644 --- a/src/core/log/capture/util.rs +++ b/src/core/log/capture/util.rs @@ -1,7 +1,7 @@ use std::sync::{Arc, Mutex}; use super::{ - super::{fmt, Level}, + super::{Level, fmt}, Closure, Data, }; use crate::Result; diff --git a/src/core/log/console.rs b/src/core/log/console.rs index 1f04ba26..d91239ac 100644 --- a/src/core/log/console.rs +++ b/src/core/log/console.rs @@ -1,20 +1,20 @@ use std::{env, io, sync::LazyLock}; use tracing::{ - field::{Field, Visit}, Event, Level, Subscriber, + field::{Field, Visit}, }; use tracing_subscriber::{ field::RecordFields, fmt, fmt::{ - format::{Compact, DefaultVisitor, Format, Full, 
Pretty, Writer}, FmtContext, FormatEvent, FormatFields, MakeWriter, + format::{Compact, DefaultVisitor, Format, Full, Pretty, Writer}, }, registry::LookupSpan, }; -use crate::{apply, Config, Result}; +use crate::{Config, Result, apply}; static SYSTEMD_MODE: LazyLock = LazyLock::new(|| env::var("SYSTEMD_EXEC_PID").is_ok() && env::var("JOURNAL_STREAM").is_ok()); diff --git a/src/core/log/fmt.rs b/src/core/log/fmt.rs index 353d4442..b73d0c9b 100644 --- a/src/core/log/fmt.rs +++ b/src/core/log/fmt.rs @@ -1,6 +1,6 @@ use std::fmt::Write; -use super::{color, Level}; +use super::{Level, color}; use crate::Result; pub fn html(out: &mut S, level: &Level, span: &str, msg: &str) -> Result<()> diff --git a/src/core/log/mod.rs b/src/core/log/mod.rs index 0c1840d0..5ac374e8 100644 --- a/src/core/log/mod.rs +++ b/src/core/log/mod.rs @@ -9,7 +9,7 @@ mod reload; mod suppress; pub use capture::Capture; -pub use console::{is_systemd_mode, ConsoleFormat, ConsoleWriter}; +pub use console::{ConsoleFormat, ConsoleWriter, is_systemd_mode}; pub use reload::{LogLevelReloadHandles, ReloadHandle}; pub use suppress::Suppress; pub use tracing::Level; @@ -34,7 +34,7 @@ pub struct Log { #[macro_export] macro_rules! event { - ( $level:expr, $($x:tt)+ ) => { ::tracing::event!( $level, $($x)+ ) } + ( $level:expr_2021, $($x:tt)+ ) => { ::tracing::event!( $level, $($x)+ ) } } #[macro_export] diff --git a/src/core/log/reload.rs b/src/core/log/reload.rs index 12d14f48..e6a16c9f 100644 --- a/src/core/log/reload.rs +++ b/src/core/log/reload.rs @@ -3,9 +3,9 @@ use std::{ sync::{Arc, Mutex}, }; -use tracing_subscriber::{reload, EnvFilter}; +use tracing_subscriber::{EnvFilter, reload}; -use crate::{error, Result}; +use crate::{Result, error}; /// We need to store a reload::Handle value, but can't name it's type explicitly /// because the S type parameter depends on the subscriber's previous layers. 
In diff --git a/src/core/mods/module.rs b/src/core/mods/module.rs index ff181e4f..b65bbca2 100644 --- a/src/core/mods/module.rs +++ b/src/core/mods/module.rs @@ -3,8 +3,8 @@ use std::{ time::SystemTime, }; -use super::{canary, new, path, Library, Symbol}; -use crate::{error, Result}; +use super::{Library, Symbol, canary, new, path}; +use crate::{Result, error}; pub struct Module { handle: Option, diff --git a/src/core/mods/new.rs b/src/core/mods/new.rs index 77d89af4..258fdedc 100644 --- a/src/core/mods/new.rs +++ b/src/core/mods/new.rs @@ -1,6 +1,6 @@ use std::ffi::OsStr; -use super::{path, Library}; +use super::{Library, path}; use crate::{Err, Result}; const OPEN_FLAGS: i32 = libloading::os::unix::RTLD_LAZY | libloading::os::unix::RTLD_GLOBAL; diff --git a/src/core/pdu/builder.rs b/src/core/pdu/builder.rs index 0efee128..5aa0c9ca 100644 --- a/src/core/pdu/builder.rs +++ b/src/core/pdu/builder.rs @@ -1,11 +1,11 @@ use std::collections::BTreeMap; use ruma::{ - events::{EventContent, MessageLikeEventType, StateEventType, TimelineEventType}, MilliSecondsSinceUnixEpoch, OwnedEventId, + events::{EventContent, MessageLikeEventType, StateEventType, TimelineEventType}, }; use serde::Deserialize; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; use super::StateKey; diff --git a/src/core/pdu/content.rs b/src/core/pdu/content.rs index fa724cb2..4e60ce6e 100644 --- a/src/core/pdu/content.rs +++ b/src/core/pdu/content.rs @@ -1,7 +1,7 @@ use serde::Deserialize; use serde_json::value::Value as JsonValue; -use crate::{err, implement, Result}; +use crate::{Result, err, implement}; #[must_use] #[implement(super::Pdu)] diff --git a/src/core/pdu/count.rs b/src/core/pdu/count.rs index 0135cf28..b880278f 100644 --- a/src/core/pdu/count.rs +++ b/src/core/pdu/count.rs @@ -4,7 +4,7 @@ use std::{cmp::Ordering, fmt, fmt::Display, str::FromStr}; use ruma::api::Direction; -use crate::{err, Error, Result}; +use crate::{Error, Result, err}; #[derive(Hash, PartialEq, Eq, Clone, Copy, Debug)] pub enum Count { diff --git a/src/core/pdu/event.rs b/src/core/pdu/event.rs index d5c0561e..09ad1666 100644 --- a/src/core/pdu/event.rs +++ b/src/core/pdu/event.rs @@ -1,4 +1,4 @@ -use ruma::{events::TimelineEventType, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId}; +use ruma::{MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, events::TimelineEventType}; use serde_json::value::RawValue as RawJsonValue; use super::Pdu; diff --git a/src/core/pdu/event_id.rs b/src/core/pdu/event_id.rs index 09b33edc..e9d868b1 100644 --- a/src/core/pdu/event_id.rs +++ b/src/core/pdu/event_id.rs @@ -1,7 +1,7 @@ use ruma::{CanonicalJsonObject, OwnedEventId, RoomVersionId}; use serde_json::value::RawValue as RawJsonValue; -use crate::{err, Result}; +use crate::{Result, err}; /// Generates a correct eventId for the incoming pdu. 
/// diff --git a/src/core/pdu/mod.rs b/src/core/pdu/mod.rs index 9cb42239..9fb2a3da 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/pdu/mod.rs @@ -17,13 +17,14 @@ mod unsigned; use std::cmp::Ordering; use ruma::{ - events::TimelineEventType, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, - OwnedRoomId, OwnedServerName, OwnedUserId, UInt, + CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, + OwnedUserId, UInt, events::TimelineEventType, }; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue as RawJsonValue; pub use self::{ + Count as PduCount, Id as PduId, Pdu as PduEvent, RawId as RawPduId, builder::{Builder, Builder as PduBuilder}, count::Count, event::Event, @@ -31,7 +32,6 @@ pub use self::{ id::*, raw_id::*, state_key::{ShortStateKey, StateKey}, - Count as PduCount, Id as PduId, Pdu as PduEvent, RawId as RawPduId, }; use crate::Result; diff --git a/src/core/pdu/raw_id.rs b/src/core/pdu/raw_id.rs index e1fd2381..318a0cd7 100644 --- a/src/core/pdu/raw_id.rs +++ b/src/core/pdu/raw_id.rs @@ -55,8 +55,8 @@ impl RawId { #[must_use] pub fn as_bytes(&self) -> &[u8] { match self { - | Self::Normal(ref raw) => raw, - | Self::Backfilled(ref raw) => raw, + | Self::Normal(raw) => raw, + | Self::Backfilled(raw) => raw, } } } diff --git a/src/core/pdu/redact.rs b/src/core/pdu/redact.rs index 7c332719..409debfe 100644 --- a/src/core/pdu/redact.rs +++ b/src/core/pdu/redact.rs @@ -1,15 +1,15 @@ use ruma::{ - canonical_json::redact_content_in_place, - events::{room::redaction::RoomRedactionEventContent, TimelineEventType}, OwnedEventId, RoomVersionId, + canonical_json::redact_content_in_place, + events::{TimelineEventType, room::redaction::RoomRedactionEventContent}, }; use serde::Deserialize; use serde_json::{ json, - value::{to_raw_value, RawValue as RawJsonValue}, + value::{RawValue as RawJsonValue, to_raw_value}, }; -use crate::{implement, Error, Result}; +use crate::{Error, Result, implement}; #[derive(Deserialize)] struct ExtractRedactedBecause { @@ -76,14 +76,21 @@ pub fn copy_redacts(&self) -> (Option, Box) { if let Ok(mut content) = serde_json::from_str::(self.content.get()) { - if let Some(redacts) = content.redacts { - return (Some(redacts), self.content.clone()); - } else if let Some(redacts) = self.redacts.clone() { - content.redacts = Some(redacts); - return ( - self.redacts.clone(), - to_raw_value(&content).expect("Must be valid, we only added redacts field"), - ); + match content.redacts { + | Some(redacts) => { + return (Some(redacts), self.content.clone()); + }, + | _ => match self.redacts.clone() { + | Some(redacts) => { + content.redacts = Some(redacts); + return ( + self.redacts.clone(), + to_raw_value(&content) + .expect("Must be valid, we only added redacts field"), + ); + }, + | _ => {}, + }, } } } diff --git a/src/core/pdu/strip.rs b/src/core/pdu/strip.rs index 7d2fb1d6..4e7c5b83 100644 --- a/src/core/pdu/strip.rs +++ b/src/core/pdu/strip.rs @@ -1,8 +1,8 @@ use ruma::{ events::{ - room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent, AnyEphemeralRoomEvent, AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent, AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, + room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent, }, serde::Raw, }; diff --git a/src/core/pdu/unsigned.rs b/src/core/pdu/unsigned.rs index 8482a48a..23897519 100644 --- a/src/core/pdu/unsigned.rs +++ b/src/core/pdu/unsigned.rs @@ -2,10 +2,10 @@ use 
std::collections::BTreeMap; use ruma::MilliSecondsSinceUnixEpoch; use serde::Deserialize; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue, Value as JsonValue}; +use serde_json::value::{RawValue as RawJsonValue, Value as JsonValue, to_raw_value}; use super::Pdu; -use crate::{err, implement, is_true, Result}; +use crate::{Result, err, implement, is_true}; #[implement(Pdu)] pub fn remove_transaction_id(&mut self) -> Result { diff --git a/src/core/server.rs b/src/core/server.rs index 80493c94..b67759d6 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -1,7 +1,7 @@ use std::{ sync::{ - atomic::{AtomicBool, Ordering}, Arc, + atomic::{AtomicBool, Ordering}, }, time::SystemTime, }; @@ -9,7 +9,7 @@ use std::{ use ruma::OwnedServerName; use tokio::{runtime, sync::broadcast}; -use crate::{config, config::Config, log::Log, metrics::Metrics, Err, Result}; +use crate::{Err, Result, config, config::Config, log::Log, metrics::Metrics}; /// Server runtime state; public portion pub struct Server { diff --git a/src/core/state_res/event_auth.rs b/src/core/state_res/event_auth.rs index df2f8b36..4b8e55f3 100644 --- a/src/core/state_res/event_auth.rs +++ b/src/core/state_res/event_auth.rs @@ -1,10 +1,11 @@ use std::{borrow::Borrow, collections::BTreeSet}; use futures::{ - future::{join3, OptionFuture}, Future, + future::{OptionFuture, join3}, }; use ruma::{ + Int, OwnedUserId, RoomVersionId, UserId, events::room::{ create::RoomCreateEventContent, join_rules::{JoinRule, RoomJoinRulesEventContent}, @@ -14,21 +15,20 @@ use ruma::{ }, int, serde::{Base64, Raw}, - Int, OwnedUserId, RoomVersionId, UserId, }; use serde::{ - de::{Error as _, IgnoredAny}, Deserialize, + de::{Error as _, IgnoredAny}, }; use serde_json::{from_str as from_json_str, value::RawValue as RawJsonValue}; use super::{ + Error, Event, Result, StateEventType, StateKey, TimelineEventType, power_levels::{ deserialize_power_levels, deserialize_power_levels_content_fields, deserialize_power_levels_content_invite, deserialize_power_levels_content_redact, }, room_version::RoomVersion, - Error, Event, Result, StateEventType, StateKey, TimelineEventType, }; use crate::{debug, error, trace, warn}; @@ -394,28 +394,27 @@ where } // If type is m.room.third_party_invite - let sender_power_level = if let Some(pl) = &power_levels_event { - let content = deserialize_power_levels_content_fields(pl.content().get(), room_version)?; - if let Some(level) = content.get_user_power(sender) { - *level - } else { - content.users_default - } - } else { - // If no power level event found the creator gets 100 everyone else gets 0 - let is_creator = if room_version.use_room_create_sender { - room_create_event.sender() == sender - } else { - #[allow(deprecated)] - from_json_str::(room_create_event.content().get()) - .is_ok_and(|create| create.creator.unwrap() == *sender) - }; + let sender_power_level = match &power_levels_event { + | Some(pl) => { + let content = + deserialize_power_levels_content_fields(pl.content().get(), room_version)?; + match content.get_user_power(sender) { + | Some(level) => *level, + | _ => content.users_default, + } + }, + | _ => { + // If no power level event found the creator gets 100 everyone else gets 0 + let is_creator = if room_version.use_room_create_sender { + room_create_event.sender() == sender + } else { + #[allow(deprecated)] + from_json_str::(room_create_event.content().get()) + .is_ok_and(|create| create.creator.unwrap() == *sender) + }; - if is_creator { - int!(100) - } else { - int!(0) - } + if is_creator { 
int!(100) } else { int!(0) } + }, }; // Allow if and only if sender's current power level is greater than @@ -452,19 +451,21 @@ where if *incoming_event.event_type() == TimelineEventType::RoomPowerLevels { debug!("starting m.room.power_levels check"); - if let Some(required_pwr_lvl) = check_power_levels( + match check_power_levels( room_version, incoming_event, power_levels_event.as_ref(), sender_power_level, ) { - if !required_pwr_lvl { + | Some(required_pwr_lvl) => + if !required_pwr_lvl { + warn!("m.room.power_levels was not allowed"); + return Ok(false); + }, + | _ => { warn!("m.room.power_levels was not allowed"); return Ok(false); - } - } else { - warn!("m.room.power_levels was not allowed"); - return Ok(false); + }, } debug!("m.room.power_levels event allowed"); } @@ -576,10 +577,9 @@ fn valid_membership_change( let content = deserialize_power_levels_content_fields(pl.content().get(), room_version)?; - let user_pl = if let Some(level) = content.get_user_power(user_for_join_auth) { - *level - } else { - content.users_default + let user_pl = match content.get_user_power(user_for_join_auth) { + | Some(level) => *level, + | _ => content.users_default, }; (user_pl, invite) @@ -665,45 +665,48 @@ fn valid_membership_change( }, | MembershipState::Invite => { // If content has third_party_invite key - if let Some(tp_id) = third_party_invite.and_then(|i| i.deserialize().ok()) { - if target_user_current_membership == MembershipState::Ban { - warn!(?target_user_membership_event_id, "Can't invite banned user"); - false - } else { - let allow = verify_third_party_invite( - Some(target_user), - sender, - &tp_id, - current_third_party_invite, - ); - if !allow { - warn!("Third party invite invalid"); - } - allow - } - } else if !sender_is_joined - || target_user_current_membership == MembershipState::Join - || target_user_current_membership == MembershipState::Ban - { - warn!( - ?target_user_membership_event_id, - ?sender_membership_event_id, - "Can't invite user if sender not joined or the user is currently joined or \ - banned", - ); - false - } else { - let allow = sender_power - .filter(|&p| p >= &power_levels.invite) - .is_some(); - if !allow { - warn!( - ?target_user_membership_event_id, - ?power_levels_event_id, - "User does not have enough power to invite", - ); - } - allow + match third_party_invite.and_then(|i| i.deserialize().ok()) { + | Some(tp_id) => + if target_user_current_membership == MembershipState::Ban { + warn!(?target_user_membership_event_id, "Can't invite banned user"); + false + } else { + let allow = verify_third_party_invite( + Some(target_user), + sender, + &tp_id, + current_third_party_invite, + ); + if !allow { + warn!("Third party invite invalid"); + } + allow + }, + | _ => + if !sender_is_joined + || target_user_current_membership == MembershipState::Join + || target_user_current_membership == MembershipState::Ban + { + warn!( + ?target_user_membership_event_id, + ?sender_membership_event_id, + "Can't invite user if sender not joined or the user is currently \ + joined or banned", + ); + false + } else { + let allow = sender_power + .filter(|&p| p >= &power_levels.invite) + .is_some(); + if !allow { + warn!( + ?target_user_membership_event_id, + ?power_levels_event_id, + "User does not have enough power to invite", + ); + } + allow + }, } }, | MembershipState::Leave => @@ -1111,23 +1114,23 @@ mod tests { use std::sync::Arc; use ruma::events::{ + StateEventType, TimelineEventType, room::{ join_rules::{ AllowRule, JoinRule, Restricted, RoomJoinRulesEventContent, 
RoomMembership, }, member::{MembershipState, RoomMemberEventContent}, }, - StateEventType, TimelineEventType, }; use serde_json::value::to_raw_value as to_raw_json_value; use crate::state_res::{ + Event, EventTypeExt, RoomVersion, StateMap, event_auth::valid_membership_change, test_utils::{ - alice, charlie, ella, event_id, member_content_ban, member_content_join, room_id, - to_pdu_event, PduEvent, INITIAL_EVENTS, INITIAL_EVENTS_CREATE_ROOM, + INITIAL_EVENTS, INITIAL_EVENTS_CREATE_ROOM, PduEvent, alice, charlie, ella, event_id, + member_content_ban, member_content_join, room_id, to_pdu_event, }, - Event, EventTypeExt, RoomVersion, StateMap, }; #[test] @@ -1156,21 +1159,23 @@ mod tests { let target_user = charlie(); let sender = alice(); - assert!(valid_membership_change( - &RoomVersion::V6, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - None, - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap()); + assert!( + valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + None, + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); } #[test] @@ -1199,21 +1204,23 @@ mod tests { let target_user = charlie(); let sender = charlie(); - assert!(!valid_membership_change( - &RoomVersion::V6, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - None, - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap()); + assert!( + !valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + None, + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); } #[test] @@ -1242,21 +1249,23 @@ mod tests { let target_user = alice(); let sender = alice(); - assert!(valid_membership_change( - &RoomVersion::V6, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - None, - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - 
) - .unwrap()); + assert!( + valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + None, + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); } #[test] @@ -1285,21 +1294,23 @@ mod tests { let target_user = alice(); let sender = charlie(); - assert!(!valid_membership_change( - &RoomVersion::V6, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - None, - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap()); + assert!( + !valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + None, + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); } #[test] @@ -1345,37 +1356,41 @@ mod tests { let target_user = ella(); let sender = ella(); - assert!(valid_membership_change( - &RoomVersion::V9, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - Some(alice()), - &MembershipState::Join, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap()); + assert!( + valid_membership_change( + &RoomVersion::V9, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + Some(alice()), + &MembershipState::Join, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); - assert!(!valid_membership_change( - &RoomVersion::V9, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - Some(ella()), - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap()); + assert!( + !valid_membership_change( + &RoomVersion::V9, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, 
+ fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + Some(ella()), + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); } #[test] @@ -1413,20 +1428,22 @@ mod tests { let target_user = ella(); let sender = ella(); - assert!(valid_membership_change( - &RoomVersion::V7, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - None, - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap()); + assert!( + valid_membership_change( + &RoomVersion::V7, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + None, + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); } } diff --git a/src/core/state_res/mod.rs b/src/core/state_res/mod.rs index 19ea3cc0..6bff0cf8 100644 --- a/src/core/state_res/mod.rs +++ b/src/core/state_res/mod.rs @@ -17,13 +17,14 @@ use std::{ hash::{BuildHasher, Hash}, }; -use futures::{future, stream, Future, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{Future, FutureExt, StreamExt, TryFutureExt, TryStreamExt, future, stream}; use ruma::{ + EventId, Int, MilliSecondsSinceUnixEpoch, RoomVersionId, events::{ - room::member::{MembershipState, RoomMemberEventContent}, StateEventType, TimelineEventType, + room::member::{MembershipState, RoomMemberEventContent}, }, - int, EventId, Int, MilliSecondsSinceUnixEpoch, RoomVersionId, + int, }; use serde_json::from_str as from_json_str; @@ -263,7 +264,7 @@ where #[allow(clippy::arithmetic_side_effects)] fn get_auth_chain_diff( auth_chain_sets: &[HashSet], -) -> impl Iterator + Send +) -> impl Iterator + Send + use where Id: Clone + Eq + Hash + Send, Hasher: BuildHasher + Send + Sync, @@ -864,23 +865,23 @@ mod tests { use maplit::{hashmap, hashset}; use rand::seq::SliceRandom; use ruma::{ + MilliSecondsSinceUnixEpoch, OwnedEventId, RoomVersionId, events::{ - room::join_rules::{JoinRule, RoomJoinRulesEventContent}, StateEventType, TimelineEventType, + room::join_rules::{JoinRule, RoomJoinRulesEventContent}, }, - int, uint, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomVersionId, + int, uint, }; use serde_json::{json, value::to_raw_value as to_raw_json_value}; use super::{ - is_power_event, + Event, EventTypeExt, StateMap, is_power_event, room_version::RoomVersion, test_utils::{ - alice, bob, charlie, do_check, ella, event_id, member_content_ban, - member_content_join, room_id, to_init_pdu_event, to_pdu_event, zara, PduEvent, - TestStore, INITIAL_EVENTS, + INITIAL_EVENTS, PduEvent, TestStore, alice, bob, charlie, do_check, ella, event_id, + member_content_ban, member_content_join, room_id, to_init_pdu_event, to_pdu_event, + zara, }, - Event, EventTypeExt, StateMap, }; use crate::debug; @@ 
-1557,7 +1558,7 @@ mod tests { } macro_rules! state_set { - ($($kind:expr => $key:expr => $id:expr),* $(,)?) => {{ + ($($kind:expr_2021 => $key:expr_2021 => $id:expr_2021),* $(,)?) => {{ #[allow(unused_mut)] let mut x = StateMap::new(); $( diff --git a/src/core/state_res/power_levels.rs b/src/core/state_res/power_levels.rs index e1768574..045b1666 100644 --- a/src/core/state_res/power_levels.rs +++ b/src/core/state_res/power_levels.rs @@ -1,16 +1,16 @@ use std::collections::BTreeMap; use ruma::{ - events::{room::power_levels::RoomPowerLevelsEventContent, TimelineEventType}, - power_levels::{default_power_level, NotificationPowerLevels}, + Int, OwnedUserId, UserId, + events::{TimelineEventType, room::power_levels::RoomPowerLevelsEventContent}, + power_levels::{NotificationPowerLevels, default_power_level}, serde::{ deserialize_v1_powerlevel, vec_deserialize_int_powerlevel_values, vec_deserialize_v1_powerlevel_values, }, - Int, OwnedUserId, UserId, }; use serde::Deserialize; -use serde_json::{from_str as from_json_str, Error}; +use serde_json::{Error, from_str as from_json_str}; use tracing::error; use super::{Result, RoomVersion}; diff --git a/src/core/state_res/state_event.rs b/src/core/state_res/state_event.rs index 2c038cfe..ac9e29d6 100644 --- a/src/core/state_res/state_event.rs +++ b/src/core/state_res/state_event.rs @@ -5,7 +5,7 @@ use std::{ sync::Arc, }; -use ruma::{events::TimelineEventType, EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId}; +use ruma::{EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId, events::TimelineEventType}; use serde_json::value::RawValue as RawJsonValue; /// Abstraction of a PDU so users can have their own PDU types. diff --git a/src/core/state_res/test_utils.rs b/src/core/state_res/test_utils.rs index 9c2b151f..d96ee927 100644 --- a/src/core/state_res/test_utils.rs +++ b/src/core/state_res/test_utils.rs @@ -2,33 +2,33 @@ use std::{ borrow::Borrow, collections::{BTreeMap, HashMap, HashSet}, sync::{ - atomic::{AtomicU64, Ordering::SeqCst}, Arc, + atomic::{AtomicU64, Ordering::SeqCst}, }, }; use futures::future::ready; use ruma::{ - event_id, + EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, ServerSignatures, + UserId, event_id, events::{ + TimelineEventType, pdu::{EventHash, Pdu, RoomV3Pdu}, room::{ join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, }, - TimelineEventType, }, - int, room_id, uint, user_id, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, - RoomVersionId, ServerSignatures, UserId, + int, room_id, uint, user_id, }; use serde_json::{ json, - value::{to_raw_value as to_raw_json_value, RawValue as RawJsonValue}, + value::{RawValue as RawJsonValue, to_raw_value as to_raw_json_value}, }; pub(crate) use self::event::PduEvent; use super::auth_types_for_event; -use crate::{info, Event, EventTypeExt, Result, StateMap}; +use crate::{Event, EventTypeExt, Result, StateMap, info}; static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); @@ -584,8 +584,8 @@ pub(crate) fn INITIAL_EDGES() -> Vec { pub(crate) mod event { use ruma::{ - events::{pdu::Pdu, TimelineEventType}, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, + events::{TimelineEventType, pdu::Pdu}, }; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue as RawJsonValue; diff --git a/src/core/utils/bytes.rs b/src/core/utils/bytes.rs index 40316440..04101be4 100644 --- a/src/core/utils/bytes.rs +++ b/src/core/utils/bytes.rs @@ -1,6 +1,6 @@ use bytesize::ByteSize; -use crate::{err, 
Result}; +use crate::{Result, err}; /// Parse a human-writable size string w/ si-unit suffix into integer #[inline] diff --git a/src/core/utils/defer.rs b/src/core/utils/defer.rs index 60243e97..4887d164 100644 --- a/src/core/utils/defer.rs +++ b/src/core/utils/defer.rs @@ -12,14 +12,14 @@ macro_rules! defer { let _defer_ = _Defer_ { closure: || $body }; }; - ($body:expr) => { + ($body:expr_2021) => { $crate::defer! {{ $body }} }; } #[macro_export] macro_rules! scope_restore { - ($val:ident, $ours:expr) => { + ($val:ident, $ours:expr_2021) => { let theirs = $crate::utils::exchange($val, $ours); $crate::defer! {{ *$val = theirs; }}; }; diff --git a/src/core/utils/future/bool_ext.rs b/src/core/utils/future/bool_ext.rs index 6cb2f1fe..c93c7dbc 100644 --- a/src/core/utils/future/bool_ext.rs +++ b/src/core/utils/future/bool_ext.rs @@ -3,8 +3,8 @@ use std::marker::Unpin; use futures::{ - future::{select_ok, try_join, try_join_all, try_select}, Future, FutureExt, + future::{select_ok, try_join, try_join_all, try_select}, }; pub trait BoolExt diff --git a/src/core/utils/future/ext_ext.rs b/src/core/utils/future/ext_ext.rs index 38decaae..219bb664 100644 --- a/src/core/utils/future/ext_ext.rs +++ b/src/core/utils/future/ext_ext.rs @@ -2,7 +2,7 @@ use std::marker::Unpin; -use futures::{future, future::Select, Future}; +use futures::{Future, future, future::Select}; /// This interface is not necessarily complete; feel free to add as-needed. pub trait ExtExt diff --git a/src/core/utils/future/mod.rs b/src/core/utils/future/mod.rs index 2198a84f..e1d96941 100644 --- a/src/core/utils/future/mod.rs +++ b/src/core/utils/future/mod.rs @@ -3,7 +3,7 @@ mod ext_ext; mod option_ext; mod try_ext_ext; -pub use bool_ext::{and, or, BoolExt}; +pub use bool_ext::{BoolExt, and, or}; pub use ext_ext::ExtExt; pub use option_ext::OptionExt; pub use try_ext_ext::TryExtExt; diff --git a/src/core/utils/future/option_ext.rs b/src/core/utils/future/option_ext.rs index ed61de56..d553e5dc 100644 --- a/src/core/utils/future/option_ext.rs +++ b/src/core/utils/future/option_ext.rs @@ -1,6 +1,6 @@ #![allow(clippy::wrong_self_convention)] -use futures::{future::OptionFuture, Future, FutureExt}; +use futures::{Future, FutureExt, future::OptionFuture}; pub trait OptionExt { fn is_none_or(self, f: impl FnOnce(&T) -> bool + Send) -> impl Future + Send; diff --git a/src/core/utils/future/try_ext_ext.rs b/src/core/utils/future/try_ext_ext.rs index aa3d72e4..b2114e56 100644 --- a/src/core/utils/future/try_ext_ext.rs +++ b/src/core/utils/future/try_ext_ext.rs @@ -7,9 +7,8 @@ use std::marker::Unpin; use futures::{ - future, + TryFuture, TryFutureExt, future, future::{MapOkOrElse, TrySelect, UnwrapOrElse}, - TryFuture, TryFutureExt, }; /// This interface is not necessarily complete; feel free to add as-needed. 
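Two edition-2024 constructs recur throughout the hunks in this patch: a precise-capturing `+ use<..>` bound appended to `impl Trait` return types, and `expr` macro fragments rewritten to `expr_2021`. The following is a minimal, hedged sketch with hypothetical names (none of it is code from this repository) of what each construct does.

// Hypothetical names; a sketch of the two constructs, not code from this patch.

// 1) Precise capturing: a `use<'a, K>` bound states exactly which lifetimes and
//    type parameters the opaque `impl Trait` return type captures under the
//    edition-2024 capture rules.
fn first_bytes<'a, K>(keys: &'a [K]) -> impl Iterator<Item = u8> + use<'a, K>
where
    K: AsRef<[u8]>,
{
    keys.iter().filter_map(|key| key.as_ref().first().copied())
}

// 2) `$x:expr` -> `$x:expr_2021` pins the fragment to the 2021 expression grammar,
//    which, unlike edition-2024 `expr`, does not match `const { .. }` or `_`.
macro_rules! is_equal_to_sketch {
    ($val:expr_2021) => {
        |x| x == $val
    };
}

fn main() {
    let keys = [b"ab".to_vec(), b"cd".to_vec()];
    assert_eq!(first_bytes(&keys).count(), 2);

    let eq_three = is_equal_to_sketch!(3);
    assert!(eq_three(3));
}

The `use<'a, K>` list makes the captured generics of the opaque type explicit, and `expr_2021` keeps the 2021 expression grammar so the macros match the same token trees on both editions.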
diff --git a/src/core/utils/hash/argon.rs b/src/core/utils/hash/argon.rs index 18146b47..66dfab75 100644 --- a/src/core/utils/hash/argon.rs +++ b/src/core/utils/hash/argon.rs @@ -1,11 +1,11 @@ use std::sync::OnceLock; use argon2::{ - password_hash, password_hash::SaltString, Algorithm, Argon2, Params, PasswordHash, - PasswordHasher, PasswordVerifier, Version, + Algorithm, Argon2, Params, PasswordHash, PasswordHasher, PasswordVerifier, Version, + password_hash, password_hash::SaltString, }; -use crate::{err, Error, Result}; +use crate::{Error, Result, err}; const M_COST: u32 = Params::DEFAULT_M_COST; // memory size in 1 KiB blocks const T_COST: u32 = Params::DEFAULT_T_COST; // nr of iterations diff --git a/src/core/utils/json.rs b/src/core/utils/json.rs index 4a3fec8f..3f2f225e 100644 --- a/src/core/utils/json.rs +++ b/src/core/utils/json.rs @@ -1,6 +1,6 @@ use std::{fmt, str::FromStr}; -use ruma::{canonical_json::try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; +use ruma::{CanonicalJsonError, CanonicalJsonObject, canonical_json::try_from_json_map}; use crate::Result; diff --git a/src/core/utils/math.rs b/src/core/utils/math.rs index ed157daf..488f2a13 100644 --- a/src/core/utils/math.rs +++ b/src/core/utils/math.rs @@ -6,7 +6,7 @@ use std::{cmp, convert::TryFrom}; pub use checked_ops::checked_ops; pub use self::{expected::Expected, tried::Tried}; -use crate::{debug::type_name, err, Err, Error, Result}; +use crate::{Err, Error, Result, debug::type_name, err}; /// Checked arithmetic expression. Returns a Result #[macro_export] diff --git a/src/core/utils/math/tried.rs b/src/core/utils/math/tried.rs index 2006d2d5..09de731f 100644 --- a/src/core/utils/math/tried.rs +++ b/src/core/utils/math/tried.rs @@ -1,6 +1,6 @@ use num_traits::ops::checked::{CheckedAdd, CheckedDiv, CheckedMul, CheckedRem, CheckedSub}; -use crate::{checked, Result}; +use crate::{Result, checked}; pub trait Tried { #[inline] diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index c2d8ed45..53460c59 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -49,7 +49,7 @@ pub fn exchange(state: &mut T, source: T) -> T { std::mem::replace(state, sou #[macro_export] macro_rules! extract_variant { - ($e:expr, $variant:path) => { + ($e:expr_2021, $variant:path) => { match $e { | $variant(value) => Some(value), | _ => None, @@ -90,7 +90,7 @@ macro_rules! pair_of { ($decl, $decl) }; - ($init:expr) => { + ($init:expr_2021) => { ($init, $init) }; } @@ -134,7 +134,7 @@ macro_rules! is_equal_to { |x| x == $val }; - ($val:expr) => { + ($val:expr_2021) => { |x| x == $val }; } @@ -146,7 +146,7 @@ macro_rules! 
is_less_than { |x| x < $val }; - ($val:expr) => { + ($val:expr_2021) => { |x| x < $val }; } diff --git a/src/core/utils/mutex_map.rs b/src/core/utils/mutex_map.rs index 03a4adf1..01504ce6 100644 --- a/src/core/utils/mutex_map.rs +++ b/src/core/utils/mutex_map.rs @@ -6,7 +6,7 @@ use std::{ use tokio::sync::OwnedMutexGuard as Omg; -use crate::{err, Result}; +use crate::{Result, err}; /// Map of Mutexes pub struct MutexMap { diff --git a/src/core/utils/rand.rs b/src/core/utils/rand.rs index 1d289c6e..72487633 100644 --- a/src/core/utils/rand.rs +++ b/src/core/utils/rand.rs @@ -4,7 +4,7 @@ use std::{ }; use arrayvec::ArrayString; -use rand::{seq::SliceRandom, thread_rng, Rng}; +use rand::{Rng, seq::SliceRandom, thread_rng}; pub fn shuffle(vec: &mut [T]) { let mut rng = thread_rng(); diff --git a/src/core/utils/stream/broadband.rs b/src/core/utils/stream/broadband.rs index 282008e7..832f2638 100644 --- a/src/core/utils/stream/broadband.rs +++ b/src/core/utils/stream/broadband.rs @@ -3,11 +3,11 @@ use std::convert::identity; use futures::{ - stream::{Stream, StreamExt}, Future, + stream::{Stream, StreamExt}, }; -use super::{automatic_width, ReadyExt}; +use super::{ReadyExt, automatic_width}; /// Concurrency extensions to augment futures::StreamExt. broad_ combinators /// produce out-of-order diff --git a/src/core/utils/stream/cloned.rs b/src/core/utils/stream/cloned.rs index d6a0e647..b89e4695 100644 --- a/src/core/utils/stream/cloned.rs +++ b/src/core/utils/stream/cloned.rs @@ -1,6 +1,6 @@ use std::clone::Clone; -use futures::{stream::Map, Stream, StreamExt}; +use futures::{Stream, StreamExt, stream::Map}; pub trait Cloned<'a, T, S> where diff --git a/src/core/utils/stream/ignore.rs b/src/core/utils/stream/ignore.rs index 9baa00f3..37c89d9a 100644 --- a/src/core/utils/stream/ignore.rs +++ b/src/core/utils/stream/ignore.rs @@ -1,4 +1,4 @@ -use futures::{future::ready, Stream, StreamExt, TryStream}; +use futures::{Stream, StreamExt, TryStream, future::ready}; use crate::{Error, Result}; diff --git a/src/core/utils/stream/iter_stream.rs b/src/core/utils/stream/iter_stream.rs index 9077deac..e9a91b1c 100644 --- a/src/core/utils/stream/iter_stream.rs +++ b/src/core/utils/stream/iter_stream.rs @@ -1,7 +1,6 @@ use futures::{ - stream, + StreamExt, stream, stream::{Stream, TryStream}, - StreamExt, }; use crate::{Error, Result}; diff --git a/src/core/utils/stream/mod.rs b/src/core/utils/stream/mod.rs index 23455322..a356f05f 100644 --- a/src/core/utils/stream/mod.rs +++ b/src/core/utils/stream/mod.rs @@ -14,8 +14,8 @@ mod try_wideband; mod wideband; pub use band::{ - automatic_amplification, automatic_width, set_amplification, set_width, AMPLIFICATION_LIMIT, - WIDTH_LIMIT, + AMPLIFICATION_LIMIT, WIDTH_LIMIT, automatic_amplification, automatic_width, + set_amplification, set_width, }; pub use broadband::BroadbandExt; pub use cloned::Cloned; diff --git a/src/core/utils/stream/ready.rs b/src/core/utils/stream/ready.rs index d93187e9..dce7d378 100644 --- a/src/core/utils/stream/ready.rs +++ b/src/core/utils/stream/ready.rs @@ -2,7 +2,7 @@ #![allow(clippy::type_complexity)] use futures::{ - future::{ready, Ready}, + future::{Ready, ready}, stream::{ All, Any, Filter, FilterMap, Fold, ForEach, Scan, SkipWhile, Stream, StreamExt, TakeWhile, }, diff --git a/src/core/utils/stream/try_parallel.rs b/src/core/utils/stream/try_parallel.rs index 7f8a63b1..60fef0ae 100644 --- a/src/core/utils/stream/try_parallel.rs +++ b/src/core/utils/stream/try_parallel.rs @@ -1,10 +1,10 @@ //! 
Parallelism stream combinator extensions to futures::Stream -use futures::{stream::TryStream, TryFutureExt}; +use futures::{TryFutureExt, stream::TryStream}; use tokio::{runtime, task::JoinError}; use super::TryBroadbandExt; -use crate::{utils::sys::available_parallelism, Error, Result}; +use crate::{Error, Result, utils::sys::available_parallelism}; /// Parallelism extensions to augment futures::StreamExt. These combinators are /// for computation-oriented workloads, unlike -band combinators for I/O diff --git a/src/core/utils/stream/try_ready.rs b/src/core/utils/stream/try_ready.rs index 3261acb6..611c177f 100644 --- a/src/core/utils/stream/try_ready.rs +++ b/src/core/utils/stream/try_ready.rs @@ -2,7 +2,7 @@ #![allow(clippy::type_complexity)] use futures::{ - future::{ready, Ready}, + future::{Ready, ready}, stream::{AndThen, TryFilterMap, TryFold, TryForEach, TryStream, TryStreamExt, TryTakeWhile}, }; diff --git a/src/core/utils/stream/try_tools.rs b/src/core/utils/stream/try_tools.rs index 3ddce6ad..ea3b50fc 100644 --- a/src/core/utils/stream/try_tools.rs +++ b/src/core/utils/stream/try_tools.rs @@ -1,7 +1,7 @@ //! TryStreamTools for futures::TryStream #![allow(clippy::type_complexity)] -use futures::{future, future::Ready, stream::TryTakeWhile, TryStream, TryStreamExt}; +use futures::{TryStream, TryStreamExt, future, future::Ready, stream::TryTakeWhile}; use crate::Result; diff --git a/src/core/utils/stream/wideband.rs b/src/core/utils/stream/wideband.rs index a8560bb4..cbebf610 100644 --- a/src/core/utils/stream/wideband.rs +++ b/src/core/utils/stream/wideband.rs @@ -3,11 +3,11 @@ use std::convert::identity; use futures::{ - stream::{Stream, StreamExt}, Future, + stream::{Stream, StreamExt}, }; -use super::{automatic_width, ReadyExt}; +use super::{ReadyExt, automatic_width}; /// Concurrency extensions to augment futures::StreamExt. wideband_ combinators /// produce in-order. diff --git a/src/core/utils/string.rs b/src/core/utils/string.rs index cc692c14..9340d009 100644 --- a/src/core/utils/string.rs +++ b/src/core/utils/string.rs @@ -5,7 +5,7 @@ mod unquote; mod unquoted; pub use self::{between::Between, split::SplitInfallible, unquote::Unquote, unquoted::Unquoted}; -use crate::{utils::exchange, Result}; +use crate::{Result, utils::exchange}; pub const EMPTY: &str = ""; diff --git a/src/core/utils/string/unquoted.rs b/src/core/utils/string/unquoted.rs index 5b002d99..88fa011f 100644 --- a/src/core/utils/string/unquoted.rs +++ b/src/core/utils/string/unquoted.rs @@ -1,9 +1,9 @@ use std::ops::Deref; -use serde::{de, Deserialize, Deserializer}; +use serde::{Deserialize, Deserializer, de}; use super::Unquote; -use crate::{err, Result}; +use crate::{Result, err}; /// Unquoted string which deserialized from a quoted string. Construction from a /// &str is infallible such that the input can already be unquoted. 
Construction diff --git a/src/core/utils/sys.rs b/src/core/utils/sys.rs index a0d5be52..f795ccb8 100644 --- a/src/core/utils/sys.rs +++ b/src/core/utils/sys.rs @@ -5,7 +5,7 @@ use std::path::PathBuf; pub use compute::available_parallelism; -use crate::{debug, Result}; +use crate::{Result, debug}; /// This is needed for opening lots of file descriptors, which tends to /// happen more often when using RocksDB and making lots of federation @@ -16,7 +16,7 @@ use crate::{debug, Result}; /// * #[cfg(unix)] pub fn maximize_fd_limit() -> Result<(), nix::errno::Errno> { - use nix::sys::resource::{getrlimit, setrlimit, Resource::RLIMIT_NOFILE as NOFILE}; + use nix::sys::resource::{Resource::RLIMIT_NOFILE as NOFILE, getrlimit, setrlimit}; let (soft_limit, hard_limit) = getrlimit(NOFILE)?; if soft_limit < hard_limit { diff --git a/src/core/utils/sys/compute.rs b/src/core/utils/sys/compute.rs index ce2aa504..5274cd66 100644 --- a/src/core/utils/sys/compute.rs +++ b/src/core/utils/sys/compute.rs @@ -2,7 +2,7 @@ use std::{cell::Cell, fmt::Debug, path::PathBuf, sync::LazyLock}; -use crate::{is_equal_to, Result}; +use crate::{Result, is_equal_to}; type Id = usize; @@ -45,7 +45,7 @@ pub fn set_affinity(mut ids: I) where I: Iterator + Clone + Debug, { - use core_affinity::{set_each_for_current, set_for_current, CoreId}; + use core_affinity::{CoreId, set_each_for_current, set_for_current}; let n = ids.clone().count(); let mask: Mask = ids.clone().fold(0, |mask, id| { @@ -118,7 +118,7 @@ pub fn cores_available() -> impl Iterator { from_mask(*CORES_AVAILABL #[cfg(target_os = "linux")] #[inline] pub fn getcpu() -> Result { - use crate::{utils::math, Error}; + use crate::{Error, utils::math}; // SAFETY: This is part of an interface with many low-level calls taking many // raw params, but it's unclear why this specific call is unsafe. 
Nevertheless diff --git a/src/core/utils/sys/storage.rs b/src/core/utils/sys/storage.rs index 25b17904..b11df7bb 100644 --- a/src/core/utils/sys/storage.rs +++ b/src/core/utils/sys/storage.rs @@ -3,7 +3,7 @@ use std::{ ffi::OsStr, fs, - fs::{read_to_string, FileType}, + fs::{FileType, read_to_string}, iter::IntoIterator, path::{Path, PathBuf}, }; @@ -11,9 +11,9 @@ use std::{ use libc::dev_t; use crate::{ + Result, result::FlatOk, utils::{result::LogDebugErr, string::SplitInfallible}, - Result, }; /// Device characteristics useful for random access throughput diff --git a/src/core/utils/tests.rs b/src/core/utils/tests.rs index 1bcb92b8..05a0655b 100644 --- a/src/core/utils/tests.rs +++ b/src/core/utils/tests.rs @@ -241,7 +241,7 @@ fn set_intersection_sorted_all() { #[tokio::test] async fn set_intersection_sorted_stream2() { use futures::StreamExt; - use utils::{set::intersection_sorted_stream2, IterStream}; + use utils::{IterStream, set::intersection_sorted_stream2}; let a = ["bar"]; let b = ["bar", "foo"]; diff --git a/src/core/utils/time.rs b/src/core/utils/time.rs index 81fdda2a..73f73971 100644 --- a/src/core/utils/time.rs +++ b/src/core/utils/time.rs @@ -2,7 +2,7 @@ pub mod exponential_backoff; use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use crate::{err, Result}; +use crate::{Result, err}; #[inline] #[must_use] diff --git a/src/database/de.rs b/src/database/de.rs index 441bb4ec..9c0997ff 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -1,10 +1,9 @@ use conduwuit::{ - arrayvec::ArrayVec, checked, debug::DebugInspect, err, utils::string, Error, Result, + Error, Result, arrayvec::ArrayVec, checked, debug::DebugInspect, err, utils::string, }; use serde::{ - de, + Deserialize, de, de::{DeserializeSeed, Visitor}, - Deserialize, }; use crate::util::unhandled; diff --git a/src/database/engine.rs b/src/database/engine.rs index 22e2b9c8..38dd7512 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -12,21 +12,21 @@ mod repair; use std::{ ffi::CStr, sync::{ - atomic::{AtomicU32, Ordering}, Arc, + atomic::{AtomicU32, Ordering}, }, }; -use conduwuit::{debug, info, warn, Err, Result}; +use conduwuit::{Err, Result, debug, info, warn}; use rocksdb::{ AsColumnFamilyRef, BoundColumnFamily, DBCommon, DBWithThreadMode, MultiThreaded, WaitForCompactOptions, }; use crate::{ + Context, pool::Pool, util::{map_err, result}, - Context, }; pub struct Engine { diff --git a/src/database/engine/backup.rs b/src/database/engine/backup.rs index db718c2c..bb110630 100644 --- a/src/database/engine/backup.rs +++ b/src/database/engine/backup.rs @@ -1,6 +1,6 @@ use std::fmt::Write; -use conduwuit::{error, implement, info, utils::time::rfc2822_from_seconds, warn, Result}; +use conduwuit::{Result, error, implement, info, utils::time::rfc2822_from_seconds, warn}; use rocksdb::backup::{BackupEngine, BackupEngineOptions}; use super::Engine; diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index 83bce08c..5ddb9473 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -1,4 +1,4 @@ -use conduwuit::{err, utils::math::Expected, Config, Result}; +use conduwuit::{Config, Result, err, utils::math::Expected}; use rocksdb::{ BlockBasedIndexType, BlockBasedOptions, BlockBasedPinningTier, Cache, DBCompressionType as CompressionType, DataBlockIndexType, LruCacheOptions, Options, @@ -6,7 +6,7 @@ use rocksdb::{ }; use super::descriptor::{CacheDisp, Descriptor}; -use crate::{util::map_err, Context}; +use crate::{Context, util::map_err}; pub(super) 
const SENTINEL_COMPRESSION_LEVEL: i32 = 32767; diff --git a/src/database/engine/context.rs b/src/database/engine/context.rs index 04e08854..380e37af 100644 --- a/src/database/engine/context.rs +++ b/src/database/engine/context.rs @@ -3,7 +3,7 @@ use std::{ sync::{Arc, Mutex}, }; -use conduwuit::{debug, utils::math::usize_from_f64, Result, Server}; +use conduwuit::{Result, Server, debug, utils::math::usize_from_f64}; use rocksdb::{Cache, Env, LruCacheOptions}; use crate::{or_else, pool::Pool}; diff --git a/src/database/engine/db_opts.rs b/src/database/engine/db_opts.rs index 6abeb4b0..18cec742 100644 --- a/src/database/engine/db_opts.rs +++ b/src/database/engine/db_opts.rs @@ -1,7 +1,7 @@ use std::{cmp, convert::TryFrom}; -use conduwuit::{utils, Config, Result}; -use rocksdb::{statistics::StatsLevel, Cache, DBRecoveryMode, Env, LogLevel, Options}; +use conduwuit::{Config, Result, utils}; +use rocksdb::{Cache, DBRecoveryMode, Env, LogLevel, Options, statistics::StatsLevel}; use super::{cf_opts::cache_size_f64, logger::handle as handle_log}; diff --git a/src/database/engine/files.rs b/src/database/engine/files.rs index 33d6fdc4..1f38a63c 100644 --- a/src/database/engine/files.rs +++ b/src/database/engine/files.rs @@ -1,11 +1,11 @@ -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use rocksdb::LiveFile as SstFile; use super::Engine; use crate::util::map_err; #[implement(Engine)] -pub fn file_list(&self) -> impl Iterator> + Send { +pub fn file_list(&self) -> impl Iterator> + Send + use<> { self.db .live_files() .map_err(map_err) diff --git a/src/database/engine/memory_usage.rs b/src/database/engine/memory_usage.rs index 01859815..9bb5c535 100644 --- a/src/database/engine/memory_usage.rs +++ b/src/database/engine/memory_usage.rs @@ -1,6 +1,6 @@ use std::fmt::Write; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use rocksdb::perf::get_memory_usage_stats; use super::Engine; diff --git a/src/database/engine/open.rs b/src/database/engine/open.rs index 59dabce1..24010c3a 100644 --- a/src/database/engine/open.rs +++ b/src/database/engine/open.rs @@ -1,20 +1,20 @@ use std::{ collections::BTreeSet, path::Path, - sync::{atomic::AtomicU32, Arc}, + sync::{Arc, atomic::AtomicU32}, }; -use conduwuit::{debug, implement, info, warn, Result}; +use conduwuit::{Result, debug, implement, info, warn}; use rocksdb::{ColumnFamilyDescriptor, Options}; use super::{ + Db, Engine, cf_opts::cf_options, db_opts::db_options, descriptor::{self, Descriptor}, repair::repair, - Db, Engine, }; -use crate::{or_else, Context}; +use crate::{Context, or_else}; #[implement(Engine)] #[tracing::instrument(skip_all)] diff --git a/src/database/engine/repair.rs b/src/database/engine/repair.rs index 61283904..aeec0caf 100644 --- a/src/database/engine/repair.rs +++ b/src/database/engine/repair.rs @@ -1,6 +1,6 @@ use std::path::PathBuf; -use conduwuit::{info, warn, Err, Result}; +use conduwuit::{Err, Result, info, warn}; use rocksdb::Options; use super::Db; diff --git a/src/database/handle.rs b/src/database/handle.rs index 43b57839..484e5618 100644 --- a/src/database/handle.rs +++ b/src/database/handle.rs @@ -4,7 +4,7 @@ use conduwuit::Result; use rocksdb::DBPinnableSlice; use serde::{Deserialize, Serialize, Serializer}; -use crate::{keyval::deserialize_val, Deserialized, Slice}; +use crate::{Deserialized, Slice, keyval::deserialize_val}; pub struct Handle<'a> { val: DBPinnableSlice<'a>, diff --git a/src/database/keyval.rs b/src/database/keyval.rs index f572d15f..6059cd53 100644 --- 
a/src/database/keyval.rs +++ b/src/database/keyval.rs @@ -1,4 +1,4 @@ -use conduwuit::{smallvec::SmallVec, Result}; +use conduwuit::{Result, smallvec::SmallVec}; use serde::{Deserialize, Serialize}; use crate::{de, ser}; diff --git a/src/database/map.rs b/src/database/map.rs index 37425ecf..c5a908ba 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -40,7 +40,7 @@ pub(crate) use self::options::{ read_options_default, write_options_default, }; pub use self::{get_batch::Get, qry_batch::Qry}; -use crate::{watchers::Watchers, Engine}; +use crate::{Engine, watchers::Watchers}; pub struct Map { name: &'static str, diff --git a/src/database/map/compact.rs b/src/database/map/compact.rs index c0381eb4..84476de6 100644 --- a/src/database/map/compact.rs +++ b/src/database/map/compact.rs @@ -1,4 +1,4 @@ -use conduwuit::{implement, Err, Result}; +use conduwuit::{Err, Result, implement}; use rocksdb::{BottommostLevelCompaction, CompactOptions}; use crate::keyval::KeyBuf; diff --git a/src/database/map/contains.rs b/src/database/map/contains.rs index 7a09b358..474818e8 100644 --- a/src/database/map/contains.rs +++ b/src/database/map/contains.rs @@ -1,10 +1,10 @@ use std::{convert::AsRef, fmt::Debug, future::Future, io::Write, sync::Arc}; use conduwuit::{ + Result, arrayvec::ArrayVec, err, implement, utils::{future::TryExtExt, result::FlatOk}, - Result, }; use futures::FutureExt; use serde::Serialize; @@ -16,7 +16,10 @@ use crate::{keyval::KeyBuf, ser}; /// - harder errors may not be reported #[inline] #[implement(super::Map)] -pub fn contains(self: &Arc, key: &K) -> impl Future + Send + '_ +pub fn contains( + self: &Arc, + key: &K, +) -> impl Future + Send + '_ + use<'_, K> where K: Serialize + ?Sized + Debug, { @@ -32,7 +35,7 @@ where pub fn acontains( self: &Arc, key: &K, -) -> impl Future + Send + '_ +) -> impl Future + Send + '_ + use<'_, MAX, K> where K: Serialize + ?Sized + Debug, { @@ -49,7 +52,7 @@ pub fn bcontains( self: &Arc, key: &K, buf: &mut B, -) -> impl Future + Send + '_ +) -> impl Future + Send + '_ + use<'_, K, B> where K: Serialize + ?Sized + Debug, B: Write + AsRef<[u8]>, @@ -62,7 +65,10 @@ where /// - key is raw #[inline] #[implement(super::Map)] -pub fn exists<'a, K>(self: &'a Arc, key: &K) -> impl Future + Send + 'a +pub fn exists<'a, K>( + self: &'a Arc, + key: &K, +) -> impl Future + Send + 'a + use<'a, K> where K: AsRef<[u8]> + ?Sized + Debug + 'a, { diff --git a/src/database/map/count.rs b/src/database/map/count.rs index 22b298b9..78f9e2e3 100644 --- a/src/database/map/count.rs +++ b/src/database/map/count.rs @@ -16,7 +16,10 @@ pub fn count(self: &Arc) -> impl Future + Send + '_ { /// - From is a structured key #[implement(super::Map)] #[inline] -pub fn count_from<'a, P>(self: &'a Arc, from: &P) -> impl Future + Send + 'a +pub fn count_from<'a, P>( + self: &'a Arc, + from: &P, +) -> impl Future + Send + 'a + use<'a, P> where P: Serialize + ?Sized + Debug + 'a, { @@ -46,7 +49,7 @@ where pub fn count_prefix<'a, P>( self: &'a Arc, prefix: &P, -) -> impl Future + Send + 'a +) -> impl Future + Send + 'a + use<'a, P> where P: Serialize + ?Sized + Debug + 'a, { diff --git a/src/database/map/get.rs b/src/database/map/get.rs index d6c65be2..0971fb17 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -1,20 +1,23 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{err, implement, utils::result::MapExpect, Err, Result}; -use futures::{future::ready, Future, FutureExt, TryFutureExt}; +use conduwuit::{Err, Result, err, implement, 
utils::result::MapExpect}; +use futures::{Future, FutureExt, TryFutureExt, future::ready}; use rocksdb::{DBPinnableSlice, ReadOptions}; use tokio::task; use crate::{ - util::{is_incomplete, map_err, or_else}, Handle, + util::{is_incomplete, map_err, or_else}, }; /// Fetch a value from the database into cache, returning a reference-handle /// asynchronously. The key is referenced directly to perform the query. #[implement(super::Map)] #[tracing::instrument(skip(self, key), fields(%self), level = "trace")] -pub fn get(self: &Arc, key: &K) -> impl Future>> + Send +pub fn get( + self: &Arc, + key: &K, +) -> impl Future>> + Send + use<'_, K> where K: AsRef<[u8]> + Debug + ?Sized, { diff --git a/src/database/map/get_batch.rs b/src/database/map/get_batch.rs index ab9c1dc8..e23a8848 100644 --- a/src/database/map/get_batch.rs +++ b/src/database/map/get_batch.rs @@ -1,12 +1,11 @@ use std::{convert::AsRef, sync::Arc}; use conduwuit::{ - implement, + Result, implement, utils::{ - stream::{automatic_amplification, automatic_width, WidebandExt}, IterStream, + stream::{WidebandExt, automatic_amplification, automatic_width}, }, - Result, }; use futures::{Stream, StreamExt, TryStreamExt}; use rocksdb::{DBPinnableSlice, ReadOptions}; @@ -64,7 +63,7 @@ where pub(crate) fn get_batch_cached<'a, I, K>( &self, keys: I, -) -> impl Iterator>>> + Send +) -> impl Iterator>>> + Send + use<'_, I, K> where I: Iterator + ExactSizeIterator + Send, K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, @@ -78,7 +77,7 @@ where pub(crate) fn get_batch_blocking<'a, I, K>( &self, keys: I, -) -> impl Iterator>> + Send +) -> impl Iterator>> + Send + use<'_, I, K> where I: Iterator + ExactSizeIterator + Send, K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, @@ -92,7 +91,7 @@ fn get_batch_blocking_opts<'a, I, K>( &self, keys: I, read_options: &ReadOptions, -) -> impl Iterator>, rocksdb::Error>> + Send +) -> impl Iterator>, rocksdb::Error>> + Send + use<'_, I, K> where I: Iterator + ExactSizeIterator + Send, K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, diff --git a/src/database/map/keys.rs b/src/database/map/keys.rs index 2fe70f15..7ca932a5 100644 --- a/src/database/map/keys.rs +++ b/src/database/map/keys.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::Deserialize; diff --git a/src/database/map/keys_from.rs b/src/database/map/keys_from.rs index 76c76325..c9b1717a 100644 --- a/src/database/map/keys_from.rs +++ b/src/database/map/keys_from.rs @@ -1,13 +1,13 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::{Deserialize, Serialize}; use super::stream_from::is_cached; use crate::{ - keyval::{result_deserialize_key, serialize_key, Key}, + keyval::{Key, result_deserialize_key, serialize_key}, stream, }; @@ -15,7 +15,7 @@ use crate::{ pub fn keys_from<'a, K, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -25,7 +25,10 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self), level = "trace")] -pub fn keys_from_raw
<P>
(self: &Arc, from: &P) -> impl Stream>> + Send +pub fn keys_from_raw
<P>
( + self: &Arc, + from: &P, +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { @@ -37,7 +40,7 @@ where pub fn keys_raw_from<'a, K, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, @@ -47,7 +50,10 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self, from), fields(%self), level = "trace")] -pub fn raw_keys_from
<P>
(self: &Arc, from: &P) -> impl Stream>> + Send +pub fn raw_keys_from
<P>
( + self: &Arc, + from: &P, +) -> impl Stream>> + Send + use<'_, P> where P: AsRef<[u8]> + ?Sized + Debug, { diff --git a/src/database/map/keys_prefix.rs b/src/database/map/keys_prefix.rs index 28bc7ccd..09dd79ac 100644 --- a/src/database/map/keys_prefix.rs +++ b/src/database/map/keys_prefix.rs @@ -1,16 +1,16 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; -use futures::{future, Stream, StreamExt, TryStreamExt}; +use conduwuit::{Result, implement}; +use futures::{Stream, StreamExt, TryStreamExt, future}; use serde::{Deserialize, Serialize}; -use crate::keyval::{result_deserialize_key, serialize_key, Key}; +use crate::keyval::{Key, result_deserialize_key, serialize_key}; #[implement(super::Map)] pub fn keys_prefix<'a, K, P>( self: &'a Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -24,7 +24,7 @@ where pub fn keys_prefix_raw
<P>
( self: &Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { diff --git a/src/database/map/qry.rs b/src/database/map/qry.rs index 178f4a61..c6f13c0b 100644 --- a/src/database/map/qry.rs +++ b/src/database/map/qry.rs @@ -1,17 +1,20 @@ use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc}; -use conduwuit::{arrayvec::ArrayVec, implement, Result}; +use conduwuit::{Result, arrayvec::ArrayVec, implement}; use futures::Future; use serde::Serialize; -use crate::{keyval::KeyBuf, ser, Handle}; +use crate::{Handle, keyval::KeyBuf, ser}; /// Fetch a value from the database into cache, returning a reference-handle /// asynchronously. The key is serialized into an allocated buffer to perform /// the query. #[implement(super::Map)] #[inline] -pub fn qry(self: &Arc, key: &K) -> impl Future>> + Send +pub fn qry( + self: &Arc, + key: &K, +) -> impl Future>> + Send + use<'_, K> where K: Serialize + ?Sized + Debug, { @@ -27,7 +30,7 @@ where pub fn aqry( self: &Arc, key: &K, -) -> impl Future>> + Send +) -> impl Future>> + Send + use<'_, MAX, K> where K: Serialize + ?Sized + Debug, { @@ -43,7 +46,7 @@ pub fn bqry( self: &Arc, key: &K, buf: &mut B, -) -> impl Future>> + Send +) -> impl Future>> + Send + use<'_, K, B> where K: Serialize + ?Sized + Debug, B: Write + AsRef<[u8]>, diff --git a/src/database/map/qry_batch.rs b/src/database/map/qry_batch.rs index 31817c48..f44d1c86 100644 --- a/src/database/map/qry_batch.rs +++ b/src/database/map/qry_batch.rs @@ -1,17 +1,16 @@ use std::{fmt::Debug, sync::Arc}; use conduwuit::{ - implement, + Result, implement, utils::{ - stream::{automatic_amplification, automatic_width, WidebandExt}, IterStream, + stream::{WidebandExt, automatic_amplification, automatic_width}, }, - Result, }; use futures::{Stream, StreamExt, TryStreamExt}; use serde::Serialize; -use crate::{keyval::KeyBuf, ser, Handle}; +use crate::{Handle, keyval::KeyBuf, ser}; pub trait Qry<'a, K, S> where diff --git a/src/database/map/rev_keys.rs b/src/database/map/rev_keys.rs index 21558a17..c00f3e55 100644 --- a/src/database/map/rev_keys.rs +++ b/src/database/map/rev_keys.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::Deserialize; diff --git a/src/database/map/rev_keys_from.rs b/src/database/map/rev_keys_from.rs index 65072337..04e457dc 100644 --- a/src/database/map/rev_keys_from.rs +++ b/src/database/map/rev_keys_from.rs @@ -1,13 +1,13 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::{Deserialize, Serialize}; use super::rev_stream_from::is_cached; use crate::{ - keyval::{result_deserialize_key, serialize_key, Key}, + keyval::{Key, result_deserialize_key, serialize_key}, stream, }; @@ -15,7 +15,7 @@ use crate::{ pub fn rev_keys_from<'a, K, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -29,7 +29,7 @@ where pub fn rev_keys_from_raw
<P>
( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { @@ -41,7 +41,7 @@ where pub fn rev_keys_raw_from<'a, K, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, @@ -55,7 +55,7 @@ where pub fn rev_raw_keys_from
<P>
( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: AsRef<[u8]> + ?Sized + Debug, { diff --git a/src/database/map/rev_keys_prefix.rs b/src/database/map/rev_keys_prefix.rs index fb29acaf..fbe9f9ca 100644 --- a/src/database/map/rev_keys_prefix.rs +++ b/src/database/map/rev_keys_prefix.rs @@ -1,16 +1,16 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; -use futures::{future, Stream, StreamExt, TryStreamExt}; +use conduwuit::{Result, implement}; +use futures::{Stream, StreamExt, TryStreamExt, future}; use serde::{Deserialize, Serialize}; -use crate::keyval::{result_deserialize_key, serialize_key, Key}; +use crate::keyval::{Key, result_deserialize_key, serialize_key}; #[implement(super::Map)] pub fn rev_keys_prefix<'a, K, P>( self: &'a Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -24,7 +24,7 @@ where pub fn rev_keys_prefix_raw
<P>
( self: &Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { diff --git a/src/database/map/rev_stream.rs b/src/database/map/rev_stream.rs index f55053be..fc2d1116 100644 --- a/src/database/map/rev_stream.rs +++ b/src/database/map/rev_stream.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::Deserialize; diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs index ddc98607..d67986e7 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -1,13 +1,13 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::{Deserialize, Serialize}; use tokio::task; use crate::{ - keyval::{result_deserialize, serialize_key, KeyVal}, + keyval::{KeyVal, result_deserialize, serialize_key}, stream, util::is_incomplete, }; @@ -20,7 +20,7 @@ use crate::{ pub fn rev_stream_from<'a, K, V, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -39,7 +39,7 @@ where pub fn rev_stream_from_raw
<P>
( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { @@ -55,7 +55,7 @@ where pub fn rev_stream_raw_from<'a, K, V, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, @@ -74,7 +74,7 @@ where pub fn rev_raw_stream_from
<P>
( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: AsRef<[u8]> + ?Sized + Debug, { diff --git a/src/database/map/rev_stream_prefix.rs b/src/database/map/rev_stream_prefix.rs index 22a2ce53..46dc9247 100644 --- a/src/database/map/rev_stream_prefix.rs +++ b/src/database/map/rev_stream_prefix.rs @@ -1,10 +1,10 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; -use futures::{future, Stream, StreamExt, TryStreamExt}; +use conduwuit::{Result, implement}; +use futures::{Stream, StreamExt, TryStreamExt, future}; use serde::{Deserialize, Serialize}; -use crate::keyval::{result_deserialize, serialize_key, KeyVal}; +use crate::keyval::{KeyVal, result_deserialize, serialize_key}; /// Iterate key-value entries in the map where the key matches a prefix. /// @@ -14,7 +14,7 @@ use crate::keyval::{result_deserialize, serialize_key, KeyVal}; pub fn rev_stream_prefix<'a, K, V, P>( self: &'a Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -33,7 +33,7 @@ where pub fn rev_stream_prefix_raw
<P>
( self: &Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { diff --git a/src/database/map/stream.rs b/src/database/map/stream.rs index bfc8ba04..f1450b6f 100644 --- a/src/database/map/stream.rs +++ b/src/database/map/stream.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::Deserialize; diff --git a/src/database/map/stream_from.rs b/src/database/map/stream_from.rs index 74140a65..00c3a051 100644 --- a/src/database/map/stream_from.rs +++ b/src/database/map/stream_from.rs @@ -1,13 +1,13 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::{Deserialize, Serialize}; use tokio::task; use crate::{ - keyval::{result_deserialize, serialize_key, KeyVal}, + keyval::{KeyVal, result_deserialize, serialize_key}, stream, }; @@ -19,7 +19,7 @@ use crate::{ pub fn stream_from<'a, K, V, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -37,7 +37,7 @@ where pub fn stream_from_raw
<P>
( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { @@ -53,7 +53,7 @@ where pub fn stream_raw_from<'a, K, V, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, @@ -71,7 +71,7 @@ where pub fn raw_stream_from
<P>
( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: AsRef<[u8]> + ?Sized + Debug, { diff --git a/src/database/map/stream_prefix.rs b/src/database/map/stream_prefix.rs index adacfc81..a26478aa 100644 --- a/src/database/map/stream_prefix.rs +++ b/src/database/map/stream_prefix.rs @@ -1,10 +1,10 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; -use futures::{future, Stream, StreamExt, TryStreamExt}; +use conduwuit::{Result, implement}; +use futures::{Stream, StreamExt, TryStreamExt, future}; use serde::{Deserialize, Serialize}; -use crate::keyval::{result_deserialize, serialize_key, KeyVal}; +use crate::keyval::{KeyVal, result_deserialize, serialize_key}; /// Iterate key-value entries in the map where the key matches a prefix. /// @@ -14,7 +14,7 @@ use crate::keyval::{result_deserialize, serialize_key, KeyVal}; pub fn stream_prefix<'a, K, V, P>( self: &'a Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -33,7 +33,7 @@ where pub fn stream_prefix_raw
<P>
( self: &Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { diff --git a/src/database/maps.rs b/src/database/maps.rs index fc216ee0..b060ab8d 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -3,8 +3,8 @@ use std::{collections::BTreeMap, sync::Arc}; use conduwuit::Result; use crate::{ - engine::descriptor::{self, CacheDisp, Descriptor}, Engine, Map, + engine::descriptor::{self, CacheDisp, Descriptor}, }; pub(super) type Maps = BTreeMap; diff --git a/src/database/mod.rs b/src/database/mod.rs index 4f8e2ad9..0481d1bd 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -23,18 +23,18 @@ mod watchers; use std::{ops::Index, sync::Arc}; -use conduwuit::{err, Result, Server}; +use conduwuit::{Result, Server, err}; pub use self::{ de::{Ignore, IgnoreAll}, deserialized::Deserialized, handle::Handle, - keyval::{serialize_key, serialize_val, KeyVal, Slice}, - map::{compact, Get, Map, Qry}, - ser::{serialize, serialize_to, serialize_to_vec, Cbor, Interfix, Json, Separator, SEP}, + keyval::{KeyVal, Slice, serialize_key, serialize_val}, + map::{Get, Map, Qry, compact}, + ser::{Cbor, Interfix, Json, SEP, Separator, serialize, serialize_to, serialize_to_vec}, }; pub(crate) use self::{ - engine::{context::Context, Engine}, + engine::{Engine, context::Context}, util::or_else, }; use crate::maps::{Maps, MapsKey, MapsVal}; diff --git a/src/database/pool.rs b/src/database/pool.rs index 7636ff5e..e6ed59ac 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -3,8 +3,8 @@ mod configure; use std::{ mem::take, sync::{ - atomic::{AtomicUsize, Ordering}, Arc, Mutex, + atomic::{AtomicUsize, Ordering}, }, thread, thread::JoinHandle, @@ -12,19 +12,18 @@ use std::{ use async_channel::{QueueStrategy, Receiver, RecvError, Sender}; use conduwuit::{ - debug, debug_warn, err, error, implement, + Error, Result, Server, debug, debug_warn, err, error, implement, result::DebugInspect, smallvec::SmallVec, trace, utils::sys::compute::{get_affinity, nth_core_available, set_affinity}, - Error, Result, Server, }; -use futures::{channel::oneshot, TryFutureExt}; +use futures::{TryFutureExt, channel::oneshot}; use oneshot::Sender as ResultSender; use rocksdb::Direction; use self::configure::configure; -use crate::{keyval::KeyBuf, stream, Handle, Map}; +use crate::{Handle, Map, keyval::KeyBuf, stream}; /// Frontend thread-pool. Operating system threads are used to make database /// requests which are not cached. 
These thread-blocking requests are offloaded diff --git a/src/database/pool/configure.rs b/src/database/pool/configure.rs index ff42ef51..92dda56e 100644 --- a/src/database/pool/configure.rs +++ b/src/database/pool/configure.rs @@ -1,7 +1,7 @@ use std::{path::PathBuf, sync::Arc}; use conduwuit::{ - debug, debug_info, expected, is_equal_to, + Server, debug, debug_info, expected, is_equal_to, utils::{ math::usize_from_f64, result::LogDebugErr, @@ -9,7 +9,6 @@ use conduwuit::{ stream::{AMPLIFICATION_LIMIT, WIDTH_LIMIT}, sys::{compute::is_core_available, storage}, }, - Server, }; use super::{QUEUE_LIMIT, WORKER_LIMIT}; diff --git a/src/database/ser.rs b/src/database/ser.rs index 372b7522..6dd2043d 100644 --- a/src/database/ser.rs +++ b/src/database/ser.rs @@ -1,7 +1,7 @@ use std::io::Write; -use conduwuit::{debug::type_name, err, result::DebugInspect, utils::exchange, Error, Result}; -use serde::{ser, Deserialize, Serialize}; +use conduwuit::{Error, Result, debug::type_name, err, result::DebugInspect, utils::exchange}; +use serde::{Deserialize, Serialize, ser}; use crate::util::unhandled; diff --git a/src/database/stream.rs b/src/database/stream.rs index f3063bb3..eb856b3f 100644 --- a/src/database/stream.rs +++ b/src/database/stream.rs @@ -5,15 +5,15 @@ mod keys_rev; use std::sync::Arc; -use conduwuit::{utils::exchange, Result}; +use conduwuit::{Result, utils::exchange}; use rocksdb::{DBRawIteratorWithThreadMode, ReadOptions}; pub(crate) use self::{items::Items, items_rev::ItemsRev, keys::Keys, keys_rev::KeysRev}; use crate::{ + Map, Slice, engine::Db, keyval::{Key, KeyVal, Val}, util::{is_incomplete, map_err}, - Map, Slice, }; pub(crate) struct State<'a> { diff --git a/src/database/stream/items.rs b/src/database/stream/items.rs index 8814419e..ede2b822 100644 --- a/src/database/stream/items.rs +++ b/src/database/stream/items.rs @@ -2,12 +2,12 @@ use std::pin::Pin; use conduwuit::Result; use futures::{ + Stream, stream::FusedStream, task::{Context, Poll}, - Stream, }; -use super::{keyval_longevity, Cursor, State}; +use super::{Cursor, State, keyval_longevity}; use crate::keyval::KeyVal; pub(crate) struct Items<'a> { diff --git a/src/database/stream/items_rev.rs b/src/database/stream/items_rev.rs index f6fcb0e5..dba8d16c 100644 --- a/src/database/stream/items_rev.rs +++ b/src/database/stream/items_rev.rs @@ -2,12 +2,12 @@ use std::pin::Pin; use conduwuit::Result; use futures::{ + Stream, stream::FusedStream, task::{Context, Poll}, - Stream, }; -use super::{keyval_longevity, Cursor, State}; +use super::{Cursor, State, keyval_longevity}; use crate::keyval::KeyVal; pub(crate) struct ItemsRev<'a> { diff --git a/src/database/stream/keys.rs b/src/database/stream/keys.rs index b953f51c..7c89869b 100644 --- a/src/database/stream/keys.rs +++ b/src/database/stream/keys.rs @@ -2,12 +2,12 @@ use std::pin::Pin; use conduwuit::Result; use futures::{ + Stream, stream::FusedStream, task::{Context, Poll}, - Stream, }; -use super::{slice_longevity, Cursor, State}; +use super::{Cursor, State, slice_longevity}; use crate::keyval::Key; pub(crate) struct Keys<'a> { diff --git a/src/database/stream/keys_rev.rs b/src/database/stream/keys_rev.rs index acf78d88..51561e5c 100644 --- a/src/database/stream/keys_rev.rs +++ b/src/database/stream/keys_rev.rs @@ -2,12 +2,12 @@ use std::pin::Pin; use conduwuit::Result; use futures::{ + Stream, stream::FusedStream, task::{Context, Poll}, - Stream, }; -use super::{slice_longevity, Cursor, State}; +use super::{Cursor, State, slice_longevity}; use crate::keyval::Key; pub(crate) 
struct KeysRev<'a> { diff --git a/src/database/tests.rs b/src/database/tests.rs index 594170e8..140bc56d 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -4,14 +4,13 @@ use std::fmt::Debug; use conduwuit::{ arrayvec::ArrayVec, - ruma::{serde::Raw, EventId, RoomId, UserId}, + ruma::{EventId, RoomId, UserId, serde::Raw}, }; use serde::Serialize; use crate::{ - de, ser, - ser::{serialize_to_vec, Json}, - Ignore, Interfix, + Ignore, Interfix, de, ser, + ser::{Json, serialize_to_vec}, }; #[test] diff --git a/src/database/watchers.rs b/src/database/watchers.rs index 9ce6f74c..be814f8c 100644 --- a/src/database/watchers.rs +++ b/src/database/watchers.rs @@ -1,5 +1,5 @@ use std::{ - collections::{hash_map, HashMap}, + collections::{HashMap, hash_map}, future::Future, pin::Pin, sync::RwLock, diff --git a/src/macros/admin.rs b/src/macros/admin.rs index e35bd586..bf1586a0 100644 --- a/src/macros/admin.rs +++ b/src/macros/admin.rs @@ -1,10 +1,10 @@ use itertools::Itertools; use proc_macro::{Span, TokenStream}; use proc_macro2::TokenStream as TokenStream2; -use quote::{quote, ToTokens}; -use syn::{parse_quote, Attribute, Error, Fields, Ident, ItemEnum, ItemFn, Meta, Variant}; +use quote::{ToTokens, quote}; +use syn::{Attribute, Error, Fields, Ident, ItemEnum, ItemFn, Meta, Variant, parse_quote}; -use crate::{utils::camel_to_snake_string, Result}; +use crate::{Result, utils::camel_to_snake_string}; pub(super) fn command(mut item: ItemFn, _args: &[Meta]) -> Result { let attr: Attribute = parse_quote! { diff --git a/src/macros/cargo.rs b/src/macros/cargo.rs index cd36658e..a452c672 100644 --- a/src/macros/cargo.rs +++ b/src/macros/cargo.rs @@ -4,7 +4,7 @@ use proc_macro::{Span, TokenStream}; use quote::quote; use syn::{Error, ItemConst, Meta}; -use crate::{utils, Result}; +use crate::{Result, utils}; pub(super) fn manifest(item: ItemConst, args: &[Meta]) -> Result { let member = utils::get_named_string(args, "crate"); diff --git a/src/macros/config.rs b/src/macros/config.rs index 50feefa8..07ac1c0a 100644 --- a/src/macros/config.rs +++ b/src/macros/config.rs @@ -2,15 +2,15 @@ use std::{collections::HashSet, fmt::Write as _, fs::OpenOptions, io::Write as _ use proc_macro::TokenStream; use proc_macro2::{Span, TokenStream as TokenStream2}; -use quote::{quote, ToTokens}; +use quote::{ToTokens, quote}; use syn::{ - parse::Parser, punctuated::Punctuated, spanned::Spanned, Error, Expr, ExprLit, Field, Fields, - FieldsNamed, ItemStruct, Lit, Meta, MetaList, MetaNameValue, Type, TypePath, + Error, Expr, ExprLit, Field, Fields, FieldsNamed, ItemStruct, Lit, Meta, MetaList, + MetaNameValue, Type, TypePath, parse::Parser, punctuated::Punctuated, spanned::Spanned, }; use crate::{ - utils::{get_simple_settings, is_cargo_build, is_cargo_test}, Result, + utils::{get_simple_settings, is_cargo_build, is_cargo_test}, }; const UNDOCUMENTED: &str = "# This item is undocumented. 
Please contribute documentation for it."; diff --git a/src/macros/implement.rs b/src/macros/implement.rs index 8d18f243..7acc12d2 100644 --- a/src/macros/implement.rs +++ b/src/macros/implement.rs @@ -3,7 +3,7 @@ use quote::quote; use syn::{Error, ItemFn, Meta, Path}; use utils::get_named_generics; -use crate::{utils, Result}; +use crate::{Result, utils}; pub(super) fn implement(item: ItemFn, args: &[Meta]) -> Result { let generics = get_named_generics(args, "generics")?; diff --git a/src/macros/mod.rs b/src/macros/mod.rs index 1aa1e24f..31a797fe 100644 --- a/src/macros/mod.rs +++ b/src/macros/mod.rs @@ -9,8 +9,9 @@ mod utils; use proc_macro::TokenStream; use syn::{ + Error, Item, ItemConst, ItemEnum, ItemFn, ItemStruct, Meta, parse::{Parse, Parser}, - parse_macro_input, Error, Item, ItemConst, ItemEnum, ItemFn, ItemStruct, Meta, + parse_macro_input, }; pub(crate) type Result = std::result::Result; diff --git a/src/macros/refutable.rs b/src/macros/refutable.rs index 66e0ebc3..acfc4cd5 100644 --- a/src/macros/refutable.rs +++ b/src/macros/refutable.rs @@ -1,5 +1,5 @@ use proc_macro::{Span, TokenStream}; -use quote::{quote, ToTokens}; +use quote::{ToTokens, quote}; use syn::{FnArg::Typed, Ident, ItemFn, Meta, Pat, PatIdent, PatType, Stmt}; use crate::Result; @@ -20,7 +20,7 @@ pub(super) fn refutable(mut item: ItemFn, _args: &[Meta]) -> Result let variant = &pat.path; let fields = &pat.fields; - let Some(Typed(PatType { ref mut pat, .. })) = sig.inputs.get_mut(i) else { + let Some(Typed(PatType { pat, .. })) = sig.inputs.get_mut(i) else { continue; }; diff --git a/src/macros/utils.rs b/src/macros/utils.rs index af2519a7..a45e5ecc 100644 --- a/src/macros/utils.rs +++ b/src/macros/utils.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; -use syn::{parse_str, Expr, ExprLit, Generics, Lit, Meta, MetaNameValue}; +use syn::{Expr, ExprLit, Generics, Lit, Meta, MetaNameValue, parse_str}; use crate::Result; diff --git a/src/main/clap.rs b/src/main/clap.rs index 2bb6f3f2..c7f33bfe 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -4,10 +4,10 @@ use std::path::PathBuf; use clap::{ArgAction, Parser}; use conduwuit::{ + Err, Result, config::{Figment, FigmentValue}, err, toml, utils::available_parallelism, - Err, Result, }; /// Commandline arguments diff --git a/src/main/logging.rs b/src/main/logging.rs index 35e482de..7ce86d56 100644 --- a/src/main/logging.rs +++ b/src/main/logging.rs @@ -1,13 +1,13 @@ use std::sync::Arc; use conduwuit::{ + Result, config::Config, debug_warn, err, - log::{capture, fmt_span, ConsoleFormat, ConsoleWriter, LogLevelReloadHandles}, + log::{ConsoleFormat, ConsoleWriter, LogLevelReloadHandles, capture, fmt_span}, result::UnwrapOrErr, - Result, }; -use tracing_subscriber::{fmt, layer::SubscriberExt, reload, EnvFilter, Layer, Registry}; +use tracing_subscriber::{EnvFilter, Layer, Registry, fmt, layer::SubscriberExt, reload}; #[cfg(feature = "perf_measurements")] pub(crate) type TracingFlameGuard = diff --git a/src/main/main.rs b/src/main/main.rs index dacc2a2e..2bfc3c06 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -9,9 +9,9 @@ mod signal; extern crate conduwuit_core as conduwuit; -use std::sync::{atomic::Ordering, Arc}; +use std::sync::{Arc, atomic::Ordering}; -use conduwuit::{debug_info, error, rustc_flags_capture, Error, Result}; +use conduwuit::{Error, Result, debug_info, error, rustc_flags_capture}; use server::Server; rustc_flags_capture! 
{} diff --git a/src/main/mods.rs b/src/main/mods.rs index 9ab36e6c..6dc79b2f 100644 --- a/src/main/mods.rs +++ b/src/main/mods.rs @@ -6,10 +6,10 @@ extern crate conduwuit_service; use std::{ future::Future, pin::Pin, - sync::{atomic::Ordering, Arc}, + sync::{Arc, atomic::Ordering}, }; -use conduwuit::{debug, error, mods, Error, Result}; +use conduwuit::{Error, Result, debug, error, mods}; use conduwuit_service::Services; use crate::Server; diff --git a/src/main/runtime.rs b/src/main/runtime.rs index 474b373b..b3174e9c 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -1,8 +1,8 @@ use std::{ iter::once, sync::{ - atomic::{AtomicUsize, Ordering}, OnceLock, + atomic::{AtomicUsize, Ordering}, }, thread, time::Duration, @@ -11,9 +11,8 @@ use std::{ #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] use conduwuit::result::LogDebugErr; use conduwuit::{ - is_true, + Result, is_true, utils::sys::compute::{nth_core_available, set_affinity}, - Result, }; use tokio::runtime::Builder; diff --git a/src/main/sentry.rs b/src/main/sentry.rs index 02835ec8..1ea1f3ae 100644 --- a/src/main/sentry.rs +++ b/src/main/sentry.rs @@ -7,11 +7,11 @@ use std::{ use conduwuit::{config::Config, debug, trace}; use sentry::{ - types::{ - protocol::v7::{Context, Event}, - Dsn, - }, Breadcrumb, ClientOptions, Level, + types::{ + Dsn, + protocol::v7::{Context, Event}, + }, }; static SEND_PANIC: OnceLock = OnceLock::new(); diff --git a/src/main/server.rs b/src/main/server.rs index 7376b2fc..44ca69b0 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -1,11 +1,11 @@ use std::{path::PathBuf, sync::Arc}; use conduwuit::{ + Error, Result, config::Config, info, log::Log, utils::{stream, sys}, - Error, Result, }; use tokio::{runtime, sync::Mutex}; diff --git a/src/router/layers.rs b/src/router/layers.rs index 7ebec16e..88e6a8d5 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -1,16 +1,16 @@ use std::{any::Any, sync::Arc, time::Duration}; use axum::{ - extract::{DefaultBodyLimit, MatchedPath}, Router, + extract::{DefaultBodyLimit, MatchedPath}, }; use axum_client_ip::SecureClientIpSource; -use conduwuit::{debug, error, Result, Server}; +use conduwuit::{Result, Server, debug, error}; use conduwuit_api::router::state::Guard; use conduwuit_service::Services; use http::{ - header::{self, HeaderName}, HeaderValue, Method, StatusCode, + header::{self, HeaderName}, }; use tower::ServiceBuilder; use tower_http::{ @@ -176,12 +176,12 @@ fn catch_panic( .requests_panic .fetch_add(1, std::sync::atomic::Ordering::Release); - let details = if let Some(s) = err.downcast_ref::() { - s.clone() - } else if let Some(s) = err.downcast_ref::<&str>() { - (*s).to_owned() - } else { - "Unknown internal server error occurred.".to_owned() + let details = match err.downcast_ref::() { + | Some(s) => s.clone(), + | _ => match err.downcast_ref::<&str>() { + | Some(s) => (*s).to_owned(), + | _ => "Unknown internal server error occurred.".to_owned(), + }, }; error!("{details:#}"); diff --git a/src/router/request.rs b/src/router/request.rs index b6c22d45..00769b3f 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -1,6 +1,6 @@ use std::{ fmt::Debug, - sync::{atomic::Ordering, Arc}, + sync::{Arc, atomic::Ordering}, time::Duration, }; @@ -8,7 +8,7 @@ use axum::{ extract::State, response::{IntoResponse, Response}, }; -use conduwuit::{debug, debug_error, debug_warn, err, error, trace, Result}; +use conduwuit::{Result, debug, debug_error, debug_warn, err, error, trace}; use conduwuit_service::Services; use 
futures::FutureExt; use http::{Method, StatusCode, Uri}; diff --git a/src/router/router.rs b/src/router/router.rs index b3531418..0f95b924 100644 --- a/src/router/router.rs +++ b/src/router/router.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use axum::{response::IntoResponse, routing::get, Router}; +use axum::{Router, response::IntoResponse, routing::get}; use conduwuit::Error; use conduwuit_api::router::{state, state::Guard}; use conduwuit_service::Services; diff --git a/src/router/run.rs b/src/router/run.rs index 024cb813..31789626 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -3,12 +3,12 @@ extern crate conduwuit_core as conduwuit; extern crate conduwuit_service as service; use std::{ - sync::{atomic::Ordering, Arc, Weak}, + sync::{Arc, Weak, atomic::Ordering}, time::Duration, }; use axum_server::Handle as ServerHandle; -use conduwuit::{debug, debug_error, debug_info, error, info, Error, Result, Server}; +use conduwuit::{Error, Result, Server, debug, debug_error, debug_info, error, info}; use futures::FutureExt; use service::Services; use tokio::{ diff --git a/src/router/serve/mod.rs b/src/router/serve/mod.rs index 5c822f2b..2399edf0 100644 --- a/src/router/serve/mod.rs +++ b/src/router/serve/mod.rs @@ -6,7 +6,7 @@ mod unix; use std::sync::Arc; use axum_server::Handle as ServerHandle; -use conduwuit::{err, Result}; +use conduwuit::{Result, err}; use conduwuit_service::Services; use tokio::sync::broadcast; diff --git a/src/router/serve/plain.rs b/src/router/serve/plain.rs index 535282b9..6db7e138 100644 --- a/src/router/serve/plain.rs +++ b/src/router/serve/plain.rs @@ -1,11 +1,11 @@ use std::{ net::SocketAddr, - sync::{atomic::Ordering, Arc}, + sync::{Arc, atomic::Ordering}, }; use axum::Router; -use axum_server::{bind, Handle as ServerHandle}; -use conduwuit::{debug_info, info, Result, Server}; +use axum_server::{Handle as ServerHandle, bind}; +use conduwuit::{Result, Server, debug_info, info}; use tokio::task::JoinSet; pub(super) async fn serve( diff --git a/src/router/serve/tls.rs b/src/router/serve/tls.rs index ab1a9371..dd46ab53 100644 --- a/src/router/serve/tls.rs +++ b/src/router/serve/tls.rs @@ -3,10 +3,10 @@ use std::{net::SocketAddr, sync::Arc}; use axum::Router; use axum_server::Handle as ServerHandle; use axum_server_dual_protocol::{ - axum_server::{bind_rustls, tls_rustls::RustlsConfig}, ServerExt, + axum_server::{bind_rustls, tls_rustls::RustlsConfig}, }; -use conduwuit::{err, Result, Server}; +use conduwuit::{Result, Server, err}; use tokio::task::JoinSet; use tracing::{debug, info, warn}; diff --git a/src/router/serve/unix.rs b/src/router/serve/unix.rs index 6a030c30..2af17274 100644 --- a/src/router/serve/unix.rs +++ b/src/router/serve/unix.rs @@ -4,15 +4,15 @@ use std::{ net::{self, IpAddr, Ipv4Addr}, os::fd::AsRawFd, path::Path, - sync::{atomic::Ordering, Arc}, + sync::{Arc, atomic::Ordering}, }; use axum::{ - extract::{connect_info::IntoMakeServiceWithConnectInfo, Request}, Router, + extract::{Request, connect_info::IntoMakeServiceWithConnectInfo}, }; use conduwuit::{ - debug, debug_error, info, result::UnwrapInfallible, trace, warn, Err, Result, Server, + Err, Result, Server, debug, debug_error, info, result::UnwrapInfallible, trace, warn, }; use hyper::{body::Incoming, service::service_fn}; use hyper_util::{ @@ -21,10 +21,10 @@ use hyper_util::{ }; use tokio::{ fs, - net::{unix::SocketAddr, UnixListener, UnixStream}, + net::{UnixListener, UnixStream, unix::SocketAddr}, sync::broadcast::{self}, task::JoinSet, - time::{sleep, Duration}, + time::{Duration, 
sleep}, }; use tower::{Service, ServiceExt}; diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 5a943f88..453051be 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -1,23 +1,22 @@ use std::sync::Arc; use conduwuit::{ - err, implement, - utils::{result::LogErr, stream::TryIgnore, ReadyExt}, - Err, Result, + Err, Result, err, implement, + utils::{ReadyExt, result::LogErr, stream::TryIgnore}, }; use database::{Deserialized, Handle, Ignore, Json, Map}; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ + RoomId, UserId, events::{ AnyGlobalAccountDataEvent, AnyRawAccountDataEvent, AnyRoomAccountDataEvent, GlobalAccountDataEventType, RoomAccountDataEventType, }, serde::Raw, - RoomId, UserId, }; use serde::Deserialize; -use crate::{globals, Dep}; +use crate::{Dep, globals}; pub struct Service { services: Services, diff --git a/src/service/admin/console.rs b/src/service/admin/console.rs index 59b9a31b..02f41303 100644 --- a/src/service/admin/console.rs +++ b/src/service/admin/console.rs @@ -5,14 +5,14 @@ use std::{ sync::{Arc, Mutex}, }; -use conduwuit::{debug, defer, error, log, log::is_systemd_mode, Server}; +use conduwuit::{Server, debug, defer, error, log, log::is_systemd_mode}; use futures::future::{AbortHandle, Abortable}; use ruma::events::room::message::RoomMessageEventContent; use rustyline_async::{Readline, ReadlineError, ReadlineEvent}; use termimad::MadSkin; use tokio::task::JoinHandle; -use crate::{admin, Dep}; +use crate::{Dep, admin}; pub struct Console { server: Arc, @@ -221,7 +221,7 @@ pub fn print(markdown: &str) { } fn configure_output_err(mut output: MadSkin) -> MadSkin { - use termimad::{crossterm::style::Color, Alignment, CompoundStyle, LineStyle}; + use termimad::{Alignment, CompoundStyle, LineStyle, crossterm::style::Color}; let code_style = CompoundStyle::with_fgbg(Color::AnsiValue(196), Color::AnsiValue(234)); output.inline_code = code_style.clone(); @@ -236,7 +236,7 @@ fn configure_output_err(mut output: MadSkin) -> MadSkin { } fn configure_output(mut output: MadSkin) -> MadSkin { - use termimad::{crossterm::style::Color, Alignment, CompoundStyle, LineStyle}; + use termimad::{Alignment, CompoundStyle, LineStyle, crossterm::style::Color}; let code_style = CompoundStyle::with_fgbg(Color::AnsiValue(40), Color::AnsiValue(234)); output.inline_code = code_style.clone(); diff --git a/src/service/admin/create.rs b/src/service/admin/create.rs index 7b691fb1..7f71665a 100644 --- a/src/service/admin/create.rs +++ b/src/service/admin/create.rs @@ -1,7 +1,8 @@ use std::collections::BTreeMap; -use conduwuit::{pdu::PduBuilder, Result}; +use conduwuit::{Result, pdu::PduBuilder}; use ruma::{ + RoomId, RoomVersionId, events::room::{ canonical_alias::RoomCanonicalAliasEventContent, create::RoomCreateEventContent, @@ -14,7 +15,6 @@ use ruma::{ preview_url::RoomPreviewUrlsEventContent, topic::RoomTopicEventContent, }, - RoomId, RoomVersionId, }; use crate::Services; diff --git a/src/service/admin/execute.rs b/src/service/admin/execute.rs index 462681da..174b28ed 100644 --- a/src/service/admin/execute.rs +++ b/src/service/admin/execute.rs @@ -1,6 +1,6 @@ -use conduwuit::{debug, debug_info, error, implement, info, Err, Result}; +use conduwuit::{Err, Result, debug, debug_info, error, implement, info}; use ruma::events::room::message::RoomMessageEventContent; -use tokio::time::{sleep, Duration}; +use tokio::time::{Duration, sleep}; pub(super) const SIGNAL: &str = "SIGUSR2"; diff --git a/src/service/admin/grant.rs 
b/src/service/admin/grant.rs index 3ad9283f..358ea267 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -1,17 +1,17 @@ use std::collections::BTreeMap; -use conduwuit::{error, implement, Result}; +use conduwuit::{Result, error, implement}; use ruma::{ + RoomId, UserId, events::{ + RoomAccountDataEventType, room::{ member::{MembershipState, RoomMemberEventContent}, message::RoomMessageEventContent, power_levels::RoomPowerLevelsEventContent, }, tag::{TagEvent, TagEventContent, TagInfo}, - RoomAccountDataEventType, }, - RoomId, UserId, }; use crate::pdu::PduBuilder; diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 31b046b7..4622f10e 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -11,18 +11,18 @@ use std::{ use async_trait::async_trait; use conduwuit::{ - debug, err, error, error::default_log, pdu::PduBuilder, Error, PduEvent, Result, Server, + Error, PduEvent, Result, Server, debug, err, error, error::default_log, pdu::PduBuilder, }; pub use create::create_admin_room; use futures::{FutureExt, TryFutureExt}; use loole::{Receiver, Sender}; use ruma::{ - events::room::message::{Relation, RoomMessageEventContent}, OwnedEventId, OwnedRoomId, RoomId, UserId, + events::room::message::{Relation, RoomMessageEventContent}, }; use tokio::sync::RwLock; -use crate::{account_data, globals, rooms, rooms::state::RoomMutexGuard, Dep}; +use crate::{Dep, account_data, globals, rooms, rooms::state::RoomMutexGuard}; pub struct Service { services: Services, diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 2a54ee09..5aba0018 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -4,14 +4,14 @@ mod registration_info; use std::{collections::BTreeMap, sync::Arc}; use async_trait::async_trait; -use conduwuit::{err, utils::stream::TryIgnore, Result}; +use conduwuit::{Result, err, utils::stream::TryIgnore}; use database::Map; use futures::{Future, StreamExt, TryStreamExt}; -use ruma::{api::appservice::Registration, RoomAliasId, RoomId, UserId}; +use ruma::{RoomAliasId, RoomId, UserId, api::appservice::Registration}; use tokio::sync::RwLock; pub use self::{namespace_regex::NamespaceRegex, registration_info::RegistrationInfo}; -use crate::{sending, Dep}; +use crate::{Dep, sending}; pub struct Service { registration_info: RwLock>, diff --git a/src/service/appservice/registration_info.rs b/src/service/appservice/registration_info.rs index 9758e186..a511f58d 100644 --- a/src/service/appservice/registration_info.rs +++ b/src/service/appservice/registration_info.rs @@ -1,5 +1,5 @@ use conduwuit::Result; -use ruma::{api::appservice::Registration, UserId}; +use ruma::{UserId, api::appservice::Registration}; use super::NamespaceRegex; diff --git a/src/service/client/mod.rs b/src/service/client/mod.rs index f63d78b8..d5008491 100644 --- a/src/service/client/mod.rs +++ b/src/service/client/mod.rs @@ -1,6 +1,6 @@ use std::{sync::Arc, time::Duration}; -use conduwuit::{err, implement, trace, Config, Result}; +use conduwuit::{Config, Result, err, implement, trace}; use either::Either; use ipaddress::IPAddress; use reqwest::redirect; @@ -172,10 +172,9 @@ fn base(config: &Config) -> Result { builder = builder.no_zstd(); }; - if let Some(proxy) = config.proxy.to_proxy()? { - Ok(builder.proxy(proxy)) - } else { - Ok(builder) + match config.proxy.to_proxy()? 
{ + | Some(proxy) => Ok(builder.proxy(proxy)), + | _ => Ok(builder), } } diff --git a/src/service/config/mod.rs b/src/service/config/mod.rs index c9ac37a3..fd0d8764 100644 --- a/src/service/config/mod.rs +++ b/src/service/config/mod.rs @@ -2,8 +2,9 @@ use std::{iter, ops::Deref, path::Path, sync::Arc}; use async_trait::async_trait; use conduwuit::{ - config::{check, Config}, - error, implement, Result, Server, + Result, Server, + config::{Config, check}, + error, implement, }; pub struct Service { diff --git a/src/service/emergency/mod.rs b/src/service/emergency/mod.rs index 9b2e4025..47a309a5 100644 --- a/src/service/emergency/mod.rs +++ b/src/service/emergency/mod.rs @@ -1,15 +1,15 @@ use std::sync::Arc; use async_trait::async_trait; -use conduwuit::{error, warn, Result}; +use conduwuit::{Result, error, warn}; use ruma::{ events::{ - push_rules::PushRulesEventContent, GlobalAccountDataEvent, GlobalAccountDataEventType, + GlobalAccountDataEvent, GlobalAccountDataEventType, push_rules::PushRulesEventContent, }, push::Ruleset, }; -use crate::{account_data, globals, users, Dep}; +use crate::{Dep, account_data, globals, users}; pub struct Service { services: Services, diff --git a/src/service/federation/execute.rs b/src/service/federation/execute.rs index 3146bb8a..d254486f 100644 --- a/src/service/federation/execute.rs +++ b/src/service/federation/execute.rs @@ -2,20 +2,20 @@ use std::{fmt::Debug, mem}; use bytes::Bytes; use conduwuit::{ - debug, debug::INFO_SPAN_LEVEL, debug_error, debug_warn, err, error::inspect_debug_log, - implement, trace, utils::string::EMPTY, Err, Error, Result, + Err, Error, Result, debug, debug::INFO_SPAN_LEVEL, debug_error, debug_warn, err, + error::inspect_debug_log, implement, trace, utils::string::EMPTY, }; -use http::{header::AUTHORIZATION, HeaderValue}; +use http::{HeaderValue, header::AUTHORIZATION}; use ipaddress::IPAddress; use reqwest::{Client, Method, Request, Response, Url}; use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, ServerName, ServerSigningKeyId, api::{ - client::error::Error as RumaError, EndpointError, IncomingResponse, MatrixVersion, - OutgoingRequest, SendAccessToken, + EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, + client::error::Error as RumaError, }, serde::Base64, server_util::authorization::XMatrix, - CanonicalJsonObject, CanonicalJsonValue, ServerName, ServerSigningKeyId, }; use crate::resolver::actual::ActualDest; diff --git a/src/service/federation/mod.rs b/src/service/federation/mod.rs index dacdb20e..ce7765ee 100644 --- a/src/service/federation/mod.rs +++ b/src/service/federation/mod.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use conduwuit::{Result, Server}; -use crate::{client, resolver, server_keys, Dep}; +use crate::{Dep, client, resolver, server_keys}; pub struct Service { services: Services, diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 26a18607..b43b7c5f 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -1,6 +1,6 @@ use std::sync::{Arc, RwLock}; -use conduwuit::{utils, Result}; +use conduwuit::{Result, utils}; use database::{Database, Deserialized, Map}; pub struct Data { diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 485d5020..16b3ef3c 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -7,7 +7,7 @@ use std::{ time::Instant, }; -use conduwuit::{error, utils::bytes::pretty, Result, Server}; +use conduwuit::{Result, Server, error, utils::bytes::pretty}; use data::Data; use 
regex::RegexSet; use ruma::{OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, ServerName, UserId}; diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 1165c3ed..1bf048ef 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -1,19 +1,18 @@ use std::{collections::BTreeMap, sync::Arc}; use conduwuit::{ - err, implement, + Err, Result, err, implement, utils::stream::{ReadyExt, TryIgnore}, - Err, Result, }; use database::{Deserialized, Ignore, Interfix, Json, Map}; use futures::StreamExt; use ruma::{ + OwnedRoomId, RoomId, UserId, api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, serde::Raw, - OwnedRoomId, RoomId, UserId, }; -use crate::{globals, Dep}; +use crate::{Dep, globals}; pub struct Service { db: Data, diff --git a/src/service/manager.rs b/src/service/manager.rs index e0d885c2..3cdf5945 100644 --- a/src/service/manager.rs +++ b/src/service/manager.rs @@ -1,6 +1,6 @@ use std::{panic::AssertUnwindSafe, sync::Arc, time::Duration}; -use conduwuit::{debug, debug_warn, error, trace, utils::time, warn, Err, Error, Result, Server}; +use conduwuit::{Err, Error, Result, Server, debug, debug_warn, error, trace, utils::time, warn}; use futures::{FutureExt, TryFutureExt}; use tokio::{ sync::{Mutex, MutexGuard}, @@ -8,7 +8,7 @@ use tokio::{ time::sleep, }; -use crate::{service, service::Service, Services}; +use crate::{Services, service, service::Service}; pub(crate) struct Manager { manager: Mutex>>>, diff --git a/src/service/media/blurhash.rs b/src/service/media/blurhash.rs index 60ade723..9d73f5dc 100644 --- a/src/service/media/blurhash.rs +++ b/src/service/media/blurhash.rs @@ -1,6 +1,6 @@ #[cfg(feature = "blurhashing")] use conduwuit::config::BlurhashConfig as CoreBlurhashConfig; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use super::Service; diff --git a/src/service/media/data.rs b/src/service/media/data.rs index f48482ea..0ccd844f 100644 --- a/src/service/media/data.rs +++ b/src/service/media/data.rs @@ -1,13 +1,12 @@ use std::{sync::Arc, time::Duration}; use conduwuit::{ - debug, debug_info, err, - utils::{str_from_bytes, stream::TryIgnore, string_from_bytes, ReadyExt}, - Err, Result, + Err, Result, debug, debug_info, err, + utils::{ReadyExt, str_from_bytes, stream::TryIgnore, string_from_bytes}, }; use database::{Database, Interfix, Map}; use futures::StreamExt; -use ruma::{http_headers::ContentDisposition, Mxc, OwnedMxcUri, UserId}; +use ruma::{Mxc, OwnedMxcUri, UserId, http_headers::ContentDisposition}; use super::{preview::UrlPreviewData, thumbnail::Dim}; diff --git a/src/service/media/migrations.rs b/src/service/media/migrations.rs index 8526ffcd..5fd628cd 100644 --- a/src/service/media/migrations.rs +++ b/src/service/media/migrations.rs @@ -8,9 +8,9 @@ use std::{ }; use conduwuit::{ - debug, debug_info, debug_warn, error, info, - utils::{stream::TryIgnore, ReadyExt}, - warn, Config, Result, + Config, Result, debug, debug_info, debug_warn, error, info, + utils::{ReadyExt, stream::TryIgnore}, + warn, }; use crate::Services; diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index f5913f43..5c26efe8 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -8,13 +8,13 @@ mod thumbnail; use std::{path::PathBuf, sync::Arc, time::SystemTime}; use async_trait::async_trait; -use base64::{engine::general_purpose, Engine as _}; +use base64::{Engine as _, engine::general_purpose}; use conduwuit::{ - debug, debug_error, debug_info, debug_warn, err, 
error, trace, + Err, Result, Server, debug, debug_error, debug_info, debug_warn, err, error, trace, utils::{self, MutexMap}, - warn, Err, Result, Server, + warn, }; -use ruma::{http_headers::ContentDisposition, Mxc, OwnedMxcUri, UserId}; +use ruma::{Mxc, OwnedMxcUri, UserId, http_headers::ContentDisposition}; use tokio::{ fs, io::{AsyncReadExt, AsyncWriteExt, BufReader}, @@ -22,7 +22,7 @@ use tokio::{ use self::data::{Data, Metadata}; pub use self::thumbnail::Dim; -use crate::{client, globals, sending, Dep}; +use crate::{Dep, client, globals, sending}; #[derive(Debug)] pub struct FileMeta { @@ -105,22 +105,27 @@ impl Service { /// Deletes a file in the database and from the media directory via an MXC pub async fn delete(&self, mxc: &Mxc<'_>) -> Result<()> { - if let Ok(keys) = self.db.search_mxc_metadata_prefix(mxc).await { - for key in keys { - trace!(?mxc, "MXC Key: {key:?}"); - debug_info!(?mxc, "Deleting from filesystem"); + match self.db.search_mxc_metadata_prefix(mxc).await { + | Ok(keys) => { + for key in keys { + trace!(?mxc, "MXC Key: {key:?}"); + debug_info!(?mxc, "Deleting from filesystem"); - if let Err(e) = self.remove_media_file(&key).await { - debug_error!(?mxc, "Failed to remove media file: {e}"); + if let Err(e) = self.remove_media_file(&key).await { + debug_error!(?mxc, "Failed to remove media file: {e}"); + } + + debug_info!(?mxc, "Deleting from database"); + self.db.delete_file_mxc(mxc).await; } - debug_info!(?mxc, "Deleting from database"); - self.db.delete_file_mxc(mxc).await; - } - - Ok(()) - } else { - Err!(Database(error!("Failed to find any media keys for MXC {mxc} in our database."))) + Ok(()) + }, + | _ => { + Err!(Database(error!( + "Failed to find any media keys for MXC {mxc} in our database." + ))) + }, } } @@ -154,22 +159,21 @@ impl Service { /// Downloads a file. pub async fn get(&self, mxc: &Mxc<'_>) -> Result> { - if let Ok(Metadata { content_disposition, content_type, key }) = - self.db.search_file_metadata(mxc, &Dim::default()).await - { - let mut content = Vec::with_capacity(8192); - let path = self.get_media_file(&key); - BufReader::new(fs::File::open(path).await?) - .read_to_end(&mut content) - .await?; + match self.db.search_file_metadata(mxc, &Dim::default()).await { + | Ok(Metadata { content_disposition, content_type, key }) => { + let mut content = Vec::with_capacity(8192); + let path = self.get_media_file(&key); + BufReader::new(fs::File::open(path).await?) 
+ .read_to_end(&mut content) + .await?; - Ok(Some(FileMeta { - content: Some(content), - content_type, - content_disposition, - })) - } else { - Ok(None) + Ok(Some(FileMeta { + content: Some(content), + content_type, + content_disposition, + })) + }, + | _ => Ok(None), } } diff --git a/src/service/media/preview.rs b/src/service/media/preview.rs index e7f76bab..17216869 100644 --- a/src/service/media/preview.rs +++ b/src/service/media/preview.rs @@ -7,7 +7,7 @@ use std::time::SystemTime; -use conduwuit::{debug, Err, Result}; +use conduwuit::{Err, Result, debug}; use conduwuit_core::implement; use ipaddress::IPAddress; use serde::Serialize; diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs index 72f1184e..61635011 100644 --- a/src/service/media/remote.rs +++ b/src/service/media/remote.rs @@ -1,21 +1,21 @@ use std::{fmt::Debug, time::Duration}; use conduwuit::{ - debug_warn, err, implement, utils::content_disposition::make_content_disposition, Err, Error, - Result, + Err, Error, Result, debug_warn, err, implement, + utils::content_disposition::make_content_disposition, }; -use http::header::{HeaderValue, CONTENT_DISPOSITION, CONTENT_TYPE}; +use http::header::{CONTENT_DISPOSITION, CONTENT_TYPE, HeaderValue}; use ruma::{ + Mxc, ServerName, UserId, api::{ + OutgoingRequest, client::{ error::ErrorKind::{NotFound, Unrecognized}, media, }, federation, federation::authenticated_media::{Content, FileOrLocation}, - OutgoingRequest, }, - Mxc, ServerName, UserId, }; use super::{Dim, FileMeta}; diff --git a/src/service/media/tests.rs b/src/service/media/tests.rs index 1d6dce30..651e0ade 100644 --- a/src/service/media/tests.rs +++ b/src/service/media/tests.rs @@ -5,7 +5,7 @@ async fn long_file_names_works() { use std::path::PathBuf; - use base64::{engine::general_purpose, Engine as _}; + use base64::{Engine as _, engine::general_purpose}; use super::*; diff --git a/src/service/media/thumbnail.rs b/src/service/media/thumbnail.rs index 7350b3a1..e5a98774 100644 --- a/src/service/media/thumbnail.rs +++ b/src/service/media/thumbnail.rs @@ -7,14 +7,14 @@ use std::{cmp, num::Saturating as Sat}; -use conduwuit::{checked, err, implement, Result}; -use ruma::{http_headers::ContentDisposition, media::Method, Mxc, UInt, UserId}; +use conduwuit::{Result, checked, err, implement}; +use ruma::{Mxc, UInt, UserId, http_headers::ContentDisposition, media::Method}; use tokio::{ fs, io::{AsyncReadExt, AsyncWriteExt}, }; -use super::{data::Metadata, FileMeta}; +use super::{FileMeta, data::Metadata}; /// Dimension specification for a thumbnail. 
#[derive(Debug)] @@ -65,12 +65,12 @@ impl super::Service { // 0, 0 because that's the original file let dim = dim.normalized(); - if let Ok(metadata) = self.db.search_file_metadata(mxc, &dim).await { - self.get_thumbnail_saved(metadata).await - } else if let Ok(metadata) = self.db.search_file_metadata(mxc, &Dim::default()).await { - self.get_thumbnail_generate(mxc, &dim, metadata).await - } else { - Ok(None) + match self.db.search_file_metadata(mxc, &dim).await { + | Ok(metadata) => self.get_thumbnail_saved(metadata).await, + | _ => match self.db.search_file_metadata(mxc, &Dim::default()).await { + | Ok(metadata) => self.get_thumbnail_generate(mxc, &dim, metadata).await, + | _ => Ok(None), + }, } } } diff --git a/src/service/migrations.rs b/src/service/migrations.rs index 69b1be4e..512a7867 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -1,25 +1,25 @@ use std::cmp; use conduwuit::{ - debug, debug_info, debug_warn, error, info, + Err, Result, debug, debug_info, debug_warn, error, info, result::NotFound, utils::{ - stream::{TryExpect, TryIgnore}, IterStream, ReadyExt, + stream::{TryExpect, TryIgnore}, }, - warn, Err, Result, + warn, }; use futures::{FutureExt, StreamExt}; use itertools::Itertools; use ruma::{ + OwnedUserId, RoomId, UserId, events::{ - push_rules::PushRulesEvent, room::member::MembershipState, GlobalAccountDataEventType, + GlobalAccountDataEventType, push_rules::PushRulesEvent, room::member::MembershipState, }, push::Ruleset, - OwnedUserId, RoomId, UserId, }; -use crate::{media, Services}; +use crate::{Services, media}; /// The current schema version. /// - If database is opened at greater version we reject with error. The diff --git a/src/service/mod.rs b/src/service/mod.rs index 71bd0eb4..0bde0255 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -30,7 +30,7 @@ pub mod users; extern crate conduwuit_core as conduwuit; extern crate conduwuit_database as database; -pub use conduwuit::{pdu, PduBuilder, PduCount, PduEvent}; +pub use conduwuit::{PduBuilder, PduCount, PduEvent, pdu}; pub(crate) use service::{Args, Dep, Service}; pub use crate::services::Services; diff --git a/src/service/presence/data.rs b/src/service/presence/data.rs index 4ec0a7ee..d7ef5175 100644 --- a/src/service/presence/data.rs +++ b/src/service/presence/data.rs @@ -1,16 +1,15 @@ use std::sync::Arc; use conduwuit::{ - debug_warn, utils, - utils::{stream::TryIgnore, ReadyExt}, - Result, + Result, debug_warn, utils, + utils::{ReadyExt, stream::TryIgnore}, }; use database::{Deserialized, Json, Map}; use futures::Stream; -use ruma::{events::presence::PresenceEvent, presence::PresenceState, UInt, UserId}; +use ruma::{UInt, UserId, events::presence::PresenceEvent, presence::PresenceState}; use super::Presence; -use crate::{globals, users, Dep}; +use crate::{Dep, globals, users}; pub(crate) struct Data { presenceid_presence: Arc, diff --git a/src/service/presence/mod.rs b/src/service/presence/mod.rs index eb4105e5..8f646be6 100644 --- a/src/service/presence/mod.rs +++ b/src/service/presence/mod.rs @@ -5,16 +5,16 @@ use std::{sync::Arc, time::Duration}; use async_trait::async_trait; use conduwuit::{ - checked, debug, debug_warn, error, result::LogErr, trace, Error, Result, Server, + Error, Result, Server, checked, debug, debug_warn, error, result::LogErr, trace, }; use database::Database; -use futures::{stream::FuturesUnordered, Stream, StreamExt, TryFutureExt}; +use futures::{Stream, StreamExt, TryFutureExt, stream::FuturesUnordered}; use loole::{Receiver, Sender}; -use 
ruma::{events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, UInt, UserId}; +use ruma::{OwnedUserId, UInt, UserId, events::presence::PresenceEvent, presence::PresenceState}; use tokio::time::sleep; use self::{data::Data, presence::Presence}; -use crate::{globals, users, Dep}; +use crate::{Dep, globals, users}; pub struct Service { timer_channel: (Sender, Receiver), diff --git a/src/service/presence/presence.rs b/src/service/presence/presence.rs index b322dfb4..3357bd61 100644 --- a/src/service/presence/presence.rs +++ b/src/service/presence/presence.rs @@ -1,8 +1,8 @@ -use conduwuit::{utils, Error, Result}; +use conduwuit::{Error, Result, utils}; use ruma::{ + UInt, UserId, events::presence::{PresenceEvent, PresenceEventContent}, presence::PresenceState, - UInt, UserId, }; use serde::{Deserialize, Serialize}; diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 43d60c08..2b269b3d 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -2,34 +2,35 @@ use std::{fmt::Debug, mem, sync::Arc}; use bytes::BytesMut; use conduwuit::{ - debug_warn, err, trace, + Err, PduEvent, Result, debug_warn, err, trace, utils::{stream::TryIgnore, string_from_bytes}, - warn, Err, PduEvent, Result, + warn, }; use database::{Deserialized, Ignore, Interfix, Json, Map}; use futures::{Stream, StreamExt}; use ipaddress::IPAddress; use ruma::{ + RoomId, UInt, UserId, api::{ - client::push::{set_pusher, Pusher, PusherKind}, + IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, + client::push::{Pusher, PusherKind, set_pusher}, push_gateway::send_event_notification::{ self, v1::{Device, Notification, NotificationCounts, NotificationPriority}, }, - IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, }, events::{ - room::power_levels::RoomPowerLevelsEventContent, AnySyncTimelineEvent, StateEventType, - TimelineEventType, + AnySyncTimelineEvent, StateEventType, TimelineEventType, + room::power_levels::RoomPowerLevelsEventContent, }, push::{ Action, PushConditionPowerLevelsCtx, PushConditionRoomCtx, PushFormat, Ruleset, Tweak, }, serde::Raw, - uint, RoomId, UInt, UserId, + uint, }; -use crate::{client, globals, rooms, sending, users, Dep}; +use crate::{Dep, client, globals, rooms, sending, users}; pub struct Service { db: Data, diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 66854764..8860d0a0 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -3,7 +3,7 @@ use std::{ net::{IpAddr, SocketAddr}, }; -use conduwuit::{debug, debug_error, debug_info, debug_warn, err, error, trace, Err, Result}; +use conduwuit::{Err, Result, debug, debug_error, debug_info, debug_warn, err, error, trace}; use futures::{FutureExt, TryFutureExt}; use hickory_resolver::error::ResolveError; use ipaddress::IPAddress; @@ -11,7 +11,7 @@ use ruma::ServerName; use super::{ cache::{CachedDest, CachedOverride, MAX_IPS}, - fed::{add_port_to_hostname, get_ip_with_port, FedDest, PortString}, + fed::{FedDest, PortString, add_port_to_hostname, get_ip_with_port}, }; #[derive(Clone, Debug)] @@ -71,12 +71,16 @@ impl super::Service { | None => if let Some(pos) = dest.as_str().find(':') { self.actual_dest_2(dest, cache, pos).await? - } else if let Some(delegated) = self.request_well_known(dest.as_str()).await? { - self.actual_dest_3(&mut host, cache, delegated).await? - } else if let Some(overrider) = self.query_srv_record(dest.as_str()).await? { - self.actual_dest_4(&host, cache, overrider).await? 
} else { - self.actual_dest_5(dest, cache).await? + match self.request_well_known(dest.as_str()).await? { + | Some(delegated) => + self.actual_dest_3(&mut host, cache, delegated).await?, + | _ => match self.query_srv_record(dest.as_str()).await? { + | Some(overrider) => + self.actual_dest_4(&host, cache, overrider).await?, + | _ => self.actual_dest_5(dest, cache).await?, + }, + } }, }; @@ -136,10 +140,10 @@ impl super::Service { self.actual_dest_3_2(cache, delegated, pos).await } else { trace!("Delegated hostname has no port in this branch"); - if let Some(overrider) = self.query_srv_record(&delegated).await? { - self.actual_dest_3_3(cache, delegated, overrider).await - } else { - self.actual_dest_3_4(cache, delegated).await + match self.query_srv_record(&delegated).await? { + | Some(overrider) => + self.actual_dest_3_3(cache, delegated, overrider).await, + | _ => self.actual_dest_3_4(cache, delegated).await, } }, } diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index 7b4f104d..6b05c00c 100644 --- a/src/service/resolver/cache.rs +++ b/src/service/resolver/cache.rs @@ -1,10 +1,10 @@ use std::{net::IpAddr, sync::Arc, time::SystemTime}; use conduwuit::{ + Result, arrayvec::ArrayVec, at, err, implement, utils::{math::Expected, rand, stream::TryIgnore}, - Result, }; use database::{Cbor, Deserialized, Map}; use futures::{Stream, StreamExt}; @@ -96,7 +96,7 @@ pub fn destinations(&self) -> impl Stream + Se self.destinations .stream() .ignore_err() - .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1 .0)) + .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1.0)) } #[implement(Cache)] @@ -104,7 +104,7 @@ pub fn overrides(&self) -> impl Stream + S self.overrides .stream() .ignore_err() - .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1 .0)) + .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1.0)) } impl CachedDest { diff --git a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs index ca6106e2..98ad7e60 100644 --- a/src/service/resolver/dns.rs +++ b/src/service/resolver/dns.rs @@ -1,8 +1,8 @@ use std::{net::SocketAddr, sync::Arc, time::Duration}; -use conduwuit::{err, Result, Server}; +use conduwuit::{Result, Server, err}; use futures::FutureExt; -use hickory_resolver::{lookup_ip::LookupIp, TokioAsyncResolver}; +use hickory_resolver::{TokioAsyncResolver, lookup_ip::LookupIp}; use reqwest::dns::{Addrs, Name, Resolve, Resolving}; use super::cache::{Cache, CachedOverride}; diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs index 6be9d42d..2ec9c0ef 100644 --- a/src/service/resolver/mod.rs +++ b/src/service/resolver/mod.rs @@ -6,10 +6,10 @@ mod tests; use std::sync::Arc; -use conduwuit::{arrayvec::ArrayString, utils::MutexMap, Result, Server}; +use conduwuit::{Result, Server, arrayvec::ArrayString, utils::MutexMap}; use self::{cache::Cache, dns::Resolver}; -use crate::{client, Dep}; +use crate::{Dep, client}; pub struct Service { pub cache: Arc, diff --git a/src/service/resolver/tests.rs b/src/service/resolver/tests.rs index 870f5eab..6e9d0e71 100644 --- a/src/service/resolver/tests.rs +++ b/src/service/resolver/tests.rs @@ -1,6 +1,6 @@ #![cfg(test)] -use super::fed::{add_port_to_hostname, get_ip_with_port, FedDest}; +use super::fed::{FedDest, add_port_to_hostname, get_ip_with_port}; #[test] fn ips_get_default_ports() { diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 17ed5e13..866e45a9 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -3,21 +3,20 @@ mod remote; use 
std::sync::Arc; use conduwuit::{ - err, - utils::{stream::TryIgnore, ReadyExt}, - Err, Result, Server, + Err, Result, Server, err, + utils::{ReadyExt, stream::TryIgnore}, }; use database::{Deserialized, Ignore, Interfix, Map}; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ - events::{ - room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, - StateEventType, - }, OwnedRoomId, OwnedServerName, OwnedUserId, RoomAliasId, RoomId, RoomOrAliasId, UserId, + events::{ + StateEventType, + room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, + }, }; -use crate::{admin, appservice, appservice::RegistrationInfo, globals, rooms, sending, Dep}; +use crate::{Dep, admin, appservice, appservice::RegistrationInfo, globals, rooms, sending}; pub struct Service { db: Data, diff --git a/src/service/rooms/alias/remote.rs b/src/service/rooms/alias/remote.rs index 7744bee2..60aed76d 100644 --- a/src/service/rooms/alias/remote.rs +++ b/src/service/rooms/alias/remote.rs @@ -1,8 +1,8 @@ use std::iter::once; -use conduwuit::{debug, debug_error, err, implement, Result}; +use conduwuit::{Result, debug, debug_error, err, implement}; use federation::query::get_room_information::v1::Response; -use ruma::{api::federation, OwnedRoomId, OwnedServerName, RoomAliasId, ServerName}; +use ruma::{OwnedRoomId, OwnedServerName, RoomAliasId, ServerName, api::federation}; #[implement(super::Service)] pub(super) async fn remote_resolve( diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs index af8ae364..8c3588cc 100644 --- a/src/service/rooms/auth_chain/data.rs +++ b/src/service/rooms/auth_chain/data.rs @@ -3,7 +3,7 @@ use std::{ sync::{Arc, Mutex}, }; -use conduwuit::{err, utils, utils::math::usize_from_f64, Err, Result}; +use conduwuit::{Err, Result, err, utils, utils::math::usize_from_f64}; use database::Map; use lru_cache::LruCache; diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 0ff96846..0903ea75 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -8,18 +8,18 @@ use std::{ }; use conduwuit::{ - at, debug, debug_error, implement, trace, + Err, Result, at, debug, debug_error, implement, trace, utils::{ - stream::{ReadyExt, TryBroadbandExt}, IterStream, + stream::{ReadyExt, TryBroadbandExt}, }, - validated, warn, Err, Result, + validated, warn, }; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{EventId, OwnedEventId, RoomId}; use self::data::Data; -use crate::{rooms, rooms::short::ShortEventId, Dep}; +use crate::{Dep, rooms, rooms::short::ShortEventId}; pub struct Service { services: Services, diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index 039efca7..4ea10641 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -1,9 +1,9 @@ use std::sync::Arc; -use conduwuit::{implement, utils::stream::TryIgnore, Result}; +use conduwuit::{Result, implement, utils::stream::TryIgnore}; use database::Map; use futures::Stream; -use ruma::{api::client::room::Visibility, RoomId}; +use ruma::{RoomId, api::client::room::Visibility}; pub struct Service { db: Data, diff --git a/src/service/rooms/event_handler/acl_check.rs b/src/service/rooms/event_handler/acl_check.rs index 714b6fc1..6b432a4b 100644 --- a/src/service/rooms/event_handler/acl_check.rs +++ b/src/service/rooms/event_handler/acl_check.rs @@ -1,7 +1,7 @@ -use conduwuit::{debug, implement, trace, warn, Err, Result}; +use 
conduwuit::{Err, Result, debug, implement, trace, warn}; use ruma::{ - events::{room::server_acl::RoomServerAclEventContent, StateEventType}, RoomId, ServerName, + events::{StateEventType, room::server_acl::RoomServerAclEventContent}, }; /// Returns Ok if the acl allows the server diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs index 540ebb64..80e91eff 100644 --- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs +++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs @@ -1,16 +1,16 @@ use std::{ - collections::{hash_map, BTreeMap, HashSet, VecDeque}, + collections::{BTreeMap, HashSet, VecDeque, hash_map}, sync::Arc, time::Instant, }; use conduwuit::{ - debug, debug_error, debug_warn, implement, pdu, trace, - utils::continue_exponential_backoff_secs, warn, PduEvent, + PduEvent, debug, debug_error, debug_warn, implement, pdu, trace, + utils::continue_exponential_backoff_secs, warn, }; use futures::TryFutureExt; use ruma::{ - api::federation::event::get_event, CanonicalJsonValue, OwnedEventId, RoomId, ServerName, + CanonicalJsonValue, OwnedEventId, RoomId, ServerName, api::federation::event::get_event, }; use super::get_room_version_id; @@ -138,12 +138,15 @@ pub(super) async fn fetch_and_handle_outliers<'a>( .and_then(CanonicalJsonValue::as_array) { for auth_event in auth_events { - if let Ok(auth_event) = - serde_json::from_value::(auth_event.clone().into()) - { - todo_auth_events.push_back(auth_event); - } else { - warn!("Auth event id is not valid"); + match serde_json::from_value::( + auth_event.clone().into(), + ) { + | Ok(auth_event) => { + todo_auth_events.push_back(auth_event); + }, + | _ => { + warn!("Auth event id is not valid"); + }, } } } else { diff --git a/src/service/rooms/event_handler/fetch_prev.rs b/src/service/rooms/event_handler/fetch_prev.rs index 5a38f7fe..e817430b 100644 --- a/src/service/rooms/event_handler/fetch_prev.rs +++ b/src/service/rooms/event_handler/fetch_prev.rs @@ -4,14 +4,13 @@ use std::{ }; use conduwuit::{ - debug_warn, err, implement, + PduEvent, Result, debug_warn, err, implement, state_res::{self}, - PduEvent, Result, }; -use futures::{future, FutureExt}; +use futures::{FutureExt, future}; use ruma::{ - int, uint, CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, ServerName, - UInt, + CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, ServerName, UInt, int, + uint, }; use super::check_room_id; @@ -43,54 +42,59 @@ pub(super) async fn fetch_prev( while let Some(prev_event_id) = todo_outlier_stack.pop_front() { self.services.server.check_running()?; - if let Some((pdu, mut json_opt)) = self + match self .fetch_and_handle_outliers(origin, &[prev_event_id.clone()], create_event, room_id) .boxed() .await .pop() { - check_room_id(room_id, &pdu)?; + | Some((pdu, mut json_opt)) => { + check_room_id(room_id, &pdu)?; - let limit = self.services.server.config.max_fetch_prev_events; - if amount > limit { - debug_warn!("Max prev event limit reached! 
Limit: {limit}"); - graph.insert(prev_event_id.clone(), HashSet::new()); - continue; - } - - if json_opt.is_none() { - json_opt = self - .services - .outlier - .get_outlier_pdu_json(&prev_event_id) - .await - .ok(); - } - - if let Some(json) = json_opt { - if pdu.origin_server_ts > first_ts_in_room { - amount = amount.saturating_add(1); - for prev_prev in &pdu.prev_events { - if !graph.contains_key(prev_prev) { - todo_outlier_stack.push_back(prev_prev.clone()); - } - } - - graph - .insert(prev_event_id.clone(), pdu.prev_events.iter().cloned().collect()); - } else { - // Time based check failed + let limit = self.services.server.config.max_fetch_prev_events; + if amount > limit { + debug_warn!("Max prev event limit reached! Limit: {limit}"); graph.insert(prev_event_id.clone(), HashSet::new()); + continue; } - eventid_info.insert(prev_event_id.clone(), (pdu, json)); - } else { - // Get json failed, so this was not fetched over federation + if json_opt.is_none() { + json_opt = self + .services + .outlier + .get_outlier_pdu_json(&prev_event_id) + .await + .ok(); + } + + if let Some(json) = json_opt { + if pdu.origin_server_ts > first_ts_in_room { + amount = amount.saturating_add(1); + for prev_prev in &pdu.prev_events { + if !graph.contains_key(prev_prev) { + todo_outlier_stack.push_back(prev_prev.clone()); + } + } + + graph.insert( + prev_event_id.clone(), + pdu.prev_events.iter().cloned().collect(), + ); + } else { + // Time based check failed + graph.insert(prev_event_id.clone(), HashSet::new()); + } + + eventid_info.insert(prev_event_id.clone(), (pdu, json)); + } else { + // Get json failed, so this was not fetched over federation + graph.insert(prev_event_id.clone(), HashSet::new()); + } + }, + | _ => { + // Fetch and handle failed graph.insert(prev_event_id.clone(), HashSet::new()); - } - } else { - // Fetch and handle failed - graph.insert(prev_event_id.clone(), HashSet::new()); + }, } } diff --git a/src/service/rooms/event_handler/fetch_state.rs b/src/service/rooms/event_handler/fetch_state.rs index 4f2580db..b1a4a38b 100644 --- a/src/service/rooms/event_handler/fetch_state.rs +++ b/src/service/rooms/event_handler/fetch_state.rs @@ -1,10 +1,10 @@ -use std::collections::{hash_map, HashMap}; +use std::collections::{HashMap, hash_map}; -use conduwuit::{debug, debug_warn, implement, Err, Error, PduEvent, Result}; +use conduwuit::{Err, Error, PduEvent, Result, debug, debug_warn, implement}; use futures::FutureExt; use ruma::{ - api::federation::event::get_room_state_ids, events::StateEventType, EventId, OwnedEventId, - RoomId, ServerName, + EventId, OwnedEventId, RoomId, ServerName, api::federation::event::get_room_state_ids, + events::StateEventType, }; use crate::rooms::short::ShortStateKey; diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index 31c7762d..b6d3e21e 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -1,14 +1,14 @@ use std::{ - collections::{hash_map, BTreeMap}, + collections::{BTreeMap, hash_map}, time::Instant, }; -use conduwuit::{debug, debug::INFO_SPAN_LEVEL, err, implement, warn, Err, Result}; +use conduwuit::{Err, Result, debug, debug::INFO_SPAN_LEVEL, err, implement, warn}; use futures::{ - future::{try_join5, OptionFuture}, FutureExt, + future::{OptionFuture, try_join5}, }; -use ruma::{events::StateEventType, CanonicalJsonValue, EventId, RoomId, ServerName, UserId}; +use ruma::{CanonicalJsonValue, EventId, 
RoomId, ServerName, UserId, events::StateEventType}; use crate::rooms::timeline::RawPduId; diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index e628c77a..974eb300 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -1,15 +1,15 @@ use std::{ - collections::{hash_map, BTreeMap, HashMap}, + collections::{BTreeMap, HashMap, hash_map}, sync::Arc, }; use conduwuit::{ - debug, debug_info, err, implement, state_res, trace, warn, Err, Error, PduEvent, Result, + Err, Error, PduEvent, Result, debug, debug_info, err, implement, state_res, trace, warn, }; -use futures::{future::ready, TryFutureExt}; +use futures::{TryFutureExt, future::ready}; use ruma::{ - api::client::error::ErrorKind, events::StateEventType, CanonicalJsonObject, - CanonicalJsonValue, EventId, RoomId, ServerName, + CanonicalJsonObject, CanonicalJsonValue, EventId, RoomId, ServerName, + api::client::error::ErrorKind, events::StateEventType, }; use super::{check_room_id, get_room_version_id, to_room_version}; diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index f911f1fd..cf69a515 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -5,8 +5,8 @@ use std::{ }; use conduwuit::{ - debug, debug::INFO_SPAN_LEVEL, implement, utils::continue_exponential_backoff_secs, Err, - PduEvent, Result, + Err, PduEvent, Result, debug, debug::INFO_SPAN_LEVEL, implement, + utils::continue_exponential_backoff_secs, }; use ruma::{CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName, UInt}; diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 5960c734..e9e79ce4 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -18,16 +18,16 @@ use std::{ }; use conduwuit::{ - utils::{MutexMap, TryFutureExtExt}, Err, PduEvent, Result, RoomVersion, Server, + utils::{MutexMap, TryFutureExtExt}, }; use futures::TryFutureExt; use ruma::{ - events::room::create::RoomCreateEventContent, OwnedEventId, OwnedRoomId, RoomId, - RoomVersionId, + OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, + events::room::create::RoomCreateEventContent, }; -use crate::{globals, rooms, sending, server_keys, Dep}; +use crate::{Dep, globals, rooms, sending, server_keys}; pub struct Service { pub mutex_federation: RoomMutexMap, diff --git a/src/service/rooms/event_handler/parse_incoming_pdu.rs b/src/service/rooms/event_handler/parse_incoming_pdu.rs index 9b130763..a49fc541 100644 --- a/src/service/rooms/event_handler/parse_incoming_pdu.rs +++ b/src/service/rooms/event_handler/parse_incoming_pdu.rs @@ -1,4 +1,4 @@ -use conduwuit::{err, implement, pdu::gen_event_id_canonical_json, result::FlatOk, Result}; +use conduwuit::{Result, err, implement, pdu::gen_event_id_canonical_json, result::FlatOk}; use ruma::{CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId}; use serde_json::value::RawValue as RawJsonValue; diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 37d47d47..9033c3a8 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -5,13 +5,12 @@ use std::{ }; use conduwuit::{ - err, implement, + Error, Result, err, implement, state_res::{self, StateMap}, trace, - 
utils::stream::{automatic_width, IterStream, ReadyExt, TryWidebandExt, WidebandExt}, - Error, Result, + utils::stream::{IterStream, ReadyExt, TryWidebandExt, WidebandExt, automatic_width}, }; -use futures::{future::try_join, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::try_join}; use ruma::{OwnedEventId, RoomId, RoomVersionId}; use crate::rooms::state_compressor::CompressedState; @@ -93,11 +92,7 @@ pub async fn resolve_state( let new_room_state: CompressedState = self .services .state_compressor - .compress_state_events( - state_events - .iter() - .map(|(ref ssk, eid)| (ssk, (*eid).borrow())), - ) + .compress_state_events(state_events.iter().map(|(ssk, eid)| (ssk, (*eid).borrow()))) .collect() .await; diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 2eb6013a..8326f9da 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -6,11 +6,10 @@ use std::{ }; use conduwuit::{ - debug, err, implement, trace, + PduEvent, Result, StateMap, debug, err, implement, trace, utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryWidebandExt}, - PduEvent, Result, StateMap, }; -use futures::{future::try_join, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::try_join}; use ruma::{OwnedEventId, RoomId, RoomVersionId}; use crate::rooms::short::ShortStateHash; diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index 385d2142..c1a1c3eb 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -1,12 +1,13 @@ use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::Instant}; use conduwuit::{ - debug, debug_info, err, implement, state_res, trace, + Err, EventTypeExt, PduEvent, Result, StateKey, debug, debug_info, err, implement, state_res, + trace, utils::stream::{BroadbandExt, ReadyExt}, - warn, Err, EventTypeExt, PduEvent, Result, StateKey, + warn, }; -use futures::{future::ready, FutureExt, StreamExt}; -use ruma::{events::StateEventType, CanonicalJsonValue, RoomId, ServerName}; +use futures::{FutureExt, StreamExt, future::ready}; +use ruma::{CanonicalJsonValue, RoomId, ServerName, events::StateEventType}; use super::{get_room_version_id, to_room_version}; use crate::rooms::{ diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index a6e00271..346314d1 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -3,13 +3,12 @@ use std::{collections::HashSet, sync::Arc}; use conduwuit::{ - implement, - utils::{stream::TryIgnore, IterStream, ReadyExt}, - Result, + Result, implement, + utils::{IterStream, ReadyExt, stream::TryIgnore}, }; use database::{Database, Deserialized, Handle, Interfix, Map, Qry}; -use futures::{pin_mut, Stream, StreamExt}; -use ruma::{api::client::filter::LazyLoadOptions, DeviceId, OwnedUserId, RoomId, UserId}; +use futures::{Stream, StreamExt, pin_mut}; +use ruma::{DeviceId, OwnedUserId, RoomId, UserId, api::client::filter::LazyLoadOptions}; pub struct Service { db: Data, diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 6d5a85a0..54eef47d 100644 --- a/src/service/rooms/metadata/mod.rs +++ 
b/src/service/rooms/metadata/mod.rs @@ -1,11 +1,11 @@ use std::sync::Arc; -use conduwuit::{implement, utils::stream::TryIgnore, Result}; +use conduwuit::{Result, implement, utils::stream::TryIgnore}; use database::Map; use futures::{Stream, StreamExt}; use ruma::RoomId; -use crate::{rooms, Dep}; +use crate::{Dep, rooms}; pub struct Service { db: Data, diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index 9cd3d805..a1b0263a 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use database::{Deserialized, Json, Map}; use ruma::{CanonicalJsonObject, EventId}; diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 26e11ded..f0beab5a 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -1,25 +1,25 @@ use std::{mem::size_of, sync::Arc}; use conduwuit::{ + PduCount, PduEvent, arrayvec::ArrayVec, result::LogErr, utils::{ + ReadyExt, stream::{TryIgnore, WidebandExt}, - u64_from_u8, ReadyExt, + u64_from_u8, }, - PduCount, PduEvent, }; use database::Map; use futures::{Stream, StreamExt}; -use ruma::{api::Direction, EventId, RoomId, UserId}; +use ruma::{EventId, RoomId, UserId, api::Direction}; use crate::{ - rooms, + Dep, rooms, rooms::{ short::{ShortEventId, ShortRoomId}, timeline::{PduId, RawPduId}, }, - Dep, }; pub(super) struct Data { diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index ba289f9b..18221c2d 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -2,11 +2,11 @@ mod data; use std::sync::Arc; use conduwuit::{PduCount, Result}; -use futures::{future::try_join, StreamExt}; -use ruma::{api::Direction, EventId, RoomId, UserId}; +use futures::{StreamExt, future::try_join}; +use ruma::{EventId, RoomId, UserId, api::Direction}; use self::data::{Data, PdusIterItem}; -use crate::{rooms, Dep}; +use crate::{Dep, rooms}; pub struct Service { services: Services, @@ -81,7 +81,7 @@ impl Service { .collect(); 'limit: while let Some(stack_pdu) = stack.pop() { - let target = match stack_pdu.0 .0 { + let target = match stack_pdu.0.0 { | PduCount::Normal(c) => c, // TODO: Support backfilled relations | PduCount::Backfilled(_) => 0, // This will result in an empty iterator diff --git a/src/service/rooms/read_receipt/data.rs b/src/service/rooms/read_receipt/data.rs index c21ad36c..62f87948 100644 --- a/src/service/rooms/read_receipt/data.rs +++ b/src/service/rooms/read_receipt/data.rs @@ -1,18 +1,18 @@ use std::sync::Arc; use conduwuit::{ - utils::{stream::TryIgnore, ReadyExt}, Result, + utils::{ReadyExt, stream::TryIgnore}, }; use database::{Deserialized, Json, Map}; use futures::{Stream, StreamExt}; use ruma::{ - events::{receipt::ReceiptEvent, AnySyncEphemeralRoomEvent}, - serde::Raw, CanonicalJsonObject, RoomId, UserId, + events::{AnySyncEphemeralRoomEvent, receipt::ReceiptEvent}, + serde::Raw, }; -use crate::{globals, Dep}; +use crate::{Dep, globals}; pub(super) struct Data { roomuserid_privateread: Arc, diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index 2bc21355..d6239aee 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -2,19 +2,19 @@ mod data; use std::{collections::BTreeMap, sync::Arc}; -use conduwuit::{debug, err, warn, PduCount, PduId, RawPduId, Result}; 
-use futures::{try_join, Stream, TryFutureExt}; +use conduwuit::{PduCount, PduId, RawPduId, Result, debug, err, warn}; +use futures::{Stream, TryFutureExt, try_join}; use ruma::{ + OwnedEventId, OwnedUserId, RoomId, UserId, events::{ - receipt::{ReceiptEvent, ReceiptEventContent, Receipts}, AnySyncEphemeralRoomEvent, SyncEphemeralRoomEvent, + receipt::{ReceiptEvent, ReceiptEventContent, Receipts}, }, serde::Raw, - OwnedEventId, OwnedUserId, RoomId, UserId, }; use self::data::{Data, ReceiptItem}; -use crate::{rooms, sending, Dep}; +use crate::{Dep, rooms, sending}; pub struct Service { services: Services, @@ -145,12 +145,14 @@ where let receipt = serde_json::from_str::>( value.json().get(), ); - if let Ok(value) = receipt { - for (event, receipt) in value.content { - json.insert(event, receipt); - } - } else { - debug!("failed to parse receipt: {:?}", receipt); + match receipt { + | Ok(value) => + for (event, receipt) in value.content { + json.insert(event, receipt); + }, + | _ => { + debug!("failed to parse receipt: {:?}", receipt); + }, } } let content = ReceiptEventContent::from_iter(json); diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index cc015237..4100dd75 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,26 +1,24 @@ use std::sync::Arc; use conduwuit::{ + PduCount, PduEvent, Result, arrayvec::ArrayVec, implement, utils::{ - set, + ArrayVecExt, IterStream, ReadyExt, set, stream::{TryIgnore, WidebandExt}, - ArrayVecExt, IterStream, ReadyExt, }, - PduCount, PduEvent, Result, }; -use database::{keyval::Val, Map}; +use database::{Map, keyval::Val}; use futures::{Stream, StreamExt}; -use ruma::{api::client::search::search_events::v3::Criteria, RoomId, UserId}; +use ruma::{RoomId, UserId, api::client::search::search_events::v3::Criteria}; use crate::{ - rooms, + Dep, rooms, rooms::{ short::ShortRoomId, timeline::{PduId, RawPduId}, }, - Dep, }; pub struct Service { @@ -140,7 +138,7 @@ pub async fn search_pdus<'a>( pub async fn search_pdu_ids( &self, query: &RoomQuery<'_>, -) -> Result + Send + '_> { +) -> Result + Send + '_ + use<'_>> { let shortroomid = self.services.short.get_shortroomid(query.room_id).await?; let pdu_ids = self.search_pdu_ids_query_room(query, shortroomid).await; @@ -187,7 +185,7 @@ fn search_pdu_ids_query_word( &self, shortroomid: ShortRoomId, word: &str, -) -> impl Stream> + Send + '_ { +) -> impl Stream> + Send + '_ + use<'_> { // rustc says const'ing this not yet stable let end_id: RawPduId = PduId { shortroomid, diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 8728325a..3980617e 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -1,13 +1,13 @@ use std::{borrow::Borrow, fmt::Debug, mem::size_of_val, sync::Arc}; pub use conduwuit::pdu::{ShortEventId, ShortId, ShortRoomId, ShortStateKey}; -use conduwuit::{err, implement, utils, utils::IterStream, Result, StateKey}; +use conduwuit::{Result, StateKey, err, implement, utils, utils::IterStream}; use database::{Deserialized, Get, Map, Qry}; use futures::{Stream, StreamExt}; -use ruma::{events::StateEventType, EventId, RoomId}; +use ruma::{EventId, RoomId, events::StateEventType}; use serde::Deserialize; -use crate::{globals, Dep}; +use crate::{Dep, globals}; pub struct Service { db: Data, diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 268d6dfe..52e7d2be 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -5,18 +5,18 
@@ mod tests; use std::sync::Arc; use conduwuit::{ - implement, + Err, Error, Result, implement, utils::{ + IterStream, future::BoolExt, math::usize_from_f64, stream::{BroadbandExt, ReadyExt}, - IterStream, }, - Err, Error, Result, }; -use futures::{pin_mut, stream::FuturesUnordered, FutureExt, Stream, StreamExt, TryFutureExt}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, pin_mut, stream::FuturesUnordered}; use lru_cache::LruCache; use ruma::{ + OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, api::{ client::space::SpaceHierarchyRoomsChunk, federation::{ @@ -25,18 +25,17 @@ use ruma::{ }, }, events::{ + StateEventType, room::join_rules::{JoinRule, RoomJoinRulesEventContent}, space::child::{HierarchySpaceChildEvent, SpaceChildEventContent}, - StateEventType, }, serde::Raw, space::SpaceRoomJoinRule, - OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, }; use tokio::sync::{Mutex, MutexGuard}; pub use self::pagination_token::PaginationToken; -use crate::{conduwuit::utils::TryFutureExtExt, rooms, sending, Dep}; +use crate::{Dep, conduwuit::utils::TryFutureExtExt, rooms, sending}; pub struct Service { services: Services, @@ -440,8 +439,9 @@ async fn is_accessible_child( pub fn get_parent_children_via( parent: &SpaceHierarchyParentSummary, suggested_only: bool, -) -> impl DoubleEndedIterator)> + Send + '_ -{ +) -> impl DoubleEndedIterator + use<>)> ++ Send ++ '_ { parent .children_state .iter() diff --git a/src/service/rooms/spaces/pagination_token.rs b/src/service/rooms/spaces/pagination_token.rs index 8f019e8d..d97b7a2f 100644 --- a/src/service/rooms/spaces/pagination_token.rs +++ b/src/service/rooms/spaces/pagination_token.rs @@ -4,7 +4,7 @@ use std::{ }; use conduwuit::{Error, Result}; -use ruma::{api::client::error::ErrorKind, UInt}; +use ruma::{UInt, api::client::error::ErrorKind}; use crate::rooms::short::ShortRoomId; diff --git a/src/service/rooms/spaces/tests.rs b/src/service/rooms/spaces/tests.rs index dd6c2f35..d0395fdd 100644 --- a/src/service/rooms/spaces/tests.rs +++ b/src/service/rooms/spaces/tests.rs @@ -1,13 +1,13 @@ use std::str::FromStr; use ruma::{ + UInt, api::federation::space::{SpaceHierarchyParentSummary, SpaceHierarchyParentSummaryInit}, owned_room_id, owned_server_name, space::SpaceRoomJoinRule, - UInt, }; -use crate::rooms::spaces::{get_parent_children_via, PaginationToken}; +use crate::rooms::spaces::{PaginationToken, get_parent_children_via}; #[test] fn get_summary_children() { diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index d538de3c..8683a3be 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,36 +1,34 @@ use std::{collections::HashMap, fmt::Write, iter::once, sync::Arc}; use conduwuit::{ - err, + PduEvent, Result, err, result::FlatOk, state_res::{self, StateMap}, utils::{ - calculate_hash, + IterStream, MutexMap, MutexMapGuard, ReadyExt, calculate_hash, stream::{BroadbandExt, TryIgnore}, - IterStream, MutexMap, MutexMapGuard, ReadyExt, }, - warn, PduEvent, Result, + warn, }; use database::{Deserialized, Ignore, Interfix, Map}; use futures::{ - future::join_all, pin_mut, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, + FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, future::join_all, pin_mut, }; use ruma::{ + EventId, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, UserId, events::{ - room::{create::RoomCreateEventContent, member::RoomMemberEventContent}, AnyStrippedStateEvent, StateEventType, TimelineEventType, + 
room::{create::RoomCreateEventContent, member::RoomMemberEventContent}, }, serde::Raw, - EventId, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, UserId, }; use crate::{ - globals, rooms, + Dep, globals, rooms, rooms::{ short::{ShortEventId, ShortStateHash}, - state_compressor::{parse_compressed_state_event, CompressedState}, + state_compressor::{CompressedState, parse_compressed_state_event}, }, - Dep, }; pub struct Service { @@ -192,13 +190,13 @@ impl Service { .await; if !already_existed { - let states_parents = if let Ok(p) = previous_shortstatehash { - self.services - .state_compressor - .load_shortstatehash_info(p) - .await? - } else { - Vec::new() + let states_parents = match previous_shortstatehash { + | Ok(p) => + self.services + .state_compressor + .load_shortstatehash_info(p) + .await?, + | _ => Vec::new(), }; let (statediffnew, statediffremoved) = @@ -256,63 +254,65 @@ impl Service { .aput::(shorteventid, p); } - if let Some(state_key) = &new_pdu.state_key { - let states_parents = if let Ok(p) = previous_shortstatehash { - self.services + match &new_pdu.state_key { + | Some(state_key) => { + let states_parents = match previous_shortstatehash { + | Ok(p) => + self.services + .state_compressor + .load_shortstatehash_info(p) + .await?, + | _ => Vec::new(), + }; + + let shortstatekey = self + .services + .short + .get_or_create_shortstatekey(&new_pdu.kind.to_string().into(), state_key) + .await; + + let new = self + .services .state_compressor - .load_shortstatehash_info(p) - .await? - } else { - Vec::new() - }; + .compress_state_event(shortstatekey, &new_pdu.event_id) + .await; - let shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&new_pdu.kind.to_string().into(), state_key) - .await; + let replaces = states_parents + .last() + .map(|info| { + info.full_state + .iter() + .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) + }) + .unwrap_or_default(); - let new = self - .services - .state_compressor - .compress_state_event(shortstatekey, &new_pdu.event_id) - .await; + if Some(&new) == replaces { + return Ok(previous_shortstatehash.expect("must exist")); + } - let replaces = states_parents - .last() - .map(|info| { - info.full_state - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); + // TODO: statehash with deterministic inputs + let shortstatehash = self.services.globals.next_count()?; - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } + let mut statediffnew = CompressedState::new(); + statediffnew.insert(new); - // TODO: statehash with deterministic inputs - let shortstatehash = self.services.globals.next_count()?; + let mut statediffremoved = CompressedState::new(); + if let Some(replaces) = replaces { + statediffremoved.insert(*replaces); + } - let mut statediffnew = CompressedState::new(); - statediffnew.insert(new); + self.services.state_compressor.save_state_from_diff( + shortstatehash, + Arc::new(statediffnew), + Arc::new(statediffremoved), + 2, + states_parents, + )?; - let mut statediffremoved = CompressedState::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.services.state_compressor.save_state_from_diff( - shortstatehash, - Arc::new(statediffnew), - Arc::new(statediffremoved), - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) + Ok(shortstatehash) + }, + | _ => + 
Ok(previous_shortstatehash.expect("first event in room must be a state event")), } } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index b7952ce6..7004e35a 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -9,14 +9,16 @@ use std::{ }; use conduwuit::{ - err, utils, - utils::math::{usize_from_f64, Expected}, - Result, + Result, err, utils, + utils::math::{Expected, usize_from_f64}, }; use database::Map; use lru_cache::LruCache; use ruma::{ + EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, OwnedRoomId, OwnedServerName, + OwnedUserId, RoomId, UserId, events::{ + StateEventType, room::{ avatar::RoomAvatarEventContent, canonical_alias::RoomCanonicalAliasEventContent, @@ -29,15 +31,12 @@ use ruma::{ name::RoomNameEventContent, topic::RoomTopicEventContent, }, - StateEventType, }, room::RoomType, space::SpaceRoomJoinRule, - EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, OwnedRoomId, OwnedServerName, - OwnedUserId, RoomId, UserId, }; -use crate::{rooms, rooms::short::ShortStateHash, Dep}; +use crate::{Dep, rooms, rooms::short::ShortStateHash}; pub struct Service { pub server_visibility_cache: Mutex>, diff --git a/src/service/rooms/state_accessor/room_state.rs b/src/service/rooms/state_accessor/room_state.rs index e3ec55fe..ff26b33a 100644 --- a/src/service/rooms/state_accessor/room_state.rs +++ b/src/service/rooms/state_accessor/room_state.rs @@ -1,8 +1,8 @@ use std::borrow::Borrow; -use conduwuit::{err, implement, PduEvent, Result, StateKey}; +use conduwuit::{PduEvent, Result, StateKey, err, implement}; use futures::{Stream, StreamExt, TryFutureExt}; -use ruma::{events::StateEventType, EventId, RoomId}; +use ruma::{EventId, RoomId, events::StateEventType}; use serde::Deserialize; /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
diff --git a/src/service/rooms/state_accessor/server_can.rs b/src/service/rooms/state_accessor/server_can.rs index 4d834227..2e8f3325 100644 --- a/src/service/rooms/state_accessor/server_can.rs +++ b/src/service/rooms/state_accessor/server_can.rs @@ -1,11 +1,11 @@ use conduwuit::{error, implement, utils::stream::ReadyExt}; use futures::StreamExt; use ruma::{ - events::{ - room::history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - StateEventType, - }, EventId, RoomId, ServerName, + events::{ + StateEventType, + room::history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + }, }; /// Whether a server is allowed to see an event through federation, based on diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs index da1500cb..625defe6 100644 --- a/src/service/rooms/state_accessor/state.rs +++ b/src/service/rooms/state_accessor/state.rs @@ -1,27 +1,26 @@ use std::{borrow::Borrow, ops::Deref, sync::Arc}; use conduwuit::{ - at, err, implement, pair_of, + PduEvent, Result, StateKey, at, err, implement, pair_of, utils::{ result::FlatOk, stream::{BroadbandExt, IterStream, ReadyExt, TryExpect}, }, - PduEvent, Result, StateKey, }; use database::Deserialized; -use futures::{future::try_join, pin_mut, FutureExt, Stream, StreamExt, TryFutureExt}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, future::try_join, pin_mut}; use ruma::{ - events::{ - room::member::{MembershipState, RoomMemberEventContent}, - StateEventType, - }, EventId, OwnedEventId, UserId, + events::{ + StateEventType, + room::member::{MembershipState, RoomMemberEventContent}, + }, }; use serde::Deserialize; use crate::rooms::{ short::{ShortEventId, ShortStateHash, ShortStateKey}, - state_compressor::{compress_state_event, parse_compressed_state_event, CompressedState}, + state_compressor::{CompressedState, compress_state_event, parse_compressed_state_event}, }; /// The user was a joined member at this state (potentially in the past) diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs index 0332c227..c30e1da8 100644 --- a/src/service/rooms/state_accessor/user_can.rs +++ b/src/service/rooms/state_accessor/user_can.rs @@ -1,14 +1,14 @@ -use conduwuit::{error, implement, pdu::PduBuilder, Err, Error, Result}; +use conduwuit::{Err, Error, Result, error, implement, pdu::PduBuilder}; use ruma::{ + EventId, RoomId, UserId, events::{ + StateEventType, TimelineEventType, room::{ history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, member::{MembershipState, RoomMemberEventContent}, power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, }, - StateEventType, TimelineEventType, }, - EventId, RoomId, UserId, }; use crate::rooms::state::RoomMutexGuard; @@ -44,7 +44,7 @@ pub async fn user_can_redact( ))); } - if let Ok(pl_event_content) = self + match self .room_state_get_content::( room_id, &StateEventType::RoomPowerLevels, @@ -52,33 +52,35 @@ pub async fn user_can_redact( ) .await { - let pl_event: RoomPowerLevels = pl_event_content.into(); - Ok(pl_event.user_can_redact_event_of_other(sender) - || pl_event.user_can_redact_own_event(sender) - && if let Ok(redacting_event) = redacting_event { - if federation { - redacting_event.sender.server_name() == sender.server_name() - } else { - redacting_event.sender == sender - } - } else { - false - }) - } else { - // Falling back on m.room.create to judge power level - if let Ok(room_create) = self - .room_state_get(room_id, 
&StateEventType::RoomCreate, "") - .await - { - Ok(room_create.sender == sender - || redacting_event - .as_ref() - .is_ok_and(|redacting_event| redacting_event.sender == sender)) - } else { - Err(Error::bad_database( - "No m.room.power_levels or m.room.create events in database for room", - )) - } + | Ok(pl_event_content) => { + let pl_event: RoomPowerLevels = pl_event_content.into(); + Ok(pl_event.user_can_redact_event_of_other(sender) + || pl_event.user_can_redact_own_event(sender) + && match redacting_event { + | Ok(redacting_event) => + if federation { + redacting_event.sender.server_name() == sender.server_name() + } else { + redacting_event.sender == sender + }, + | _ => false, + }) + }, + | _ => { + // Falling back on m.room.create to judge power level + match self + .room_state_get(room_id, &StateEventType::RoomCreate, "") + .await + { + | Ok(room_create) => Ok(room_create.sender == sender + || redacting_event + .as_ref() + .is_ok_and(|redacting_event| redacting_event.sender == sender)), + | _ => Err(Error::bad_database( + "No m.room.power_levels or m.room.create events in database for room", + )), + } + }, } } diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 0d25142d..4403468b 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -4,31 +4,31 @@ use std::{ }; use conduwuit::{ - is_not_empty, + Result, is_not_empty, result::LogErr, - utils::{stream::TryIgnore, ReadyExt, StreamTools}, - warn, Result, + utils::{ReadyExt, StreamTools, stream::TryIgnore}, + warn, }; -use database::{serialize_key, Deserialized, Ignore, Interfix, Json, Map}; -use futures::{future::join5, pin_mut, stream::iter, Stream, StreamExt}; +use database::{Deserialized, Ignore, Interfix, Json, Map, serialize_key}; +use futures::{Stream, StreamExt, future::join5, pin_mut, stream::iter}; use itertools::Itertools; use ruma::{ + OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, events::{ + AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, + RoomAccountDataEventType, StateEventType, direct::DirectEvent, room::{ create::RoomCreateEventContent, member::{MembershipState, RoomMemberEventContent}, power_levels::RoomPowerLevelsEventContent, }, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, StateEventType, }, int, serde::Raw, - OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, }; -use crate::{account_data, appservice::RegistrationInfo, globals, rooms, users, Dep}; +use crate::{Dep, account_data, appservice::RegistrationInfo, globals, rooms, users}; pub struct Service { appservice_in_room_cache: AppServiceInRoomCache, diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 18731809..c566eb1c 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -6,10 +6,10 @@ use std::{ }; use conduwuit::{ + Result, arrayvec::ArrayVec, at, checked, err, expected, utils, utils::{bytes, math::usize_from_f64, stream::IterStream}, - Result, }; use database::Map; use futures::{Stream, StreamExt}; @@ -17,9 +17,8 @@ use lru_cache::LruCache; use ruma::{EventId, RoomId}; use crate::{ - rooms, + Dep, rooms, rooms::short::{ShortEventId, ShortId, ShortStateHash, ShortStateKey}, - Dep, }; pub struct Service { diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs index bc995e27..7f9a7515 100644 --- a/src/service/rooms/threads/mod.rs +++ 
b/src/service/rooms/threads/mod.rs @@ -1,22 +1,21 @@ use std::{collections::BTreeMap, sync::Arc}; use conduwuit::{ - err, + PduCount, PduEvent, PduId, RawPduId, Result, err, utils::{ - stream::{TryIgnore, WidebandExt}, ReadyExt, + stream::{TryIgnore, WidebandExt}, }, - PduCount, PduEvent, PduId, RawPduId, Result, }; use database::{Deserialized, Map}; use futures::{Stream, StreamExt}; use ruma::{ - api::client::threads::get_threads::v1::IncludeThreads, events::relation::BundledThread, uint, CanonicalJsonValue, EventId, OwnedUserId, RoomId, UserId, + api::client::threads::get_threads::v1::IncludeThreads, events::relation::BundledThread, uint, }; use serde_json::json; -use crate::{rooms, rooms::short::ShortRoomId, Dep}; +use crate::{Dep, rooms, rooms::short::ShortRoomId}; pub struct Service { db: Data, @@ -121,10 +120,13 @@ impl Service { } let mut users = Vec::new(); - if let Ok(userids) = self.get_participants(&root_id).await { - users.extend_from_slice(&userids); - } else { - users.push(root_pdu.sender); + match self.get_participants(&root_id).await { + | Ok(userids) => { + users.extend_from_slice(&userids); + }, + | _ => { + users.push(root_pdu.sender); + }, } users.push(pdu.sender.clone()); diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 457c1e8d..94c78bb0 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -1,18 +1,17 @@ use std::{borrow::Borrow, sync::Arc}; use conduwuit::{ - at, err, + Err, PduCount, PduEvent, Result, at, err, result::{LogErr, NotFound}, utils, utils::stream::TryReadyExt, - Err, PduCount, PduEvent, Result, }; use database::{Database, Deserialized, Json, KeyVal, Map}; -use futures::{future::select_ok, pin_mut, FutureExt, Stream, TryFutureExt, TryStreamExt}; -use ruma::{api::Direction, CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId}; +use futures::{FutureExt, Stream, TryFutureExt, TryStreamExt, future::select_ok, pin_mut}; +use ruma::{CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId, api::Direction}; use super::{PduId, RawPduId}; -use crate::{rooms, rooms::short::ShortRoomId, Dep}; +use crate::{Dep, rooms, rooms::short::ShortRoomId}; pub(super) struct Data { eventid_outlierpdu: Arc, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 9d6ee982..4be97fb2 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -10,22 +10,25 @@ use std::{ }; use conduwuit::{ - at, debug, debug_warn, err, error, implement, info, - pdu::{gen_event_id, EventHash, PduBuilder, PduCount, PduEvent}, + Err, Error, Result, Server, at, debug, debug_warn, err, error, implement, info, + pdu::{EventHash, PduBuilder, PduCount, PduEvent, gen_event_id}, state_res::{self, Event, RoomVersion}, utils::{ - self, future::TryExtExt, stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt, + self, IterStream, MutexMap, MutexMapGuard, ReadyExt, future::TryExtExt, stream::TryIgnore, }, - validated, warn, Err, Error, Result, Server, + validated, warn, }; pub use conduwuit::{PduId, RawPduId}; use futures::{ - future, future::ready, pin_mut, Future, FutureExt, Stream, StreamExt, TryStreamExt, + Future, FutureExt, Stream, StreamExt, TryStreamExt, future, future::ready, pin_mut, }; use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, + RoomId, RoomVersionId, ServerName, UserId, api::federation, canonical_json::to_canonical_value, events::{ + GlobalAccountDataEventType, StateEventType, 
TimelineEventType, push_rules::PushRulesEvent, room::{ create::RoomCreateEventContent, @@ -34,23 +37,21 @@ use ruma::{ power_levels::RoomPowerLevelsEventContent, redaction::RoomRedactionEventContent, }, - GlobalAccountDataEventType, StateEventType, TimelineEventType, }, push::{Action, Ruleset, Tweak}, - uint, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, - OwnedServerName, RoomId, RoomVersionId, ServerName, UserId, + uint, }; use serde::Deserialize; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; use self::data::Data; pub use self::data::PdusIterItem; use crate::{ - account_data, admin, appservice, + Dep, account_data, admin, appservice, appservice::NamespaceRegex, globals, pusher, rooms, rooms::{short::ShortRoomId, state_compressor::CompressedState}, - sending, server_keys, users, Dep, + sending, server_keys, users, }; // Update Relationships diff --git a/src/service/rooms/typing/mod.rs b/src/service/rooms/typing/mod.rs index c710b33a..a81ee95c 100644 --- a/src/service/rooms/typing/mod.rs +++ b/src/service/rooms/typing/mod.rs @@ -1,19 +1,18 @@ use std::{collections::BTreeMap, sync::Arc}; use conduwuit::{ - debug_info, trace, + Result, Server, debug_info, trace, utils::{self, IterStream}, - Result, Server, }; use futures::StreamExt; use ruma::{ + OwnedRoomId, OwnedUserId, RoomId, UserId, api::federation::transactions::edu::{Edu, TypingContent}, events::SyncEphemeralRoomEvent, - OwnedRoomId, OwnedUserId, RoomId, UserId, }; -use tokio::sync::{broadcast, RwLock}; +use tokio::sync::{RwLock, broadcast}; -use crate::{globals, sending, sending::EduBuf, users, Dep}; +use crate::{Dep, globals, sending, sending::EduBuf, users}; pub struct Service { server: Arc, diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 6a0c6aa1..bd76f1f4 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -1,10 +1,10 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use database::{Database, Deserialized, Map}; use ruma::{RoomId, UserId}; -use crate::{globals, rooms, rooms::short::ShortStateHash, Dep}; +use crate::{Dep, globals, rooms, rooms::short::ShortStateHash}; pub struct Service { db: Data, diff --git a/src/service/sending/appservice.rs b/src/service/sending/appservice.rs index 6b58d964..7fa0be9a 100644 --- a/src/service/sending/appservice.rs +++ b/src/service/sending/appservice.rs @@ -1,10 +1,10 @@ use std::{fmt::Debug, mem}; use bytes::BytesMut; -use conduwuit::{debug_error, err, trace, utils, warn, Err, Result}; +use conduwuit::{Err, Result, debug_error, err, trace, utils, warn}; use reqwest::Client; use ruma::api::{ - appservice::Registration, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, + IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, appservice::Registration, }; /// Sends a request to an appservice diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs index 4dd2d5aa..a6bcc2b2 100644 --- a/src/service/sending/data.rs +++ b/src/service/sending/data.rs @@ -1,16 +1,15 @@ use std::{fmt::Debug, sync::Arc}; use conduwuit::{ - at, utils, - utils::{stream::TryIgnore, ReadyExt}, - Error, Result, + Error, Result, at, utils, + utils::{ReadyExt, stream::TryIgnore}, }; use database::{Database, Deserialized, Map}; use futures::{Stream, StreamExt}; use ruma::{OwnedServerName, ServerName, UserId}; use super::{Destination, SendingEvent}; -use 
crate::{globals, Dep}; +use crate::{Dep, globals}; pub(super) type OutgoingItem = (Key, SendingEvent, Destination); pub(super) type SendingItem = (Key, SendingEvent); @@ -102,7 +101,7 @@ impl Data { pub fn active_requests_for( &self, destination: &Destination, - ) -> impl Stream + Send + '_ { + ) -> impl Stream + Send + '_ + use<'_> { let prefix = destination.get_prefix(); self.servercurrentevent_data .raw_stream_from(&prefix) @@ -156,7 +155,7 @@ impl Data { pub fn queued_requests( &self, destination: &Destination, - ) -> impl Stream + Send + '_ { + ) -> impl Stream + Send + '_ + use<'_> { let prefix = destination.get_prefix(); self.servernameevent_data .raw_stream_from(&prefix) diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index b46ce7a8..379829b4 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -12,15 +12,15 @@ use std::{ use async_trait::async_trait; use conduwuit::{ - debug, debug_warn, err, error, + Result, Server, debug, debug_warn, err, error, smallvec::SmallVec, - utils::{available_parallelism, math::usize_from_u64_truncated, ReadyExt, TryReadyExt}, - warn, Result, Server, + utils::{ReadyExt, TryReadyExt, available_parallelism, math::usize_from_u64_truncated}, + warn, }; use futures::{FutureExt, Stream, StreamExt}; use ruma::{ - api::{appservice::Registration, OutgoingRequest}, RoomId, ServerName, UserId, + api::{OutgoingRequest, appservice::Registration}, }; use tokio::{task, task::JoinSet}; @@ -30,8 +30,8 @@ pub use self::{ sender::{EDU_LIMIT, PDU_LIMIT}, }; use crate::{ - account_data, client, federation, globals, presence, pusher, rooms, - rooms::timeline::RawPduId, users, Dep, + Dep, account_data, client, federation, globals, presence, pusher, rooms, + rooms::timeline::RawPduId, users, }; pub struct Service { diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 3e86de2d..c4f34177 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -2,32 +2,33 @@ use std::{ collections::{BTreeMap, HashMap, HashSet}, fmt::Debug, sync::{ - atomic::{AtomicU64, AtomicUsize, Ordering}, Arc, + atomic::{AtomicU64, AtomicUsize, Ordering}, }, time::{Duration, Instant}, }; -use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine as _}; +use base64::{Engine as _, engine::general_purpose::URL_SAFE_NO_PAD}; use conduwuit::{ - debug, err, error, + Error, Result, debug, err, error, result::LogErr, trace, utils::{ - calculate_hash, continue_exponential_backoff_secs, + ReadyExt, calculate_hash, continue_exponential_backoff_secs, future::TryExtExt, stream::{BroadbandExt, IterStream, WidebandExt}, - ReadyExt, }, - warn, Error, Result, + warn, }; use futures::{ + FutureExt, StreamExt, future::{BoxFuture, OptionFuture}, join, pin_mut, stream::FuturesUnordered, - FutureExt, StreamExt, }; use ruma::{ + CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedRoomId, OwnedServerName, OwnedUserId, + RoomId, RoomVersionId, ServerName, UInt, api::{ appservice::event::push_events::v1::EphemeralData, federation::transactions::{ @@ -40,18 +41,17 @@ use ruma::{ }, device_id, events::{ - push_rules::PushRulesEvent, receipt::ReceiptType, AnySyncEphemeralRoomEvent, - GlobalAccountDataEventType, + AnySyncEphemeralRoomEvent, GlobalAccountDataEventType, push_rules::PushRulesEvent, + receipt::ReceiptType, }, push, serde::Raw, - uint, CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedRoomId, OwnedServerName, - OwnedUserId, RoomId, RoomVersionId, ServerName, UInt, + uint, }; -use serde_json::value::{to_raw_value, 
RawValue as RawJsonValue}; +use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; use super::{ - appservice, data::QueueItem, Destination, EduBuf, EduVec, Msg, SendingEvent, Service, + Destination, EduBuf, EduVec, Msg, SendingEvent, Service, appservice, data::QueueItem, }; #[derive(Debug)] @@ -146,7 +146,7 @@ impl Service { statuses.entry(dest).and_modify(|e| { *e = match e { | TransactionStatus::Running => TransactionStatus::Failed(1, Instant::now()), - | TransactionStatus::Retrying(ref n) => + | &mut TransactionStatus::Retrying(ref n) => TransactionStatus::Failed(n.saturating_add(1), Instant::now()), | TransactionStatus::Failed(..) => { panic!("Request that was not even running failed?!") @@ -211,7 +211,7 @@ impl Service { async fn finish_responses<'a>(&'a self, futures: &mut SendingFutures<'a>) { use tokio::{ select, - time::{sleep_until, Instant}, + time::{Instant, sleep_until}, }; let timeout = self.server.config.sender_shutdown_timeout; diff --git a/src/service/server_keys/acquire.rs b/src/service/server_keys/acquire.rs index 305cbfef..64b936b6 100644 --- a/src/service/server_keys/acquire.rs +++ b/src/service/server_keys/acquire.rs @@ -7,13 +7,13 @@ use std::{ use conduwuit::{ debug, debug_error, debug_warn, error, implement, info, result::FlatOk, trace, warn, }; -use futures::{stream::FuturesUnordered, StreamExt}; +use futures::{StreamExt, stream::FuturesUnordered}; use ruma::{ - api::federation::discovery::ServerSigningKeys, serde::Raw, CanonicalJsonObject, - OwnedServerName, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, + CanonicalJsonObject, OwnedServerName, OwnedServerSigningKeyId, ServerName, + ServerSigningKeyId, api::federation::discovery::ServerSigningKeys, serde::Raw, }; use serde_json::value::RawValue as RawJsonValue; -use tokio::time::{timeout_at, Instant}; +use tokio::time::{Instant, timeout_at}; use super::key_exists; diff --git a/src/service/server_keys/get.rs b/src/service/server_keys/get.rs index 5a027d64..00aeae1e 100644 --- a/src/service/server_keys/get.rs +++ b/src/service/server_keys/get.rs @@ -1,12 +1,12 @@ use std::borrow::Borrow; -use conduwuit::{implement, Err, Result}; +use conduwuit::{Err, Result, implement}; use ruma::{ - api::federation::discovery::VerifyKey, CanonicalJsonObject, RoomVersionId, ServerName, - ServerSigningKeyId, + CanonicalJsonObject, RoomVersionId, ServerName, ServerSigningKeyId, + api::federation::discovery::VerifyKey, }; -use super::{extract_key, PubKeyMap, PubKeys}; +use super::{PubKeyMap, PubKeys, extract_key}; #[implement(super::Service)] pub async fn get_event_keys( diff --git a/src/service/server_keys/keypair.rs b/src/service/server_keys/keypair.rs index 6f983c26..259c37fb 100644 --- a/src/service/server_keys/keypair.rs +++ b/src/service/server_keys/keypair.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{debug, debug_info, err, error, utils, utils::string_from_bytes, Result}; +use conduwuit::{Result, debug, debug_info, err, error, utils, utils::string_from_bytes}; use database::Database; use ruma::{api::federation::discovery::VerifyKey, serde::Base64, signatures::Ed25519KeyPair}; diff --git a/src/service/server_keys/mod.rs b/src/service/server_keys/mod.rs index 3f6a3039..bf6799ba 100644 --- a/src/service/server_keys/mod.rs +++ b/src/service/server_keys/mod.rs @@ -8,22 +8,21 @@ mod verify; use std::{collections::BTreeMap, sync::Arc, time::Duration}; use conduwuit::{ - implement, - utils::{timepoint_from_now, IterStream}, - Result, Server, + Result, Server, implement, + utils::{IterStream, 
timepoint_from_now}, }; use database::{Deserialized, Json, Map}; use futures::StreamExt; use ruma::{ + CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, RoomVersionId, + ServerName, ServerSigningKeyId, api::federation::discovery::{ServerSigningKeys, VerifyKey}, serde::Raw, signatures::{Ed25519KeyPair, PublicKeyMap, PublicKeySet}, - CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, RoomVersionId, - ServerName, ServerSigningKeyId, }; use serde_json::value::RawValue as RawJsonValue; -use crate::{globals, sending, Dep}; +use crate::{Dep, globals, sending}; pub struct Service { keypair: Box, diff --git a/src/service/server_keys/request.rs b/src/service/server_keys/request.rs index afe8958b..171b755b 100644 --- a/src/service/server_keys/request.rs +++ b/src/service/server_keys/request.rs @@ -1,13 +1,13 @@ use std::{collections::BTreeMap, fmt::Debug}; -use conduwuit::{debug, implement, Err, Result}; +use conduwuit::{Err, Result, debug, implement}; use ruma::{ - api::federation::discovery::{ - get_remote_server_keys, - get_remote_server_keys_batch::{self, v2::QueryCriteria}, - get_server_keys, ServerSigningKeys, - }, OwnedServerName, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, + api::federation::discovery::{ + ServerSigningKeys, get_remote_server_keys, + get_remote_server_keys_batch::{self, v2::QueryCriteria}, + get_server_keys, + }, }; #[implement(super::Service)] @@ -79,7 +79,7 @@ pub async fn notary_request( &self, notary: &ServerName, target: &ServerName, -) -> Result + Clone + Debug + Send> { +) -> Result + Clone + Debug + Send + use<>> { use get_remote_server_keys::v2::Request; let request = Request { diff --git a/src/service/server_keys/sign.rs b/src/service/server_keys/sign.rs index 8d6f108c..e8cc485d 100644 --- a/src/service/server_keys/sign.rs +++ b/src/service/server_keys/sign.rs @@ -1,4 +1,4 @@ -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use ruma::{CanonicalJsonObject, RoomVersionId}; #[implement(super::Service)] diff --git a/src/service/server_keys/verify.rs b/src/service/server_keys/verify.rs index 0f03e59e..84433628 100644 --- a/src/service/server_keys/verify.rs +++ b/src/service/server_keys/verify.rs @@ -1,6 +1,6 @@ -use conduwuit::{implement, pdu::gen_event_id_canonical_json, Err, Result}; +use conduwuit::{Err, Result, implement, pdu::gen_event_id_canonical_json}; use ruma::{ - signatures::Verified, CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomVersionId, + CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomVersionId, signatures::Verified, }; use serde_json::value::RawValue as RawJsonValue; diff --git a/src/service/service.rs b/src/service/service.rs index cad01437..2907a562 100644 --- a/src/service/service.rs +++ b/src/service/service.rs @@ -7,7 +7,7 @@ use std::{ }; use async_trait::async_trait; -use conduwuit::{err, error::inspect_log, utils::string::SplitInfallible, Err, Result, Server}; +use conduwuit::{Err, Result, Server, err, error::inspect_log, utils::string::SplitInfallible}; use database::Database; /// Abstract interface for a Service diff --git a/src/service/services.rs b/src/service/services.rs index fb334b96..269a1f87 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -5,7 +5,7 @@ use std::{ sync::{Arc, RwLock}, }; -use conduwuit::{debug, debug_info, info, trace, Result, Server}; +use conduwuit::{Result, Server, debug, debug_info, info, trace}; use database::Database; use tokio::sync::Mutex; diff --git a/src/service/sync/mod.rs 
b/src/service/sync/mod.rs index 0b86377a..bf2bc142 100644 --- a/src/service/sync/mod.rs +++ b/src/service/sync/mod.rs @@ -8,15 +8,15 @@ use std::{ use conduwuit::{Result, Server}; use database::Map; use ruma::{ + DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, UserId, api::client::sync::sync_events::{ self, v4::{ExtensionsConfig, SyncRequestList}, v5, }, - DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, UserId, }; -use crate::{rooms, Dep}; +use crate::{Dep, rooms}; pub struct Service { db: Data, diff --git a/src/service/sync/watch.rs b/src/service/sync/watch.rs index 0a9c5d15..96981472 100644 --- a/src/service/sync/watch.rs +++ b/src/service/sync/watch.rs @@ -1,5 +1,5 @@ -use conduwuit::{implement, trace, Result}; -use futures::{pin_mut, stream::FuturesUnordered, FutureExt, StreamExt}; +use conduwuit::{Result, implement, trace}; +use futures::{FutureExt, StreamExt, pin_mut, stream::FuturesUnordered}; use ruma::{DeviceId, UserId}; #[implement(super::Service)] diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index 912c0b49..9c284b70 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use database::{Handle, Map}; use ruma::{DeviceId, TransactionId, UserId}; diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 7084f32a..51f5fb11 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -4,20 +4,19 @@ use std::{ }; use conduwuit::{ - err, error, implement, utils, + Error, Result, err, error, implement, utils, utils::{hash, string::EMPTY}, - Error, Result, }; use database::{Deserialized, Json, Map}; use ruma::{ + CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedUserId, UserId, api::client::{ error::ErrorKind, uiaa::{AuthData, AuthType, Password, UiaaInfo, UserIdentifier}, }, - CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedUserId, UserId, }; -use crate::{config, globals, users, Dep}; +use crate::{Dep, config, globals, users}; pub struct Service { userdevicesessionid_uiaarequest: RwLock, @@ -144,8 +143,7 @@ pub async fn try_auth( }; #[cfg(not(feature = "element_hacks"))] - let Some(UserIdentifier::UserIdOrLocalpart(username)) = identifier - else { + let Some(UserIdentifier::UserIdOrLocalpart(username)) = identifier else { return Err(Error::BadRequest( ErrorKind::Unrecognized, "Identifier type not recognized.", diff --git a/src/service/updates/mod.rs b/src/service/updates/mod.rs index 7fd93b6c..28bee65a 100644 --- a/src/service/updates/mod.rs +++ b/src/service/updates/mod.rs @@ -1,16 +1,16 @@ use std::{sync::Arc, time::Duration}; use async_trait::async_trait; -use conduwuit::{debug, info, warn, Result, Server}; +use conduwuit::{Result, Server, debug, info, warn}; use database::{Deserialized, Map}; use ruma::events::room::message::RoomMessageEventContent; use serde::Deserialize; use tokio::{ sync::Notify, - time::{interval, MissedTickBehavior}, + time::{MissedTickBehavior, interval}, }; -use crate::{admin, client, globals, Dep}; +use crate::{Dep, admin, client, globals}; pub struct Service { interval: Duration, diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index f0389a4a..b3f5db88 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,25 +1,24 @@ use std::{collections::BTreeMap, mem, sync::Arc}; use conduwuit::{ - at, debug_warn, err, trace, - utils::{self, stream::TryIgnore, string::Unquoted, ReadyExt}, - Err, Error, Result, Server, + Err, 
Error, Result, Server, at, debug_warn, err, trace, + utils::{self, ReadyExt, stream::TryIgnore, string::Unquoted}, }; use database::{Deserialized, Ignore, Interfix, Json, Map}; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ + DeviceId, KeyId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OneTimeKeyId, + OneTimeKeyName, OwnedDeviceId, OwnedKeyId, OwnedMxcUri, OwnedUserId, RoomId, UInt, UserId, api::client::{device::Device, error::ErrorKind, filter::FilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{ - ignored_user_list::IgnoredUserListEvent, AnyToDeviceEvent, GlobalAccountDataEventType, + AnyToDeviceEvent, GlobalAccountDataEventType, ignored_user_list::IgnoredUserListEvent, }, serde::Raw, - DeviceId, KeyId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OneTimeKeyId, - OneTimeKeyName, OwnedDeviceId, OwnedKeyId, OwnedMxcUri, OwnedUserId, RoomId, UInt, UserId, }; use serde_json::json; -use crate::{account_data, admin, globals, rooms, Dep}; +use crate::{Dep, account_data, admin, globals, rooms}; pub struct Service { services: Services, @@ -246,10 +245,13 @@ impl Service { /// Sets a new avatar_url or removes it if avatar_url is None. pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) { - if let Some(avatar_url) = avatar_url { - self.db.userid_avatarurl.insert(user_id, &avatar_url); - } else { - self.db.userid_avatarurl.remove(user_id); + match avatar_url { + | Some(avatar_url) => { + self.db.userid_avatarurl.insert(user_id, &avatar_url); + }, + | _ => { + self.db.userid_avatarurl.remove(user_id); + }, } } From 045e8a293740ba1ee94d93d09d27d07a6c0d67d0 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 23 Feb 2025 02:51:55 -0500 Subject: [PATCH 189/328] stop building mac binaries for now because of linker issues Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 126 --------------------------------------- 1 file changed, 126 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 24f2db45..8e1cf6c6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -623,132 +623,6 @@ jobs: scp oci-image-${{ matrix.target }}-debug.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-${{ matrix.target }}-debug.tar.gz fi - build_mac_binaries: - name: Build MacOS Binaries - strategy: - matrix: - os: [macos-latest, macos-13] - runs-on: ${{ matrix.os }} - steps: - - name: Sync repository - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Setup SSH web publish - env: - web_upload_ssh_private_key: ${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }} - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' - run: | - mkdir -p -v ~/.ssh - - echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts - echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519 - - chmod 600 ~/.ssh/id_ed25519 - - cat >>~/.ssh/config <> "$GITHUB_ENV" - - - name: Tag comparison check - if: ${{ startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') }} - run: | - # Tag mismatch with latest repo tag check to prevent potential downgrades - LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`) - - if [ ${LATEST_TAG} != ${GH_REF_NAME} ]; then - echo '# WARNING: Attempting to run this workflow for a tag that is not the 
latest repo tag. Aborting.' - echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' >> $GITHUB_STEP_SUMMARY - exit 1 - fi - - # use sccache for Rust - - name: Run sccache-cache - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - #if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} - uses: mozilla-actions/sccache-action@main - - # use rust-cache - - uses: Swatinem/rust-cache@v2 - with: - cache-all-crates: "true" - cache-on-failure: "true" - cache-targets: "true" - - # Nix can't do portable macOS builds yet - - name: Build macOS x86_64 binary - if: ${{ matrix.os == 'macos-13' }} - run: | - CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short ${{ github.sha }})" cargo build --release --locked --features=perf_measurements,sentry_telemetry,direct_tls - cp -v -f target/release/conduwuit conduwuit-macos-x86_64 - otool -L conduwuit-macos-x86_64 - - # quick smoke test of the x86_64 macOS binary - - name: Run x86_64 macOS release binary - if: ${{ matrix.os == 'macos-13' }} - run: | - ./conduwuit-macos-x86_64 --help - ./conduwuit-macos-x86_64 --version - - - name: Build macOS arm64 binary - if: ${{ matrix.os == 'macos-latest' }} - run: | - CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short ${{ github.sha }})" cargo build --release --locked --features=perf_measurements,sentry_telemetry,direct_tls - cp -v -f target/release/conduwuit conduwuit-macos-arm64 - otool -L conduwuit-macos-arm64 - - # quick smoke test of the arm64 macOS binary - - name: Run arm64 macOS release binary - if: ${{ matrix.os == 'macos-latest' }} - run: | - ./conduwuit-macos-arm64 --help - ./conduwuit-macos-arm64 --version - - - name: Upload macOS x86_64 binary to webserver - if: ${{ matrix.os == 'macos-13' }} - run: | - if [ ! -z $SSH_WEBSITE ]; then - chmod +x conduwuit-macos-x86_64 - scp conduwuit-macos-x86_64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/conduwuit-macos-x86_64 - fi - - - name: Upload macOS arm64 binary to webserver - if: ${{ matrix.os == 'macos-latest' }} - run: | - if [ ! 
-z $SSH_WEBSITE ]; then - chmod +x conduwuit-macos-arm64 - scp conduwuit-macos-arm64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/conduwuit-macos-arm64 - fi - - - name: Upload macOS x86_64 binary - if: ${{ matrix.os == 'macos-13' }} - uses: actions/upload-artifact@v4 - with: - name: conduwuit-macos-x86_64 - path: conduwuit-macos-x86_64 - if-no-files-found: error - - - name: Upload macOS arm64 binary - if: ${{ matrix.os == 'macos-latest' }} - uses: actions/upload-artifact@v4 - with: - name: conduwuit-macos-arm64 - path: conduwuit-macos-arm64 - if-no-files-found: error variables: outputs: github_repository: ${{ steps.var.outputs.github_repository }} From 4bdd0d77db9b4eaa7864431da6c5b19218e18c79 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 23 Feb 2025 02:52:19 -0500 Subject: [PATCH 190/328] bump complement, actually run all tests Signed-off-by: June Clementine Strawberry --- bin/complement | 2 +- flake.lock | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bin/complement b/bin/complement index ffd7a938..4356f2e7 100755 --- a/bin/complement +++ b/bin/complement @@ -45,7 +45,7 @@ set +o pipefail env \ -C "$COMPLEMENT_SRC" \ COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \ - go test -tags="conduwuit_blacklist" "$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests ./tests/msc3967 | tee "$LOG_FILE" + go test -tags="conduwuit_blacklist" "$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests/... | tee "$LOG_FILE" set -o pipefail # Post-process the results into an easy-to-compare format, sorted by Test name for reproducible results diff --git a/flake.lock b/flake.lock index 9bf6ac55..a7d80508 100644 --- a/flake.lock +++ b/flake.lock @@ -80,11 +80,11 @@ "complement": { "flake": false, "locked": { - "lastModified": 1734303596, - "narHash": "sha256-HjDRyLR4MBqQ3IjfMM6eE+8ayztXlbz3gXdyDmFla68=", + "lastModified": 1740291865, + "narHash": "sha256-wl1+yCTEtvIH8vgXygnxPkaSgg4MYNKs+c9tzVytr20=", "owner": "girlbossceo", "repo": "complement", - "rev": "14cc5be797b774f1a2b9f826f38181066d4952b8", + "rev": "35ad9d9051498fbac8ea4abff8ab7d8b1844f87b", "type": "github" }, "original": { From cbf207bd1f1587418be0de2a1a5cbd745baec9e2 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 23 Feb 2025 03:11:34 -0500 Subject: [PATCH 191/328] try adding back some skipped complement tests Signed-off-by: June Clementine Strawberry --- bin/complement | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/complement b/bin/complement index 4356f2e7..9960299c 100755 --- a/bin/complement +++ b/bin/complement @@ -18,7 +18,7 @@ RESULTS_FILE="$3" OCI_IMAGE="complement-conduwuit:main" # Complement tests that are skipped due to flakiness/reliability issues -SKIPPED_COMPLEMENT_TESTS='-skip=TestClientSpacesSummary.*|TestJoinFederatedRoomFromApplicationServiceBridgeUser.*|TestJumpToDateEndpoint.*|TestUnbanViaInvite.*' +SKIPPED_COMPLEMENT_TESTS='-skip=TestPartialStateJoin.*' # $COMPLEMENT_SRC needs to be a directory to Complement source code if [ -f "$COMPLEMENT_SRC" ]; then From a67ab754179d0bbaa09aa19d974035c521643fe9 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 25 Feb 2025 18:38:12 +0000 Subject: [PATCH 192/328] fix edition 2024 lints Signed-off-by: Jason Volk --- Cargo.toml | 2 ++ src/admin/admin.rs | 2 +- src/admin/room/commands.rs | 2 +- src/admin/room/directory.rs | 2 +- src/admin/user/commands.rs | 6 +++--- src/api/client/account.rs | 2 +- src/api/client/directory.rs | 14 +++----------- src/api/client/membership.rs | 6 
+++--- src/api/client/report.rs | 6 +++--- src/api/router/auth.rs | 2 +- src/api/server/send_join.rs | 2 +- src/api/server/send_knock.rs | 2 +- src/database/de.rs | 2 +- src/database/engine/logger.rs | 2 +- src/database/map/compact.rs | 2 +- src/database/map/qry_batch.rs | 1 - src/database/map/rev_stream.rs | 2 +- src/database/map/rev_stream_from.rs | 2 +- src/database/map/stream.rs | 2 +- src/database/map/stream_from.rs | 2 +- src/database/pool.rs | 14 ++++++-------- src/database/ser.rs | 2 +- src/database/stream.rs | 6 +++--- src/database/watchers.rs | 2 +- src/macros/config.rs | 2 +- src/service/media/blurhash.rs | 2 +- src/service/media/remote.rs | 2 +- .../rooms/event_handler/handle_incoming_pdu.rs | 2 +- src/service/rooms/spaces/mod.rs | 2 +- src/service/rooms/state_cache/mod.rs | 4 ++-- src/service/rooms/state_compressor/mod.rs | 4 ++-- src/service/rooms/timeline/mod.rs | 14 +++++++------- src/service/sending/mod.rs | 2 +- src/service/sending/sender.rs | 7 ++----- src/service/server_keys/request.rs | 2 +- src/service/uiaa/mod.rs | 2 +- 36 files changed, 60 insertions(+), 72 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 76de2212..52695d89 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -905,6 +905,7 @@ missing_docs_in_private_items = { level = "allow", priority = 1 } missing_errors_doc = { level = "allow", priority = 1 } missing_panics_doc = { level = "allow", priority = 1 } module_name_repetitions = { level = "allow", priority = 1 } +needless_continue = { level = "allow", priority = 1 } no_effect_underscore_binding = { level = "allow", priority = 1 } similar_names = { level = "allow", priority = 1 } single_match_else = { level = "allow", priority = 1 } @@ -969,6 +970,7 @@ style = { level = "warn", priority = -1 } # trivial assertions are quite alright assertions_on_constants = { level = "allow", priority = 1 } module_inception = { level = "allow", priority = 1 } +obfuscated_if_else = { level = "allow", priority = 1 } ################### suspicious = { level = "warn", priority = -1 } diff --git a/src/admin/admin.rs b/src/admin/admin.rs index b6de1ec6..9e010a59 100644 --- a/src/admin/admin.rs +++ b/src/admin/admin.rs @@ -62,7 +62,7 @@ pub(super) async fn process(command: AdminCommand, context: &Command<'_>) -> Res | Debug(command) => debug::process(command, context).await?, | Query(command) => query::process(command, context).await?, | Check(command) => check::process(command, context).await?, - }; + } Ok(()) } diff --git a/src/admin/room/commands.rs b/src/admin/room/commands.rs index b5c303c8..6dd31b48 100644 --- a/src/admin/room/commands.rs +++ b/src/admin/room/commands.rs @@ -42,7 +42,7 @@ pub(super) async fn list_rooms( if rooms.is_empty() { return Ok(RoomMessageEventContent::text_plain("No more rooms.")); - }; + } let output_plain = format!( "Rooms ({}):\n```\n{}\n```", diff --git a/src/admin/room/directory.rs b/src/admin/room/directory.rs index e9c23a1d..ca036825 100644 --- a/src/admin/room/directory.rs +++ b/src/admin/room/directory.rs @@ -67,7 +67,7 @@ pub(super) async fn reprocess( if rooms.is_empty() { return Ok(RoomMessageEventContent::text_plain("No more rooms.")); - }; + } let output = format!( "Rooms (page {page}):\n```\n{}\n```", diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 8cb8edc3..8565f04a 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -166,7 +166,7 @@ pub(super) async fn create_user( "Failed to automatically join room {room} for user {user_id}: {e}" ); }, - }; + } } } } @@ -550,7 +550,7 @@ pub(super) 
async fn force_join_list_of_local_users( debug_warn!("Failed force joining {user_id} to {room_id} during bulk join: {e}"); failed_joins = failed_joins.saturating_add(1); }, - }; + } } Ok(RoomMessageEventContent::notice_markdown(format!( @@ -646,7 +646,7 @@ pub(super) async fn force_join_all_local_users( debug_warn!("Failed force joining {user_id} to {room_id} during bulk join: {e}"); failed_joins = failed_joins.saturating_add(1); }, - }; + } } Ok(RoomMessageEventContent::notice_markdown(format!( diff --git a/src/api/client/account.rs b/src/api/client/account.rs index cb49a6db..b42f51f7 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -499,7 +499,7 @@ pub(crate) async fn register_route( | _ => { info!("Automatically joined room {room} for user {user_id}"); }, - }; + } } } } diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 136c5961..6af9b533 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -131,7 +131,7 @@ pub(crate) async fn set_room_visibility_route( if !services.rooms.metadata.exists(&body.room_id).await { // Return 404 if the room doesn't exist - return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found")); + return Err!(Request(NotFound("Room not found"))); } if services @@ -145,10 +145,7 @@ pub(crate) async fn set_room_visibility_route( } if !user_can_publish_room(&services, sender_user, &body.room_id).await? { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "User is not allowed to publish this room", - )); + return Err!(Request(Forbidden("User is not allowed to publish this room"))); } match &body.visibility { @@ -386,12 +383,7 @@ async fn user_can_publish_room( .await { | Ok(event) => Ok(event.sender == user_id), - | _ => { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "User is not allowed to publish this room", - )); - }, + | _ => Err!(Request(Forbidden("User is not allowed to publish this room"))), } }, } diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 9c2693dc..0b9c0c69 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -993,7 +993,7 @@ async fn join_room_by_id_helper_remote( | _ => { join_event_stub.remove("event_id"); }, - }; + } // In order to create a compatible ref hash (EventID) the `hashes` field needs // to be present @@ -1420,7 +1420,7 @@ async fn join_room_by_id_helper_local( | _ => { join_event_stub.remove("event_id"); }, - }; + } // In order to create a compatible ref hash (EventID) the `hashes` field needs // to be present @@ -1947,7 +1947,7 @@ async fn remote_leave_room( | _ => { leave_event_stub.remove("event_id"); }, - }; + } // In order to create a compatible ref hash (EventID) the `hashes` field needs // to be present diff --git a/src/api/client/report.rs b/src/api/client/report.rs index db085721..7922caca 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -43,7 +43,7 @@ pub(crate) async fn report_room_route( ErrorKind::InvalidParam, "Reason too long, should be 750 characters or fewer", )); - }; + } delay_response().await; @@ -164,14 +164,14 @@ async fn is_event_report_valid( ErrorKind::InvalidParam, "Invalid score, must be within 0 to -100", )); - }; + } if reason.as_ref().is_some_and(|s| s.len() > 750) { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Reason too long, should be 750 characters or fewer", )); - }; + } if !services .rooms diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 56256683..92b75cfa 100644 --- a/src/api/router/auth.rs +++ 
b/src/api/router/auth.rs @@ -110,7 +110,7 @@ pub(super) async fn auth( } }, | _ => {}, - }; + } } match (metadata.authentication, token) { diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index 08fa3835..c1749835 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -135,7 +135,7 @@ async fn create_join_event( if state_key != sender { return Err!(Request(BadJson("State key does not match sender user."))); - }; + } if let Some(authorising_user) = content.join_authorized_via_users_server { use ruma::RoomVersionId::*; diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index 1d4c2a6c..f7bb0735 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -137,7 +137,7 @@ pub(crate) async fn create_knock_event_v1_route( if state_key != sender { return Err!(Request(InvalidParam("state_key does not match sender user of event."))); - }; + } let origin: OwnedServerName = serde_json::from_value( value diff --git a/src/database/de.rs b/src/database/de.rs index 9c0997ff..849b3b2e 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -241,7 +241,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { | "Ignore" => self.record_ignore(), | "IgnoreAll" => self.record_ignore_all(), | _ => unhandled!("Unrecognized deserialization Directive {name:?}"), - }; + } visitor.visit_unit() } diff --git a/src/database/engine/logger.rs b/src/database/engine/logger.rs index a1898e30..23e23fc7 100644 --- a/src/database/engine/logger.rs +++ b/src/database/engine/logger.rs @@ -18,5 +18,5 @@ pub(crate) fn handle(level: LogLevel, msg: &str) { | LogLevel::Error | LogLevel::Fatal => error!("{msg}"), | LogLevel::Info => debug!("{msg}"), | LogLevel::Warn => warn!("{msg}"), - }; + } } diff --git a/src/database/map/compact.rs b/src/database/map/compact.rs index 84476de6..b49bf30b 100644 --- a/src/database/map/compact.rs +++ b/src/database/map/compact.rs @@ -52,7 +52,7 @@ pub fn compact_blocking(&self, opts: Options) -> Result { co.set_target_level(level.try_into()?); }, | (Some(_), Some(_)) => return Err!("compacting between specific levels not supported"), - }; + } self.db .db diff --git a/src/database/map/qry_batch.rs b/src/database/map/qry_batch.rs index f44d1c86..e42d3e63 100644 --- a/src/database/map/qry_batch.rs +++ b/src/database/map/qry_batch.rs @@ -50,7 +50,6 @@ where .iter() .map(ser::serialize_to::) .map(|result| result.expect("failed to serialize query key")) - .map(Into::into) .collect(); self.db diff --git a/src/database/map/rev_stream.rs b/src/database/map/rev_stream.rs index fc2d1116..789a52e8 100644 --- a/src/database/map/rev_stream.rs +++ b/src/database/map/rev_stream.rs @@ -40,7 +40,7 @@ pub fn rev_raw_stream(self: &Arc) -> impl Stream> .into_stream() .flatten() .boxed(); - }; + } let seek = Seek { map: self.clone(), diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs index d67986e7..a612d2a2 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -89,7 +89,7 @@ where .into_stream() .flatten() .boxed(); - }; + } let seek = Seek { map: self.clone(), diff --git a/src/database/map/stream.rs b/src/database/map/stream.rs index f1450b6f..f7371b6c 100644 --- a/src/database/map/stream.rs +++ b/src/database/map/stream.rs @@ -39,7 +39,7 @@ pub fn raw_stream(self: &Arc) -> impl Stream>> + .into_stream() .flatten() .boxed(); - }; + } let seek = Seek { map: self.clone(), diff --git a/src/database/map/stream_from.rs 
b/src/database/map/stream_from.rs index 00c3a051..ccf48db6 100644 --- a/src/database/map/stream_from.rs +++ b/src/database/map/stream_from.rs @@ -86,7 +86,7 @@ where .into_stream() .flatten() .boxed(); - }; + } let seek = Seek { map: self.clone(), diff --git a/src/database/pool.rs b/src/database/pool.rs index e6ed59ac..47e61c30 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -146,11 +146,9 @@ pub(crate) fn close(&self) { .map(JoinHandle::join) .map(|result| result.map_err(Error::from_panic)) .enumerate() - .for_each(|(id, result)| { - match result { - | Ok(()) => trace!(?id, "worker joined"), - | Err(error) => error!(?id, "worker joined with error: {error}"), - }; + .for_each(|(id, result)| match result { + | Ok(()) => trace!(?id, "worker joined"), + | Err(error) => error!(?id, "worker joined with error: {error}"), }); } @@ -345,7 +343,7 @@ fn worker_handle(self: &Arc, cmd: Cmd) { | Cmd::Get(cmd) if cmd.key.len() == 1 => self.handle_get(cmd), | Cmd::Get(cmd) => self.handle_batch(cmd), | Cmd::Iter(cmd) => self.handle_iter(cmd), - }; + } } #[implement(Pool)] @@ -362,7 +360,7 @@ fn handle_iter(&self, mut cmd: Seek) { return; } - let from = cmd.key.as_deref().map(Into::into); + let from = cmd.key.as_deref(); let result = match cmd.dir { | Direction::Forward => cmd.state.init_fwd(from), @@ -394,7 +392,7 @@ fn handle_batch(self: &Arc, mut cmd: Get) { return; } - let keys = cmd.key.iter().map(Into::into); + let keys = cmd.key.iter(); let result: SmallVec<_> = cmd.map.get_batch_blocking(keys).collect(); diff --git a/src/database/ser.rs b/src/database/ser.rs index 6dd2043d..2e1a2cb0 100644 --- a/src/database/ser.rs +++ b/src/database/ser.rs @@ -224,7 +224,7 @@ impl ser::Serializer for &mut Serializer<'_, W> { self.separator()?; }, | _ => unhandled!("Unrecognized serialization directive: {name:?}"), - }; + } Ok(()) } diff --git a/src/database/stream.rs b/src/database/stream.rs index eb856b3f..eb264ccd 100644 --- a/src/database/stream.rs +++ b/src/database/stream.rs @@ -113,13 +113,13 @@ impl<'a> State<'a> { } #[inline] - fn fetch_key(&self) -> Option> { self.inner.key().map(Key::from) } + fn fetch_key(&self) -> Option> { self.inner.key() } #[inline] - fn _fetch_val(&self) -> Option> { self.inner.value().map(Val::from) } + fn _fetch_val(&self) -> Option> { self.inner.value() } #[inline] - fn fetch(&self) -> Option> { self.inner.item().map(KeyVal::from) } + fn fetch(&self) -> Option> { self.inner.item() } #[inline] pub(super) fn status(&self) -> Option { self.inner.status().err() } diff --git a/src/database/watchers.rs b/src/database/watchers.rs index be814f8c..b3907833 100644 --- a/src/database/watchers.rs +++ b/src/database/watchers.rs @@ -53,6 +53,6 @@ impl Watchers { tx.0.send(()).expect("channel should still be open"); } } - }; + } } } diff --git a/src/macros/config.rs b/src/macros/config.rs index 07ac1c0a..7b424325 100644 --- a/src/macros/config.rs +++ b/src/macros/config.rs @@ -205,7 +205,7 @@ fn get_default(field: &Field) -> Option { }, | Meta::Path { .. 
} => return Some("false".to_owned()), | _ => return None, - }; + } } None diff --git a/src/service/media/blurhash.rs b/src/service/media/blurhash.rs index 9d73f5dc..91e00228 100644 --- a/src/service/media/blurhash.rs +++ b/src/service/media/blurhash.rs @@ -172,7 +172,7 @@ impl std::fmt::Display for BlurhashingError { #[cfg(feature = "blurhashing")] | Self::ImageError(e) => write!(f, "There was an error with the image loading library => {e}")?, - }; + } Ok(()) } diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs index 61635011..b6c853d2 100644 --- a/src/service/media/remote.rs +++ b/src/service/media/remote.rs @@ -283,7 +283,7 @@ async fn location_request(&self, location: &str) -> Result { .map_err(Into::into) .map(|content| FileMeta { content: Some(content), - content_type: content_type.clone().map(Into::into), + content_type: content_type.clone(), content_disposition: Some(make_content_disposition( content_disposition.as_ref(), content_type.as_deref(), diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index b6d3e21e..b437bf2e 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -170,7 +170,7 @@ pub async fn handle_incoming_pdu<'a>( | Entry::Occupied(mut e) => { *e.get_mut() = (now, e.get().1.saturating_add(1)); }, - }; + } } } diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 52e7d2be..910da914 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -125,7 +125,7 @@ pub async fn get_summary_and_children_local( SummaryAccessibility::Inaccessible }, )), - }; + } let children_pdus: Vec<_> = self .get_stripped_space_child_events(current_room) diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 4403468b..02ffa0d1 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -218,7 +218,7 @@ impl Service { ) .await .ok(); - }; + } // Copy direct chat flag if let Ok(mut direct_event) = self @@ -250,7 +250,7 @@ impl Service { ) .await?; } - }; + } } } diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index c566eb1c..305d3187 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -303,7 +303,7 @@ impl Service { }); return Ok(()); - }; + } // Else we have two options. // 1. We add the current diff on top of the parent layer. 
@@ -419,7 +419,7 @@ impl Service { 2, // every state change is 2 event changes on average states_parents, )?; - }; + } Ok(HashSetCompressStateEvent { shortstatehash: new_shortstatehash, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 4be97fb2..35c972fa 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -422,7 +422,7 @@ impl Service { highlight = true; }, | _ => {}, - }; + } // Break early if both conditions are true if notify && highlight { @@ -484,7 +484,7 @@ impl Service { } } }, - }; + } }, | TimelineEventType::SpaceChild => if let Some(_state_key) = &pdu.state_key { @@ -776,7 +776,7 @@ impl Service { | _ => { pdu_json.remove("event_id"); }, - }; + } // Add origin because synapse likes that (and it's required in the spec) pdu_json.insert( @@ -847,7 +847,7 @@ impl Service { { return Err!(Request(Forbidden("User cannot redact this event."))); } - }; + } }, | _ => { let content: RoomRedactionEventContent = pdu.get_content()?; @@ -863,7 +863,7 @@ impl Service { } }, } - }; + } if pdu.kind == TimelineEventType::RoomMember { let content: RoomMemberEventContent = pdu.get_content()?; @@ -1293,10 +1293,10 @@ async fn check_pdu_for_admin_room(&self, pdu: &PduEvent, sender: &UserId) -> Res } }, | _ => {}, - }; + } }, | _ => {}, - }; + } Ok(()) } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 379829b4..08ca7010 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -131,7 +131,7 @@ impl crate::Service for Service { | Err(error) => { error!(id = ?error.id(), ?error, "sender worker finished"); }, - }; + } } Ok(()) diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index c4f34177..616f0846 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -138,7 +138,7 @@ impl Service { match response { | Ok(dest) => self.handle_response_ok(&dest, futures, statuses).await, | Err((dest, e)) => Self::handle_response_err(dest, statuses, &e), - }; + } } fn handle_response_err(dest: Destination, statuses: &mut CurTransactionStatus, e: &Error) { @@ -319,10 +319,7 @@ impl Service { if let Destination::Federation(server_name) = dest { if let Ok((select_edus, last_count)) = self.select_edus(server_name).await { debug_assert!(select_edus.len() <= EDU_LIMIT, "exceeded edus limit"); - let select_edus = select_edus - .into_iter() - .map(Into::into) - .map(SendingEvent::Edu); + let select_edus = select_edus.into_iter().map(SendingEvent::Edu); events.extend(select_edus); self.db.set_latest_educount(server_name, last_count); diff --git a/src/service/server_keys/request.rs b/src/service/server_keys/request.rs index 171b755b..d9907616 100644 --- a/src/service/server_keys/request.rs +++ b/src/service/server_keys/request.rs @@ -43,7 +43,7 @@ where .keys() .rev() .take(self.services.server.config.trusted_server_batch_size) - .last() + .next_back() .cloned() { let request = Request { diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 51f5fb11..39dd2b41 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -69,7 +69,7 @@ pub async fn read_tokens(&self) -> Result> { }, | Err(e) => error!("Failed to read the registration token file: {e}"), } - }; + } if let Some(token) = &self.services.config.registration_token { tokens.insert(token.to_owned()); } From dca7bf9635ecd1fef3cd4bca56a25054d346692d Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 27 Feb 2025 10:39:06 -0500 Subject: [PATCH 193/328] try bumping 
cache-nix-action to v6 Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 8 ++------ .github/workflows/documentation.yml | 4 +--- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8e1cf6c6..82ffc6b6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -129,7 +129,7 @@ jobs: # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags #if: ${{ !startsWith(github.ref, 'refs/tags/') }} - uses: nix-community/cache-nix-action@v5.1.0 + uses: nix-community/cache-nix-action@v6 with: # restore and save a cache using this key primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/.lock') }} @@ -146,8 +146,6 @@ jobs: purge-last-accessed: 86400 # except the version with the `primary-key`, if it exists purge-primary-key: never - # always save the cache - save-always: true - name: Enable Cachix binary cache run: | @@ -324,7 +322,7 @@ jobs: # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags #if: ${{ !startsWith(github.ref, 'refs/tags/') }} - uses: nix-community/cache-nix-action@v5.1.0 + uses: nix-community/cache-nix-action@v6 with: # restore and save a cache using this key primary-key: nix-${{ runner.os }}-${{ matrix.target }}-${{ hashFiles('**/*.nix', '**/.lock') }} @@ -341,8 +339,6 @@ jobs: purge-last-accessed: 86400 # except the version with the `primary-key`, if it exists purge-primary-key: never - # always save the cache - save-always: true - name: Enable Cachix binary cache run: | diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 0eefe0a4..fadc7b3f 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -76,7 +76,7 @@ jobs: # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags if: ${{ !startsWith(github.ref, 'refs/tags/') }} - uses: nix-community/cache-nix-action@v5.1.0 + uses: nix-community/cache-nix-action@v6 with: # restore and save a cache using this key primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/.lock') }} @@ -93,8 +93,6 @@ jobs: purge-last-accessed: 86400 # except the version with the `primary-key`, if it exists purge-primary-key: never - # always save the cache - save-always: true - name: Enable Cachix binary cache run: | From 17e0384eeb91bfbb77576359252db25e3248cc40 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 2 Mar 2025 23:11:43 -0500 Subject: [PATCH 194/328] ignore errors instead of expecting for state gathering Signed-off-by: June Clementine Strawberry --- src/service/rooms/state_accessor/state.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs index 625defe6..02a6194e 100644 --- a/src/service/rooms/state_accessor/state.rs +++ b/src/service/rooms/state_accessor/state.rs @@ -4,7 +4,7 @@ use conduwuit::{ PduEvent, Result, StateKey, at, err, implement, pair_of, utils::{ result::FlatOk, - stream::{BroadbandExt, IterStream, ReadyExt, TryExpect}, + stream::{BroadbandExt, IterStream, ReadyExt, TryIgnore}, }, }; use database::Deserialized; @@ -232,7 +232,7 @@ pub fn state_keys_with_shortids<'a>( ) -> impl Stream + Send + 'a { let short_ids = self .state_full_shortids(shortstatehash) - .expect_ok() + .ignore_err() .unzip() .map(|(ssks, sids): 
(Vec, Vec)| (ssks, sids)) .shared(); @@ -269,7 +269,7 @@ pub fn state_keys<'a>( ) -> impl Stream + Send + 'a { let short_ids = self .state_full_shortids(shortstatehash) - .expect_ok() + .ignore_err() .map(at!(0)); self.services @@ -305,7 +305,7 @@ pub fn state_added( .map_ok(|(a, b)| b.difference(&a).copied().collect::>()) .map_ok(IterStream::try_stream) .try_flatten_stream() - .expect_ok() + .ignore_err() .map(parse_compressed_state_event) } @@ -327,7 +327,7 @@ pub fn state_full_pdus( ) -> impl Stream + Send + '_ { let short_ids = self .state_full_shortids(shortstatehash) - .expect_ok() + .ignore_err() .map(at!(1)); self.services @@ -352,7 +352,7 @@ where { let shortids = self .state_full_shortids(shortstatehash) - .expect_ok() + .ignore_err() .unzip() .shared(); From de53ad83b2ec49170075cc5176e0ec7a604aad94 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 2 Mar 2025 23:15:05 -0500 Subject: [PATCH 195/328] run nightly cargo fmt again Signed-off-by: June Clementine Strawberry --- src/admin/room/alias.rs | 5 +++-- src/admin/room/moderation.rs | 20 +++++++++++-------- src/api/client/directory.rs | 5 +++-- src/api/client/read_marker.rs | 5 +++-- src/api/server/hierarchy.rs | 5 +++-- src/core/state_res/event_auth.rs | 5 +++-- src/router/layers.rs | 8 ++++++-- .../rooms/event_handler/fetch_state.rs | 5 +++-- .../rooms/event_handler/handle_outlier_pdu.rs | 5 +++-- src/service/rooms/spaces/mod.rs | 5 +++-- src/service/server_keys/get.rs | 5 +++-- 11 files changed, 45 insertions(+), 28 deletions(-) diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index 6262f33e..ab21170c 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -66,10 +66,11 @@ pub(super) async fn reprocess( format!("#{}:{}", room_alias_localpart, services.globals.server_name()); let room_alias = match OwnedRoomAliasId::parse(room_alias_str) { | Ok(alias) => alias, - | Err(err) => + | Err(err) => { return Ok(RoomMessageEventContent::text_plain(format!( "Failed to parse alias: {err}" - ))), + ))); + }, }; match command { | RoomAliasCommand::Set { force, room_id, .. } => { diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index ee132590..444dfa2f 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -96,12 +96,13 @@ async fn ban_room( let room_id = if room.is_room_id() { let room_id = match RoomId::parse(&room) { | Ok(room_id) => room_id, - | Err(e) => + | Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( "Failed to parse room ID {room}. Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))), + ))); + }, }; debug!("Room specified is a room ID, banning room ID"); @@ -111,12 +112,13 @@ async fn ban_room( } else if room.is_room_alias_id() { let room_alias = match RoomAliasId::parse(&room) { | Ok(room_alias) => room_alias, - | Err(e) => + | Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( "Failed to parse room ID {room}. Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))), + ))); + }, }; debug!( @@ -514,12 +516,13 @@ async fn unban_room( let room_id = if room.is_room_id() { let room_id = match RoomId::parse(&room) { | Ok(room_id) => room_id, - | Err(e) => + | Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( "Failed to parse room ID {room}. 
Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))), + ))); + }, }; debug!("Room specified is a room ID, unbanning room ID"); @@ -529,12 +532,13 @@ async fn unban_room( } else if room.is_room_alias_id() { let room_alias = match RoomAliasId::parse(&room) { | Ok(room_alias) => room_alias, - | Err(e) => + | Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( "Failed to parse room ID {room}. Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))), + ))); + }, }; debug!( diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 6af9b533..88f0e668 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -267,8 +267,9 @@ pub(crate) async fn get_public_rooms_filtered_helper( let backwards = match characters.next() { | Some('n') => false, | Some('p') => true, - | _ => - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token")), + | _ => { + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token")); + }, }; num_since = characters diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs index d01327f6..187616b4 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -197,11 +197,12 @@ pub(crate) async fn create_receipt_route( .read_receipt .private_read_set(&body.room_id, sender_user, count); }, - | _ => + | _ => { return Err!(Request(InvalidParam(warn!( "Received unknown read receipt type: {}", &body.receipt_type - )))), + )))); + }, } Ok(create_receipt::v3::Response {}) diff --git a/src/api/server/hierarchy.rs b/src/api/server/hierarchy.rs index 41eaedd0..c759c8ea 100644 --- a/src/api/server/hierarchy.rs +++ b/src/api/server/hierarchy.rs @@ -32,8 +32,9 @@ pub(crate) async fn get_hierarchy_route( { | None => Err!(Request(NotFound("The requested room was not found"))), - | Some(SummaryAccessibility::Inaccessible) => - Err!(Request(NotFound("The requested room is inaccessible"))), + | Some(SummaryAccessibility::Inaccessible) => { + Err!(Request(NotFound("The requested room is inaccessible"))) + }, | Some(SummaryAccessibility::Accessible(room)) => { let (children, inaccessible_children) = diff --git a/src/core/state_res/event_auth.rs b/src/core/state_res/event_auth.rs index 4b8e55f3..65bec802 100644 --- a/src/core/state_res/event_auth.rs +++ b/src/core/state_res/event_auth.rs @@ -682,7 +682,7 @@ fn valid_membership_change( } allow }, - | _ => + | _ => { if !sender_is_joined || target_user_current_membership == MembershipState::Join || target_user_current_membership == MembershipState::Ban @@ -706,7 +706,8 @@ fn valid_membership_change( ); } allow - }, + } + }, } }, | MembershipState::Leave => diff --git a/src/router/layers.rs b/src/router/layers.rs index 88e6a8d5..6920555d 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -61,8 +61,12 @@ pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { ) .layer(axum::middleware::from_fn_with_state(Arc::clone(services), request::handle)) .layer(SecureClientIpSource::ConnectInfo.into_extension()) - .layer(ResponseBodyTimeoutLayer::new(Duration::from_secs(server.config.client_response_timeout))) - .layer(RequestBodyTimeoutLayer::new(Duration::from_secs(server.config.client_receive_timeout))) + .layer(ResponseBodyTimeoutLayer::new(Duration::from_secs( + server.config.client_response_timeout, + ))) + 
.layer(RequestBodyTimeoutLayer::new(Duration::from_secs( + server.config.client_receive_timeout, + ))) .layer(TimeoutLayer::new(Duration::from_secs(server.config.client_request_timeout))) .layer(SetResponseHeaderLayer::if_not_present( HeaderName::from_static("origin-agent-cluster"), // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin-Agent-Cluster diff --git a/src/service/rooms/event_handler/fetch_state.rs b/src/service/rooms/event_handler/fetch_state.rs index b1a4a38b..0f9e093b 100644 --- a/src/service/rooms/event_handler/fetch_state.rs +++ b/src/service/rooms/event_handler/fetch_state.rs @@ -58,10 +58,11 @@ pub(super) async fn fetch_state( | hash_map::Entry::Vacant(v) => { v.insert(pdu.event_id.clone()); }, - | hash_map::Entry::Occupied(_) => + | hash_map::Entry::Occupied(_) => { return Err!(Database( "State event's type and state_key combination exists multiple times.", - )), + )); + }, } } diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index 974eb300..99e90a50 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -56,10 +56,11 @@ pub(super) async fn handle_outlier_pdu<'a>( obj }, - | Err(e) => + | Err(e) => { return Err!(Request(InvalidParam(debug_error!( "Signature verification failed for {event_id}: {e}" - )))), + )))); + }, }; // Now that we have checked the signature and hashes we can add the eventID and diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 910da914..1da38234 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -109,7 +109,7 @@ pub async fn get_summary_and_children_local( { | None => (), // cache miss | Some(None) => return Ok(None), - | Some(Some(cached)) => + | Some(Some(cached)) => { return Ok(Some( if self .is_accessible_child( @@ -124,7 +124,8 @@ pub async fn get_summary_and_children_local( } else { SummaryAccessibility::Inaccessible }, - )), + )); + }, } let children_pdus: Vec<_> = self diff --git a/src/service/server_keys/get.rs b/src/service/server_keys/get.rs index 00aeae1e..f9c5bdaf 100644 --- a/src/service/server_keys/get.rs +++ b/src/service/server_keys/get.rs @@ -18,8 +18,9 @@ pub async fn get_event_keys( let required = match required_keys(object, version) { | Ok(required) => required, - | Err(e) => - return Err!(BadServerResponse("Failed to determine keys required to verify: {e}")), + | Err(e) => { + return Err!(BadServerResponse("Failed to determine keys required to verify: {e}")); + }, }; let batch = required From 00cc23b6496533b9cfb77145966e2e7355f1f886 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 2 Mar 2025 23:15:30 -0500 Subject: [PATCH 196/328] bump nix lockfile, bump cargo.lock, bump ruwuma Signed-off-by: June Clementine Strawberry --- Cargo.lock | 263 +++++++++++++++++++++++++++-------------------------- Cargo.toml | 4 +- flake.lock | 24 ++--- 3 files changed, 146 insertions(+), 145 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7e84437c..e632b504 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -55,9 +55,9 @@ checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anyhow" -version = "1.0.95" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" +checksum = "6b964d184e89d9b6b67dd2715bc8e74cf3107fb2b529990c90cf517326150bf4" [[package]] name 
= "arbitrary" @@ -128,9 +128,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.18" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df895a515f70646414f4b45c0b79082783b80552b373a68283012928df56f522" +checksum = "310c9bcae737a48ef5cdee3174184e6d548b292739ede61a1f955ef76a738861" dependencies = [ "brotli", "flate2", @@ -212,18 +212,18 @@ dependencies = [ [[package]] name = "avif-serialize" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e335041290c43101ca215eed6f43ec437eb5a42125573f600fc3fa42b9bddd62" +checksum = "98922d6a4cfbcb08820c69d8eeccc05bb1f29bfa06b4f5b1dbfe9a868bd7608e" dependencies = [ "arrayvec", ] [[package]] name = "aws-lc-rs" -version = "1.12.2" +version = "1.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c2b7ddaa2c56a367ad27a094ad8ef4faacf8a617c2575acb2ba88949df999ca" +checksum = "5e4e8200b9a4a5801a769d50eeabc05670fec7e959a8cb7a63a93e4e519942ae" dependencies = [ "aws-lc-sys", "paste", @@ -232,9 +232,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.25.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54ac4f13dad353b209b34cbec082338202cbc01c8f00336b55c750c13ac91f8f" +checksum = "0f9dd2e03ee80ca2822dd6ea431163d2ef259f2066a4d6ccaca6d9dcb386aa43" dependencies = [ "bindgen", "cc", @@ -414,7 +414,7 @@ version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "cexpr", "clang-sys", "itertools 0.12.1", @@ -445,9 +445,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" +checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" [[package]] name = "bitstream-io" @@ -505,9 +505,9 @@ dependencies = [ [[package]] name = "built" -version = "0.7.5" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c360505aed52b7ec96a3636c3f039d99103c37d1d9b4f7a8c743d3ea9ffcd03b" +checksum = "56ed6191a7e78c36abdb16ab65341eefd73d64d303fffccdbb00d51e4205967b" [[package]] name = "bumpalo" @@ -541,18 +541,17 @@ checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" [[package]] name = "bytesize" -version = "1.3.0" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e368af43e418a04d52505cf3dbc23dda4e3407ae2fa99fd0e4f308ce546acc" +checksum = "2d2c12f985c78475a6b8d629afd0c360260ef34cfef52efccdcfd31972f81c2e" [[package]] name = "bzip2-sys" -version = "0.1.11+1.0.8" +version = "0.1.13+1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" +checksum = "225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14" dependencies = [ "cc", - "libc", "pkg-config", ] @@ -568,9 +567,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.12" +version = "1.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "755717a7de9ec452bf7f3f1a3099085deabd7f2962b861dae91ecd7a365903d2" +checksum = 
"be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c" dependencies = [ "jobserver", "libc", @@ -619,9 +618,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.39" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" +checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" dependencies = [ "num-traits", ] @@ -639,9 +638,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.28" +version = "4.5.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e77c3243bd94243c03672cb5154667347c457ca271254724f9f393aee1c05ff" +checksum = "027bb0d98429ae334a8698531da7077bdf906419543a35a55c2cb1b66437d767" dependencies = [ "clap_builder", "clap_derive", @@ -649,9 +648,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.27" +version = "4.5.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b26884eb4b57140e4d2d93652abfa49498b938b3c9179f9fc487b0acc3edad7" +checksum = "5589e0cba072e0f3d23791efac0fd8627b49c829c196a492e88168e6a669d863" dependencies = [ "anstyle", "clap_lex", @@ -677,9 +676,9 @@ checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "cmake" -version = "0.1.53" +version = "0.1.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e24a03c8b52922d68a1589ad61032f2c1aa5a8158d2aa0d93c6e9534944bbad6" +checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" dependencies = [ "cc", ] @@ -1134,7 +1133,7 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "crossterm_winapi", "futures-core", "mio", @@ -1215,9 +1214,9 @@ checksum = "817fa642fb0ee7fe42e95783e00e0969927b96091bdd4b9b1af082acd943913b" [[package]] name = "data-encoding" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e60eed09d8c01d3cee5b7d30acb059b76614c918fa0f992e0dd6eeb10daad6f" +checksum = "575f75dfd25738df5b91b8e43e14d44bda14637a58fae779fd2b064f8bf3e010" [[package]] name = "date_header" @@ -1309,9 +1308,9 @@ dependencies = [ [[package]] name = "either" -version = "1.13.0" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +checksum = "b7914353092ddf589ad78f25c5c1c21b7f80b0ff8621e7c814c3485b5306da9d" dependencies = [ "serde", ] @@ -1330,9 +1329,9 @@ dependencies = [ [[package]] name = "equivalent" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" @@ -1422,9 +1421,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.35" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" +checksum = "11faaf5a5236997af9848be0bef4db95824b1d534ebc64d0f0c6cf3e67bd38dc" dependencies = [ "crc32fast", "miniz_oxide", @@ -1618,9 +1617,9 @@ checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "h2" 
-version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" +checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2" dependencies = [ "atomic-waker", "bytes", @@ -1720,9 +1719,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hickory-proto" -version = "0.24.3" +version = "0.24.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ad3d6d98c648ed628df039541a5577bee1a7c83e9e16fe3dbedeea4cdfeb971" +checksum = "92652067c9ce6f66ce53cc38d1169daa36e6e7eb7dd3b63b5103bd9d97117248" dependencies = [ "async-trait", "cfg-if", @@ -1744,9 +1743,9 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.3" +version = "0.24.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf287bde7b776e85d7188e6e5db7cf410a2f9531fe82817eb87feed034c8d14" +checksum = "cbb117a1ca520e111743ab2f6688eddee69db4e0ea242545a604dce8a66fd22e" dependencies = [ "cfg-if", "futures-util", @@ -2223,6 +2222,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.14" @@ -2335,9 +2343,9 @@ checksum = "03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8" [[package]] name = "libc" -version = "0.2.169" +version = "0.2.170" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" +checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" [[package]] name = "libfuzzer-sys" @@ -2384,9 +2392,9 @@ checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "litemap" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" +checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" [[package]] name = "lock_api" @@ -2400,9 +2408,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.25" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" +checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" [[package]] name = "loole" @@ -2570,9 +2578,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924" +checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5" dependencies = [ "adler2", "simd-adler32", @@ -2602,7 +2610,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "cfg-if", "cfg_aliases", "libc", @@ -2844,9 +2852,9 @@ dependencies = [ [[package]] name = "os_info" -version = "3.9.2" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6e6520c8cc998c5741ee68ec1dc369fc47e5f0ea5320018ecf2a1ccd6328f48b" +checksum = "2a604e53c24761286860eba4e2c8b23a0161526476b1de520139d69cdb85a6b5" dependencies = [ "log", "serde", @@ -3116,9 +3124,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c0fef6c4230e4ccf618a35c59d7ede15dea37de8427500f50aff708806e42ec" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" dependencies = [ "bytes", "prost-derive", @@ -3126,12 +3134,12 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "157c5a9d7ea5c2ed2d9fb8f495b64759f7816c7eaea54ba3978f0d63000162e3" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ "anyhow", - "itertools 0.13.0", + "itertools 0.14.0", "proc-macro2", "quote", "syn 2.0.98", @@ -3139,20 +3147,20 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2f1e56baa61e93533aebc21af4d2134b70f66275e0fcdf3cbe43d77ff7e8fc" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" dependencies = [ "prost", ] [[package]] name = "pulldown-cmark" -version = "0.12.2" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f86ba2052aebccc42cbbb3ed234b8b13ce76f75c3551a303cb2bcffcff12bb14" +checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "memchr", "pulldown-cmark-escape", "unicase", @@ -3225,9 +3233,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c40286217b4ba3a71d644d752e6a0b71f13f1b6a2c5311acfcbe0c2418ed904" +checksum = "e46f3055866785f6b92bc6164b76be02ca8f2eb4b002c0354b28cf4c119e5944" dependencies = [ "cfg_aliases", "libc", @@ -3348,11 +3356,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" +checksum = "82b568323e98e49e2a0899dcee453dd679fae22d69adf9b11dd508d1549b7e2f" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", ] [[package]] @@ -3466,15 +3474,14 @@ checksum = "57397d16646700483b67d2dd6511d79318f9d057fdbd21a4066aeac8b41d310a" [[package]] name = "ring" -version = "0.17.8" +version = "0.17.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +checksum = "da5349ae27d3887ca812fb375b45a4fbb36d8d12d2df394968cd86e35683fe73" dependencies = [ "cc", "cfg-if", "getrandom 0.2.15", "libc", - "spin", "untrusted", "windows-sys 0.52.0", ] @@ -3482,7 +3489,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "assign", "js_int", @@ -3504,7 +3511,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "js_int", "ruma-common", @@ -3516,7 +3523,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "as_variant", "assign", @@ -3539,7 +3546,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "as_variant", "base64 0.22.1", @@ -3571,7 +3578,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3596,7 +3603,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "bytes", "http", @@ -3614,7 +3621,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3623,7 +3630,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "js_int", "ruma-common", @@ -3633,7 +3640,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3648,7 +3655,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "js_int", "ruma-common", @@ -3660,7 +3667,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "headers", "http", @@ -3673,7 +3680,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3689,7 +3696,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "futures-util", "js_int", @@ -3768,7 +3775,7 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "errno", "libc", "linux-raw-sys", @@ -3777,9 +3784,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.22" +version = "0.23.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb9263ab4eb695e42321db096e3b8fbd715a59b154d5c88d82db2175b681ba7" +checksum = "47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" dependencies = [ "aws-lc-rs", "log", @@ -3899,7 +3906,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "core-foundation", "core-foundation-sys", "libc", @@ -4059,18 +4066,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.217" +version = "1.0.218" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" +checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.217" +version = "1.0.218" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" +checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b" dependencies = [ "proc-macro2", "quote", @@ -4092,9 +4099,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.138" +version = "1.0.139" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949" +checksum = "44f86c3acccc9c65b153fe1b85a3be07fe5515274ec9f0653b4a0875731c72a6" dependencies = [ "itoa", "memchr", @@ -4274,9 +4281,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.2" +version = "1.14.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" dependencies = [ "serde", ] @@ -4291,12 +4298,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "spin" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" - [[package]] name = "spki" version = "0.7.3" @@ -4334,9 +4335,9 @@ dependencies = [ [[package]] name = "string_cache_codegen" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "244292f3441c89febe5b5bdfbb6863aeaf4f64da810ea3050fd927b27b8d92ce" +checksum = "c711928715f1fe0fe509c53b43e993a9a557babc2d0a3567d0a3006f1ac931a0" dependencies = [ "phf_generator", "phf_shared", @@ -4667,9 +4668,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" +checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" dependencies = [ "rustls", "tokio", @@ -4734,9 +4735,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.23" +version = "0.22.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02a8b472d1a3d7c18e2d61a489aee3453fd9031c33e4f55bd533f4a7adca1bee" +checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" dependencies = [ "indexmap 2.7.1", "serde", @@ -4817,7 +4818,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "async-compression", - "bitflags 2.8.0", + "bitflags 2.9.0", "bytes", "futures-core", "futures-util", @@ -4939,9 +4940,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "typenum" -version = "1.17.0" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" [[package]] name = "typewit" @@ -4984,9 +4985,9 @@ checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" +checksum = "00e2473a93778eb0bad35909dff6a10d28e63f792f16ed15e404fca9d5eeedbe" [[package]] name = "unicode-segmentation" @@ -5071,9 +5072,9 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "uuid" -version = "1.13.1" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced87ca4be083373936a67f8de945faa23b6b42384bd5b64434850802c6dccd0" +checksum = "e0f540e3240398cce6128b64ba83fdbdd86129c16a3aa1a3a252efd66eb3d587" dependencies = [ "getrandom 0.3.1", "serde", @@ -5511,9 +5512,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.7.1" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"86e376c75f4f43f44db463cf729e0d3acbf954d13e22c51e26e4c264b4ab545f" +checksum = "0e7f4ea97f6f78012141bcdb6a216b2609f0979ada50b20ca5b52dde2eac2bb1" dependencies = [ "memchr", ] @@ -5534,7 +5535,7 @@ version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", ] [[package]] @@ -5613,18 +5614,18 @@ dependencies = [ [[package]] name = "zerofrom" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", @@ -5662,27 +5663,27 @@ dependencies = [ [[package]] name = "zstd" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "7.2.1" +version = "7.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" +checksum = "f3051792fbdc2e1e143244dc28c60f73d8470e93f3f9cbd0ead44da5ed802722" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.13+zstd.1.5.6" +version = "2.0.14+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" +checksum = "8fb060d4926e4ac3a3ad15d864e99ceb5f343c6b34f5bd6d81ae6ed417311be5" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index 52695d89..e2fe7021 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +rev = "b40e76528660f6a389eacd19a83ef9060644ee8f" features = [ "compat", "rand", @@ -509,7 +509,7 @@ version = "1.0.37" version = "1.0.89" [workspace.dependencies.bytesize] -version = "1.3.0" +version = "1.3.2" [workspace.dependencies.core_affinity] version = "0.8.1" diff --git a/flake.lock b/flake.lock index a7d80508..59fcbd8d 100644 --- a/flake.lock +++ b/flake.lock @@ -170,11 +170,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1740206139, - "narHash": "sha256-wWSv4KYhPKggKuJLzghfBs99pS3Kli9UBlyXVBzuIzc=", + "lastModified": 1740724364, + "narHash": "sha256-D1jLIueJx1dPrP09ZZwTrPf4cubV+TsFMYbpYYTVj6A=", "owner": "nix-community", "repo": "fenix", - "rev": "133a9eb59fb4ddac443ebe5ab2449d3940396533", + "rev": "edf7d9e431cda8782e729253835f178a356d3aab", "type": "github" }, "original": { @@ -364,11 +364,11 @@ "liburing": { "flake": false, "locked": { - "lastModified": 1740063075, - "narHash": "sha256-AfrCMPiXwgB0yxociq4no4NjCqGf/nRVhC3CLRoKqhA=", + "lastModified": 1740613216, + "narHash": 
"sha256-NpPOBqNND3Qe9IwqYs0mJLGTmIx7e6FgUEBAnJ+1ZLA=", "owner": "axboe", "repo": "liburing", - "rev": "5c788d514b9ed6d1a3624150de8aa6db403c1c65", + "rev": "e1003e496e66f9b0ae06674869795edf772d5500", "type": "github" }, "original": { @@ -550,11 +550,11 @@ }, "nixpkgs_5": { "locked": { - "lastModified": 1740019556, - "narHash": "sha256-vn285HxnnlHLWnv59Og7muqECNMS33mWLM14soFIv2g=", + "lastModified": 1740547748, + "narHash": "sha256-Ly2fBL1LscV+KyCqPRufUBuiw+zmWrlJzpWOWbahplg=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "dad564433178067be1fbdfcce23b546254b6d641", + "rev": "3a05eebede89661660945da1f151959900903b6a", "type": "github" }, "original": { @@ -599,11 +599,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1740077634, - "narHash": "sha256-KlYdDhon/hy91NutuBeN8e3qTKf3FXgsudWsjnHud68=", + "lastModified": 1740691488, + "narHash": "sha256-Fs6vBrByuiOf2WO77qeMDMTXcTGzrIMqLBv+lNeywwM=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "88fbdcd510e79ef3bcd81d6d9d4f07bdce84be8c", + "rev": "fe3eda77d3a7ce212388bda7b6cec8bffcc077e5", "type": "github" }, "original": { From af714d5778bf8b5ba4356821941e48bff55aefea Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 2 Mar 2025 23:16:30 -0500 Subject: [PATCH 197/328] refactor+fix various issues with regs/logins and admin user commands Signed-off-by: June Clementine Strawberry --- src/admin/user/commands.rs | 35 ++++-- src/api/client/account.rs | 224 ++++++++++++++++++--------------- src/api/client/session.rs | 88 ++++++------- src/core/config/check.rs | 8 ++ src/service/admin/create.rs | 20 +-- src/service/admin/grant.rs | 129 ++++++++++++------- src/service/admin/mod.rs | 7 +- src/service/appservice/mod.rs | 2 +- src/service/emergency/mod.rs | 9 +- src/service/globals/mod.rs | 2 - src/service/resolver/actual.rs | 5 +- 11 files changed, 309 insertions(+), 220 deletions(-) diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 8565f04a..35067304 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -2,7 +2,7 @@ use std::{collections::BTreeMap, fmt::Write as _}; use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room}; use conduwuit::{ - PduBuilder, Result, debug_warn, error, info, is_equal_to, + PduBuilder, Result, debug, debug_warn, error, info, is_equal_to, utils::{self, ReadyExt}, warn, }; @@ -57,16 +57,16 @@ pub(super) async fn create_user( // Validate user id let user_id = parse_local_user_id(self.services, &username)?; - if self.services.users.exists(&user_id).await { - return Ok(RoomMessageEventContent::text_plain(format!( - "Userid {user_id} already exists" - ))); + if let Err(e) = user_id.validate_strict() { + if self.services.config.emergency_password.is_none() { + return Ok(RoomMessageEventContent::text_plain(format!( + "Username {user_id} contains disallowed characters or spaces: {e}" + ))); + } } - if user_id.is_historical() { - return Ok(RoomMessageEventContent::text_plain(format!( - "User ID {user_id} does not conform to new Matrix identifier spec" - ))); + if self.services.users.exists(&user_id).await { + return Ok(RoomMessageEventContent::text_plain(format!("User {user_id} already exists"))); } let password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); @@ -185,12 +185,12 @@ pub(super) async fn create_user( .is_ok_and(is_equal_to!(1)) { self.services.admin.make_user_admin(&user_id).await?; - warn!("Granting {user_id} admin privileges as the first user"); } + } else { + debug!("create_user 
admin command called without an admin room being available"); } - // Inhibit login does not work for guests Ok(RoomMessageEventContent::text_plain(format!( "Created user with user_id: {user_id} and password: `{password}`" ))) @@ -694,6 +694,19 @@ pub(super) async fn force_leave_room( self.services.globals.user_is_local(&user_id), "Parsed user_id must be a local user" ); + + if !self + .services + .rooms + .state_cache + .is_joined(&user_id, &room_id) + .await + { + return Ok(RoomMessageEventContent::notice_markdown(format!( + "{user_id} is not joined in the room" + ))); + } + leave_room(self.services, &user_id, &room_id, None).await?; Ok(RoomMessageEventContent::notice_markdown(format!( diff --git a/src/api/client/account.rs b/src/api/client/account.rs index b42f51f7..2b8209d4 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -3,7 +3,8 @@ use std::fmt::Write; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - Error, PduBuilder, Result, debug_info, error, info, is_equal_to, utils, utils::ReadyExt, warn, + Err, Error, PduBuilder, Result, debug_info, err, error, info, is_equal_to, utils, + utils::ReadyExt, warn, }; use futures::{FutureExt, StreamExt}; use register::RegistrationKind; @@ -17,7 +18,6 @@ use ruma::{ request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn, whoami, }, - error::ErrorKind, uiaa::{AuthFlow, AuthType, UiaaInfo}, }, events::{ @@ -60,6 +60,14 @@ pub(crate) async fn get_register_available_route( || appservice.registration.id.contains("matrix_appservice_irc") }); + if services + .globals + .forbidden_usernames() + .is_match(&body.username) + { + return Err!(Request(Forbidden("Username is forbidden"))); + } + // don't force the username lowercase if it's from matrix-appservice-irc let body_username = if is_matrix_appservice_irc { body.username.clone() @@ -68,30 +76,45 @@ pub(crate) async fn get_register_available_route( }; // Validate user id - let user_id = UserId::parse_with_server_name(body_username, services.globals.server_name()) - .ok() - .filter(|user_id| { - (!user_id.is_historical() || is_matrix_appservice_irc) - && services.globals.user_is_local(user_id) - }) - .ok_or(Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + let user_id = + match UserId::parse_with_server_name(&body_username, services.globals.server_name()) { + | Ok(user_id) => { + if let Err(e) = user_id.validate_strict() { + // unless the username is from the broken matrix appservice IRC bridge, we + // should follow synapse's behaviour on not allowing things like spaces + // and UTF-8 characters in usernames + if !is_matrix_appservice_irc { + return Err!(Request(InvalidUsername(debug_warn!( + "Username {body_username} contains disallowed characters or spaces: \ + {e}" + )))); + } + } + + user_id + }, + | Err(e) => { + return Err!(Request(InvalidUsername(debug_warn!( + "Username {body_username} is not valid: {e}" + )))); + }, + }; // Check if username is creative enough if services.users.exists(&user_id).await { - return Err(Error::BadRequest(ErrorKind::UserInUse, "Desired user ID is already taken.")); + return Err!(Request(UserInUse("User ID is not available."))); } - if services - .globals - .forbidden_usernames() - .is_match(user_id.localpart()) - { - return Err(Error::BadRequest(ErrorKind::Unknown, "Username is forbidden.")); + if let Some(ref info) = body.appservice_info { + if !info.is_user_match(&user_id) { + return Err!(Request(Exclusive("Username is not in an appservice namespace."))); + } 
+ }; + + if services.appservice.is_exclusive_user_id(&user_id).await { + return Err!(Request(Exclusive("Username is reserved by an appservice."))); } - // TODO add check for appservice namespaces - - // If no if check is true we have an username that's available to be used. Ok(get_username_availability::v3::Response { available: true }) } @@ -119,16 +142,27 @@ pub(crate) async fn register_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - if !services.globals.allow_registration() && body.appservice_info.is_none() { - info!( - "Registration disabled and request not from known appservice, rejecting \ - registration attempt for username \"{}\"", - body.username.as_deref().unwrap_or("") - ); - return Err(Error::BadRequest(ErrorKind::forbidden(), "Registration has been disabled.")); - } - let is_guest = body.kind == RegistrationKind::Guest; + let emergency_mode_enabled = services.config.emergency_password.is_some(); + + if !services.globals.allow_registration() && body.appservice_info.is_none() { + match (body.username.as_ref(), body.initial_device_display_name.as_ref()) { + | (Some(username), Some(device_display_name)) => { + info!(%is_guest, user = %username, device_name = %device_display_name, "Rejecting registration attempt as registration is disabled"); + }, + | (Some(username), _) => { + info!(%is_guest, user = %username, "Rejecting registration attempt as registration is disabled"); + }, + | (_, Some(device_display_name)) => { + info!(%is_guest, device_name = %device_display_name, "Rejecting registration attempt as registration is disabled"); + }, + | (None, _) => { + info!(%is_guest, "Rejecting registration attempt as registration is disabled"); + }, + }; + + return Err!(Request(Forbidden("Registration has been disabled."))); + } if is_guest && (!services.globals.allow_guest_registration() @@ -140,10 +174,7 @@ pub(crate) async fn register_route( rejecting guest registration attempt, initial device name: \"{}\"", body.initial_device_display_name.as_deref().unwrap_or("") ); - return Err(Error::BadRequest( - ErrorKind::GuestAccessForbidden, - "Guest registration is disabled.", - )); + return Err!(Request(GuestAccessForbidden("Guest registration is disabled."))); } // forbid guests from registering if there is not a real admin user yet. give @@ -154,13 +185,10 @@ pub(crate) async fn register_route( rejecting registration. 
Guest's initial device name: \"{}\"", body.initial_device_display_name.as_deref().unwrap_or("") ); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Registration temporarily disabled.", - )); + return Err!(Request(Forbidden("Registration is temporarily disabled."))); } - let user_id = match (&body.username, is_guest) { + let user_id = match (body.username.as_ref(), is_guest) { | (Some(username), false) => { // workaround for https://github.com/matrix-org/matrix-appservice-irc/issues/1780 due to inactivity of fixing the issue let is_matrix_appservice_irc = @@ -170,6 +198,12 @@ pub(crate) async fn register_route( || appservice.registration.id.contains("matrix_appservice_irc") }); + if services.globals.forbidden_usernames().is_match(username) + && !emergency_mode_enabled + { + return Err!(Request(Forbidden("Username is forbidden"))); + } + // don't force the username lowercase if it's from matrix-appservice-irc let body_username = if is_matrix_appservice_irc { username.clone() @@ -177,31 +211,34 @@ pub(crate) async fn register_route( username.to_lowercase() }; - let proposed_user_id = - UserId::parse_with_server_name(body_username, services.globals.server_name()) - .ok() - .filter(|user_id| { - (!user_id.is_historical() || is_matrix_appservice_irc) - && services.globals.user_is_local(user_id) - }) - .ok_or(Error::BadRequest( - ErrorKind::InvalidUsername, - "Username is invalid.", - ))?; + let proposed_user_id = match UserId::parse_with_server_name( + &body_username, + services.globals.server_name(), + ) { + | Ok(user_id) => { + if let Err(e) = user_id.validate_strict() { + // unless the username is from the broken matrix appservice IRC bridge, or + // we are in emergency mode, we should follow synapse's behaviour on + // not allowing things like spaces and UTF-8 characters in usernames + if !is_matrix_appservice_irc && !emergency_mode_enabled { + return Err!(Request(InvalidUsername(debug_warn!( + "Username {body_username} contains disallowed characters or \ + spaces: {e}" + )))); + } + } + + user_id + }, + | Err(e) => { + return Err!(Request(InvalidUsername(debug_warn!( + "Username {body_username} is not valid: {e}" + )))); + }, + }; if services.users.exists(&proposed_user_id).await { - return Err(Error::BadRequest( - ErrorKind::UserInUse, - "Desired user ID is already taken.", - )); - } - - if services - .globals - .forbidden_usernames() - .is_match(proposed_user_id.localpart()) - { - return Err(Error::BadRequest(ErrorKind::Unknown, "Username is forbidden.")); + return Err!(Request(UserInUse("User ID is not available."))); } proposed_user_id @@ -221,21 +258,18 @@ pub(crate) async fn register_route( if body.body.login_type == Some(LoginType::ApplicationService) { match body.appservice_info { | Some(ref info) => - if !info.is_user_match(&user_id) { - return Err(Error::BadRequest( - ErrorKind::Exclusive, - "User is not in namespace.", - )); + if !info.is_user_match(&user_id) && !emergency_mode_enabled { + return Err!(Request(Exclusive( + "Username is not in an appservice namespace." 
+ ))); }, | _ => { - return Err(Error::BadRequest( - ErrorKind::MissingToken, - "Missing appservice token.", - )); + return Err!(Request(MissingToken("Missing appservice token."))); }, } - } else if services.appservice.is_exclusive_user_id(&user_id).await { - return Err(Error::BadRequest(ErrorKind::Exclusive, "User ID reserved by appservice.")); + } else if services.appservice.is_exclusive_user_id(&user_id).await && !emergency_mode_enabled + { + return Err!(Request(Exclusive("Username is reserved by an appservice."))); } // UIAA @@ -271,7 +305,7 @@ pub(crate) async fn register_route( .uiaa .try_auth( &UserId::parse_with_server_name("", services.globals.server_name()) - .expect("we know this is valid"), + .unwrap(), "".into(), auth, &uiaainfo, @@ -287,7 +321,7 @@ pub(crate) async fn register_route( uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services.uiaa.create( &UserId::parse_with_server_name("", services.globals.server_name()) - .expect("we know this is valid"), + .unwrap(), "".into(), &uiaainfo, &json, @@ -295,7 +329,7 @@ pub(crate) async fn register_route( return Err(Error::Uiaa(uiaainfo)); }, | _ => { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err!(Request(NotJson("JSON body is not valid"))); }, }, } @@ -407,7 +441,7 @@ pub(crate) async fn register_route( // log in conduit admin channel if a guest registered if body.appservice_info.is_none() && is_guest && services.globals.log_guest_registrations() { - info!("New guest user \"{user_id}\" registered on this server."); + debug_info!("New guest user \"{user_id}\" registered on this server."); if !device_display_name.is_empty() { if services.server.config.admin_room_notices { @@ -436,7 +470,8 @@ pub(crate) async fn register_route( } // If this is the first real user, grant them admin privileges except for guest - // users Note: the server user, @conduit:servername, is generated first + // users + // Note: the server user is generated first if !is_guest { if let Ok(admin_room) = services.admin.get_admin_room().await { if services @@ -541,8 +576,8 @@ pub(crate) async fn change_password_route( let sender_user = body .sender_user .as_ref() - .ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?; - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + .ok_or_else(|| err!(Request(MissingToken("Missing access token."))))?; + let sender_device = body.sender_device(); let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { stages: vec![AuthType::Password] }], @@ -566,16 +601,16 @@ pub(crate) async fn change_password_route( // Success! 
}, | _ => match body.json_body { - | Some(json) => { + | Some(ref json) => { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + .create(sender_user, sender_device, &uiaainfo, json); return Err(Error::Uiaa(uiaainfo)); }, | _ => { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err!(Request(NotJson("JSON body is not valid"))); }, }, } @@ -589,7 +624,7 @@ pub(crate) async fn change_password_route( services .users .all_device_ids(sender_user) - .ready_filter(|id| id != sender_device) + .ready_filter(|id| *id != sender_device) .for_each(|id| services.users.remove_device(sender_user, id)) .await; } @@ -651,8 +686,8 @@ pub(crate) async fn deactivate_route( let sender_user = body .sender_user .as_ref() - .ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?; - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + .ok_or_else(|| err!(Request(MissingToken("Missing access token."))))?; + let sender_device = body.sender_device(); let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { stages: vec![AuthType::Password] }], @@ -675,16 +710,16 @@ pub(crate) async fn deactivate_route( // Success! }, | _ => match body.json_body { - | Some(json) => { + | Some(ref json) => { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + .create(sender_user, sender_device, &uiaainfo, json); return Err(Error::Uiaa(uiaainfo)); }, | _ => { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err!(Request(NotJson("JSON body is not valid"))); }, }, } @@ -743,10 +778,7 @@ pub(crate) async fn third_party_route( pub(crate) async fn request_3pid_management_token_via_email_route( _body: Ruma, ) -> Result { - Err(Error::BadRequest( - ErrorKind::ThreepidDenied, - "Third party identifier is not allowed", - )) + Err!(Request(ThreepidDenied("Third party identifiers are not implemented"))) } /// # `POST /_matrix/client/v3/account/3pid/msisdn/requestToken` @@ -759,10 +791,7 @@ pub(crate) async fn request_3pid_management_token_via_email_route( pub(crate) async fn request_3pid_management_token_via_msisdn_route( _body: Ruma, ) -> Result { - Err(Error::BadRequest( - ErrorKind::ThreepidDenied, - "Third party identifier is not allowed", - )) + Err!(Request(ThreepidDenied("Third party identifiers are not implemented"))) } /// # `GET /_matrix/client/v1/register/m.login.registration_token/validity` @@ -776,10 +805,7 @@ pub(crate) async fn check_registration_token_validity( body: Ruma, ) -> Result { let Some(reg_token) = services.globals.registration_token.clone() else { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Server does not allow token registration.", - )); + return Err!(Request(Forbidden("Server does not allow token registration"))); }; Ok(check_registration_token_validity::v1::Response { valid: reg_token == body.token }) diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 5c0ab47d..6db761af 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -2,12 +2,11 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, debug, err, info, utils::ReadyExt, warn}; +use conduwuit::{Err, debug, err, info, utils::ReadyExt}; use futures::StreamExt; use ruma::{ - OwnedUserId, UserId, + UserId, api::client::{ - error::ErrorKind, session::{ get_login_token, 
get_login_types::{ @@ -67,6 +66,8 @@ pub(crate) async fn login_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { + let emergency_mode_enabled = services.config.emergency_password.is_some(); + // Validate login method // TODO: Other login methods let user_id = match &body.login_info { @@ -78,20 +79,22 @@ pub(crate) async fn login_route( .. }) => { debug!("Got password login type"); - let user_id = if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = - identifier - { - UserId::parse_with_server_name( - user_id.to_lowercase(), - services.globals.server_name(), - ) - } else if let Some(user) = user { - OwnedUserId::parse(user) - } else { - warn!("Bad login type: {:?}", &body.login_info); - return Err!(Request(Forbidden("Bad login type."))); - } - .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + let user_id = + if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { + UserId::parse_with_server_name(user_id, &services.config.server_name) + } else if let Some(user) = user { + UserId::parse_with_server_name(user, &services.config.server_name) + } else { + return Err!(Request(Unknown( + warn!(?body.login_info, "Invalid or unsupported login type") + ))); + } + .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?; + + assert!( + services.globals.user_is_local(&user_id), + "User ID does not belong to this homeserver" + ); let hash = services .users @@ -124,46 +127,40 @@ pub(crate) async fn login_route( debug!("Got appservice login type"); let user_id = if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { - UserId::parse_with_server_name( - user_id.to_lowercase(), - services.globals.server_name(), - ) + UserId::parse_with_server_name(user_id, &services.config.server_name) } else if let Some(user) = user { - OwnedUserId::parse(user) + UserId::parse_with_server_name(user, &services.config.server_name) } else { - warn!("Bad login type: {:?}", &body.login_info); - return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type.")); + return Err!(Request(Unknown( + warn!(?body.login_info, "Invalid or unsupported login type") + ))); } - .map_err(|e| { - warn!("Failed to parse username from appservice logging in: {e}"); - Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") - })?; + .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?; + + assert!( + services.globals.user_is_local(&user_id), + "User ID does not belong to this homeserver" + ); match body.appservice_info { | Some(ref info) => - if !info.is_user_match(&user_id) { - return Err(Error::BadRequest( - ErrorKind::Exclusive, - "User is not in namespace.", - )); + if !info.is_user_match(&user_id) && !emergency_mode_enabled { + return Err!(Request(Exclusive( + "Username is not in an appservice namespace." 
+ ))); }, | _ => { - return Err(Error::BadRequest( - ErrorKind::MissingToken, - "Missing appservice token.", - )); + return Err!(Request(MissingToken("Missing appservice token."))); }, } user_id }, | _ => { - warn!("Unsupported or unknown login type: {:?}", &body.login_info); - debug!("JSON body: {:?}", &body.json_body); - return Err(Error::BadRequest( - ErrorKind::Unknown, - "Unsupported or unknown login type.", - )); + debug!("/login json_body: {:?}", &body.json_body); + return Err!(Request(Unknown( + warn!(?body.login_info, "Invalid or unsupported login type") + ))); }, }; @@ -216,9 +213,6 @@ pub(crate) async fn login_route( info!("{user_id} logged in"); - // home_server is deprecated but apparently must still be sent despite it being - // deprecated over 6 years ago. initially i thought this macro was unnecessary, - // but ruma uses this same macro for the same reason so... #[allow(deprecated)] Ok(login::v3::Response { user_id, @@ -226,7 +220,7 @@ pub(crate) async fn login_route( device_id, well_known: client_discovery_info, expires_in: None, - home_server: Some(services.globals.server_name().to_owned()), + home_server: Some(services.config.server_name.clone()), refresh_token: None, }) } diff --git a/src/core/config/check.rs b/src/core/config/check.rs index 488f7f94..98223be4 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -126,6 +126,14 @@ pub fn check(config: &Config) -> Result { )); } + if config.emergency_password == Some(String::new()) { + return Err!(Config( + "emergency_password", + "Emergency password was set to an empty string, this is not valid. Unset \ + emergency_password to disable it or set it to a real password." + )); + } + // check if the user specified a registration token as `""` if config.registration_token == Some(String::new()) { return Err!(Config( diff --git a/src/service/admin/create.rs b/src/service/admin/create.rs index 7f71665a..4de37092 100644 --- a/src/service/admin/create.rs +++ b/src/service/admin/create.rs @@ -21,11 +21,11 @@ use crate::Services; /// Create the admin room. /// -/// Users in this room are considered admins by conduit, and the room can be +/// Users in this room are considered admins by conduwuit, and the room can be /// used to issue admin commands by talking to the server user inside it. 
-pub async fn create_admin_room(services: &Services) -> Result<()> { +pub async fn create_admin_room(services: &Services) -> Result { let room_id = RoomId::new(services.globals.server_name()); - let room_version = &services.server.config.default_room_version; + let room_version = &services.config.default_room_version; let _short_id = services .rooms @@ -36,14 +36,14 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { let state_lock = services.rooms.state.mutex.lock(&room_id).await; // Create a user for the server - let server_user = &services.globals.server_user; + let server_user = services.globals.server_user.as_ref(); services.users.create(server_user, None)?; let create_content = { use RoomVersionId::*; match room_version { | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => - RoomCreateEventContent::new_v1(server_user.clone()), + RoomCreateEventContent::new_v1(server_user.into()), | _ => RoomCreateEventContent::new_v11(), } }; @@ -71,7 +71,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .timeline .build_and_append_pdu( PduBuilder::state( - server_user.to_string(), + String::from(server_user), &RoomMemberEventContent::new(MembershipState::Join), ), server_user, @@ -81,7 +81,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .await?; // 3. Power levels - let users = BTreeMap::from_iter([(server_user.clone(), 100.into())]); + let users = BTreeMap::from_iter([(server_user.into(), 69420.into())]); services .rooms @@ -140,7 +140,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .await?; // 5. Events implied by name and topic - let room_name = format!("{} Admin Room", services.globals.server_name()); + let room_name = format!("{} Admin Room", services.config.server_name); services .rooms .timeline @@ -157,7 +157,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .timeline .build_and_append_pdu( PduBuilder::state(String::new(), &RoomTopicEventContent { - topic: format!("Manage {}", services.globals.server_name()), + topic: format!("Manage {} | Run commands prefixed with `!admin` | Run `!admin -h` for help | Documentation: https://conduwuit.puppyirl.gay/", services.config.server_name), }), server_user, &room_id, @@ -187,7 +187,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .alias .set_alias(alias, &room_id, server_user)?; - // 7. (ad-hoc) Disable room previews for everyone by default + // 7. (ad-hoc) Disable room URL previews for everyone by default services .rooms .timeline diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs index 358ea267..5173987a 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -1,10 +1,10 @@ use std::collections::BTreeMap; -use conduwuit::{Result, error, implement}; +use conduwuit::{Err, Result, debug_info, debug_warn, error, implement}; use ruma::{ RoomId, UserId, events::{ - RoomAccountDataEventType, + RoomAccountDataEventType, StateEventType, room::{ member::{MembershipState, RoomMemberEventContent}, message::RoomMessageEventContent, @@ -20,55 +20,98 @@ use crate::pdu::PduBuilder; /// /// This is equivalent to granting server admin privileges. 
#[implement(super::Service)] -pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { +pub async fn make_user_admin(&self, user_id: &UserId) -> Result { let Ok(room_id) = self.get_admin_room().await else { + debug_warn!( + "make_user_admin was called without an admin room being available or created" + ); return Ok(()); }; let state_lock = self.services.state.mutex.lock(&room_id).await; + if self.services.state_cache.is_joined(user_id, &room_id).await { + return Err!(debug_warn!("User is already joined in the admin room")); + } + if self + .services + .state_cache + .is_invited(user_id, &room_id) + .await + { + return Err!(debug_warn!("User is already pending an invitation to the admin room")); + } + // Use the server user to grant the new admin's power level - let server_user = &self.services.globals.server_user; + let server_user = self.services.globals.server_user.as_ref(); - // Invite and join the real user - self.services - .timeline - .build_and_append_pdu( - PduBuilder::state( - user_id.to_string(), - &RoomMemberEventContent::new(MembershipState::Invite), - ), - server_user, + // if this is our local user, just forcefully join them in the room. otherwise, + // invite the remote user. + if self.services.globals.user_is_local(user_id) { + debug_info!("Inviting local user {user_id} to admin room {room_id}"); + self.services + .timeline + .build_and_append_pdu( + PduBuilder::state( + String::from(user_id), + &RoomMemberEventContent::new(MembershipState::Invite), + ), + server_user, + &room_id, + &state_lock, + ) + .await?; + + debug_info!("Force joining local user {user_id} to admin room {room_id}"); + self.services + .timeline + .build_and_append_pdu( + PduBuilder::state( + String::from(user_id), + &RoomMemberEventContent::new(MembershipState::Join), + ), + user_id, + &room_id, + &state_lock, + ) + .await?; + } else { + debug_info!("Inviting remote user {user_id} to admin room {room_id}"); + self.services + .timeline + .build_and_append_pdu( + PduBuilder::state( + user_id.to_string(), + &RoomMemberEventContent::new(MembershipState::Invite), + ), + server_user, + &room_id, + &state_lock, + ) + .await?; + } + + // Set power levels + let mut room_power_levels = self + .services + .state_accessor + .room_state_get_content::( &room_id, - &state_lock, + &StateEventType::RoomPowerLevels, + "", ) - .await?; - self.services - .timeline - .build_and_append_pdu( - PduBuilder::state( - user_id.to_string(), - &RoomMemberEventContent::new(MembershipState::Join), - ), - user_id, - &room_id, - &state_lock, - ) - .await?; + .await + .unwrap_or_default(); - // Set power level - let users = BTreeMap::from_iter([ - (server_user.clone(), 100.into()), - (user_id.to_owned(), 100.into()), - ]); + room_power_levels + .users + .insert(server_user.into(), 69420.into()); + room_power_levels.users.insert(user_id.into(), 100.into()); self.services .timeline .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomPowerLevelsEventContent { - users, - ..Default::default() - }), + PduBuilder::state(String::new(), &room_power_levels), server_user, &room_id, &state_lock, @@ -76,15 +119,17 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { .await?; // Set room tag - let room_tag = &self.services.server.config.admin_room_tag; + let room_tag = self.services.server.config.admin_room_tag.as_str(); if !room_tag.is_empty() { if let Err(e) = self.set_room_tag(&room_id, user_id, room_tag).await { - error!(?room_id, ?user_id, ?room_tag, ?e, "Failed to set tag for admin grant"); + 
error!(?room_id, ?user_id, ?room_tag, "Failed to set tag for admin grant: {e}"); } } if self.services.server.config.admin_room_notices { - let welcome_message = String::from("## Thank you for trying out conduwuit!\n\nconduwuit is technically a hard fork of Conduit, which is in Beta. The Beta status initially was inherited from Conduit, however overtime this Beta status is rapidly becoming less and less relevant as our codebase significantly diverges more and more. conduwuit is quite stable and very usable as a daily driver and for a low-medium sized homeserver. There is still a lot of more work to be done, but it is in a far better place than the project was in early 2024.\n\nHelpful links:\n> GitHub Repo: https://github.com/girlbossceo/conduwuit\n> Documentation: https://conduwuit.puppyirl.gay/\n> Report issues: https://github.com/girlbossceo/conduwuit/issues\n\nFor a list of available commands, send the following message in this room: `!admin --help`\n\nHere are some rooms you can join (by typing the command into your client) -\n\nconduwuit space: `/join #conduwuit-space:puppygock.gay`\nconduwuit main room (Ask questions and get notified on updates): `/join #conduwuit:puppygock.gay`\nconduwuit offtopic room: `/join #conduwuit-offtopic:puppygock.gay`"); + let welcome_message = String::from( + "## Thank you for trying out conduwuit!\n\nconduwuit is technically a hard fork of Conduit, which is in Beta. The Beta status initially was inherited from Conduit, however overtime this Beta status is rapidly becoming less and less relevant as our codebase significantly diverges more and more. conduwuit is quite stable and very usable as a daily driver and for a low-medium sized homeserver. There is still a lot of more work to be done, but it is in a far better place than the project was in early 2024.\n\nHelpful links:\n> GitHub Repo: https://github.com/girlbossceo/conduwuit\n> Documentation: https://conduwuit.puppyirl.gay/\n> Report issues: https://github.com/girlbossceo/conduwuit/issues\n\nFor a list of available commands, send the following message in this room: `!admin --help`\n\nHere are some rooms you can join (by typing the command into your client) -\n\nconduwuit space: `/join #conduwuit-space:puppygock.gay`\nconduwuit main room (Ask questions and get notified on updates): `/join #conduwuit:puppygock.gay`\nconduwuit offtopic room: `/join #conduwuit-offtopic:puppygock.gay`", + ); // Send welcome message self.services @@ -102,7 +147,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { } #[implement(super::Service)] -async fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> Result<()> { +async fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> Result { let mut event = self .services .account_data @@ -125,7 +170,5 @@ async fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> R RoomAccountDataEventType::Tag, &serde_json::to_value(event)?, ) - .await?; - - Ok(()) + .await } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 4622f10e..b3466711 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -40,6 +40,7 @@ struct Services { timeline: Dep, state: Dep, state_cache: Dep, + state_accessor: Dep, account_data: Dep, services: StdRwLock>>, } @@ -85,6 +86,8 @@ impl crate::Service for Service { timeline: args.depend::("rooms::timeline"), state: args.depend::("rooms::state"), state_cache: args.depend::("rooms::state_cache"), + state_accessor: args + 
.depend::("rooms::state_accessor"), account_data: args.depend::("account_data"), services: None.into(), }, @@ -357,8 +360,8 @@ impl Service { } // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let emergency_password_set = self.services.globals.emergency_password().is_some(); + // the administrator can execute commands as the server user + let emergency_password_set = self.services.server.config.emergency_password.is_some(); let from_server = pdu.sender == *server_user && !emergency_password_set; if from_server && self.is_admin_room(&pdu.room_id).await { return false; diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 5aba0018..50a60033 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -90,7 +90,7 @@ impl Service { .write() .await .remove(appservice_id) - .ok_or(err!("Appservice not found"))?; + .ok_or_else(|| err!("Appservice not found"))?; // remove the appservice from the database self.db.id_appserviceregistrations.del(appservice_id); diff --git a/src/service/emergency/mod.rs b/src/service/emergency/mod.rs index 47a309a5..3a61f710 100644 --- a/src/service/emergency/mod.rs +++ b/src/service/emergency/mod.rs @@ -9,7 +9,7 @@ use ruma::{ push::Ruleset, }; -use crate::{Dep, account_data, globals, users}; +use crate::{Dep, account_data, config, globals, users}; pub struct Service { services: Services, @@ -17,6 +17,7 @@ pub struct Service { struct Services { account_data: Dep, + config: Dep, globals: Dep, users: Dep, } @@ -27,6 +28,8 @@ impl crate::Service for Service { Ok(Arc::new(Self { services: Services { account_data: args.depend::("account_data"), + config: args.depend::("config"), + globals: args.depend::("globals"), users: args.depend::("users"), }, @@ -54,9 +57,9 @@ impl Service { self.services .users - .set_password(server_user, self.services.globals.emergency_password().as_deref())?; + .set_password(server_user, self.services.config.emergency_password.as_deref())?; - let (ruleset, pwd_set) = match self.services.globals.emergency_password() { + let (ruleset, pwd_set) = match self.services.config.emergency_password { | Some(_) => (Ruleset::server_default(server_user), true), | None => (Ruleset::new(), false), }; diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 16b3ef3c..74f83228 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -153,8 +153,6 @@ impl Service { pub fn notification_push_path(&self) -> &String { &self.server.config.notification_push_path } - pub fn emergency_password(&self) -> &Option { &self.server.config.emergency_password } - pub fn url_preview_domain_contains_allowlist(&self) -> &Vec { &self.server.config.url_preview_domain_contains_allowlist } diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 8860d0a0..b037cf77 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -363,7 +363,7 @@ impl super::Service { let hostname = hostname.trim_end_matches('.'); match self.resolver.resolver.srv_lookup(hostname).await { | Err(e) => Self::handle_resolve_error(&e, hostname)?, - | Ok(result) => + | Ok(result) => { return Ok(result.iter().next().map(|result| { FedDest::Named( result.target().to_string().trim_end_matches('.').to_owned(), @@ -372,7 +372,8 @@ impl super::Service { .try_into() .unwrap_or_else(|_| FedDest::default_port()), ) - })), + })); + }, } } From 0d741bbd46cd1c2a86321a4a68da3167c46d53e3 Mon Sep 17 00:00:00 2001 
From: June Clementine Strawberry Date: Mon, 3 Mar 2025 00:15:12 -0500 Subject: [PATCH 198/328] remove nix run ci test as its covered by complement Signed-off-by: June Clementine Strawberry --- engage.toml | 28 ++-------------------------- 1 file changed, 2 insertions(+), 26 deletions(-) diff --git a/engage.toml b/engage.toml index c1a2be1f..71366532 100644 --- a/engage.toml +++ b/engage.toml @@ -18,12 +18,12 @@ script = "direnv --version" [[task]] name = "rustc" group = "versions" -script = "rustc --version" +script = "rustc --version -v" [[task]] name = "cargo" group = "versions" -script = "cargo --version" +script = "cargo --version -v" [[task]] name = "cargo-fmt" @@ -60,11 +60,6 @@ name = "markdownlint" group = "versions" script = "markdownlint --version" -[[task]] -name = "dpkg" -group = "versions" -script = "dpkg --version" - [[task]] name = "cargo-audit" group = "security" @@ -228,22 +223,3 @@ depends = ["cargo/default"] script = """ git diff --exit-code conduwuit-example.toml """ - -# Ensure that the flake's default output can build and run without crashing -# -# This is a dynamically-linked jemalloc build, which is a case not covered by -# our other tests. We've had linking problems in the past with dynamic -# jemalloc builds that usually show up as an immediate segfault or "invalid free" -[[task]] -name = "nix-default" -group = "tests" -script = """ -env DIRENV_DEVSHELL=dynamic \ - CARGO_PROFILE="test" \ - direnv exec . \ - bin/nix-build-and-cache just .#default-test -env DIRENV_DEVSHELL=dynamic \ - CARGO_PROFILE="test" \ - direnv exec . \ - nix run -L .#default-test -- --help && nix run -L .#default-test -- --version -""" From df72384c16aa77ccedf532888b0799a3edc2d8b0 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 3 Mar 2025 01:05:43 -0500 Subject: [PATCH 199/328] delete snappy, bump rust-rocksdb, bump rocksdb to v9.10.0 again Signed-off-by: June Clementine Strawberry --- Cargo.lock | 4 ++-- deps/rust-rocksdb/Cargo.toml | 3 +-- flake.lock | 8 ++++---- flake.nix | 2 +- 4 files changed, 8 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e632b504..ec531994 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3711,7 +3711,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.32.0+9.10.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=7b0e1bbe395a41ba8a11347a4921da590e3ad0d9#7b0e1bbe395a41ba8a11347a4921da590e3ad0d9" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b#2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b" dependencies = [ "bindgen", "bzip2-sys", @@ -3728,7 +3728,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.36.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=7b0e1bbe395a41ba8a11347a4921da590e3ad0d9#7b0e1bbe395a41ba8a11347a4921da590e3ad0d9" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b#2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index c6af428d..f9069fc1 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -14,7 +14,6 @@ default = ["lz4", "zstd", "zlib", "bzip2"] jemalloc = ["rust-rocksdb/jemalloc"] io-uring = ["rust-rocksdb/io-uring"] valgrind = ["rust-rocksdb/valgrind"] -snappy = ["rust-rocksdb/snappy"] lz4 = ["rust-rocksdb/lz4"] zstd = ["rust-rocksdb/zstd"] zlib = 
["rust-rocksdb/zlib"] @@ -27,7 +26,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "7b0e1bbe395a41ba8a11347a4921da590e3ad0d9" +rev = "2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b" #branch = "master" default-features = false diff --git a/flake.lock b/flake.lock index 59fcbd8d..ba7fdcff 100644 --- a/flake.lock +++ b/flake.lock @@ -567,16 +567,16 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1739735789, - "narHash": "sha256-BIzuZS0TV4gRnciP4ieW5J3Hql986iedM5dHQfK6z68=", + "lastModified": 1739735940, + "narHash": "sha256-9AqKOWsYXy0sU2C+kB+3NLCDMZ2VsjfbHqvSiydUlcs=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "34e401fd4392dd3268e042f1e40dffd064b9a7ff", + "rev": "f8ad8cd72fd7e527171d35fa8dbca9a073b5b26c", "type": "github" }, "original": { "owner": "girlbossceo", - "ref": "v9.9.3", + "ref": "v9.10.0", "repo": "rocksdb", "type": "github" } diff --git a/flake.nix b/flake.nix index 04dee681..6702111f 100644 --- a/flake.nix +++ b/flake.nix @@ -9,7 +9,7 @@ flake-utils.url = "github:numtide/flake-utils?ref=main"; nix-filter.url = "github:numtide/nix-filter?ref=main"; nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable"; - rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.9.3"; flake = false; }; + rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.10.0"; flake = false; }; liburing = { url = "github:axboe/liburing?ref=master"; flake = false; }; }; From 1ecd02738992f6fd75ea627e60a2ebf1133f4561 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 3 Mar 2025 01:06:04 -0500 Subject: [PATCH 200/328] always run checks when building in nix (doCheck true) Signed-off-by: June Clementine Strawberry --- nix/pkgs/main/default.nix | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix index 4150b389..5dfb32ec 100644 --- a/nix/pkgs/main/default.nix +++ b/nix/pkgs/main/default.nix @@ -162,18 +162,12 @@ commonAttrs = { ]; }; - # This is redundant with CI - doCheck = false; + doCheck = true; - cargoTestCommand = "cargo test --locked "; cargoExtraArgs = "--no-default-features --locked " + lib.optionalString (features'' != []) "--features " + (builtins.concatStringsSep "," features''); - cargoTestExtraArgs = "--no-default-features --locked " - + lib.optionalString - (features'' != []) - "--features " + (builtins.concatStringsSep "," features''); dontStrip = profile == "dev" || profile == "test"; dontPatchELF = profile == "dev" || profile == "test"; @@ -209,18 +203,12 @@ craneLib.buildPackage ( commonAttrs // { env = buildDepsOnlyEnv; }); - # This is redundant with CI - doCheck = false; + doCheck = true; - cargoTestCommand = "cargo test --locked "; cargoExtraArgs = "--no-default-features --locked " + lib.optionalString (features'' != []) "--features " + (builtins.concatStringsSep "," features''); - cargoTestExtraArgs = "--no-default-features --locked " - + lib.optionalString - (features'' != []) - "--features " + (builtins.concatStringsSep "," features''); env = buildPackageEnv; From 7c17163730fcd0f43132cce82cc28b6793ae662a Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 4 Mar 2025 23:35:21 -0500 Subject: [PATCH 201/328] switch to self-hosted ci runner, remove sudo usages Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 132 ++-------------------------- .github/workflows/documentation.yml | 64 +------------- 2 files changed, 9 insertions(+), 
187 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 82ffc6b6..c0425873 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -45,8 +45,8 @@ env: # Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps NIX_CONFIG: | show-trace = true - extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net - extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= + extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net https://nix-community.cachix.org https://crane.cachix.org + extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs= crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk= experimental-features = nix-command flakes extra-experimental-features = nix-command flakes accept-flake-config = true @@ -59,7 +59,7 @@ permissions: {} jobs: tests: name: Test - runs-on: ubuntu-24.04 + runs-on: self-hosted steps: - name: Setup SSH web publish env: @@ -93,19 +93,6 @@ jobs: echo "SSH_WEBSITE=1" >> "$GITHUB_ENV" - - name: Install liburing - run: | - sudo apt install liburing-dev -y - - - name: Free up a bit of runner space - run: | - set +o pipefail - sudo docker image prune --all --force || true - sudo apt purge -y 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true - sudo apt clean - sudo rm -rf /usr/local/lib/android /usr/local/julia* /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/local/lib/heroku /usr/lib/heroku /usr/local/share/boost /usr/share/dotnet /usr/local/bin/cmake* /usr/local/bin/stack /usr/local/bin/terraform /opt/microsoft/powershell /opt/hostedtoolcache/CodeQL /opt/hostedtoolcache/go /opt/hostedtoolcache/PyPy /usr/local/bin/sam || true - set -o pipefail - - name: Sync repository uses: actions/checkout@v4 with: @@ -123,58 +110,9 @@ jobs: exit 1 fi - - uses: nixbuild/nix-quick-install-action@master - - - name: Restore and cache Nix store - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - #if: ${{ !startsWith(github.ref, 'refs/tags/') }} - uses: nix-community/cache-nix-action@v6 - with: - # restore and save a cache using this key - primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/.lock') }} - # if there's no cache hit, restore a cache by this prefix - restore-prefixes-first-match: nix-${{ runner.os }}- - # collect garbage until Nix store size (in bytes) is at most this number - # before trying to save a new cache - gc-max-store-size-linux: 2073741824 - # do purge caches - purge: true - # purge all versions of the cache - purge-prefixes: nix-${{ runner.os }}- - # created more than this number of seconds ago relative to the 
start of the `Post Restore` phase - purge-last-accessed: 86400 - # except the version with the `primary-key`, if it exists - purge-primary-key: never - - - name: Enable Cachix binary cache - run: | - nix profile install nixpkgs#cachix - cachix use crane - cachix use nix-community - - - name: Apply Nix binary cache configuration - run: | - sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null < /dev/null < "$HOME/.direnvrc" - nix profile install --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv direnv allow nix develop .#all-features --command true @@ -267,22 +205,13 @@ jobs: build: name: Build - runs-on: ubuntu-24.04 + runs-on: self-hosted strategy: matrix: include: - target: aarch64-linux-musl - target: x86_64-linux-musl steps: - - name: Free up a bit of runner space - run: | - set +o pipefail - sudo docker image prune --all --force || true - sudo apt purge -y 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true - sudo apt clean - sudo rm -rf /usr/local/lib/android /usr/local/julia* /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/local/lib/heroku /usr/lib/heroku /usr/local/share/boost /usr/share/dotnet /usr/local/bin/cmake* /usr/local/bin/stack /usr/local/bin/terraform /opt/microsoft/powershell /opt/hostedtoolcache/CodeQL /opt/hostedtoolcache/go /opt/hostedtoolcache/PyPy /usr/local/bin/sam || true - set -o pipefail - - name: Sync repository uses: actions/checkout@v4 with: @@ -316,58 +245,9 @@ jobs: echo "SSH_WEBSITE=1" >> "$GITHUB_ENV" - - uses: nixbuild/nix-quick-install-action@master - - - name: Restore and cache Nix store - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - #if: ${{ !startsWith(github.ref, 'refs/tags/') }} - uses: nix-community/cache-nix-action@v6 - with: - # restore and save a cache using this key - primary-key: nix-${{ runner.os }}-${{ matrix.target }}-${{ hashFiles('**/*.nix', '**/.lock') }} - # if there's no cache hit, restore a cache by this prefix - restore-prefixes-first-match: nix-${{ runner.os }}- - # collect garbage until Nix store size (in bytes) is at most this number - # before trying to save a new cache - gc-max-store-size-linux: 2073741824 - # do purge caches - purge: true - # purge all versions of the cache - purge-prefixes: nix-${{ runner.os }}- - # created more than this number of seconds ago relative to the start of the `Post Restore` phase - purge-last-accessed: 86400 - # except the version with the `primary-key`, if it exists - purge-primary-key: never - - - name: Enable Cachix binary cache - run: | - nix profile install nixpkgs#cachix - cachix use crane - cachix use nix-community - - - name: Apply Nix binary cache configuration - run: | - sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null < /dev/null < "$HOME/.direnvrc" - nix profile install --impure --inputs-from . 
nixpkgs#direnv nixpkgs#nix-direnv direnv allow nix develop .#all-features --command true --impure @@ -622,7 +502,7 @@ jobs: variables: outputs: github_repository: ${{ steps.var.outputs.github_repository }} - runs-on: "ubuntu-latest" + runs-on: self-hosted steps: - name: Setting global variables uses: actions/github-script@v7 @@ -632,7 +512,7 @@ jobs: core.setOutput('github_repository', '${{ github.repository }}'.toLowerCase()) docker: name: Docker publish - runs-on: ubuntu-24.04 + runs-on: self-hosted needs: [build, variables, tests] permissions: packages: write diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index fadc7b3f..88e7bbe1 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -24,8 +24,8 @@ env: # Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps NIX_CONFIG: | show-trace = true - extra-substituters = extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net - extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= + extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net https://nix-community.cachix.org https://crane.cachix.org + extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs= crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk= experimental-features = nix-command flakes extra-experimental-features = nix-command flakes accept-flake-config = true @@ -41,7 +41,7 @@ permissions: {} jobs: docs: name: Documentation and GitHub Pages - runs-on: ubuntu-24.04 + runs-on: self-hosted permissions: pages: write @@ -52,15 +52,6 @@ jobs: url: ${{ steps.deployment.outputs.page_url }} steps: - - name: Free up a bit of runner space - run: | - set +o pipefail - sudo docker image prune --all --force || true - sudo apt purge -y 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true - sudo apt clean - sudo rm -v -rf /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/lib/heroku - set -o pipefail - - name: Sync repository uses: actions/checkout@v4 with: @@ -70,58 +61,9 @@ jobs: if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') uses: actions/configure-pages@v5 - - uses: nixbuild/nix-quick-install-action@master - - - name: Restore and cache Nix store - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - if: ${{ !startsWith(github.ref, 'refs/tags/') }} - uses: nix-community/cache-nix-action@v6 - with: - # restore and save a cache 
using this key - primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/.lock') }} - # if there's no cache hit, restore a cache by this prefix - restore-prefixes-first-match: nix-${{ runner.os }}- - # collect garbage until Nix store size (in bytes) is at most this number - # before trying to save a new cache - gc-max-store-size-linux: 2073741824 - # do purge caches - purge: true - # purge all versions of the cache - purge-prefixes: nix-${{ runner.os }}- - # created more than this number of seconds ago relative to the start of the `Post Restore` phase - purge-last-accessed: 86400 - # except the version with the `primary-key`, if it exists - purge-primary-key: never - - - name: Enable Cachix binary cache - run: | - nix profile install nixpkgs#cachix - cachix use crane - cachix use nix-community - - - name: Apply Nix binary cache configuration - run: | - sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null < /dev/null < "$HOME/.direnvrc" - nix profile install --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv direnv allow nix develop --command true From 35981d5aef8785c132d2e2a166cfcde1cd24169e Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 5 Mar 2025 19:05:42 -0500 Subject: [PATCH 202/328] automatically forget rooms on leaving Signed-off-by: June Clementine Strawberry --- src/service/rooms/state_cache/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 02ffa0d1..f406eb69 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -267,6 +267,10 @@ impl Service { }, | MembershipState::Leave | MembershipState::Ban => { self.mark_as_left(user_id, room_id); + + if self.services.globals.user_is_local(user_id) { + self.forget(room_id, user_id); + } }, | _ => {}, } From 97208d6081da92f8b5c732aa6b3bf06997ad4a16 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 5 Mar 2025 19:06:10 -0500 Subject: [PATCH 203/328] add more safety checks before allowing a room marked as forgotten Signed-off-by: June Clementine Strawberry --- src/api/client/membership.rs | 40 +++++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 12 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 0b9c0c69..940c8639 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -11,12 +11,12 @@ use axum_client_ip::InsecureClientIp; use conduwuit::{ Err, PduEvent, Result, StateKey, at, debug, debug_info, debug_warn, err, error, info, pdu::{PduBuilder, gen_event_id_canonical_json}, - result::FlatOk, + result::{FlatOk, NotFound}, state_res, trace, utils::{self, IterStream, ReadyExt, shuffle}, warn, }; -use futures::{FutureExt, StreamExt, TryFutureExt, join}; +use futures::{FutureExt, StreamExt, TryFutureExt, future::join4, join}; use ruma::{ CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, @@ -717,21 +717,37 @@ pub(crate) async fn forget_room_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user(); + let user_id = body.sender_user(); + let room_id = &body.room_id; - if services - .rooms - .state_cache - .is_joined(sender_user, &body.room_id) - .await - { + let joined = services.rooms.state_cache.is_joined(user_id, room_id); + let knocked = services.rooms.state_cache.is_knocked(user_id, room_id); + let left = 
services.rooms.state_cache.is_left(user_id, room_id); + let invited = services.rooms.state_cache.is_invited(user_id, room_id); + + let (joined, knocked, left, invited) = join4(joined, knocked, left, invited).await; + + if joined || knocked || invited { return Err!(Request(Unknown("You must leave the room before forgetting it"))); } - services + let membership = services .rooms - .state_cache - .forget(&body.room_id, sender_user); + .state_accessor + .get_member(room_id, user_id) + .await; + + if membership.is_not_found() { + return Err!(Request(Unknown("No membership event was found, room was never joined"))); + } + + if left + || membership.is_ok_and(|member| { + member.membership == MembershipState::Leave + || member.membership == MembershipState::Ban + }) { + services.rooms.state_cache.forget(room_id, user_id); + } Ok(forget_room::v3::Response::new()) } From 408f5bd30cb461cec9472a51b87f0bb1ed6b7381 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 5 Mar 2025 19:06:31 -0500 Subject: [PATCH 204/328] add val_size_hints on membership cfs (todo remove these anyways) Signed-off-by: June Clementine Strawberry --- src/database/maps.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/database/maps.rs b/src/database/maps.rs index b060ab8d..9af45159 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -181,6 +181,7 @@ pub(super) static MAPS: &[Descriptor] = &[ }, Descriptor { name: "roomuserid_invitecount", + val_size_hint: Some(8), ..descriptor::RANDOM_SMALL }, Descriptor { @@ -193,10 +194,12 @@ pub(super) static MAPS: &[Descriptor] = &[ }, Descriptor { name: "roomuserid_leftcount", + val_size_hint: Some(8), ..descriptor::RANDOM }, Descriptor { name: "roomuserid_knockedcount", + val_size_hint: Some(8), ..descriptor::RANDOM_SMALL }, Descriptor { From 2c1ec3fb02a823515697b159e26d5464ebe29937 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 5 Mar 2025 21:31:49 -0500 Subject: [PATCH 205/328] allow both lowercase and uppercase usernames to login Signed-off-by: June Clementine Strawberry --- src/api/client/session.rs | 54 +++++++++++++++++++++++++++------------ 1 file changed, 37 insertions(+), 17 deletions(-) diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 6db761af..ab67ee18 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -3,7 +3,7 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{Err, debug, err, info, utils::ReadyExt}; -use futures::StreamExt; +use futures::{StreamExt, TryFutureExt}; use ruma::{ UserId, api::client::{ @@ -86,29 +86,40 @@ pub(crate) async fn login_route( UserId::parse_with_server_name(user, &services.config.server_name) } else { return Err!(Request(Unknown( - warn!(?body.login_info, "Invalid or unsupported login type") + debug_warn!(?body.login_info, "Valid identifier or username was not provided (invalid or unsupported login type?)") ))); } .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?; + let lowercased_user_id = UserId::parse_with_server_name( + user_id.localpart().to_lowercase(), + &services.config.server_name, + )?; + assert!( services.globals.user_is_local(&user_id), "User ID does not belong to this homeserver" ); + assert!( + services.globals.user_is_local(&lowercased_user_id), + "User ID does not belong to this homeserver" + ); let hash = services .users .password_hash(&user_id) + .or_else(|_| services.users.password_hash(&lowercased_user_id)) .await + .inspect_err(|e| 
debug!("{e}")) .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; if hash.is_empty() { return Err!(Request(UserDeactivated("The user has been deactivated"))); } - if hash::verify_password(password, &hash).is_err() { - return Err!(Request(Forbidden("Wrong username or password."))); - } + hash::verify_password(password, &hash) + .inspect_err(|e| debug!("{e}")) + .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; user_id }, @@ -125,6 +136,11 @@ pub(crate) async fn login_route( user, }) => { debug!("Got appservice login type"); + + let Some(ref info) = body.appservice_info else { + return Err!(Request(MissingToken("Missing appservice token."))); + }; + let user_id = if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { UserId::parse_with_server_name(user_id, &services.config.server_name) @@ -132,26 +148,30 @@ pub(crate) async fn login_route( UserId::parse_with_server_name(user, &services.config.server_name) } else { return Err!(Request(Unknown( - warn!(?body.login_info, "Invalid or unsupported login type") + debug_warn!(?body.login_info, "Valid identifier or username was not provided (invalid or unsupported login type?)") ))); } .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?; + let lowercased_user_id = UserId::parse_with_server_name( + user_id.localpart().to_lowercase(), + &services.config.server_name, + )?; + assert!( services.globals.user_is_local(&user_id), "User ID does not belong to this homeserver" ); + assert!( + services.globals.user_is_local(&lowercased_user_id), + "User ID does not belong to this homeserver" + ); - match body.appservice_info { - | Some(ref info) => - if !info.is_user_match(&user_id) && !emergency_mode_enabled { - return Err!(Request(Exclusive( - "Username is not in an appservice namespace." 
- ))); - }, - | _ => { - return Err!(Request(MissingToken("Missing appservice token."))); - }, + if !info.is_user_match(&user_id) + && !info.is_user_match(&lowercased_user_id) + && !emergency_mode_enabled + { + return Err!(Request(Exclusive("Username is not in an appservice namespace."))); } user_id @@ -159,7 +179,7 @@ pub(crate) async fn login_route( | _ => { debug!("/login json_body: {:?}", &body.json_body); return Err!(Request(Unknown( - warn!(?body.login_info, "Invalid or unsupported login type") + debug_warn!(?body.login_info, "Invalid or unsupported login type") ))); }, }; From c10500f8aebcd52a219bdba4f2114b03d9474565 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 00:14:24 -0500 Subject: [PATCH 206/328] bump rust-rocksdb and ruwuma Signed-off-by: June Clementine Strawberry --- Cargo.lock | 60 +++++++++---------------------- Cargo.toml | 3 +- deps/rust-rocksdb/Cargo.toml | 5 +-- flake.lock | 8 ++--- flake.nix | 2 +- src/api/router/auth.rs | 3 +- src/service/federation/execute.rs | 3 +- 7 files changed, 28 insertions(+), 56 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ec531994..d51bb966 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3489,7 +3489,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "assign", "js_int", @@ -3502,16 +3502,14 @@ dependencies = [ "ruma-identifiers-validation", "ruma-identity-service-api", "ruma-push-gateway-api", - "ruma-server-util", "ruma-signatures", - "ruma-state-res", "web-time 1.1.0", ] [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "js_int", "ruma-common", @@ -3523,7 +3521,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "as_variant", "assign", @@ -3546,7 +3544,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "as_variant", "base64 0.22.1", @@ -3578,7 +3576,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3603,10 +3601,12 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "bytes", + "headers", "http", + "http-auth", "httparse", "js_int", "memchr", @@ -3616,12 +3616,14 @@ dependencies = [ "ruma-events", "serde", "serde_json", + "thiserror 2.0.11", + "tracing", ] [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3630,7 +3632,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "js_int", "ruma-common", @@ -3640,7 +3642,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3655,7 +3657,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "js_int", "ruma-common", @@ -3664,23 +3666,10 @@ dependencies = [ "serde_json", ] -[[package]] -name = "ruma-server-util" -version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" -dependencies = [ - "headers", - "http", - "http-auth", - "ruma-common", - "thiserror 2.0.11", - "tracing", -] - [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3693,25 +3682,10 @@ dependencies = [ "thiserror 2.0.11", ] -[[package]] -name = "ruma-state-res" -version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" -dependencies = [ - "futures-util", - "js_int", - "ruma-common", - "ruma-events", - "serde", - "serde_json", - "thiserror 2.0.11", - "tracing", -] - [[package]] name = "rust-librocksdb-sys" version = "0.32.0+9.10.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b#2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b" +source = 
"git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=513133a3dc24b667f32933aa3247c6ec71a958f3#513133a3dc24b667f32933aa3247c6ec71a958f3" dependencies = [ "bindgen", "bzip2-sys", @@ -3728,7 +3702,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.36.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b#2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=513133a3dc24b667f32933aa3247c6ec71a958f3#513133a3dc24b667f32933aa3247c6ec71a958f3" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index e2fe7021..7f08a21a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "b40e76528660f6a389eacd19a83ef9060644ee8f" +rev = "bb42118bd85e731b652a6110896b6945085bf944" features = [ "compat", "rand", @@ -355,7 +355,6 @@ features = [ "federation-api", "markdown", "push-gateway-api-c", - "server-util", "unstable-exhaustive-types", "ring-compat", "compat-upload-signatures", diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index f9069fc1..61bd2333 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -2,7 +2,7 @@ name = "rust-rocksdb-uwu" categories.workspace = true description = "dylib wrapper for rust-rocksdb" -edition = "2021" +edition = "2024" keywords.workspace = true license.workspace = true readme.workspace = true @@ -13,6 +13,7 @@ version = "0.0.1" default = ["lz4", "zstd", "zlib", "bzip2"] jemalloc = ["rust-rocksdb/jemalloc"] io-uring = ["rust-rocksdb/io-uring"] +numa = ["rust-rocksdb/numa"] # unused by rocksdb for now valgrind = ["rust-rocksdb/valgrind"] lz4 = ["rust-rocksdb/lz4"] zstd = ["rust-rocksdb/zstd"] @@ -26,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b" +rev = "513133a3dc24b667f32933aa3247c6ec71a958f3" #branch = "master" default-features = false diff --git a/flake.lock b/flake.lock index ba7fdcff..a1bd423f 100644 --- a/flake.lock +++ b/flake.lock @@ -567,16 +567,16 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1739735940, - "narHash": "sha256-9AqKOWsYXy0sU2C+kB+3NLCDMZ2VsjfbHqvSiydUlcs=", + "lastModified": 1741234703, + "narHash": "sha256-sT5g/RM9vrwY6AmjSfl4RoJPGtcJCkZCsxiX3PFJgKQ=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "f8ad8cd72fd7e527171d35fa8dbca9a073b5b26c", + "rev": "185593ce4534091e57025e9f3571dbf681c04631", "type": "github" }, "original": { "owner": "girlbossceo", - "ref": "v9.10.0", + "ref": "v9.9.3", "repo": "rocksdb", "type": "github" } diff --git a/flake.nix b/flake.nix index 6702111f..04dee681 100644 --- a/flake.nix +++ b/flake.nix @@ -9,7 +9,7 @@ flake-utils.url = "github:numtide/flake-utils?ref=main"; nix-filter.url = "github:numtide/nix-filter?ref=main"; nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable"; - rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.10.0"; flake = false; }; + rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.9.3"; flake = false; }; liburing = { url = "github:axboe/liburing?ref=master"; flake = false; }; }; diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 92b75cfa..5cd7b831 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -17,9 +17,8 @@ use ruma::{ }, 
voip::get_turn_server_info, }, - federation::openid::get_openid_userinfo, + federation::{authentication::XMatrix, openid::get_openid_userinfo}, }, - server_util::authorization::XMatrix, }; use service::{ Services, diff --git a/src/service/federation/execute.rs b/src/service/federation/execute.rs index d254486f..63f2ccfb 100644 --- a/src/service/federation/execute.rs +++ b/src/service/federation/execute.rs @@ -12,10 +12,9 @@ use ruma::{ CanonicalJsonObject, CanonicalJsonValue, ServerName, ServerSigningKeyId, api::{ EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, - client::error::Error as RumaError, + client::error::Error as RumaError, federation::authentication::XMatrix, }, serde::Base64, - server_util::authorization::XMatrix, }; use crate::resolver::actual::ActualDest; From 17b625a85b908d4c2cb3df308c2337be6e571ce2 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 00:14:49 -0500 Subject: [PATCH 207/328] reject device keys if they dont match user ID or device ID or are missing fields Signed-off-by: June Clementine Strawberry --- src/api/client/keys.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 6f20153b..8a7eab7e 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -48,6 +48,19 @@ pub(crate) async fn upload_keys_route( } if let Some(device_keys) = &body.device_keys { + let deser_device_keys = device_keys.deserialize()?; + + if deser_device_keys.user_id != sender_user { + return Err!(Request(Unknown( + "User ID in keys uploaded does not match your own user ID" + ))); + } + if deser_device_keys.device_id != sender_device { + return Err!(Request(Unknown( + "Device ID in keys uploaded does not match your own device ID" + ))); + } + // TODO: merge this and the existing event? // This check is needed to assure that signatures are kept if services From f4c51cd405f1a0695b16c085655eb0180637fe2d Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 00:18:28 -0500 Subject: [PATCH 208/328] remove zlib as a default rocksdb compression option Signed-off-by: June Clementine Strawberry --- conduwuit-example.toml | 2 +- deps/rust-rocksdb/Cargo.toml | 2 +- src/core/config/mod.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 9b6f6ce0..541f062d 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -821,7 +821,7 @@ # Type of RocksDB database compression to use. # -# Available options are "zstd", "zlib", "bz2", "lz4", or "none". +# Available options are "zstd", "bz2", "lz4", or "none". # # It is best to use ZSTD as an overall good balance between # speed/performance, storage, IO amplification, and CPU usage. For more diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index 61bd2333..35f755b4 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -10,7 +10,7 @@ repository.workspace = true version = "0.0.1" [features] -default = ["lz4", "zstd", "zlib", "bzip2"] +default = ["lz4", "zstd", "bzip2"] jemalloc = ["rust-rocksdb/jemalloc"] io-uring = ["rust-rocksdb/io-uring"] numa = ["rust-rocksdb/numa"] # unused by rocksdb for now diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 67c3b95c..5a4819e0 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -975,7 +975,7 @@ pub struct Config { /// Type of RocksDB database compression to use. 
/// - /// Available options are "zstd", "zlib", "bz2", "lz4", or "none". + /// Available options are "zstd", "bz2", "lz4", or "none". /// /// It is best to use ZSTD as an overall good balance between /// speed/performance, storage, IO amplification, and CPU usage. For more From 657e91fd4226d2521e9e7bb15d5982e62ad68624 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 00:34:17 -0500 Subject: [PATCH 209/328] dont send push notifications from ignored users PDUs Signed-off-by: June Clementine Strawberry --- src/service/rooms/timeline/mod.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 35c972fa..138340a4 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -368,7 +368,7 @@ impl Service { .state_accessor .room_state_get_content(&pdu.room_id, &StateEventType::RoomPowerLevels, "") .await - .map_err(|_| err!(Database("invalid m.room.power_levels event"))) + .map_err(|e| err!(Database(warn!("invalid m.room.power_levels event: {e}")))) .unwrap_or_default(); let sync_pdu = pdu.to_sync_room_event(); @@ -377,9 +377,10 @@ impl Service { .services .state_cache .active_local_users_in_room(&pdu.room_id) - // Don't notify the sender of their own events - .ready_filter(|user| user != &pdu.sender) .map(ToOwned::to_owned) + // Don't notify the sender of their own events, and dont send from ignored users + .ready_filter(|user| user != &pdu.sender) + .filter_map(|recipient_user| async move { (!self.services.users.user_is_ignored(&pdu.sender, recipient_user).await).then_some(recipient_user) }) .collect() .await; From 931fd4c80215cee5cd709d42c86d1fefe0844fe1 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 00:44:57 -0500 Subject: [PATCH 210/328] add missing target Signed-off-by: June Clementine Strawberry --- rust-toolchain.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 00fb6cee..97b4a789 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -24,5 +24,6 @@ targets = [ "x86_64-unknown-linux-gnu", "x86_64-unknown-linux-musl", "aarch64-unknown-linux-musl", + "aarch64-unknown-linux-gnu", #"aarch64-apple-darwin", ] From ecea0cff69d583439e4a84fba6bd2d5aaba8faee Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 00:51:13 -0500 Subject: [PATCH 211/328] fix TestFetchMessagesFromNonExistentRoom complement test Signed-off-by: June Clementine Strawberry --- src/api/client/message.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 571a238a..c755cc47 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,6 +1,6 @@ use axum::extract::State; use conduwuit::{ - Event, PduCount, PduEvent, Result, at, + Err, Event, PduCount, PduEvent, Result, at, utils::{ IterStream, ReadyExt, result::{FlatOk, LogErr}, @@ -68,6 +68,10 @@ pub(crate) async fn get_message_events_route( let room_id = &body.room_id; let filter = &body.filter; + if !services.rooms.metadata.exists(room_id).await { + return Err!(Request(Forbidden("Room does not exist to this server"))); + } + let from: PduCount = body .from .as_deref() From c92678ecbeb55cf323758da08e8c36e65496aa38 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 13:08:01 -0500 Subject: [PATCH 212/328] dont build with zlib in the nix flake Signed-off-by: June Clementine Strawberry --- 
Cargo.lock | 4 ++-- Cargo.toml | 1 - deps/rust-rocksdb/Cargo.toml | 2 +- flake.nix | 9 +++++++-- src/service/rooms/timeline/mod.rs | 4 ++-- 5 files changed, 12 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d51bb966..2ade8b83 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3685,7 +3685,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.32.0+9.10.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=513133a3dc24b667f32933aa3247c6ec71a958f3#513133a3dc24b667f32933aa3247c6ec71a958f3" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=a5d5358ca1358f828283e1558cf6a402b6cbea34#a5d5358ca1358f828283e1558cf6a402b6cbea34" dependencies = [ "bindgen", "bzip2-sys", @@ -3702,7 +3702,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.36.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=513133a3dc24b667f32933aa3247c6ec71a958f3#513133a3dc24b667f32933aa3247c6ec71a958f3" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=a5d5358ca1358f828283e1558cf6a402b6cbea34#a5d5358ca1358f828283e1558cf6a402b6cbea34" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index 7f08a21a..5edcc60a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -389,7 +389,6 @@ features = [ "mt_static", "lz4", "zstd", - "zlib", "bzip2", ] diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index 35f755b4..f6e0a54f 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -27,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "513133a3dc24b667f32933aa3247c6ec71a958f3" +rev = "a5d5358ca1358f828283e1558cf6a402b6cbea34" #branch = "master" default-features = false diff --git a/flake.nix b/flake.nix index 04dee681..faff87d6 100644 --- a/flake.nix +++ b/flake.nix @@ -64,8 +64,10 @@ patches = []; cmakeFlags = pkgs.lib.subtractLists [ - # no real reason to have snappy, no one uses this + # no real reason to have snappy or zlib, no one uses this "-DWITH_SNAPPY=1" + "-DZLIB=1" + "-DWITH_ZLIB=1" # we dont need to use ldb or sst_dump (core_tools) "-DWITH_CORE_TOOLS=1" # we dont need to build rocksdb tests @@ -82,6 +84,8 @@ ++ [ # no real reason to have snappy, no one uses this "-DWITH_SNAPPY=0" + "-DZLIB=0" + "-DWITH_ZLIB=0" # we dont need to use ldb or sst_dump (core_tools) "-DWITH_CORE_TOOLS=0" # we dont need trace tools @@ -171,7 +175,8 @@ sccache ] # liburing is Linux-exclusive - ++ lib.optional stdenv.hostPlatform.isLinux liburing) + ++ lib.optional stdenv.hostPlatform.isLinux liburing + ++ lib.optional stdenv.hostPlatform.isLinux numactl) ++ scope.main.buildInputs ++ scope.main.propagatedBuildInputs ++ scope.main.nativeBuildInputs; diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 138340a4..276b8b6a 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -379,8 +379,8 @@ impl Service { .active_local_users_in_room(&pdu.room_id) .map(ToOwned::to_owned) // Don't notify the sender of their own events, and dont send from ignored users - .ready_filter(|user| user != &pdu.sender) - .filter_map(|recipient_user| async move { (!self.services.users.user_is_ignored(&pdu.sender, recipient_user).await).then_some(recipient_user) }) + .ready_filter(|user| *user != pdu.sender) + .filter_map(|recipient_user| async move { 
(!self.services.users.user_is_ignored(&pdu.sender, &recipient_user).await).then_some(recipient_user) }) .collect() .await; From d80e61cbee21706454d1033ba46b51e4dcbb8679 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 17:53:23 -0500 Subject: [PATCH 213/328] bump ring to 0.17.12 Signed-off-by: June Clementine Strawberry --- Cargo.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2ade8b83..3a57df7b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2364,7 +2364,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -3474,9 +3474,9 @@ checksum = "57397d16646700483b67d2dd6511d79318f9d057fdbd21a4066aeac8b41d310a" [[package]] name = "ring" -version = "0.17.11" +version = "0.17.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da5349ae27d3887ca812fb375b45a4fbb36d8d12d2df394968cd86e35683fe73" +checksum = "ed9b823fa29b721a59671b41d6b06e66b29e0628e207e8b1c3ceeda701ec928d" dependencies = [ "cc", "cfg-if", From f34e0b21a3cbf7eaa737256fc57c13719b225507 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 18:12:54 -0500 Subject: [PATCH 214/328] remove rust-rocksdb dylib wrapper as we have a fork already Signed-off-by: June Clementine Strawberry --- Cargo.lock | 10 +----- Cargo.toml | 9 +++--- deps/rust-rocksdb/Cargo.toml | 42 ------------------------ deps/rust-rocksdb/lib.rs | 62 ------------------------------------ 4 files changed, 6 insertions(+), 117 deletions(-) delete mode 100644 deps/rust-rocksdb/Cargo.toml delete mode 100644 deps/rust-rocksdb/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 3a57df7b..9a46f008 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -848,7 +848,7 @@ dependencies = [ "log", "minicbor", "minicbor-serde", - "rust-rocksdb-uwu", + "rust-rocksdb", "serde", "serde_json", "tokio", @@ -3706,14 +3706,6 @@ source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=a5d5358ca dependencies = [ "libc", "rust-librocksdb-sys", - "serde", -] - -[[package]] -name = "rust-rocksdb-uwu" -version = "0.0.1" -dependencies = [ - "rust-rocksdb", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 5edcc60a..62c90119 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -382,8 +382,9 @@ features = [ ] [workspace.dependencies.rust-rocksdb] -path = "deps/rust-rocksdb" -package = "rust-rocksdb-uwu" +git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" +rev = "a5d5358ca1358f828283e1558cf6a402b6cbea34" +default-features = false features = [ "multi-threaded-cf", "mt_static", @@ -683,7 +684,7 @@ inherits = "release" # To enable hot-reloading: # 1. Uncomment all of the rustflags here. -# 2. Uncomment crate-type=dylib in src/*/Cargo.toml and deps/rust-rocksdb/Cargo.toml +# 2. Uncomment crate-type=dylib in src/*/Cargo.toml # # opt-level, mir-opt-level, validate-mir are not known to interfere with reloading # and can be raised if build times are tolerable. 
@@ -751,7 +752,7 @@ inherits = "dev" # '-Clink-arg=-Wl,-z,lazy', #] -[profile.dev.package.rust-rocksdb-uwu] +[profile.dev.package.rust-rocksdb] inherits = "dev" debug = 'limited' incremental = false diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml deleted file mode 100644 index f6e0a54f..00000000 --- a/deps/rust-rocksdb/Cargo.toml +++ /dev/null @@ -1,42 +0,0 @@ -[package] -name = "rust-rocksdb-uwu" -categories.workspace = true -description = "dylib wrapper for rust-rocksdb" -edition = "2024" -keywords.workspace = true -license.workspace = true -readme.workspace = true -repository.workspace = true -version = "0.0.1" - -[features] -default = ["lz4", "zstd", "bzip2"] -jemalloc = ["rust-rocksdb/jemalloc"] -io-uring = ["rust-rocksdb/io-uring"] -numa = ["rust-rocksdb/numa"] # unused by rocksdb for now -valgrind = ["rust-rocksdb/valgrind"] -lz4 = ["rust-rocksdb/lz4"] -zstd = ["rust-rocksdb/zstd"] -zlib = ["rust-rocksdb/zlib"] -bzip2 = ["rust-rocksdb/bzip2"] -rtti = ["rust-rocksdb/rtti"] -mt_static = ["rust-rocksdb/mt_static"] -multi-threaded-cf = ["rust-rocksdb/multi-threaded-cf"] -serde1 = ["rust-rocksdb/serde1"] -malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] - -[dependencies.rust-rocksdb] -git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "a5d5358ca1358f828283e1558cf6a402b6cbea34" -#branch = "master" -default-features = false - -[lib] -path = "lib.rs" -crate-type = [ - "rlib", -# "dylib" -] - -[lints] -workspace = true diff --git a/deps/rust-rocksdb/lib.rs b/deps/rust-rocksdb/lib.rs deleted file mode 100644 index 8dbbda98..00000000 --- a/deps/rust-rocksdb/lib.rs +++ /dev/null @@ -1,62 +0,0 @@ -pub use rust_rocksdb::*; - -#[cfg_attr(not(conduwuit_mods), link(name = "rocksdb"))] -#[cfg_attr(conduwuit_mods, link(name = "rocksdb", kind = "static"))] -unsafe extern "C" { - pub unsafe fn rocksdb_list_column_families(); - pub unsafe fn rocksdb_logger_create_stderr_logger(); - pub unsafe fn rocksdb_logger_create_callback_logger(); - pub unsafe fn rocksdb_options_set_info_log(); - pub unsafe fn rocksdb_get_options_from_string(); - pub unsafe fn rocksdb_writebatch_create(); - pub unsafe fn rocksdb_writebatch_destroy(); - pub unsafe fn rocksdb_writebatch_put_cf(); - pub unsafe fn rocksdb_writebatch_delete_cf(); - pub unsafe fn rocksdb_iter_value(); - pub unsafe fn rocksdb_iter_seek_to_last(); - pub unsafe fn rocksdb_iter_seek_for_prev(); - pub unsafe fn rocksdb_iter_seek_to_first(); - pub unsafe fn rocksdb_iter_next(); - pub unsafe fn rocksdb_iter_prev(); - pub unsafe fn rocksdb_iter_seek(); - pub unsafe fn rocksdb_iter_valid(); - pub unsafe fn rocksdb_iter_get_error(); - pub unsafe fn rocksdb_iter_key(); - pub unsafe fn rocksdb_iter_destroy(); - pub unsafe fn rocksdb_livefiles(); - pub unsafe fn rocksdb_livefiles_count(); - pub unsafe fn rocksdb_livefiles_destroy(); - pub unsafe fn rocksdb_livefiles_column_family_name(); - pub unsafe fn rocksdb_livefiles_name(); - pub unsafe fn rocksdb_livefiles_size(); - pub unsafe fn rocksdb_livefiles_level(); - pub unsafe fn rocksdb_livefiles_smallestkey(); - pub unsafe fn rocksdb_livefiles_largestkey(); - pub unsafe fn rocksdb_livefiles_entries(); - pub unsafe fn rocksdb_livefiles_deletions(); - pub unsafe fn rocksdb_put_cf(); - pub unsafe fn rocksdb_delete_cf(); - pub unsafe fn rocksdb_get_pinned_cf(); - pub unsafe fn rocksdb_create_column_family(); - pub unsafe fn rocksdb_get_latest_sequence_number(); - pub unsafe fn rocksdb_batched_multi_get_cf(); - pub unsafe fn rocksdb_cancel_all_background_work(); - pub 
unsafe fn rocksdb_repair_db(); - pub unsafe fn rocksdb_list_column_families_destroy(); - pub unsafe fn rocksdb_flush(); - pub unsafe fn rocksdb_flush_wal(); - pub unsafe fn rocksdb_open_column_families(); - pub unsafe fn rocksdb_open_for_read_only_column_families(); - pub unsafe fn rocksdb_open_as_secondary_column_families(); - pub unsafe fn rocksdb_open_column_families_with_ttl(); - pub unsafe fn rocksdb_open(); - pub unsafe fn rocksdb_open_for_read_only(); - pub unsafe fn rocksdb_open_with_ttl(); - pub unsafe fn rocksdb_open_as_secondary(); - pub unsafe fn rocksdb_write(); - pub unsafe fn rocksdb_create_iterator_cf(); - pub unsafe fn rocksdb_backup_engine_create_new_backup_flush(); - pub unsafe fn rocksdb_backup_engine_options_create(); - pub unsafe fn rocksdb_write_buffer_manager_destroy(); - pub unsafe fn rocksdb_options_set_ttl(); -} From fa71162c7dd943afdf78d10710914076ec2d3c85 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 18:45:14 -0500 Subject: [PATCH 215/328] bump rocksdb to v9.11.1 Signed-off-by: June Clementine Strawberry --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- flake.lock | 8 ++++---- flake.nix | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9a46f008..56ff3c6b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3684,8 +3684,8 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" -version = "0.32.0+9.10.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=a5d5358ca1358f828283e1558cf6a402b6cbea34#a5d5358ca1358f828283e1558cf6a402b6cbea34" +version = "0.33.0+9.11.1" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=3f4c5357243defedc849ae6227490102a9f90bef#3f4c5357243defedc849ae6227490102a9f90bef" dependencies = [ "bindgen", "bzip2-sys", @@ -3701,8 +3701,8 @@ dependencies = [ [[package]] name = "rust-rocksdb" -version = "0.36.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=a5d5358ca1358f828283e1558cf6a402b6cbea34#a5d5358ca1358f828283e1558cf6a402b6cbea34" +version = "0.37.0" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=3f4c5357243defedc849ae6227490102a9f90bef#3f4c5357243defedc849ae6227490102a9f90bef" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index 62c90119..43b2d55d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -383,7 +383,7 @@ features = [ [workspace.dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "a5d5358ca1358f828283e1558cf6a402b6cbea34" +rev = "3f4c5357243defedc849ae6227490102a9f90bef" default-features = false features = [ "multi-threaded-cf", diff --git a/flake.lock b/flake.lock index a1bd423f..3a43c4cd 100644 --- a/flake.lock +++ b/flake.lock @@ -567,16 +567,16 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1741234703, - "narHash": "sha256-sT5g/RM9vrwY6AmjSfl4RoJPGtcJCkZCsxiX3PFJgKQ=", + "lastModified": 1741303627, + "narHash": "sha256-7HpydEinYHvskC4vkl1Yie2kg2yShfZbREAyQMkvEUc=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "185593ce4534091e57025e9f3571dbf681c04631", + "rev": "cecee0e4fbff2b69e3edc6e9b5b751d8098a3ba1", "type": "github" }, "original": { "owner": "girlbossceo", - "ref": "v9.9.3", + "ref": "v9.11.1", "repo": "rocksdb", "type": "github" } diff --git a/flake.nix b/flake.nix index faff87d6..8f08a7d9 100644 --- a/flake.nix +++ b/flake.nix @@ -9,7 +9,7 @@ flake-utils.url = "github:numtide/flake-utils?ref=main"; nix-filter.url = "github:numtide/nix-filter?ref=main"; 
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable"; - rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.9.3"; flake = false; }; + rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.11.1"; flake = false; }; liburing = { url = "github:axboe/liburing?ref=master"; flake = false; }; }; From 20dd1d148dd31948d9055c5a19ba8f8e13041363 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 18:45:48 -0500 Subject: [PATCH 216/328] add new complement test results Signed-off-by: June Clementine Strawberry --- .../complement/test_results.jsonl | 425 +++++++++++++++++- 1 file changed, 421 insertions(+), 4 deletions(-) diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 11339049..fed43b48 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -1,5 +1,26 @@ {"Action":"pass","Test":"TestACLs"} +{"Action":"pass","Test":"TestAddAccountData"} +{"Action":"pass","Test":"TestAddAccountData/Can_add_global_account_data"} +{"Action":"pass","Test":"TestAddAccountData/Can_add_room_account_data"} +{"Action":"fail","Test":"TestArchivedRoomsHistory"} +{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events"} +{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events/incremental_sync"} +{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events/initial_sync"} +{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_is_empty"} +{"Action":"skip","Test":"TestArchivedRoomsHistory/timeline_is_empty/incremental_sync"} +{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_is_empty/initial_sync"} +{"Action":"fail","Test":"TestAsyncUpload"} +{"Action":"fail","Test":"TestAsyncUpload/Cannot_upload_to_a_media_ID_that_has_already_been_uploaded_to"} +{"Action":"fail","Test":"TestAsyncUpload/Create_media"} +{"Action":"fail","Test":"TestAsyncUpload/Download_media"} +{"Action":"fail","Test":"TestAsyncUpload/Download_media_over__matrix/client/v1/media/download"} +{"Action":"fail","Test":"TestAsyncUpload/Not_yet_uploaded"} +{"Action":"fail","Test":"TestAsyncUpload/Upload_media"} +{"Action":"pass","Test":"TestAvatarUrlUpdate"} {"Action":"pass","Test":"TestBannedUserCannotSendJoin"} +{"Action":"skip","Test":"TestCanRegisterAdmin"} +{"Action":"pass","Test":"TestCannotKickLeftUser"} +{"Action":"fail","Test":"TestCannotKickNonPresentUser"} {"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room"} {"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/event_with_mismatched_state_key"} {"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/invite_event"} @@ -42,30 +63,124 @@ {"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV2/knock_event"} {"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV2/non-state_membership_event"} {"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV2/regular_event"} +{"Action":"pass","Test":"TestChangePassword"} +{"Action":"pass","Test":"TestChangePassword/After_changing_password,_a_different_session_no_longer_works_by_default"} +{"Action":"pass","Test":"TestChangePassword/After_changing_password,_can't_log_in_with_old_password"} +{"Action":"pass","Test":"TestChangePassword/After_changing_password,_can_log_in_with_new_password"} +{"Action":"pass","Test":"TestChangePassword/After_changing_password,_different_sessions_can_optionally_be_kept"} +{"Action":"pass","Test":"TestChangePassword/After_changing_password,_existing_session_still_works"} 
+{"Action":"fail","Test":"TestChangePasswordPushers"} +{"Action":"fail","Test":"TestChangePasswordPushers/Pushers_created_with_a_different_access_token_are_deleted_on_password_change"} +{"Action":"pass","Test":"TestChangePasswordPushers/Pushers_created_with_the_same_access_token_are_not_deleted_on_password_change"} +{"Action":"fail","Test":"TestClientSpacesSummary"} +{"Action":"fail","Test":"TestClientSpacesSummary/max_depth"} +{"Action":"fail","Test":"TestClientSpacesSummary/pagination"} +{"Action":"fail","Test":"TestClientSpacesSummary/query_whole_graph"} +{"Action":"fail","Test":"TestClientSpacesSummary/redact_link"} +{"Action":"fail","Test":"TestClientSpacesSummary/suggested_only"} +{"Action":"fail","Test":"TestClientSpacesSummaryJoinRules"} +{"Action":"pass","Test":"TestContent"} +{"Action":"pass","Test":"TestContentCSAPIMediaV1"} {"Action":"pass","Test":"TestContentMediaV1"} +{"Action":"fail","Test":"TestCumulativeJoinLeaveJoinSync"} +{"Action":"pass","Test":"TestDeactivateAccount"} +{"Action":"pass","Test":"TestDeactivateAccount/After_deactivating_account,_can't_log_in_with_password"} +{"Action":"pass","Test":"TestDeactivateAccount/Can't_deactivate_account_with_wrong_password"} +{"Action":"pass","Test":"TestDeactivateAccount/Can_deactivate_account"} +{"Action":"pass","Test":"TestDeactivateAccount/Password_flow_is_available"} +{"Action":"fail","Test":"TestDelayedEvents"} +{"Action":"fail","Test":"TestDelayedEvents/cannot_update_a_delayed_event_with_an_invalid_action"} +{"Action":"pass","Test":"TestDelayedEvents/cannot_update_a_delayed_event_without_a_delay_ID"} +{"Action":"fail","Test":"TestDelayedEvents/cannot_update_a_delayed_event_without_a_request_body"} +{"Action":"fail","Test":"TestDelayedEvents/cannot_update_a_delayed_event_without_an_action"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_events_are_empty_on_startup"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_message_events_are_sent_on_timeout"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_are_cancelled_by_a_more_recent_state_event_from_another_user"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_are_cancelled_by_a_more_recent_state_event_from_the_same_user"} +{"Action":"skip","Test":"TestDelayedEvents/delayed_state_events_are_kept_on_server_restart"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_are_sent_on_timeout"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_can_be_cancelled"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_can_be_restarted"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_can_be_sent_on_request"} +{"Action":"pass","Test":"TestDelayedEvents/parallel"} +{"Action":"pass","Test":"TestDelayedEvents/parallel/cannot_cancel_a_delayed_event_without_a_matching_delay_ID"} +{"Action":"pass","Test":"TestDelayedEvents/parallel/cannot_restart_a_delayed_event_without_a_matching_delay_ID"} +{"Action":"pass","Test":"TestDelayedEvents/parallel/cannot_send_a_delayed_event_without_a_matching_delay_ID"} +{"Action":"fail","Test":"TestDeletingDeviceRemovesDeviceLocalNotificationSettings"} +{"Action":"fail","Test":"TestDeletingDeviceRemovesDeviceLocalNotificationSettings/Deleting_a_user's_device_should_delete_any_local_notification_settings_entries_from_their_account_data"} +{"Action":"pass","Test":"TestDemotingUsersViaUsersDefault"} +{"Action":"fail","Test":"TestDeviceListUpdates"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_joining_a_room_with_a_local_user"} 
+{"Action":"fail","Test":"TestDeviceListUpdates/when_joining_a_room_with_a_remote_user"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_leaving_a_room_with_a_local_user"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_leaving_a_room_with_a_remote_user"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_local_user_joins_a_room"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_local_user_leaves_a_room"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_local_user_rejoins_a_room"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_remote_user_joins_a_room"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_remote_user_leaves_a_room"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_remote_user_rejoins_a_room"} {"Action":"fail","Test":"TestDeviceListsUpdateOverFederation"} {"Action":"fail","Test":"TestDeviceListsUpdateOverFederation/good_connectivity"} {"Action":"fail","Test":"TestDeviceListsUpdateOverFederation/interrupted_connectivity"} {"Action":"fail","Test":"TestDeviceListsUpdateOverFederation/stopped_server"} {"Action":"fail","Test":"TestDeviceListsUpdateOverFederationOnRoomJoin"} +{"Action":"fail","Test":"TestDeviceManagement"} +{"Action":"fail","Test":"TestDeviceManagement/DELETE_/device/{deviceId}"} +{"Action":"fail","Test":"TestDeviceManagement/DELETE_/device/{deviceId}_requires_UI_auth_user_to_match_device_owner"} +{"Action":"pass","Test":"TestDeviceManagement/GET_/device/{deviceId}"} +{"Action":"pass","Test":"TestDeviceManagement/GET_/device/{deviceId}_gives_a_404_for_unknown_devices"} +{"Action":"pass","Test":"TestDeviceManagement/GET_/devices"} +{"Action":"pass","Test":"TestDeviceManagement/PUT_/device/{deviceId}_gives_a_404_for_unknown_devices"} +{"Action":"pass","Test":"TestDeviceManagement/PUT_/device/{deviceId}_updates_device_fields"} +{"Action":"pass","Test":"TestDisplayNameUpdate"} +{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules"} +{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel"} +{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:false_firstMessageIndex:10_forwardedCount:5}"} +{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:true_firstMessageIndex:10_forwardedCount:5}"} +{"Action":"pass","Test":"TestEvent"} +{"Action":"pass","Test":"TestEvent/Parallel"} +{"Action":"pass","Test":"TestEvent/Parallel/Large_Event"} +{"Action":"pass","Test":"TestEvent/Parallel/Large_State_Event"} {"Action":"pass","Test":"TestEventAuth"} {"Action":"pass","Test":"TestEventAuth/returns_auth_events_for_the_requested_event"} {"Action":"pass","Test":"TestEventAuth/returns_the_auth_chain_for_the_requested_event"} -{"Action":"pass","Test":"TestFederatedClientSpaces"} +{"Action":"fail","Test":"TestEventRelationships"} +{"Action":"fail","Test":"TestFederatedClientSpaces"} +{"Action":"fail","Test":"TestFederatedEventRelationships"} {"Action":"fail","Test":"TestFederationKeyUploadQuery"} {"Action":"fail","Test":"TestFederationKeyUploadQuery/Can_claim_remote_one_time_key_using_POST"} {"Action":"fail","Test":"TestFederationKeyUploadQuery/Can_query_remote_device_keys_using_POST"} {"Action":"pass","Test":"TestFederationRedactSendsWithoutEvent"} {"Action":"pass","Test":"TestFederationRejectInvite"} -{"Action":"pass","Test":"TestFederationRoomsInvite"} -{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel"} +{"Action":"fail","Test":"TestFederationRoomsInvite"} +{"Action":"fail","Test":"TestFederationRoomsInvite/Parallel"} 
{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation"} -{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_for_empty_room"} +{"Action":"fail","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_for_empty_room"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_several_times"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_has_'is_direct'_flag_in_prev_content_after_joining"} +{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Remote_invited_user_can_join_the_room_when_homeserver_is_already_participating_in_the_room"} +{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Remote_invited_user_can_reject_invite_when_homeserver_is_already_participating_in_the_room"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Remote_invited_user_can_see_room_metadata"} {"Action":"pass","Test":"TestFederationThumbnail"} +{"Action":"pass","Test":"TestFetchEvent"} +{"Action":"fail","Test":"TestFetchEventNonWorldReadable"} +{"Action":"pass","Test":"TestFetchEventWorldReadable"} +{"Action":"fail","Test":"TestFetchHistoricalInvitedEventFromBeforeInvite"} +{"Action":"pass","Test":"TestFetchHistoricalInvitedEventFromBetweenInvite"} +{"Action":"fail","Test":"TestFetchHistoricalJoinedEventDenied"} +{"Action":"pass","Test":"TestFetchHistoricalSharedEvent"} +{"Action":"pass","Test":"TestFetchMessagesFromNonExistentRoom"} +{"Action":"pass","Test":"TestFilter"} +{"Action":"fail","Test":"TestFilterMessagesByRelType"} +{"Action":"fail","Test":"TestGappedSyncLeaveSection"} +{"Action":"fail","Test":"TestGetFilteredRoomMembers"} +{"Action":"fail","Test":"TestGetFilteredRoomMembers/membership/join"} +{"Action":"fail","Test":"TestGetFilteredRoomMembers/membership/leave"} +{"Action":"fail","Test":"TestGetFilteredRoomMembers/not_membership"} {"Action":"fail","Test":"TestGetMissingEventsGapFilling"} +{"Action":"pass","Test":"TestGetRoomMembers"} +{"Action":"fail","Test":"TestGetRoomMembersAtPoint"} {"Action":"fail","Test":"TestInboundCanReturnMissingEvents"} {"Action":"fail","Test":"TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_invited_visibility"} {"Action":"fail","Test":"TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_joined_visibility"} @@ -76,15 +191,41 @@ {"Action":"pass","Test":"TestInboundFederationProfile/Inbound_federation_can_query_profile_data"} {"Action":"pass","Test":"TestInboundFederationProfile/Non-numeric_ports_in_server_names_are_rejected"} {"Action":"fail","Test":"TestInboundFederationRejectsEventsWithRejectedAuthEvents"} +{"Action":"fail","Test":"TestInviteFromIgnoredUsersDoesNotAppearInSync"} {"Action":"pass","Test":"TestIsDirectFlagFederation"} {"Action":"pass","Test":"TestIsDirectFlagLocal"} {"Action":"pass","Test":"TestJoinFederatedRoomFailOver"} +{"Action":"fail","Test":"TestJoinFederatedRoomFromApplicationServiceBridgeUser"} +{"Action":"fail","Test":"TestJoinFederatedRoomFromApplicationServiceBridgeUser/join_remote_federated_room_as_application_service_user"} {"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents"} {"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_missing_signatures_shouldn't_block_room_join"} 
{"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_bad_signatures_shouldn't_block_room_join"} {"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_state_with_unverifiable_auth_events_shouldn't_block_room_join"} {"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_unobtainable_keys_shouldn't_block_room_join"} {"Action":"pass","Test":"TestJoinViaRoomIDAndServerName"} +{"Action":"fail","Test":"TestJson"} +{"Action":"fail","Test":"TestJson/Parallel"} +{"Action":"fail","Test":"TestJson/Parallel/Invalid_JSON_special_values"} +{"Action":"fail","Test":"TestJson/Parallel/Invalid_numerical_values"} +{"Action":"fail","Test":"TestJumpToDateEndpoint"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation/can_paginate_after_getting_remote_event_from_timestamp_to_event_endpoint"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation/looking_backwards,_should_be_able_to_find_event_that_was_sent_before_we_joined"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation/looking_forwards,_should_be_able_to_find_event_that_was_sent_before_we_joined"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation/when_looking_backwards_before_the_room_was_created,_should_be_able_to_find_event_that_was_imported"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_find_event_after_given_timestmap"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_find_event_before_given_timestmap"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_find_next_event_topologically_after_given_timestmap_when_all_message_timestamps_are_the_same"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_find_next_event_topologically_before_given_timestamp_when_all_message_timestamps_are_the_same"} +{"Action":"pass","Test":"TestJumpToDateEndpoint/parallel/should_find_nothing_after_the_latest_timestmap"} +{"Action":"pass","Test":"TestJumpToDateEndpoint/parallel/should_find_nothing_before_the_earliest_timestmap"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_not_be_able_to_query_a_private_room_you_are_not_a_member_of"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_not_be_able_to_query_a_public_room_you_are_not_a_member_of"} +{"Action":"fail","Test":"TestKeyChangesLocal"} +{"Action":"fail","Test":"TestKeyChangesLocal/New_login_should_create_a_device_lists.changed_entry"} +{"Action":"fail","Test":"TestKeyClaimOrdering"} +{"Action":"pass","Test":"TestKeysQueryWithDeviceIDAsObjectFails"} {"Action":"fail","Test":"TestKnockRoomsInPublicRoomsDirectory"} {"Action":"fail","Test":"TestKnockRoomsInPublicRoomsDirectoryInMSC3787Room"} {"Action":"fail","Test":"TestKnocking"} @@ -139,9 +280,35 @@ {"Action":"fail","Test":"TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"} +{"Action":"pass","Test":"TestLeakyTyping"} +{"Action":"fail","Test":"TestLeaveEventInviteRejection"} +{"Action":"fail","Test":"TestLeaveEventVisibility"} +{"Action":"fail","Test":"TestLeftRoomFixture"} 
+{"Action":"fail","Test":"TestLeftRoomFixture/Can_get_'m.room.name'_state_for_a_departed_room"} +{"Action":"fail","Test":"TestLeftRoomFixture/Can_get_rooms/{roomId}/members_for_a_departed_room"} +{"Action":"pass","Test":"TestLeftRoomFixture/Can_get_rooms/{roomId}/messages_for_a_departed_room"} +{"Action":"fail","Test":"TestLeftRoomFixture/Can_get_rooms/{roomId}/state_for_a_departed_room"} +{"Action":"pass","Test":"TestLeftRoomFixture/Getting_messages_going_forward_is_limited_for_a_departed_room"} {"Action":"pass","Test":"TestLocalPngThumbnail"} {"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/client/v1/media_endpoint"} {"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/media/v3_endpoint"} +{"Action":"fail","Test":"TestLogin"} +{"Action":"fail","Test":"TestLogin/parallel"} +{"Action":"pass","Test":"TestLogin/parallel/GET_/login_yields_a_set_of_flows"} +{"Action":"fail","Test":"TestLogin/parallel/Login_with_uppercase_username_works_and_GET_/whoami_afterwards_also"} +{"Action":"pass","Test":"TestLogin/parallel/POST_/login_as_non-existing_user_is_rejected"} +{"Action":"pass","Test":"TestLogin/parallel/POST_/login_can_log_in_as_a_user_with_just_the_local_part_of_the_id"} +{"Action":"pass","Test":"TestLogin/parallel/POST_/login_can_login_as_user"} +{"Action":"pass","Test":"TestLogin/parallel/POST_/login_returns_the_same_device_id_as_that_in_the_request"} +{"Action":"pass","Test":"TestLogin/parallel/POST_/login_wrong_password_is_rejected"} +{"Action":"pass","Test":"TestLogout"} +{"Action":"pass","Test":"TestLogout/Can_logout_all_devices"} +{"Action":"pass","Test":"TestLogout/Can_logout_current_device"} +{"Action":"pass","Test":"TestLogout/Request_to_logout_with_invalid_an_access_token_is_rejected"} +{"Action":"pass","Test":"TestLogout/Request_to_logout_without_an_access_token_is_rejected"} +{"Action":"fail","Test":"TestMSC3757OwnedState"} +{"Action":"pass","Test":"TestMSC3967"} +{"Action":"pass","Test":"TestMediaConfig"} {"Action":"pass","Test":"TestMediaFilenames"} {"Action":"pass","Test":"TestMediaFilenames/Parallel"} {"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII"} @@ -178,11 +345,74 @@ {"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1/parallel/Can_download_without_a_file_name_locally"} {"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1/parallel/Can_download_without_a_file_name_over_federation"} {"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1/parallel/Can_upload_without_a_file_name"} +{"Action":"fail","Test":"TestMembersLocal"} +{"Action":"fail","Test":"TestMembersLocal/Parallel"} +{"Action":"pass","Test":"TestMembersLocal/Parallel/Existing_members_see_new_members'_join_events"} +{"Action":"fail","Test":"TestMembersLocal/Parallel/Existing_members_see_new_members'_presence_(in_incremental_sync)"} +{"Action":"pass","Test":"TestMembersLocal/Parallel/Existing_members_see_new_members'_presence_(in_initial_sync)"} +{"Action":"pass","Test":"TestMembersLocal/Parallel/New_room_members_see_their_own_join_event"} +{"Action":"fail","Test":"TestMembershipOnEvents"} {"Action":"fail","Test":"TestNetworkPartitionOrdering"} +{"Action":"pass","Test":"TestNotPresentUserCannotBanOthers"} +{"Action":"fail","Test":"TestOlderLeftRoomsNotInLeaveSection"} +{"Action":"fail","Test":"TestOutboundFederationEventSizeGetMissingEvents"} {"Action":"fail","Test":"TestOutboundFederationIgnoresMissingEventWithBadJSONForRoomVersion6"} {"Action":"pass","Test":"TestOutboundFederationProfile"} 
{"Action":"pass","Test":"TestOutboundFederationProfile/Outbound_federation_can_query_profile_data"} {"Action":"pass","Test":"TestOutboundFederationSend"} +{"Action":"fail","Test":"TestPollsLocalPushRules"} +{"Action":"fail","Test":"TestPollsLocalPushRules/Polls_push_rules_are_correctly_presented_to_the_client"} +{"Action":"pass","Test":"TestPowerLevels"} +{"Action":"pass","Test":"TestPowerLevels/GET_/rooms/:room_id/state/m.room.power_levels_can_fetch_levels"} +{"Action":"pass","Test":"TestPowerLevels/PUT_/rooms/:room_id/state/m.room.power_levels_can_set_levels"} +{"Action":"pass","Test":"TestPowerLevels/PUT_power_levels_should_not_explode_if_the_old_power_levels_were_empty"} +{"Action":"fail","Test":"TestPresence"} +{"Action":"fail","Test":"TestPresence/GET_/presence/:user_id/status_fetches_initial_status"} +{"Action":"pass","Test":"TestPresence/PUT_/presence/:user_id/status_updates_my_presence"} +{"Action":"pass","Test":"TestPresence/Presence_can_be_set_from_sync"} +{"Action":"pass","Test":"TestPresence/Presence_changes_are_reported_to_local_room_members"} +{"Action":"pass","Test":"TestPresence/Presence_changes_to_UNAVAILABLE_are_reported_to_local_room_members"} +{"Action":"pass","Test":"TestPresenceSyncDifferentRooms"} +{"Action":"pass","Test":"TestProfileAvatarURL"} +{"Action":"pass","Test":"TestProfileAvatarURL/GET_/profile/:user_id/avatar_url_publicly_accessible"} +{"Action":"pass","Test":"TestProfileAvatarURL/PUT_/profile/:user_id/avatar_url_sets_my_avatar"} +{"Action":"pass","Test":"TestProfileDisplayName"} +{"Action":"pass","Test":"TestProfileDisplayName/GET_/profile/:user_id/displayname_publicly_accessible"} +{"Action":"pass","Test":"TestProfileDisplayName/PUT_/profile/:user_id/displayname_sets_my_name"} +{"Action":"pass","Test":"TestPushRuleCacheHealth"} +{"Action":"pass","Test":"TestPushSync"} +{"Action":"pass","Test":"TestPushSync/Adding_a_push_rule_wakes_up_an_incremental_/sync"} +{"Action":"pass","Test":"TestPushSync/Disabling_a_push_rule_wakes_up_an_incremental_/sync"} +{"Action":"pass","Test":"TestPushSync/Enabling_a_push_rule_wakes_up_an_incremental_/sync"} +{"Action":"pass","Test":"TestPushSync/Push_rules_come_down_in_an_initial_/sync"} +{"Action":"pass","Test":"TestPushSync/Setting_actions_for_a_push_rule_wakes_up_an_incremental_/sync"} +{"Action":"pass","Test":"TestRegistration"} +{"Action":"pass","Test":"TestRegistration/parallel"} +{"Action":"pass","Test":"TestRegistration/parallel/GET_/register/available_returns_M_INVALID_USERNAME_for_invalid_user_name"} +{"Action":"pass","Test":"TestRegistration/parallel/GET_/register/available_returns_M_USER_IN_USE_for_registered_user_name"} +{"Action":"pass","Test":"TestRegistration/parallel/GET_/register/available_returns_available_for_unregistered_user_name"} +{"Action":"skip","Test":"TestRegistration/parallel/POST_/_synapse/admin/v1/register_admin_with_shared_secret"} +{"Action":"skip","Test":"TestRegistration/parallel/POST_/_synapse/admin/v1/register_with_shared_secret"} +{"Action":"skip","Test":"TestRegistration/parallel/POST_/_synapse/admin/v1/register_with_shared_secret_disallows_symbols"} +{"Action":"skip","Test":"TestRegistration/parallel/POST_/_synapse/admin/v1/register_with_shared_secret_downcases_capitals"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/-"} 
+{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/."} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_//"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/3"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/="} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/_"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/q"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_can_create_a_user"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_downcases_capitals_in_usernames"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_rejects_if_user_already_exists"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_rejects_usernames_with_special_characters"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_returns_the_same_device_id_as_that_in_the_request"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_{}_returns_a_set_of_flows"} +{"Action":"pass","Test":"TestRegistration/parallel/Registration_accepts_non-ascii_passwords"} +{"Action":"pass","Test":"TestRelations"} +{"Action":"fail","Test":"TestRelationsPagination"} +{"Action":"pass","Test":"TestRelationsPaginationSync"} {"Action":"pass","Test":"TestRemoteAliasRequestsUnderstandUnicode"} {"Action":"pass","Test":"TestRemotePngThumbnail"} {"Action":"pass","Test":"TestRemotePngThumbnail/test_/_matrix/client/v1/media_endpoint"} @@ -191,6 +421,13 @@ {"Action":"fail","Test":"TestRemotePresence/Presence_changes_are_also_reported_to_remote_room_members"} {"Action":"fail","Test":"TestRemotePresence/Presence_changes_to_UNAVAILABLE_are_reported_to_remote_room_members"} {"Action":"pass","Test":"TestRemoteTyping"} +{"Action":"fail","Test":"TestRemovingAccountData"} +{"Action":"fail","Test":"TestRemovingAccountData/Deleting_a_user's_account_data_via_DELETE_works"} +{"Action":"fail","Test":"TestRemovingAccountData/Deleting_a_user's_account_data_via_PUT_works"} +{"Action":"fail","Test":"TestRemovingAccountData/Deleting_a_user's_room_account_data_via_PUT_works"} +{"Action":"fail","Test":"TestRemovingAccountData/Deleting_a_user's_room_data_via_DELETE_works"} +{"Action":"fail","Test":"TestRequestEncodingFails"} +{"Action":"fail","Test":"TestRequestEncodingFails/POST_rejects_invalid_utf-8_in_JSON"} {"Action":"fail","Test":"TestRestrictedRoomsLocalJoin"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_fail_initially"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_fail_when_left_allowed_room"} @@ -221,12 +458,170 @@ {"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinLocalUserInMSC3787Room"} {"Action":"pass","Test":"TestRestrictedRoomsSpacesSummaryFederation"} {"Action":"fail","Test":"TestRestrictedRoomsSpacesSummaryLocal"} +{"Action":"pass","Test":"TestRoomAlias"} +{"Action":"pass","Test":"TestRoomAlias/Parallel"} +{"Action":"pass","Test":"TestRoomAlias/Parallel/GET_/rooms/:room_id/aliases_lists_aliases"} +{"Action":"pass","Test":"TestRoomAlias/Parallel/Only_room_members_can_list_aliases_of_a_room"} +{"Action":"pass","Test":"TestRoomAlias/Parallel/PUT_/directory/room/:room_alias_creates_alias"} +{"Action":"pass","Test":"TestRoomAlias/Parallel/Room_aliases_can_contain_Unicode"} 
+{"Action":"fail","Test":"TestRoomCanonicalAlias"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_accepts_present_aliases"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_accepts_present_alt_aliases"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alias_pointing_to_different_local_room"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alt_alias_pointing_to_different_local_room"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases#01"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_missing_aliases"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_missing_aliases#01"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_setting_rejects_deleted_aliases"} +{"Action":"pass","Test":"TestRoomCreate"} +{"Action":"pass","Test":"TestRoomCreate/Parallel"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/Can_/sync_newly_created_room"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_creates_a_room_with_the_given_version"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_ignores_attempts_to_set_the_room_version_via_creation_content"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_private_room"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_private_room_with_invites"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_public_room"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_room_with_a_name"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_room_with_a_topic"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_rejects_attempts_to_create_rooms_with_numeric_versions"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_rejects_attempts_to_create_rooms_with_unknown_versions"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/Rooms_can_be_created_with_an_initial_invite_list_(SYN-205)"} +{"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself"} +{"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel"} +{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Joining_room_twice_is_idempotent"} +{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.create_to_myself"} +{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.member_to_myself"} +{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_room_topic_reports_m.room.topic_to_myself"} +{"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_state_twice_is_idempotent"} +{"Action":"fail","Test":"TestRoomDeleteAlias"} +{"Action":"fail","Test":"TestRoomDeleteAlias/Parallel"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Alias_creators_can_delete_alias_with_no_ops"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Alias_creators_can_delete_canonical_alias_with_no_ops"} +{"Action":"fail","Test":"TestRoomDeleteAlias/Parallel/Can_delete_canonical_alias"} 
+{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Deleting_a_non-existent_alias_should_return_a_404"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Regular_users_can_add_and_delete_aliases_in_the_default_room_configuration"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Regular_users_can_add_and_delete_aliases_when_m.room.aliases_is_restricted"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Users_can't_delete_other's_aliases"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Users_with_sufficient_power-level_can_delete_other's_aliases"} +{"Action":"fail","Test":"TestRoomForget"} +{"Action":"fail","Test":"TestRoomForget/Parallel"} +{"Action":"pass","Test":"TestRoomForget/Parallel/Can't_forget_room_you're_still_in"} +{"Action":"pass","Test":"TestRoomForget/Parallel/Can_forget_room_we_weren't_an_actual_member"} +{"Action":"pass","Test":"TestRoomForget/Parallel/Can_forget_room_you've_been_kicked_from"} +{"Action":"pass","Test":"TestRoomForget/Parallel/Can_re-join_room_if_re-invited"} +{"Action":"pass","Test":"TestRoomForget/Parallel/Forgetting_room_does_not_show_up_in_v2_initial_/sync"} +{"Action":"fail","Test":"TestRoomForget/Parallel/Forgotten_room_messages_cannot_be_paginated"} +{"Action":"fail","Test":"TestRoomForget/Parallel/Leave_for_forgotten_room_shows_up_in_v2_incremental_/sync"} +{"Action":"pass","Test":"TestRoomImageRoundtrip"} +{"Action":"fail","Test":"TestRoomMembers"} +{"Action":"fail","Test":"TestRoomMembers/Parallel"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/join/:room_alias_can_join_a_room"} +{"Action":"fail","Test":"TestRoomMembers/Parallel/POST_/join/:room_alias_can_join_a_room_with_custom_content"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/join/:room_id_can_join_a_room"} +{"Action":"fail","Test":"TestRoomMembers/Parallel/POST_/join/:room_id_can_join_a_room_with_custom_content"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/rooms/:room_id/ban_can_ban_a_user"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/rooms/:room_id/invite_can_send_an_invite"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/rooms/:room_id/join_can_join_a_room"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/rooms/:room_id/leave_can_leave_a_room"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/Test_that_we_can_be_reinvited_to_a_room_we_created"} +{"Action":"pass","Test":"TestRoomMessagesLazyLoading"} +{"Action":"pass","Test":"TestRoomMessagesLazyLoadingLocalUser"} +{"Action":"pass","Test":"TestRoomReadMarkers"} +{"Action":"pass","Test":"TestRoomReceipts"} +{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin"} +{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_mxid"} +{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_profile_display_name"} +{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_mxid"} +{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Eve_cannot_find_Alice_by_room-specific_name_that_Eve_is_not_privy_to"} +{"Action":"fail","Test":"TestRoomSpecificUsernameChange"} +{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_mxid"} +{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_profile_display_name"} +{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_mxid"} 
+{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Eve_cannot_find_Alice_by_room-specific_name_that_Eve_is_not_privy_to"} +{"Action":"fail","Test":"TestRoomState"} +{"Action":"fail","Test":"TestRoomState/Parallel"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/directory/room/:room_alias_yields_room_ID"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/joined_rooms_lists_newly-created_room"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/publicRooms_lists_newly-created_room"} +{"Action":"fail","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/joined_members_fetches_my_membership"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/joined_members_is_forbidden_after_leaving_room"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.member/:user_id?format=event_fetches_my_membership_event"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.member/:user_id_fetches_my_membership"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.name_gets_name"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.power_levels_fetches_powerlevels"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.topic_gets_topic"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state_fetches_entire_room_state"} +{"Action":"pass","Test":"TestRoomState/Parallel/POST_/rooms/:room_id/state/m.room.name_sets_name"} +{"Action":"pass","Test":"TestRoomState/Parallel/PUT_/createRoom_with_creation_content"} +{"Action":"pass","Test":"TestRoomState/Parallel/PUT_/rooms/:room_id/state/m.room.topic_sets_topic"} +{"Action":"pass","Test":"TestRoomSummary"} +{"Action":"fail","Test":"TestRoomsInvite"} +{"Action":"fail","Test":"TestRoomsInvite/Parallel"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Can_invite_users_to_invite-only_rooms"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Invited_user_can_reject_invite"} +{"Action":"fail","Test":"TestRoomsInvite/Parallel/Invited_user_can_reject_invite_for_empty_room"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Invited_user_can_see_room_metadata"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Test_that_we_can_be_reinvited_to_a_room_we_created"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Uninvited_users_cannot_join_the_room"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Users_cannot_invite_a_user_that_is_already_in_the_room"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Users_cannot_invite_themselves_to_a_room"} +{"Action":"fail","Test":"TestSearch"} +{"Action":"fail","Test":"TestSearch/parallel"} +{"Action":"fail","Test":"TestSearch/parallel/Can_back-paginate_search_results"} +{"Action":"fail","Test":"TestSearch/parallel/Can_get_context_around_search_results"} +{"Action":"pass","Test":"TestSearch/parallel/Can_search_for_an_event_by_body"} +{"Action":"pass","Test":"TestSearch/parallel/Search_results_with_rank_ordering_do_not_include_redacted_events"} +{"Action":"pass","Test":"TestSearch/parallel/Search_results_with_recent_ordering_do_not_include_redacted_events"} +{"Action":"pass","Test":"TestSearch/parallel/Search_works_across_an_upgraded_room_and_its_predecessor"} +{"Action":"fail","Test":"TestSendAndFetchMessage"} {"Action":"skip","Test":"TestSendJoinPartialStateResponse"} +{"Action":"pass","Test":"TestSendMessageWithTxn"} 
+{"Action":"pass","Test":"TestServerCapabilities"} +{"Action":"skip","Test":"TestServerNotices"} +{"Action":"fail","Test":"TestSync"} +{"Action":"fail","Test":"TestSync/parallel"} +{"Action":"pass","Test":"TestSync/parallel/Can_sync_a_joined_room"} +{"Action":"fail","Test":"TestSync/parallel/Device_list_tracking"} +{"Action":"fail","Test":"TestSync/parallel/Device_list_tracking/User_is_correctly_listed_when_they_leave,_even_when_lazy_loading_is_enabled"} +{"Action":"pass","Test":"TestSync/parallel/Full_state_sync_includes_joined_rooms"} +{"Action":"fail","Test":"TestSync/parallel/Get_presence_for_newly_joined_members_in_incremental_sync"} +{"Action":"fail","Test":"TestSync/parallel/Newly_joined_room_has_correct_timeline_in_incremental_sync"} +{"Action":"fail","Test":"TestSync/parallel/Newly_joined_room_includes_presence_in_incremental_sync"} +{"Action":"pass","Test":"TestSync/parallel/Newly_joined_room_is_included_in_an_incremental_sync"} +{"Action":"fail","Test":"TestSync/parallel/sync_should_succeed_even_if_the_sync_token_points_to_a_redaction_of_an_unknown_event"} +{"Action":"pass","Test":"TestSyncFilter"} +{"Action":"pass","Test":"TestSyncFilter/Can_create_filter"} +{"Action":"pass","Test":"TestSyncFilter/Can_download_filter"} +{"Action":"fail","Test":"TestSyncLeaveSection"} +{"Action":"fail","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_full_state_sync"} +{"Action":"fail","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_sync"} +{"Action":"fail","Test":"TestSyncLeaveSection/Newly_left_rooms_appear_in_the_leave_section_of_incremental_sync"} {"Action":"pass","Test":"TestSyncOmitsStateChangeOnFilteredEvents"} +{"Action":"pass","Test":"TestSyncTimelineGap"} +{"Action":"pass","Test":"TestSyncTimelineGap/full"} +{"Action":"pass","Test":"TestSyncTimelineGap/incremental"} +{"Action":"fail","Test":"TestTentativeEventualJoiningAfterRejecting"} +{"Action":"fail","Test":"TestThreadReceiptsInSyncMSC4102"} +{"Action":"fail","Test":"TestThreadedReceipts"} +{"Action":"fail","Test":"TestThreadsEndpoint"} +{"Action":"pass","Test":"TestToDeviceMessages"} {"Action":"fail","Test":"TestToDeviceMessagesOverFederation"} {"Action":"pass","Test":"TestToDeviceMessagesOverFederation/good_connectivity"} {"Action":"pass","Test":"TestToDeviceMessagesOverFederation/interrupted_connectivity"} {"Action":"fail","Test":"TestToDeviceMessagesOverFederation/stopped_server"} +{"Action":"fail","Test":"TestTxnIdWithRefreshToken"} +{"Action":"fail","Test":"TestTxnIdempotency"} +{"Action":"pass","Test":"TestTxnIdempotencyScopedToDevice"} +{"Action":"pass","Test":"TestTxnInEvent"} +{"Action":"pass","Test":"TestTxnScopeOnLocalEcho"} +{"Action":"pass","Test":"TestTyping"} +{"Action":"pass","Test":"TestTyping/Typing_can_be_explicitly_stopped"} +{"Action":"pass","Test":"TestTyping/Typing_notification_sent_to_local_room_members"} +{"Action":"fail","Test":"TestUnbanViaInvite"} {"Action":"fail","Test":"TestUnknownEndpoints"} {"Action":"pass","Test":"TestUnknownEndpoints/Client-server_endpoints"} {"Action":"fail","Test":"TestUnknownEndpoints/Key_endpoints"} @@ -234,5 +629,27 @@ {"Action":"pass","Test":"TestUnknownEndpoints/Server-server_endpoints"} {"Action":"pass","Test":"TestUnknownEndpoints/Unknown_prefix"} {"Action":"fail","Test":"TestUnrejectRejectedEvents"} +{"Action":"fail","Test":"TestUploadKey"} +{"Action":"fail","Test":"TestUploadKey/Parallel"} +{"Action":"fail","Test":"TestUploadKey/Parallel/Can_claim_one_time_key_using_POST"} 
+{"Action":"pass","Test":"TestUploadKey/Parallel/Can_query_device_keys_using_POST"} +{"Action":"pass","Test":"TestUploadKey/Parallel/Can_query_specific_device_keys_using_POST"} +{"Action":"pass","Test":"TestUploadKey/Parallel/Can_upload_device_keys"} +{"Action":"fail","Test":"TestUploadKey/Parallel/Rejects_invalid_device_keys"} +{"Action":"fail","Test":"TestUploadKey/Parallel/Should_reject_keys_claiming_to_belong_to_a_different_user"} +{"Action":"pass","Test":"TestUploadKey/Parallel/query_for_user_with_no_keys_returns_empty_key_dict"} +{"Action":"pass","Test":"TestUploadKeyIdempotency"} +{"Action":"pass","Test":"TestUploadKeyIdempotencyOverlap"} +{"Action":"fail","Test":"TestUrlPreview"} {"Action":"pass","Test":"TestUserAppearsInChangedDeviceListOnJoinOverFederation"} +{"Action":"pass","Test":"TestVersionStructure"} +{"Action":"pass","Test":"TestVersionStructure/Version_responds_200_OK_with_valid_structure"} +{"Action":"pass","Test":"TestWithoutOwnedState"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_a_non-member_user_ID_as_state_key"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_another_suffixed_user_ID_as_state_key"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_another_user_ID_as_state_key"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_malformed_user_ID_as_state_key"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_their_own_suffixed_user_ID_as_state_key"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/user_can_set_state_with_their_own_user_ID_as_state_key"} {"Action":"pass","Test":"TestWriteMDirectAccountData"} From 5ad1100e0fdf41a380b445154b42bc09f38a64b5 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 19:48:06 -0500 Subject: [PATCH 217/328] bump our rocksdb fork Signed-off-by: June Clementine Strawberry --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- flake.lock | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 56ff3c6b..7dd24e2e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3685,7 +3685,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.33.0+9.11.1" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=3f4c5357243defedc849ae6227490102a9f90bef#3f4c5357243defedc849ae6227490102a9f90bef" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=d05c8bd7ba8814de1731ec0ae29e863c8ecb7206#d05c8bd7ba8814de1731ec0ae29e863c8ecb7206" dependencies = [ "bindgen", "bzip2-sys", @@ -3702,7 +3702,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.37.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=3f4c5357243defedc849ae6227490102a9f90bef#3f4c5357243defedc849ae6227490102a9f90bef" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=d05c8bd7ba8814de1731ec0ae29e863c8ecb7206#d05c8bd7ba8814de1731ec0ae29e863c8ecb7206" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index 43b2d55d..a9f1abb3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -383,7 +383,7 @@ features = [ [workspace.dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "3f4c5357243defedc849ae6227490102a9f90bef" +rev = "d05c8bd7ba8814de1731ec0ae29e863c8ecb7206" default-features = false 
features = [ "multi-threaded-cf", diff --git a/flake.lock b/flake.lock index 3a43c4cd..c3292cbc 100644 --- a/flake.lock +++ b/flake.lock @@ -567,11 +567,11 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1741303627, - "narHash": "sha256-7HpydEinYHvskC4vkl1Yie2kg2yShfZbREAyQMkvEUc=", + "lastModified": 1741308171, + "narHash": "sha256-YdBvdQ75UJg5ffwNjxizpviCVwVDJnBkM8ZtGIduMgY=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "cecee0e4fbff2b69e3edc6e9b5b751d8098a3ba1", + "rev": "3ce04794bcfbbb0d2e6f81ae35fc4acf688b6986", "type": "github" }, "original": { From fe65648296b1827841c3e2a602cc78bd1af0a9b5 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 20:10:32 -0500 Subject: [PATCH 218/328] remove unnecessary map_err Signed-off-by: June Clementine Strawberry --- src/service/rooms/timeline/mod.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 276b8b6a..826a1dae 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -344,7 +344,7 @@ impl Service { let insert_lock = self.mutex_insert.lock(&pdu.room_id).await; - let count1 = self.services.globals.next_count()?; + let count1 = self.services.globals.next_count().unwrap(); // Mark as read first so the sending client doesn't get a notification even if // appending fails self.services @@ -362,13 +362,12 @@ impl Service { drop(insert_lock); - // See if the event matches any known pushers + // See if the event matches any known pushers via power level let power_levels: RoomPowerLevelsEventContent = self .services .state_accessor .room_state_get_content(&pdu.room_id, &StateEventType::RoomPowerLevels, "") .await - .map_err(|e| err!(Database(warn!("invalid m.room.power_levels event: {e}")))) .unwrap_or_default(); let sync_pdu = pdu.to_sync_room_event(); From 2c58a6efda4f0ae7fa7b5ad05758489b5ff2e5f5 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Fri, 7 Mar 2025 00:54:30 -0500 Subject: [PATCH 219/328] allow broken no-op deny+allow room server ACL keys Signed-off-by: June Clementine Strawberry --- src/service/rooms/event_handler/acl_check.rs | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/service/rooms/event_handler/acl_check.rs b/src/service/rooms/event_handler/acl_check.rs index 6b432a4b..f847015b 100644 --- a/src/service/rooms/event_handler/acl_check.rs +++ b/src/service/rooms/event_handler/acl_check.rs @@ -14,14 +14,21 @@ pub async fn acl_check(&self, server_name: &ServerName, room_id: &RoomId) -> Res .room_state_get_content(room_id, &StateEventType::RoomServerAcl, "") .await .map(|c: RoomServerAclEventContent| c) - .inspect(|acl| trace!("ACL content found: {acl:?}")) - .inspect_err(|e| trace!("No ACL content found: {e:?}")) + .inspect(|acl| trace!(%room_id, "ACL content found: {acl:?}")) + .inspect_err(|e| trace!(%room_id, "No ACL content found: {e:?}")) else { return Ok(()); }; if acl_event_content.allow.is_empty() { - warn!("Ignoring broken ACL event (allow key is empty)"); + warn!(%room_id, "Ignoring broken ACL event (allow key is empty)"); + return Ok(()); + } + + if acl_event_content.deny.contains(&String::from("*")) + && acl_event_content.allow.contains(&String::from("*")) + { + warn!(%room_id, "Ignoring broken ACL event (allow key and deny key both contain wildcard \"*\""); return Ok(()); } From 4f882c3bd8adfa86edc504396f6cd45b56fd8b62 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Fri, 7 Mar 2025 
00:57:39 -0500 Subject: [PATCH 220/328] add some ACL paw-gun checks, better `PUT` state event validation Signed-off-by: June Clementine Strawberry --- src/api/client/keys.rs | 23 +++- src/api/client/state.rs | 253 +++++++++++++++++++++++++--------------- 2 files changed, 178 insertions(+), 98 deletions(-) diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 8a7eab7e..4c1c986a 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -1,7 +1,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use axum::extract::State; -use conduwuit::{Err, Error, Result, debug, err, info, result::NotFound, utils}; +use conduwuit::{Err, Error, Result, debug, debug_warn, err, info, result::NotFound, utils}; use futures::{StreamExt, stream::FuturesUnordered}; use ruma::{ OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, @@ -41,6 +41,20 @@ pub(crate) async fn upload_keys_route( let (sender_user, sender_device) = body.sender(); for (key_id, one_time_key) in &body.one_time_keys { + if one_time_key + .deserialize() + .inspect_err(|e| { + debug_warn!( + ?key_id, + ?one_time_key, + "Invalid one time key JSON submitted by client, skipping: {e}" + ) + }) + .is_err() + { + continue; + } + services .users .add_one_time_key(sender_user, sender_device, key_id, one_time_key) @@ -48,7 +62,12 @@ pub(crate) async fn upload_keys_route( } if let Some(device_keys) = &body.device_keys { - let deser_device_keys = device_keys.deserialize()?; + let deser_device_keys = device_keys.deserialize().map_err(|e| { + err!(Request(BadJson(debug_warn!( + ?device_keys, + "Invalid device keys JSON uploaded by client: {e}" + )))) + })?; if deser_device_keys.user_id != sender_user { return Err!(Request(Unknown( diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 6353fe1c..c92091eb 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -11,6 +11,7 @@ use ruma::{ history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, + server_acl::RoomServerAclEventContent, }, }, serde::Raw, @@ -194,134 +195,194 @@ async fn allowed_to_send_state_event( ) -> Result { match event_type { | StateEventType::RoomCreate => { - return Err!(Request(BadJson( + return Err!(Request(BadJson(debug_warn!( + ?room_id, "You cannot update m.room.create after a room has been created." - ))); + )))); + }, + | StateEventType::RoomServerAcl => { + // prevents common ACL paw-guns as ACL management is difficult and prone to + // irreversible mistakes + match json.deserialize_as::() { + | Ok(acl_content) => { + if acl_content.allow.is_empty() { + return Err!(Request(BadJson(debug_warn!( + ?room_id, + "Sending an ACL event with an empty allow key will permanently \ + brick the room for non-conduwuit's as this equates to no servers \ + being allowed to participate in this room." + )))); + } + + if acl_content.deny.contains(&String::from("*")) + && acl_content.allow.contains(&String::from("*")) + { + return Err!(Request(BadJson(debug_warn!( + ?room_id, + "Sending an ACL event with a deny and allow key value of \"*\" will \ + permanently brick the room for non-conduwuit's as this equates to \ + no servers being allowed to participate in this room." 
+ )))); + } + + if acl_content.deny.contains(&String::from("*")) + && !acl_content.is_allowed(services.globals.server_name()) + { + return Err!(Request(BadJson(debug_warn!( + ?room_id, + "Sending an ACL event with a deny key value of \"*\" and without \ + your own server name in the allow key will result in you being \ + unable to participate in this room." + )))); + } + + if !acl_content.allow.contains(&String::from("*")) + && !acl_content.is_allowed(services.globals.server_name()) + { + return Err!(Request(BadJson(debug_warn!( + ?room_id, + "Sending an ACL event for an allow key without \"*\" and without \ + your own server name in the allow key will result in you being \ + unable to participate in this room." + )))); + } + }, + | Err(e) => { + return Err!(Request(BadJson(debug_warn!( + "Room server ACL event is invalid: {e}" + )))); + }, + }; }, - // Forbid m.room.encryption if encryption is disabled | StateEventType::RoomEncryption => - if !services.globals.allow_encryption() { + // Forbid m.room.encryption if encryption is disabled + if !services.config.allow_encryption { return Err!(Request(Forbidden("Encryption is disabled on this homeserver."))); }, - // admin room is a sensitive room, it should not ever be made public | StateEventType::RoomJoinRules => { + // admin room is a sensitive room, it should not ever be made public if let Ok(admin_room_id) = services.admin.get_admin_room().await { if admin_room_id == room_id { - if let Ok(join_rule) = - serde_json::from_str::(json.json().get()) - { - if join_rule.join_rule == JoinRule::Public { - return Err!(Request(Forbidden( - "Admin room is a sensitive room, it cannot be made public" - ))); - } + match json.deserialize_as::() { + | Ok(join_rule) => + if join_rule.join_rule == JoinRule::Public { + return Err!(Request(Forbidden( + "Admin room is a sensitive room, it cannot be made public" + ))); + }, + | Err(e) => { + return Err!(Request(BadJson(debug_warn!( + "Room join rules event is invalid: {e}" + )))); + }, } } } }, - // admin room is a sensitive room, it should not ever be made world readable | StateEventType::RoomHistoryVisibility => { - if let Ok(visibility_content) = - serde_json::from_str::(json.json().get()) - { - if let Ok(admin_room_id) = services.admin.get_admin_room().await { - if admin_room_id == room_id - && visibility_content.history_visibility - == HistoryVisibility::WorldReadable - { - return Err!(Request(Forbidden( - "Admin room is a sensitive room, it cannot be made world readable \ - (public room history)." - ))); - } + // admin room is a sensitive room, it should not ever be made world readable + if let Ok(admin_room_id) = services.admin.get_admin_room().await { + match json.deserialize_as::() { + | Ok(visibility_content) => { + if admin_room_id == room_id + && visibility_content.history_visibility + == HistoryVisibility::WorldReadable + { + return Err!(Request(Forbidden( + "Admin room is a sensitive room, it cannot be made world \ + readable (public room history)." 
+ ))); + } + }, + | Err(e) => { + return Err!(Request(BadJson(debug_warn!( + "Room history visibility event is invalid: {e}" + )))); + }, } } }, | StateEventType::RoomCanonicalAlias => { - if let Ok(canonical_alias) = - serde_json::from_str::(json.json().get()) - { - let mut aliases = canonical_alias.alt_aliases.clone(); + match json.deserialize_as::() { + | Ok(canonical_alias_content) => { + let mut aliases = canonical_alias_content.alt_aliases.clone(); - if let Some(alias) = canonical_alias.alias { - aliases.push(alias); - } + if let Some(alias) = canonical_alias_content.alias { + aliases.push(alias); + } - for alias in aliases { - if !services.globals.server_is_ours(alias.server_name()) { - return Err!(Request(Forbidden( - "canonical_alias must be for this server" + for alias in aliases { + let (alias_room_id, _servers) = + services.rooms.alias.resolve_alias(&alias, None).await?; + + if alias_room_id != room_id { + return Err!(Request(Forbidden( + "Room alias {alias} does not belong to room {room_id}" + ))); + } + } + }, + | Err(e) => { + return Err!(Request(BadJson(debug_warn!( + "Room canonical alias event is invalid: {e}" + )))); + }, + } + }, + | StateEventType::RoomMember => match json.deserialize_as::() { + | Ok(membership_content) => { + let Ok(state_key) = UserId::parse(state_key) else { + return Err!(Request(BadJson( + "Membership event has invalid or non-existent state key" + ))); + }; + + if let Some(authorising_user) = + membership_content.join_authorized_via_users_server + { + if membership_content.membership != MembershipState::Join { + return Err!(Request(BadJson( + "join_authorised_via_users_server is only for member joins" + ))); + } + + if services + .rooms + .state_cache + .is_joined(state_key, room_id) + .await + { + return Err!(Request(InvalidParam( + "{state_key} is already joined, an authorising user is not required." + ))); + } + + if !services.globals.user_is_local(&authorising_user) { + return Err!(Request(InvalidParam( + "Authorising user {authorising_user} does not belong to this \ + homeserver" ))); } if !services .rooms - .alias - .resolve_local_alias(&alias) + .state_cache + .is_joined(&authorising_user, room_id) .await - .is_ok_and(|room| room == room_id) - // Make sure it's the right room { - return Err!(Request(Forbidden( - "You are only allowed to send canonical_alias events when its \ - aliases already exist" + return Err!(Request(InvalidParam( + "Authorising user {authorising_user} is not in the room, they \ + cannot authorise the join." ))); } } - } - }, - | StateEventType::RoomMember => { - let Ok(membership_content) = - serde_json::from_str::(json.json().get()) - else { + }, + | Err(e) => { return Err!(Request(BadJson( "Membership content must have a valid JSON body with at least a valid \ - membership state." + membership state: {e}" ))); - }; - - let Ok(state_key) = UserId::parse(state_key) else { - return Err!(Request(BadJson( - "Membership event has invalid or non-existent state key" - ))); - }; - - if let Some(authorising_user) = membership_content.join_authorized_via_users_server { - if membership_content.membership != MembershipState::Join { - return Err!(Request(BadJson( - "join_authorised_via_users_server is only for member joins" - ))); - } - - if services - .rooms - .state_cache - .is_joined(state_key, room_id) - .await - { - return Err!(Request(InvalidParam( - "{state_key} is already joined, an authorising user is not required." 
- ))); - } - - if !services.globals.user_is_local(&authorising_user) { - return Err!(Request(InvalidParam( - "Authorising user {authorising_user} does not belong to this homeserver" - ))); - } - - if !services - .rooms - .state_cache - .is_joined(&authorising_user, room_id) - .await - { - return Err!(Request(InvalidParam( - "Authorising user {authorising_user} is not in the room, they cannot \ - authorise the join." - ))); - } - } + }, }, | _ => (), } From 8b3f62919831650a8198ca751dd1892e9889a51d Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Fri, 7 Mar 2025 00:57:47 -0500 Subject: [PATCH 221/328] bump rust-rocksdb Signed-off-by: June Clementine Strawberry --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7dd24e2e..a224ad0f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3685,7 +3685,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.33.0+9.11.1" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=d05c8bd7ba8814de1731ec0ae29e863c8ecb7206#d05c8bd7ba8814de1731ec0ae29e863c8ecb7206" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2e692ae026881fc385f111fdcfba38bee98f1e47#2e692ae026881fc385f111fdcfba38bee98f1e47" dependencies = [ "bindgen", "bzip2-sys", @@ -3702,7 +3702,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.37.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=d05c8bd7ba8814de1731ec0ae29e863c8ecb7206#d05c8bd7ba8814de1731ec0ae29e863c8ecb7206" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2e692ae026881fc385f111fdcfba38bee98f1e47#2e692ae026881fc385f111fdcfba38bee98f1e47" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index a9f1abb3..0b08cd8f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -383,7 +383,7 @@ features = [ [workspace.dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "d05c8bd7ba8814de1731ec0ae29e863c8ecb7206" +rev = "2e692ae026881fc385f111fdcfba38bee98f1e47" default-features = false features = [ "multi-threaded-cf", From 6052c0c8a2c5722a5ca057576ba174f8f72ab9e0 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Fri, 7 Mar 2025 01:04:53 -0500 Subject: [PATCH 222/328] ci: allow ourselves to write to the public docs directory Signed-off-by: June Clementine Strawberry --- .github/workflows/documentation.yml | 1 + conduwuit-example.toml | 2 +- src/api/client/keys.rs | 2 +- src/core/config/mod.rs | 2 +- 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 88e7bbe1..b5b4ff46 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -81,6 +81,7 @@ jobs: bin/nix-build-and-cache just .#book cp -r --dereference result public + chmod u+w -R public - name: Upload generated documentation (book) as normal artifact uses: actions/upload-artifact@v4 diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 541f062d..3d4b15bc 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -593,7 +593,7 @@ # Currently, conduwuit doesn't support inbound batched key requests, so # this list should only contain other Synapse servers. 
# -# example: ["matrix.org", "envs.net", "constellatory.net", "tchncs.de"] +# example: ["matrix.org", "envs.net", "tchncs.de"] # #trusted_servers = ["matrix.org"] diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 4c1c986a..9cd50e85 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -48,7 +48,7 @@ pub(crate) async fn upload_keys_route( ?key_id, ?one_time_key, "Invalid one time key JSON submitted by client, skipping: {e}" - ) + ); }) .is_err() { diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 5a4819e0..a82f5f53 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -713,7 +713,7 @@ pub struct Config { /// Currently, conduwuit doesn't support inbound batched key requests, so /// this list should only contain other Synapse servers. /// - /// example: ["matrix.org", "envs.net", "constellatory.net", "tchncs.de"] + /// example: ["matrix.org", "envs.net", "tchncs.de"] /// /// default: ["matrix.org"] #[serde(default = "default_trusted_servers")] From 298b58c069534833cfd027510ad7683e18d71e7a Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 7 Mar 2025 21:44:33 -0500 Subject: [PATCH 223/328] set file_shape for roomsynctoken_shortstatehash to 3, remove rust-rocksdb package spec Signed-off-by: strawberry --- Cargo.toml | 21 --------------------- src/database/maps.rs | 1 + 2 files changed, 1 insertion(+), 21 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 0b08cd8f..c48be06a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -752,27 +752,6 @@ inherits = "dev" # '-Clink-arg=-Wl,-z,lazy', #] -[profile.dev.package.rust-rocksdb] -inherits = "dev" -debug = 'limited' -incremental = false -codegen-units = 1 -opt-level = 'z' -#rustflags = [ -# '--cfg', 'conduwuit_mods', -# '-Ztls-model=initial-exec', -# '-Cprefer-dynamic=true', -# '-Zstaticlib-prefer-dynamic=true', -# '-Zstaticlib-allow-rdylib-deps=true', -# '-Zpacked-bundled-libs=true', -# '-Zplt=true', -# '-Clink-arg=-Wl,--no-as-needed', -# '-Clink-arg=-Wl,--allow-shlib-undefined', -# '-Clink-arg=-Wl,-z,lazy', -# '-Clink-arg=-Wl,-z,nodlopen', -# '-Clink-arg=-Wl,-z,nodelete', -#] - [profile.dev.package.'*'] inherits = "dev" debug = 'limited' diff --git a/src/database/maps.rs b/src/database/maps.rs index 9af45159..138bb038 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -169,6 +169,7 @@ pub(super) static MAPS: &[Descriptor] = &[ }, Descriptor { name: "roomsynctoken_shortstatehash", + file_shape: 3, val_size_hint: Some(8), block_size: 512, compression_level: 3, From 51d29bc1cbca84c001c3b4efbfca9c34a9b94f37 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 7 Mar 2025 21:44:52 -0500 Subject: [PATCH 224/328] bump complement Signed-off-by: strawberry --- flake.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flake.lock b/flake.lock index c3292cbc..03fc205c 100644 --- a/flake.lock +++ b/flake.lock @@ -80,11 +80,11 @@ "complement": { "flake": false, "locked": { - "lastModified": 1740291865, - "narHash": "sha256-wl1+yCTEtvIH8vgXygnxPkaSgg4MYNKs+c9tzVytr20=", + "lastModified": 1741378155, + "narHash": "sha256-rJSfqf3q4oWxcAwENtAowLZeCi8lktwKVH9XQvvZR64=", "owner": "girlbossceo", "repo": "complement", - "rev": "35ad9d9051498fbac8ea4abff8ab7d8b1844f87b", + "rev": "1502a00d8551d0f6e8954a23e43868877c3e57d9", "type": "github" }, "original": { From 90fee4f50eb5a0f81390e088f60265ab4974370e Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 8 Mar 2025 00:15:13 -0500 Subject: [PATCH 225/328] add gotestfmt log output to complement script and CI output 
Signed-off-by: strawberry --- .github/workflows/ci.yml | 30 +++++++++++++++++++----------- bin/complement | 22 +++++++++++++++++----- flake.nix | 1 + 3 files changed, 37 insertions(+), 16 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c0425873..c8fef47f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -176,6 +176,13 @@ jobs: path: complement_test_results.jsonl if-no-files-found: error + - name: Upload Complement logs (gotestfmt) + uses: actions/upload-artifact@v4 + with: + name: complement_test_logs_gotestfmt.log + path: complement_test_logs_gotestfmt.log + if-no-files-found: error + - name: Diff Complement results with checked-in repo results run: | diff -u --color=always tests/test_results/complement/test_results.jsonl complement_test_results.jsonl > >(tee -a complement_diff_output.log) @@ -186,22 +193,23 @@ jobs: if: success() || failure() run: | if [ ${GH_JOB_STATUS} == 'success' ]; then - echo '# ✅ completed suwuccessfully' >> $GITHUB_STEP_SUMMARY + echo '# ✅ CI completed suwuccessfully' >> $GITHUB_STEP_SUMMARY else - echo '# CI failure' >> $GITHUB_STEP_SUMMARY + echo '# ❌ CI failed (last 100 lines of output)' >> $GITHUB_STEP_SUMMARY echo '```' >> $GITHUB_STEP_SUMMARY - tail -n 40 test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - - echo '# Complement diff results' >> $GITHUB_STEP_SUMMARY - echo '```diff' >> $GITHUB_STEP_SUMMARY - tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY + tail -n 100 test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY echo '```' >> $GITHUB_STEP_SUMMARY fi - - name: Run cargo clean test artifacts to free up space - run: | - cargo clean --profile test + echo '# Complement diff results (last 100 lines)' >> $GITHUB_STEP_SUMMARY + echo '```diff' >> $GITHUB_STEP_SUMMARY + tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + + echo '# Complement gotestfmt logs (last 100 lines)' >> $GITHUB_STEP_SUMMARY + echo '```diff' >> $GITHUB_STEP_SUMMARY + tail -n 100 complement_test_logs_gotestfmt.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY build: name: Build diff --git a/bin/complement b/bin/complement index 9960299c..aec27c5b 100755 --- a/bin/complement +++ b/bin/complement @@ -10,15 +10,15 @@ set -euo pipefail COMPLEMENT_SRC="${COMPLEMENT_SRC:-$1}" # A `.jsonl` file to write test logs to -LOG_FILE="$2" +LOG_FILE="${2:-complement_test_logs.jsonl}" # A `.jsonl` file to write test results to -RESULTS_FILE="$3" +RESULTS_FILE="${3:-complement_test_results.jsonl}" OCI_IMAGE="complement-conduwuit:main" -# Complement tests that are skipped due to flakiness/reliability issues -SKIPPED_COMPLEMENT_TESTS='-skip=TestPartialStateJoin.*' +# Complement tests that are skipped due to flakiness/reliability issues or we don't implement such features and won't for a long time +#SKIPPED_COMPLEMENT_TESTS='-skip=TestPartialStateJoin.*' # $COMPLEMENT_SRC needs to be a directory to Complement source code if [ -f "$COMPLEMENT_SRC" ]; then @@ -34,6 +34,7 @@ toplevel="$(git rev-parse --show-toplevel)" pushd "$toplevel" > /dev/null +# if using macOS, use linux-complement #bin/nix-build-and-cache just .#linux-complement bin/nix-build-and-cache just .#complement @@ -45,7 +46,8 @@ set +o pipefail env \ -C "$COMPLEMENT_SRC" \ COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \ - go test -tags="conduwuit_blacklist" 
"$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests/... | tee "$LOG_FILE" + COMPLEMENT_ENABLE_DIRTY_RUNS=1 \ # reuses the same complement container for faster complement, at the possible expense of test environment pollution + go test -tags="conduwuit_blacklist" -v -timeout 1h -json ./tests/... | tee "$LOG_FILE" set -o pipefail # Post-process the results into an easy-to-compare format, sorted by Test name for reproducible results @@ -55,3 +57,13 @@ cat "$LOG_FILE" | jq -s -c 'sort_by(.Test)[]' | jq -c ' and .Test != null ) | {Action: .Action, Test: .Test} ' > "$RESULTS_FILE" + +grep '^{"Time":' "$LOG_FILE" | gotestfmt > "${LOG_FILE}_gotestfmt.log" + +echo "" +echo "" +echo "complement logs saved at $LOG_FILE" +echo "complement results saved at $RESULTS_FILE" +echo "complement logs in gotestfmt pretty format outputted at ${LOG_FILE}_gotestfmt.log (use an editor/terminal that interprets ANSI colours)" +echo "" +echo "" diff --git a/flake.nix b/flake.nix index 8f08a7d9..544cdd4a 100644 --- a/flake.nix +++ b/flake.nix @@ -161,6 +161,7 @@ # Needed for our script for Complement jq + gotestfmt # Needed for finding broken markdown links lychee From 5a3264980aee8f5869eb953e82c01b62c2ac5bed Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 8 Mar 2025 01:35:26 -0500 Subject: [PATCH 226/328] adjust complement script to allow using your own hs OCI image without nix Signed-off-by: strawberry --- bin/complement | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/bin/complement b/bin/complement index aec27c5b..47c02843 100755 --- a/bin/complement +++ b/bin/complement @@ -15,7 +15,7 @@ LOG_FILE="${2:-complement_test_logs.jsonl}" # A `.jsonl` file to write test results to RESULTS_FILE="${3:-complement_test_results.jsonl}" -OCI_IMAGE="complement-conduwuit:main" +COMPLEMENT_OCI_IMAGE="${COMPLEMENT_OCI_IMAGE:-complement-conduwuit:main}" # Complement tests that are skipped due to flakiness/reliability issues or we don't implement such features and won't for a long time #SKIPPED_COMPLEMENT_TESTS='-skip=TestPartialStateJoin.*' @@ -34,18 +34,38 @@ toplevel="$(git rev-parse --show-toplevel)" pushd "$toplevel" > /dev/null -# if using macOS, use linux-complement -#bin/nix-build-and-cache just .#linux-complement -bin/nix-build-and-cache just .#complement +if [ ! -f "complement_oci_image.tar.gz" ]; then + echo "building complement conduwuit image" -docker load < result -popd > /dev/null + # if using macOS, use linux-complement + #bin/nix-build-and-cache just .#linux-complement + bin/nix-build-and-cache just .#complement + + echo "complement conduwuit image tar.gz built at \"result\"" + + echo "loading into docker" + docker load < result + popd > /dev/null +else + echo "skipping building a complement conduwuit image as complement_oci_image.tar.gz was already found, loading this" + + docker load < complement_oci_image.tar.gz + popd > /dev/null +fi + +echo "" +echo "running go test with:" +echo "\$COMPLEMENT_SRC: $COMPLEMENT_SRC" +echo "\$COMPLEMENT_BASE_IMAGE: $COMPLEMENT_BASE_IMAGE" +echo "\$RESULTS_FILE: $RESULTS_FILE" +echo "\$LOG_FILE: $LOG_FILE" +echo "" # It's okay (likely, even) that `go test` exits nonzero set +o pipefail env \ -C "$COMPLEMENT_SRC" \ - COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \ + COMPLEMENT_BASE_IMAGE="$COMPLEMENT_OCI_IMAGE" \ COMPLEMENT_ENABLE_DIRTY_RUNS=1 \ # reuses the same complement container for faster complement, at the possible expense of test environment pollution go test -tags="conduwuit_blacklist" -v -timeout 1h -json ./tests/... 
| tee "$LOG_FILE" set -o pipefail From bb0b57efb8d8d89fce0392e7c6c34c169ba054b8 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 8 Mar 2025 02:30:58 -0500 Subject: [PATCH 227/328] bump rust-rocksdb Signed-off-by: strawberry --- Cargo.lock | 26 ++++++++++++++++++++++---- Cargo.toml | 2 +- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a224ad0f..8d4688f5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -236,7 +236,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f9dd2e03ee80ca2822dd6ea431163d2ef259f2066a4d6ccaca6d9dcb386aa43" dependencies = [ - "bindgen", + "bindgen 0.69.5", "cc", "cmake", "dunce", @@ -431,6 +431,24 @@ dependencies = [ "which", ] +[[package]] +name = "bindgen" +version = "0.71.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" +dependencies = [ + "bitflags 2.9.0", + "cexpr", + "clang-sys", + "itertools 0.12.1", + "proc-macro2", + "quote", + "regex", + "rustc-hash 2.1.1", + "shlex", + "syn 2.0.98", +] + [[package]] name = "bit_field" version = "0.10.2" @@ -3685,9 +3703,9 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.33.0+9.11.1" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2e692ae026881fc385f111fdcfba38bee98f1e47#2e692ae026881fc385f111fdcfba38bee98f1e47" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1c267e0bf0cc7b7702e9a329deccd89de79ef4c3#1c267e0bf0cc7b7702e9a329deccd89de79ef4c3" dependencies = [ - "bindgen", + "bindgen 0.71.1", "bzip2-sys", "cc", "glob", @@ -3702,7 +3720,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.37.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2e692ae026881fc385f111fdcfba38bee98f1e47#2e692ae026881fc385f111fdcfba38bee98f1e47" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1c267e0bf0cc7b7702e9a329deccd89de79ef4c3#1c267e0bf0cc7b7702e9a329deccd89de79ef4c3" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index c48be06a..de90e63e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -383,7 +383,7 @@ features = [ [workspace.dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "2e692ae026881fc385f111fdcfba38bee98f1e47" +rev = "1c267e0bf0cc7b7702e9a329deccd89de79ef4c3" default-features = false features = [ "multi-threaded-cf", From c8a730c29e3ec5c9d38028b89f3fd26ed546ef8f Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 8 Mar 2025 03:07:42 -0500 Subject: [PATCH 228/328] implement MSC4267 automatically forgetting room on leave Signed-off-by: strawberry --- conduwuit-example.toml | 11 ++++++++++- src/api/client/capabilities.rs | 7 +++++++ src/core/config/mod.rs | 10 ++++++++++ src/service/rooms/state_cache/mod.rs | 8 ++++++-- 4 files changed, 33 insertions(+), 3 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 3d4b15bc..15e6dd37 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -445,10 +445,19 @@ # #allow_federation = true -# This item is undocumented. Please contribute documentation for it. +# Allows federation requests to be made to itself +# +# This isn't intended and is very likely a bug if federation requests are +# being sent to yourself. This currently mainly exists for development +# purposes. # #federation_loopback = false +# Always calls /forget on behalf of the user if leaving a room. 
This is a +# part of MSC4267 "Automatically forgetting rooms on leave" +# +#forget_forced_upon_leave = false + # Set this to true to require authentication on the normally # unauthenticated profile retrieval endpoints (GET) # "/_matrix/client/v3/profile/{userId}". diff --git a/src/api/client/capabilities.rs b/src/api/client/capabilities.rs index e20af21b..470ff6ab 100644 --- a/src/api/client/capabilities.rs +++ b/src/api/client/capabilities.rs @@ -42,5 +42,12 @@ pub(crate) async fn get_capabilities_route( .set("uk.tcpip.msc4133.profile_fields", json!({"enabled": true})) .expect("this is valid JSON we created"); + capabilities + .set( + "org.matrix.msc4267.forget_forced_upon_leave", + json!({"enabled": services.config.forget_forced_upon_leave}), + ) + .expect("valid JSON we created"); + Ok(get_capabilities::v3::Response { capabilities }) } diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index a82f5f53..e69a56b9 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -558,9 +558,19 @@ pub struct Config { #[serde(default = "true_fn")] pub allow_federation: bool, + /// Allows federation requests to be made to itself + /// + /// This isn't intended and is very likely a bug if federation requests are + /// being sent to yourself. This currently mainly exists for development + /// purposes. #[serde(default)] pub federation_loopback: bool, + /// Always calls /forget on behalf of the user if leaving a room. This is a + /// part of MSC4267 "Automatically forgetting rooms on leave" + #[serde(default)] + pub forget_forced_upon_leave: bool, + /// Set this to true to require authentication on the normally /// unauthenticated profile retrieval endpoints (GET) /// "/_matrix/client/v3/profile/{userId}". diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index f406eb69..23ba0520 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -28,7 +28,7 @@ use ruma::{ serde::Raw, }; -use crate::{Dep, account_data, appservice::RegistrationInfo, globals, rooms, users}; +use crate::{Dep, account_data, appservice::RegistrationInfo, config, globals, rooms, users}; pub struct Service { appservice_in_room_cache: AppServiceInRoomCache, @@ -38,6 +38,7 @@ pub struct Service { struct Services { account_data: Dep, + config: Dep, globals: Dep, state_accessor: Dep, users: Dep, @@ -70,6 +71,7 @@ impl crate::Service for Service { appservice_in_room_cache: RwLock::new(HashMap::new()), services: Services { account_data: args.depend::("account_data"), + config: args.depend::("config"), globals: args.depend::("globals"), state_accessor: args .depend::("rooms::state_accessor"), @@ -268,7 +270,9 @@ impl Service { | MembershipState::Leave | MembershipState::Ban => { self.mark_as_left(user_id, room_id); - if self.services.globals.user_is_local(user_id) { + if self.services.globals.user_is_local(user_id) + && self.services.config.forget_forced_upon_leave + { self.forget(room_id, user_id); } }, From ef96e7afac81ffa6e3335144644277e4ac28658b Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 8 Mar 2025 13:52:56 -0500 Subject: [PATCH 229/328] add cargo auditable for future use, ignore paste dependency being unmaintained for now Signed-off-by: strawberry Signed-off-by: June Clementine Strawberry --- .cargo/audit.toml | 27 +++++++++++++++++++++++++++ engage.toml | 2 +- flake.nix | 8 +++++--- 3 files changed, 33 insertions(+), 4 deletions(-) create mode 100644 .cargo/audit.toml diff --git a/.cargo/audit.toml b/.cargo/audit.toml new file mode 
100644 index 00000000..bf44fbd6 --- /dev/null +++ b/.cargo/audit.toml @@ -0,0 +1,27 @@ +[advisories] +ignore = ["RUSTSEC-2024-0436"] # advisory IDs to ignore e.g. ["RUSTSEC-2019-0001", ...] +informational_warnings = [] # warn for categories of informational advisories +severity_threshold = "none" # CVSS severity ("none", "low", "medium", "high", "critical") + +# Advisory Database Configuration +[database] +path = "~/.cargo/advisory-db" # Path where advisory git repo will be cloned +url = "https://github.com/RustSec/advisory-db.git" # URL to git repo +fetch = true # Perform a `git fetch` before auditing (default: true) +stale = false # Allow stale advisory DB (i.e. no commits for 90 days, default: false) + +# Output Configuration +[output] +deny = ["warnings", "unmaintained", "unsound", "yanked"] # exit on error if unmaintained dependencies are found +format = "terminal" # "terminal" (human readable report) or "json" +quiet = false # Only print information on error +show_tree = true # Show inverse dependency trees along with advisories (default: true) + +# Target Configuration +[target] +arch = ["x86_64", "aarch64"] # Ignore advisories for CPU architectures other than these +os = ["linux", "windows", "macos"] # Ignore advisories for operating systems other than these + +[yanked] +enabled = true # Warn for yanked crates in Cargo.lock (default: true) +update_index = true # Auto-update the crates.io index (default: true) diff --git a/engage.toml b/engage.toml index 71366532..0a857b5a 100644 --- a/engage.toml +++ b/engage.toml @@ -63,7 +63,7 @@ script = "markdownlint --version" [[task]] name = "cargo-audit" group = "security" -script = "cargo audit -D warnings -D unmaintained -D unsound -D yanked" +script = "cargo audit --color=always -D warnings -D unmaintained -D unsound -D yanked" [[task]] name = "cargo-fmt" diff --git a/flake.nix b/flake.nix index 544cdd4a..9db2e90a 100644 --- a/flake.nix +++ b/flake.nix @@ -144,18 +144,20 @@ toolchain ] ++ (with pkgsHost.pkgs; [ - engage - cargo-audit - # Required by hardened-malloc.rs dep binutils + cargo-audit + cargo-auditable + # Needed for producing Debian packages cargo-deb # Needed for CI to check validity of produced Debian packages (dpkg-deb) dpkg + engage + # Needed for Complement go From 5efe804a207420482dc5c57b8db044c5818d5037 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 8 Mar 2025 15:48:23 -0500 Subject: [PATCH 230/328] always disable fed, evict admins, and forget the room when banning a room Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 20 +-- bin/complement | 18 +- nix/pkgs/main/default.nix | 2 +- src/admin/room/moderation.rs | 328 +++++++++-------------------------- 4 files changed, 109 insertions(+), 259 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c8fef47f..9a1366f1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -199,18 +199,18 @@ jobs: echo '```' >> $GITHUB_STEP_SUMMARY tail -n 100 test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY echo '```' >> $GITHUB_STEP_SUMMARY + + echo '# Complement diff results (last 100 lines)' >> $GITHUB_STEP_SUMMARY + echo '```diff' >> $GITHUB_STEP_SUMMARY + tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + + echo '# Complement gotestfmt logs (last 100 lines)' >> $GITHUB_STEP_SUMMARY + echo '```diff' >> $GITHUB_STEP_SUMMARY + tail -n 100 complement_test_logs_gotestfmt.log | sed 's/\x1b\[[0-9;]*m//g' >> 
$GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY fi - echo '# Complement diff results (last 100 lines)' >> $GITHUB_STEP_SUMMARY - echo '```diff' >> $GITHUB_STEP_SUMMARY - tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - - echo '# Complement gotestfmt logs (last 100 lines)' >> $GITHUB_STEP_SUMMARY - echo '```diff' >> $GITHUB_STEP_SUMMARY - tail -n 100 complement_test_logs_gotestfmt.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - build: name: Build runs-on: self-hosted diff --git a/bin/complement b/bin/complement index 47c02843..b869bad6 100755 --- a/bin/complement +++ b/bin/complement @@ -15,7 +15,7 @@ LOG_FILE="${2:-complement_test_logs.jsonl}" # A `.jsonl` file to write test results to RESULTS_FILE="${3:-complement_test_results.jsonl}" -COMPLEMENT_OCI_IMAGE="${COMPLEMENT_OCI_IMAGE:-complement-conduwuit:main}" +COMPLEMENT_BASE_IMAGE="${COMPLEMENT_BASE_IMAGE:-complement-conduwuit:main}" # Complement tests that are skipped due to flakiness/reliability issues or we don't implement such features and won't for a long time #SKIPPED_COMPLEMENT_TESTS='-skip=TestPartialStateJoin.*' @@ -62,12 +62,13 @@ echo "\$LOG_FILE: $LOG_FILE" echo "" # It's okay (likely, even) that `go test` exits nonzero +# `COMPLEMENT_ENABLE_DIRTY_RUNS=1` reuses the same complement container for faster complement, at the possible expense of test environment pollution set +o pipefail env \ -C "$COMPLEMENT_SRC" \ - COMPLEMENT_BASE_IMAGE="$COMPLEMENT_OCI_IMAGE" \ - COMPLEMENT_ENABLE_DIRTY_RUNS=1 \ # reuses the same complement container for faster complement, at the possible expense of test environment pollution - go test -tags="conduwuit_blacklist" -v -timeout 1h -json ./tests/... | tee "$LOG_FILE" + COMPLEMENT_BASE_IMAGE="$COMPLEMENT_BASE_IMAGE" \ + COMPLEMENT_ENABLE_DIRTY_RUNS=1 \ + go test -tags="conduwuit_blacklist" -timeout 1h -json ./tests/... 
| tee "$LOG_FILE" set -o pipefail # Post-process the results into an easy-to-compare format, sorted by Test name for reproducible results @@ -78,12 +79,17 @@ cat "$LOG_FILE" | jq -s -c 'sort_by(.Test)[]' | jq -c ' ) | {Action: .Action, Test: .Test} ' > "$RESULTS_FILE" -grep '^{"Time":' "$LOG_FILE" | gotestfmt > "${LOG_FILE}_gotestfmt.log" +if command -v gotestfmt &> /dev/null; then + echo "using gotestfmt on $LOG_FILE" + grep '^{"Time":' "$LOG_FILE" | gotestfmt > "complement_test_logs_gotestfmt.log" +fi echo "" echo "" echo "complement logs saved at $LOG_FILE" echo "complement results saved at $RESULTS_FILE" -echo "complement logs in gotestfmt pretty format outputted at ${LOG_FILE}_gotestfmt.log (use an editor/terminal that interprets ANSI colours)" +if command -v gotestfmt &> /dev/null; then + echo "complement logs in gotestfmt pretty format outputted at complement_test_logs_gotestfmt.log (use an editor/terminal/pager that interprets ANSI colours and UTF-8 emojis)" +fi echo "" echo "" diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix index 5dfb32ec..9c8038a7 100644 --- a/nix/pkgs/main/default.nix +++ b/nix/pkgs/main/default.nix @@ -155,9 +155,9 @@ commonAttrs = { # Keep sorted include = [ + ".cargo" "Cargo.lock" "Cargo.toml" - "deps" "src" ]; }; diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index 444dfa2f..dd5ea627 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -1,7 +1,7 @@ use api::client::leave_room; use clap::Subcommand; use conduwuit::{ - Result, debug, error, info, + Result, debug, utils::{IterStream, ReadyExt}, warn, }; @@ -17,51 +17,23 @@ use crate::{admin_command, admin_command_dispatch, get_room_info}; #[derive(Debug, Subcommand)] pub(crate) enum RoomModerationCommand { /// - Bans a room from local users joining and evicts all our local users + /// (including server + /// admins) /// from the room. Also blocks any invites (local and remote) for the - /// banned room. - /// - /// Server admins (users in the conduwuit admin room) will not be evicted - /// and server admins can still join the room. To evict admins too, use - /// --force (also ignores errors) To disable incoming federation of the - /// room, use --disable-federation + /// banned room, and disables federation entirely with it. BanRoom { - #[arg(short, long)] - /// Evicts admins out of the room and ignores any potential errors when - /// making our local users leave the room - force: bool, - - #[arg(long)] - /// Disables incoming federation of the room after banning and evicting - /// users - disable_federation: bool, - /// The room in the format of `!roomid:example.com` or a room alias in /// the format of `#roomalias:example.com` room: Box, }, /// - Bans a list of rooms (room IDs and room aliases) from a newline - /// delimited codeblock similar to `user deactivate-all` - BanListOfRooms { - #[arg(short, long)] - /// Evicts admins out of the room and ignores any potential errors when - /// making our local users leave the room - force: bool, - - #[arg(long)] - /// Disables incoming federation of the room after banning and evicting - /// users - disable_federation: bool, - }, + /// delimited codeblock similar to `user deactivate-all`. 
Applies the same + /// steps as ban-room + BanListOfRooms, /// - Unbans a room to allow local users to join again - /// - /// To re-enable incoming federation of the room, use --enable-federation UnbanRoom { - #[arg(long)] - /// Enables incoming federation of the room after unbanning - enable_federation: bool, - /// The room in the format of `!roomid:example.com` or a room alias in /// the format of `#roomalias:example.com` room: Box, @@ -77,12 +49,7 @@ pub(crate) enum RoomModerationCommand { } #[admin_command] -async fn ban_room( - &self, - force: bool, - disable_federation: bool, - room: Box, -) -> Result { +async fn ban_room(&self, room: Box) -> Result { debug!("Got room alias or ID: {}", room); let admin_room_alias = &self.services.globals.admin_alias; @@ -175,98 +142,56 @@ async fn ban_room( )); }; - debug!("Making all users leave the room {}", &room); - if force { - let mut users = self - .services - .rooms - .state_cache - .room_members(&room_id) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .boxed(); + debug!("Making all users leave the room {room_id} and forgetting it"); + let mut users = self + .services + .rooms + .state_cache + .room_members(&room_id) + .map(ToOwned::to_owned) + .ready_filter(|user| self.services.globals.user_is_local(user)) + .boxed(); - while let Some(local_user) = users.next().await { - debug!( - "Attempting leave for user {local_user} in room {room_id} (forced, ignoring all \ - errors, evicting admins too)", - ); + while let Some(ref user_id) = users.next().await { + debug!( + "Attempting leave for user {user_id} in room {room_id} (ignoring all errors, \ + evicting admins too)", + ); - if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { - warn!(%e, "Failed to leave room"); - } + if let Err(e) = leave_room(self.services, user_id, &room_id, None).await { + warn!("Failed to leave room: {e}"); } - } else { - let mut users = self - .services - .rooms - .state_cache - .room_members(&room_id) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .boxed(); - while let Some(local_user) = users.next().await { - if self.services.users.is_admin(local_user).await { - continue; - } - - debug!("Attempting leave for user {} in room {}", &local_user, &room_id); - if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { - error!( - "Error attempting to make local user {} leave room {} during room banning: \ - {}", - &local_user, &room_id, e - ); - return Ok(RoomMessageEventContent::text_plain(format!( - "Error attempting to make local user {} leave room {} during room banning \ - (room is still banned but not removing any more users): {}\nIf you would \ - like to ignore errors, use --force", - &local_user, &room_id, e - ))); - } - } + self.services.rooms.state_cache.forget(&room_id, user_id); } - // remove any local aliases, ignore errors - for local_alias in &self - .services + self.services .rooms .alias .local_aliases_for_room(&room_id) .map(ToOwned::to_owned) - .collect::>() - .await - { - _ = self - .services - .rooms - .alias - .remove_alias(local_alias, &self.services.globals.server_user) - .await; - } + .for_each(|local_alias| async move { + self.services + .rooms + .alias + .remove_alias(&local_alias, &self.services.globals.server_user) + .await + .ok(); + }) + .await; - // unpublish from room directory, ignore errors + // unpublish from room directory self.services.rooms.directory.set_not_public(&room_id); - if disable_federation { - 
self.services.rooms.metadata.disable_room(&room_id, true); - return Ok(RoomMessageEventContent::text_plain( - "Room banned, removed all our local users, and disabled incoming federation with \ - room.", - )); - } + self.services.rooms.metadata.disable_room(&room_id, true); Ok(RoomMessageEventContent::text_plain( - "Room banned and removed all our local users, use `!admin federation disable-room` to \ - stop receiving new inbound federation events as well if needed.", + "Room banned, removed all our local users, and disabled incoming federation with room.", )) } #[admin_command] -async fn ban_list_of_rooms( - &self, - force: bool, - disable_federation: bool, -) -> Result { +async fn ban_list_of_rooms(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" @@ -293,7 +218,7 @@ async fn ban_list_of_rooms( if let Ok(admin_room_id) = self.services.admin.get_admin_room().await { if room.to_owned().eq(&admin_room_id) || room.to_owned().eq(admin_room_alias) { - info!("User specified admin room in bulk ban list, ignoring"); + warn!("User specified admin room in bulk ban list, ignoring"); continue; } } @@ -302,19 +227,12 @@ async fn ban_list_of_rooms( let room_id = match RoomId::parse(room_alias_or_id) { | Ok(room_id) => room_id, | Err(e) => { - if force { - // ignore rooms we failed to parse if we're force banning - warn!( - "Error parsing room \"{room}\" during bulk room banning, \ - ignoring error and logging here: {e}" - ); - continue; - } - - return Ok(RoomMessageEventContent::text_plain(format!( - "{room} is not a valid room ID or room alias, please fix the \ - list and try again: {e}" - ))); + // ignore rooms we failed to parse + warn!( + "Error parsing room \"{room}\" during bulk room banning, \ + ignoring error and logging here: {e}" + ); + continue; }, }; @@ -355,21 +273,11 @@ async fn ban_list_of_rooms( room_id }, | Err(e) => { - // don't fail if force blocking - if force { - warn!( - "Failed to resolve room alias {room} to a \ - room ID: {e}" - ); - continue; - } - - return Ok(RoomMessageEventContent::text_plain( - format!( - "Failed to resolve room alias {room} to a \ - room ID: {e}" - ), - )); + warn!( + "Failed to resolve room alias {room} to a room \ + ID: {e}" + ); + continue; }, } }, @@ -378,37 +286,21 @@ async fn ban_list_of_rooms( room_ids.push(room_id); }, | Err(e) => { - if force { - // ignore rooms we failed to parse if we're force deleting - error!( - "Error parsing room \"{room}\" during bulk room banning, \ - ignoring error and logging here: {e}" - ); - continue; - } - - return Ok(RoomMessageEventContent::text_plain(format!( - "{room} is not a valid room ID or room alias, please fix the \ - list and try again: {e}" - ))); + warn!( + "Error parsing room \"{room}\" during bulk room banning, \ + ignoring error and logging here: {e}" + ); + continue; }, } } }, | Err(e) => { - if force { - // ignore rooms we failed to parse if we're force deleting - error!( - "Error parsing room \"{room}\" during bulk room banning, ignoring error \ - and logging here: {e}" - ); - continue; - } - - return Ok(RoomMessageEventContent::text_plain(format!( - "{room} is not a valid room ID or room alias, please fix the list and try \ - again: {e}" - ))); + warn!( + "Error parsing room \"{room}\" during bulk room banning, ignoring error and \ + logging here: {e}" + ); + continue; }, } } @@ -419,56 +311,27 @@ async fn ban_list_of_rooms( debug!("Banned {room_id} successfully"); room_ban_count = 
room_ban_count.saturating_add(1); - debug!("Making all users leave the room {}", &room_id); - if force { - let mut users = self - .services - .rooms - .state_cache - .room_members(&room_id) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .boxed(); + debug!("Making all users leave the room {room_id} and forgetting it"); + let mut users = self + .services + .rooms + .state_cache + .room_members(&room_id) + .map(ToOwned::to_owned) + .ready_filter(|user| self.services.globals.user_is_local(user)) + .boxed(); - while let Some(local_user) = users.next().await { - debug!( - "Attempting leave for user {local_user} in room {room_id} (forced, ignoring \ - all errors, evicting admins too)", - ); + while let Some(ref user_id) = users.next().await { + debug!( + "Attempting leave for user {user_id} in room {room_id} (ignoring all errors, \ + evicting admins too)", + ); - if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { - warn!(%e, "Failed to leave room"); - } + if let Err(e) = leave_room(self.services, user_id, &room_id, None).await { + warn!("Failed to leave room: {e}"); } - } else { - let mut users = self - .services - .rooms - .state_cache - .room_members(&room_id) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .boxed(); - while let Some(local_user) = users.next().await { - if self.services.users.is_admin(local_user).await { - continue; - } - - debug!("Attempting leave for user {local_user} in room {room_id}"); - if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { - error!( - "Error attempting to make local user {local_user} leave room {room_id} \ - during bulk room banning: {e}", - ); - - return Ok(RoomMessageEventContent::text_plain(format!( - "Error attempting to make local user {} leave room {} during room \ - banning (room is still banned but not removing any more users and not \ - banning any more rooms): {}\nIf you would like to ignore errors, use \ - --force", - &local_user, &room_id, e - ))); - } - } + self.services.rooms.state_cache.forget(&room_id, user_id); } // remove any local aliases, ignore errors @@ -490,29 +353,17 @@ async fn ban_list_of_rooms( // unpublish from room directory, ignore errors self.services.rooms.directory.set_not_public(&room_id); - if disable_federation { - self.services.rooms.metadata.disable_room(&room_id, true); - } + self.services.rooms.metadata.disable_room(&room_id, true); } - if disable_federation { - Ok(RoomMessageEventContent::text_plain(format!( - "Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, \ - and disabled incoming federation with the room." - ))) - } else { - Ok(RoomMessageEventContent::text_plain(format!( - "Finished bulk room ban, banned {room_ban_count} total rooms and evicted all users." - ))) - } + Ok(RoomMessageEventContent::text_plain(format!( + "Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, and \ + disabled incoming federation with the room." 
+ ))) } #[admin_command] -async fn unban_room( - &self, - enable_federation: bool, - room: Box, -) -> Result { +async fn unban_room(&self, room: Box) -> Result { let room_id = if room.is_room_id() { let room_id = match RoomId::parse(&room) { | Ok(room_id) => room_id, @@ -595,15 +446,8 @@ async fn unban_room( )); }; - if enable_federation { - self.services.rooms.metadata.disable_room(&room_id, false); - return Ok(RoomMessageEventContent::text_plain("Room unbanned.")); - } - - Ok(RoomMessageEventContent::text_plain( - "Room unbanned, you may need to re-enable federation with the room using enable-room if \ - this is a remote room to make it fully functional.", - )) + self.services.rooms.metadata.disable_room(&room_id, false); + Ok(RoomMessageEventContent::text_plain("Room unbanned and federation re-enabled.")) } #[admin_command] From 0b012b529f2c925f2bc20aee2381e2d30f116c46 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 8 Mar 2025 18:59:51 -0500 Subject: [PATCH 231/328] comment gotestfmt for now Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 12 ------------ bin/complement | 15 +++++++-------- 2 files changed, 7 insertions(+), 20 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9a1366f1..cd7d2484 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -176,13 +176,6 @@ jobs: path: complement_test_results.jsonl if-no-files-found: error - - name: Upload Complement logs (gotestfmt) - uses: actions/upload-artifact@v4 - with: - name: complement_test_logs_gotestfmt.log - path: complement_test_logs_gotestfmt.log - if-no-files-found: error - - name: Diff Complement results with checked-in repo results run: | diff -u --color=always tests/test_results/complement/test_results.jsonl complement_test_results.jsonl > >(tee -a complement_diff_output.log) @@ -204,11 +197,6 @@ jobs: echo '```diff' >> $GITHUB_STEP_SUMMARY tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY echo '```' >> $GITHUB_STEP_SUMMARY - - echo '# Complement gotestfmt logs (last 100 lines)' >> $GITHUB_STEP_SUMMARY - echo '```diff' >> $GITHUB_STEP_SUMMARY - tail -n 100 complement_test_logs_gotestfmt.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY fi build: diff --git a/bin/complement b/bin/complement index b869bad6..89521796 100755 --- a/bin/complement +++ b/bin/complement @@ -67,7 +67,6 @@ set +o pipefail env \ -C "$COMPLEMENT_SRC" \ COMPLEMENT_BASE_IMAGE="$COMPLEMENT_BASE_IMAGE" \ - COMPLEMENT_ENABLE_DIRTY_RUNS=1 \ go test -tags="conduwuit_blacklist" -timeout 1h -json ./tests/... 
| tee "$LOG_FILE" set -o pipefail @@ -79,17 +78,17 @@ cat "$LOG_FILE" | jq -s -c 'sort_by(.Test)[]' | jq -c ' ) | {Action: .Action, Test: .Test} ' > "$RESULTS_FILE" -if command -v gotestfmt &> /dev/null; then - echo "using gotestfmt on $LOG_FILE" - grep '^{"Time":' "$LOG_FILE" | gotestfmt > "complement_test_logs_gotestfmt.log" -fi +#if command -v gotestfmt &> /dev/null; then +# echo "using gotestfmt on $LOG_FILE" +# grep '{"Time":' "$LOG_FILE" | gotestfmt > "complement_test_logs_gotestfmt.log" +#fi echo "" echo "" echo "complement logs saved at $LOG_FILE" echo "complement results saved at $RESULTS_FILE" -if command -v gotestfmt &> /dev/null; then - echo "complement logs in gotestfmt pretty format outputted at complement_test_logs_gotestfmt.log (use an editor/terminal/pager that interprets ANSI colours and UTF-8 emojis)" -fi +#if command -v gotestfmt &> /dev/null; then +# echo "complement logs in gotestfmt pretty format outputted at complement_test_logs_gotestfmt.log (use an editor/terminal/pager that interprets ANSI colours and UTF-8 emojis)" +#fi echo "" echo "" From 06f2039eeeec2d5adf51e8ffbb470f01a8d9e868 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 9 Mar 2025 00:44:56 -0500 Subject: [PATCH 232/328] bump ruwuma to maybe fix rare device key upload issues Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 4 +++- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8d4688f5..f768eae1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3507,7 +3507,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "assign", "js_int", @@ -3527,7 +3527,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "js_int", "ruma-common", @@ -3539,7 +3539,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "as_variant", "assign", @@ -3562,7 +3562,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "as_variant", "base64 0.22.1", @@ -3594,7 +3594,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3619,7 +3619,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "bytes", "headers", @@ -3641,7 +3641,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3650,7 +3650,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "js_int", "ruma-common", @@ -3660,7 +3660,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3675,7 +3675,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "js_int", "ruma-common", @@ -3687,7 +3687,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index de90e63e..2bc1d20f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "bb42118bd85e731b652a6110896b6945085bf944" +rev = "d577100f5480c6c528e7a8ff59cd08d95a3a16e7" features = [ "compat", "rand", @@ -371,7 +371,9 @@ features = [ "unstable-msc3381", # polls "unstable-msc3489", # beacon / live location "unstable-msc3575", + "unstable-msc3930", # polls push rules "unstable-msc4075", + "unstable-msc4095", "unstable-msc4121", "unstable-msc4125", "unstable-msc4186", From d0c767c23c1dff11400388c5a8dd9e43f68705f1 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 9 Mar 2025 01:43:49 -0500 Subject: [PATCH 233/328] fix a few things to 
make some complement tests pass Signed-off-by: June Clementine Strawberry --- src/api/client/membership.rs | 4 +- src/api/client/room/create.rs | 8 +--- src/api/client/session.rs | 79 +++++++++++++++++++---------------- src/service/media/preview.rs | 23 ++++++---- src/service/users/mod.rs | 4 +- 5 files changed, 60 insertions(+), 58 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 940c8639..3f77e69e 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -517,9 +517,7 @@ pub(crate) async fn invite_user_route( join!(sender_ignored_recipient, recipient_ignored_by_sender); if sender_ignored_recipient { - return Err!(Request(Forbidden( - "You cannot invite users you have ignored to rooms." - ))); + return Ok(invite_user::v3::Response {}); } if let Ok(target_user_membership) = services diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index 1b8294a5..bb06e966 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -239,9 +239,7 @@ pub(crate) async fn create_room_route( if preset == RoomPreset::TrustedPrivateChat { for invite in &body.invite { if services.users.user_is_ignored(sender_user, invite).await { - return Err!(Request(Forbidden( - "You cannot invite users you have ignored to rooms." - ))); + continue; } else if services.users.user_is_ignored(invite, sender_user).await { // silently drop the invite to the recipient if they've been ignored by the // sender, pretend it worked @@ -420,9 +418,7 @@ pub(crate) async fn create_room_route( drop(state_lock); for user_id in &body.invite { if services.users.user_is_ignored(sender_user, user_id).await { - return Err!(Request(Forbidden( - "You cannot invite users you have ignored to rooms." - ))); + continue; } else if services.users.user_is_ignored(user_id, sender_user).await { // silently drop the invite to the recipient if they've been ignored by the // sender, pretend it worked diff --git a/src/api/client/session.rs b/src/api/client/session.rs index ab67ee18..3de625e4 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -3,7 +3,7 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{Err, debug, err, info, utils::ReadyExt}; -use futures::{StreamExt, TryFutureExt}; +use futures::StreamExt; use ruma::{ UserId, api::client::{ @@ -96,32 +96,50 @@ pub(crate) async fn login_route( &services.config.server_name, )?; - assert!( - services.globals.user_is_local(&user_id), - "User ID does not belong to this homeserver" - ); - assert!( - services.globals.user_is_local(&lowercased_user_id), - "User ID does not belong to this homeserver" - ); + if !services.globals.user_is_local(&user_id) + || !services.globals.user_is_local(&lowercased_user_id) + { + return Err!(Request(Unknown("User ID does not belong to this homeserver"))); + } + // first try the username as-is let hash = services .users .password_hash(&user_id) - .or_else(|_| services.users.password_hash(&lowercased_user_id)) .await - .inspect_err(|e| debug!("{e}")) - .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; + .inspect_err(|e| debug!("{e}")); - if hash.is_empty() { - return Err!(Request(UserDeactivated("The user has been deactivated"))); + match hash { + | Ok(hash) => { + if hash.is_empty() { + return Err!(Request(UserDeactivated("The user has been deactivated"))); + } + + hash::verify_password(password, &hash) + .inspect_err(|e| debug!("{e}")) + .map_err(|_| 
err!(Request(Forbidden("Wrong username or password."))))?; + + user_id + }, + | Err(_e) => { + let hash_lowercased_user_id = services + .users + .password_hash(&lowercased_user_id) + .await + .inspect_err(|e| debug!("{e}")) + .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; + + if hash_lowercased_user_id.is_empty() { + return Err!(Request(UserDeactivated("The user has been deactivated"))); + } + + hash::verify_password(password, &hash_lowercased_user_id) + .inspect_err(|e| debug!("{e}")) + .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; + + lowercased_user_id + }, } - - hash::verify_password(password, &hash) - .inspect_err(|e| debug!("{e}")) - .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; - - user_id }, | login::v3::LoginInfo::Token(login::v3::Token { token }) => { debug!("Got token login type"); @@ -153,24 +171,11 @@ pub(crate) async fn login_route( } .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?; - let lowercased_user_id = UserId::parse_with_server_name( - user_id.localpart().to_lowercase(), - &services.config.server_name, - )?; + if !services.globals.user_is_local(&user_id) { + return Err!(Request(Unknown("User ID does not belong to this homeserver"))); + } - assert!( - services.globals.user_is_local(&user_id), - "User ID does not belong to this homeserver" - ); - assert!( - services.globals.user_is_local(&lowercased_user_id), - "User ID does not belong to this homeserver" - ); - - if !info.is_user_match(&user_id) - && !info.is_user_match(&lowercased_user_id) - && !emergency_mode_enabled - { + if !info.is_user_match(&user_id) && !emergency_mode_enabled { return Err!(Request(Exclusive("Username is not in an appservice namespace."))); } diff --git a/src/service/media/preview.rs b/src/service/media/preview.rs index 17216869..ba5be7d4 100644 --- a/src/service/media/preview.rs +++ b/src/service/media/preview.rs @@ -7,7 +7,7 @@ use std::time::SystemTime; -use conduwuit::{Err, Result, debug}; +use conduwuit::{Err, Result, debug, err}; use conduwuit_core::implement; use ipaddress::IPAddress; use serde::Serialize; @@ -64,28 +64,33 @@ pub async fn get_url_preview(&self, url: &Url) -> Result { async fn request_url_preview(&self, url: &Url) -> Result { if let Ok(ip) = IPAddress::parse(url.host_str().expect("URL previously validated")) { if !self.services.client.valid_cidr_range(&ip) { - return Err!(BadServerResponse("Requesting from this address is forbidden")); + return Err!(Request(Forbidden("Requesting from this address is forbidden"))); } } let client = &self.services.client.url_preview; let response = client.head(url.as_str()).send().await?; + debug!(?url, "URL preview response headers: {:?}", response.headers()); + if let Some(remote_addr) = response.remote_addr() { + debug!(?url, "URL preview response remote address: {:?}", remote_addr); + if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string()) { if !self.services.client.valid_cidr_range(&ip) { - return Err!(BadServerResponse("Requesting from this address is forbidden")); + return Err!(Request(Forbidden("Requesting from this address is forbidden"))); } } } - let Some(content_type) = response - .headers() - .get(reqwest::header::CONTENT_TYPE) - .and_then(|x| x.to_str().ok()) - else { - return Err!(Request(Unknown("Unknown Content-Type"))); + let Some(content_type) = response.headers().get(reqwest::header::CONTENT_TYPE) else { + return Err!(Request(Unknown("Unknown or invalid Content-Type header"))); }; + + let content_type = 
content_type + .to_str() + .map_err(|e| err!(Request(Unknown("Unknown or invalid Content-Type header: {e}"))))?; + let data = match content_type { | html if html.starts_with("text/html") => self.download_html(url.as_str()).await?, | img if img.starts_with("image/") => self.download_image(url.as_str()).await?, diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index b3f5db88..5265e64b 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -278,11 +278,9 @@ impl Service { initial_device_display_name: Option, client_ip: Option, ) -> Result<()> { - // This method should never be called for nonexistent users. We shouldn't assert - // though... if !self.exists(user_id).await { return Err!(Request(InvalidParam(error!( - "Called create_device for non-existent {user_id}" + "Called create_device for non-existent user {user_id}" )))); } From 47ff91243d0da2088806351c040ac1386c92c63d Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 9 Mar 2025 03:33:29 -0400 Subject: [PATCH 234/328] update complement results Signed-off-by: June Clementine Strawberry --- .../complement/test_results.jsonl | 112 ++++++++++++++---- 1 file changed, 89 insertions(+), 23 deletions(-) diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index fed43b48..7b06510b 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -6,9 +6,9 @@ {"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events"} {"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events/incremental_sync"} {"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events/initial_sync"} -{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_is_empty"} +{"Action":"pass","Test":"TestArchivedRoomsHistory/timeline_is_empty"} {"Action":"skip","Test":"TestArchivedRoomsHistory/timeline_is_empty/incremental_sync"} -{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_is_empty/initial_sync"} +{"Action":"pass","Test":"TestArchivedRoomsHistory/timeline_is_empty/initial_sync"} {"Action":"fail","Test":"TestAsyncUpload"} {"Action":"fail","Test":"TestAsyncUpload/Cannot_upload_to_a_media_ID_that_has_already_been_uploaded_to"} {"Action":"fail","Test":"TestAsyncUpload/Create_media"} @@ -82,7 +82,7 @@ {"Action":"pass","Test":"TestContent"} {"Action":"pass","Test":"TestContentCSAPIMediaV1"} {"Action":"pass","Test":"TestContentMediaV1"} -{"Action":"fail","Test":"TestCumulativeJoinLeaveJoinSync"} +{"Action":"pass","Test":"TestCumulativeJoinLeaveJoinSync"} {"Action":"pass","Test":"TestDeactivateAccount"} {"Action":"pass","Test":"TestDeactivateAccount/After_deactivating_account,_can't_log_in_with_password"} {"Action":"pass","Test":"TestDeactivateAccount/Can't_deactivate_account_with_wrong_password"} @@ -153,10 +153,10 @@ {"Action":"fail","Test":"TestFederationKeyUploadQuery/Can_query_remote_device_keys_using_POST"} {"Action":"pass","Test":"TestFederationRedactSendsWithoutEvent"} {"Action":"pass","Test":"TestFederationRejectInvite"} -{"Action":"fail","Test":"TestFederationRoomsInvite"} -{"Action":"fail","Test":"TestFederationRoomsInvite/Parallel"} +{"Action":"pass","Test":"TestFederationRoomsInvite"} +{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation"} 
-{"Action":"fail","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_for_empty_room"} +{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_for_empty_room"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_several_times"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_has_'is_direct'_flag_in_prev_content_after_joining"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Remote_invited_user_can_join_the_room_when_homeserver_is_already_participating_in_the_room"} @@ -173,7 +173,7 @@ {"Action":"pass","Test":"TestFetchMessagesFromNonExistentRoom"} {"Action":"pass","Test":"TestFilter"} {"Action":"fail","Test":"TestFilterMessagesByRelType"} -{"Action":"fail","Test":"TestGappedSyncLeaveSection"} +{"Action":"pass","Test":"TestGappedSyncLeaveSection"} {"Action":"fail","Test":"TestGetFilteredRoomMembers"} {"Action":"fail","Test":"TestGetFilteredRoomMembers/membership/join"} {"Action":"fail","Test":"TestGetFilteredRoomMembers/membership/leave"} @@ -191,7 +191,7 @@ {"Action":"pass","Test":"TestInboundFederationProfile/Inbound_federation_can_query_profile_data"} {"Action":"pass","Test":"TestInboundFederationProfile/Non-numeric_ports_in_server_names_are_rejected"} {"Action":"fail","Test":"TestInboundFederationRejectsEventsWithRejectedAuthEvents"} -{"Action":"fail","Test":"TestInviteFromIgnoredUsersDoesNotAppearInSync"} +{"Action":"pass","Test":"TestInviteFromIgnoredUsersDoesNotAppearInSync"} {"Action":"pass","Test":"TestIsDirectFlagFederation"} {"Action":"pass","Test":"TestIsDirectFlagLocal"} {"Action":"pass","Test":"TestJoinFederatedRoomFailOver"} @@ -281,7 +281,7 @@ {"Action":"pass","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"} {"Action":"pass","Test":"TestLeakyTyping"} -{"Action":"fail","Test":"TestLeaveEventInviteRejection"} +{"Action":"pass","Test":"TestLeaveEventInviteRejection"} {"Action":"fail","Test":"TestLeaveEventVisibility"} {"Action":"fail","Test":"TestLeftRoomFixture"} {"Action":"fail","Test":"TestLeftRoomFixture/Can_get_'m.room.name'_state_for_a_departed_room"} @@ -292,10 +292,10 @@ {"Action":"pass","Test":"TestLocalPngThumbnail"} {"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/client/v1/media_endpoint"} {"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/media/v3_endpoint"} -{"Action":"fail","Test":"TestLogin"} -{"Action":"fail","Test":"TestLogin/parallel"} +{"Action":"pass","Test":"TestLogin"} +{"Action":"pass","Test":"TestLogin/parallel"} {"Action":"pass","Test":"TestLogin/parallel/GET_/login_yields_a_set_of_flows"} -{"Action":"fail","Test":"TestLogin/parallel/Login_with_uppercase_username_works_and_GET_/whoami_afterwards_also"} +{"Action":"pass","Test":"TestLogin/parallel/Login_with_uppercase_username_works_and_GET_/whoami_afterwards_also"} {"Action":"pass","Test":"TestLogin/parallel/POST_/login_as_non-existing_user_is_rejected"} {"Action":"pass","Test":"TestLogin/parallel/POST_/login_can_log_in_as_a_user_with_just_the_local_part_of_the_id"} {"Action":"pass","Test":"TestLogin/parallel/POST_/login_can_login_as_user"} @@ -354,12 +354,78 @@ {"Action":"fail","Test":"TestMembershipOnEvents"} {"Action":"fail","Test":"TestNetworkPartitionOrdering"} {"Action":"pass","Test":"TestNotPresentUserCannotBanOthers"} 
-{"Action":"fail","Test":"TestOlderLeftRoomsNotInLeaveSection"} +{"Action":"pass","Test":"TestOlderLeftRoomsNotInLeaveSection"} {"Action":"fail","Test":"TestOutboundFederationEventSizeGetMissingEvents"} {"Action":"fail","Test":"TestOutboundFederationIgnoresMissingEventWithBadJSONForRoomVersion6"} {"Action":"pass","Test":"TestOutboundFederationProfile"} {"Action":"pass","Test":"TestOutboundFederationProfile/Outbound_federation_can_query_profile_data"} {"Action":"pass","Test":"TestOutboundFederationSend"} +{"Action":"fail","Test":"TestPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanFastJoinDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanLazyLoadingSyncDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveDeviceListUpdateDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsWithHalfMissingGrandparentsDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsWithHalfMissingParentsDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsWithMissingParentsDuringPartialStateJoin"} +{"Action":"skip","Test":"TestPartialStateJoin/CanReceivePresenceDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveReceiptDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveSigningKeyUpdateDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveToDeviceDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveTypingDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanSendEventsDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/Can_change_display_name_during_partial_state_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking"} +{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_for_user_incorrectly_believed_to_be_in_room"} +{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_when_failing_to_complete_partial_state_join"} +{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_when_leaving_partial_state_room"} +{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_when_new_member_leaves_partial_state_room"} +{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracked_for_new_members_in_partial_state_room"} +{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_pre-existing_members_in_partial_state_room"} +{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_user_incorrectly_believed_to_be_in_room_when_they_join_another_shared_room_before_partial_state_join_completes"} +{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_user_incorrectly_believed_to_be_in_room_when_they_rejoin_after_partial_state_join_completes"} +{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_user_incorrectly_believed_to_be_in_room_when_they_rejoin_before_partial_state_join_completes"} +{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_when_pre-existing_members_in_partial_state_room_join_another_shared_room"} 
+{"Action":"fail","Test":"TestPartialStateJoin/EagerIncrementalSyncDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/EagerInitialSyncDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/EagerLongPollingSyncWokenWhenResyncCompletes"} +{"Action":"fail","Test":"TestPartialStateJoin/GappySyncAfterPartialStateSynced"} +{"Action":"fail","Test":"TestPartialStateJoin/Lazy-loading_gappy_sync_includes_remote_memberships_during_partial_state_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Lazy-loading_incremental_sync_includes_remote_memberships_during_partial_state_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Lazy-loading_initial_sync_includes_remote_memberships_during_partial_state_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/can_be_triggered_by_remote_ban"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/can_be_triggered_by_remote_kick"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/does_not_wait_for_resync"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/is_seen_after_the_resync"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/succeeds,_then_another_user_can_join_without_resync_completing"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/succeeds,_then_rejoin_succeeds_without_resync_completing"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/works_after_a_second_partial_join"} +{"Action":"fail","Test":"TestPartialStateJoin/MembersRequestBlocksDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_no_longer_reach_departed_servers_after_partial_state_join_completes"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_all_servers_in_partial_state_rooms"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_absent_servers_once_partial_state_join_completes"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_absent_servers_once_partial_state_join_completes_even_though_remote_server_left_room"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_kicked_servers_once_partial_state_join_completes"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_kicked_servers_once_partial_state_join_completes_even_though_remote_server_left_room"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_newly_joined_servers_in_partial_state_rooms"} +{"Action":"fail","Test":"TestPartialStateJoin/PartialStateJoinContinuesAfterRestart"} +{"Action":"fail","Test":"TestPartialStateJoin/PartialStateJoinSyncsUsingOtherHomeservers"} +{"Action":"skip","Test":"TestPartialStateJoin/Purge_during_resync"} +{"Action":"fail","Test":"TestPartialStateJoin/Rejected_events_remain_rejected_after_resync"} +{"Action":"fail","Test":"TestPartialStateJoin/Rejects_make_join_during_partial_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Rejects_make_knock_during_partial_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Rejects_send_join_during_partial_join"} 
+{"Action":"fail","Test":"TestPartialStateJoin/Rejects_send_knock_during_partial_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Resync_completes_even_when_events_arrive_before_their_prev_events"} +{"Action":"fail","Test":"TestPartialStateJoin/Room_aliases_can_be_added_and_deleted_during_a_resync"} +{"Action":"fail","Test":"TestPartialStateJoin/Room_aliases_can_be_added_and_queried_during_a_resync"} +{"Action":"skip","Test":"TestPartialStateJoin/Room_stats_are_correctly_updated_once_state_re-sync_completes"} +{"Action":"fail","Test":"TestPartialStateJoin/State_accepted_incorrectly"} +{"Action":"fail","Test":"TestPartialStateJoin/State_rejected_incorrectly"} +{"Action":"fail","Test":"TestPartialStateJoin/User_directory_is_correctly_updated_once_state_re-sync_completes"} +{"Action":"fail","Test":"TestPartialStateJoin/joined_members_blocks_during_partial_state_join"} {"Action":"fail","Test":"TestPollsLocalPushRules"} {"Action":"fail","Test":"TestPollsLocalPushRules/Polls_push_rules_are_correctly_presented_to_the_client"} {"Action":"pass","Test":"TestPowerLevels"} @@ -559,11 +625,11 @@ {"Action":"pass","Test":"TestRoomState/Parallel/PUT_/createRoom_with_creation_content"} {"Action":"pass","Test":"TestRoomState/Parallel/PUT_/rooms/:room_id/state/m.room.topic_sets_topic"} {"Action":"pass","Test":"TestRoomSummary"} -{"Action":"fail","Test":"TestRoomsInvite"} -{"Action":"fail","Test":"TestRoomsInvite/Parallel"} +{"Action":"pass","Test":"TestRoomsInvite"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel"} {"Action":"pass","Test":"TestRoomsInvite/Parallel/Can_invite_users_to_invite-only_rooms"} {"Action":"pass","Test":"TestRoomsInvite/Parallel/Invited_user_can_reject_invite"} -{"Action":"fail","Test":"TestRoomsInvite/Parallel/Invited_user_can_reject_invite_for_empty_room"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Invited_user_can_reject_invite_for_empty_room"} {"Action":"pass","Test":"TestRoomsInvite/Parallel/Invited_user_can_see_room_metadata"} {"Action":"pass","Test":"TestRoomsInvite/Parallel/Test_that_we_can_be_reinvited_to_a_room_we_created"} {"Action":"pass","Test":"TestRoomsInvite/Parallel/Uninvited_users_cannot_join_the_room"} @@ -596,15 +662,15 @@ {"Action":"pass","Test":"TestSyncFilter"} {"Action":"pass","Test":"TestSyncFilter/Can_create_filter"} {"Action":"pass","Test":"TestSyncFilter/Can_download_filter"} -{"Action":"fail","Test":"TestSyncLeaveSection"} -{"Action":"fail","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_full_state_sync"} -{"Action":"fail","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_sync"} -{"Action":"fail","Test":"TestSyncLeaveSection/Newly_left_rooms_appear_in_the_leave_section_of_incremental_sync"} +{"Action":"pass","Test":"TestSyncLeaveSection"} +{"Action":"pass","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_full_state_sync"} +{"Action":"pass","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_sync"} +{"Action":"pass","Test":"TestSyncLeaveSection/Newly_left_rooms_appear_in_the_leave_section_of_incremental_sync"} {"Action":"pass","Test":"TestSyncOmitsStateChangeOnFilteredEvents"} {"Action":"pass","Test":"TestSyncTimelineGap"} {"Action":"pass","Test":"TestSyncTimelineGap/full"} {"Action":"pass","Test":"TestSyncTimelineGap/incremental"} -{"Action":"fail","Test":"TestTentativeEventualJoiningAfterRejecting"} +{"Action":"pass","Test":"TestTentativeEventualJoiningAfterRejecting"} {"Action":"fail","Test":"TestThreadReceiptsInSyncMSC4102"} 
{"Action":"fail","Test":"TestThreadedReceipts"} {"Action":"fail","Test":"TestThreadsEndpoint"} @@ -635,8 +701,8 @@ {"Action":"pass","Test":"TestUploadKey/Parallel/Can_query_device_keys_using_POST"} {"Action":"pass","Test":"TestUploadKey/Parallel/Can_query_specific_device_keys_using_POST"} {"Action":"pass","Test":"TestUploadKey/Parallel/Can_upload_device_keys"} -{"Action":"fail","Test":"TestUploadKey/Parallel/Rejects_invalid_device_keys"} -{"Action":"fail","Test":"TestUploadKey/Parallel/Should_reject_keys_claiming_to_belong_to_a_different_user"} +{"Action":"pass","Test":"TestUploadKey/Parallel/Rejects_invalid_device_keys"} +{"Action":"pass","Test":"TestUploadKey/Parallel/Should_reject_keys_claiming_to_belong_to_a_different_user"} {"Action":"pass","Test":"TestUploadKey/Parallel/query_for_user_with_no_keys_returns_empty_key_dict"} {"Action":"pass","Test":"TestUploadKeyIdempotency"} {"Action":"pass","Test":"TestUploadKeyIdempotencyOverlap"} From 0e342aab7f2a173638fa723a9d36ae16fe9396d1 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 9 Mar 2025 13:44:57 -0400 Subject: [PATCH 235/328] fix a few error codes Signed-off-by: June Clementine Strawberry --- src/api/client/alias.rs | 2 +- src/api/client/context.rs | 18 ++++++++++++------ src/api/client/state.rs | 12 +++++++----- 3 files changed, 20 insertions(+), 12 deletions(-) diff --git a/src/api/client/alias.rs b/src/api/client/alias.rs index 319e5141..9cd7e0c5 100644 --- a/src/api/client/alias.rs +++ b/src/api/client/alias.rs @@ -92,7 +92,7 @@ pub(crate) async fn get_alias_route( let Ok((room_id, servers)) = services.rooms.alias.resolve_alias(&room_alias, None).await else { - return Err!(Request(NotFound("Room with alias not found."))); + return Err!(Request(Unknown("Room with alias not found."))); }; let servers = room_available_servers(&services, &room_id, &room_alias, servers).await; diff --git a/src/api/client/context.rs b/src/api/client/context.rs index 3f16c850..cb95dfef 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -1,6 +1,6 @@ use axum::extract::State; use conduwuit::{ - Err, PduEvent, Result, at, err, ref_at, + Err, PduEvent, Result, at, debug_warn, err, ref_at, utils::{ IterStream, future::TryExtExt, @@ -35,8 +35,13 @@ pub(crate) async fn get_context_route( let sender = body.sender(); let (sender_user, sender_device) = sender; let room_id = &body.room_id; + let event_id = &body.event_id; let filter = &body.filter; + if !services.rooms.metadata.exists(room_id).await { + return Err!(Request(Forbidden("Room does not exist to this server"))); + } + // Use limit or else 10, with maximum 100 let limit: usize = body .limit @@ -47,29 +52,30 @@ pub(crate) async fn get_context_route( let base_id = services .rooms .timeline - .get_pdu_id(&body.event_id) + .get_pdu_id(event_id) .map_err(|_| err!(Request(NotFound("Event not found.")))); let base_pdu = services .rooms .timeline - .get_pdu(&body.event_id) + .get_pdu(event_id) .map_err(|_| err!(Request(NotFound("Base event not found.")))); let visible = services .rooms .state_accessor - .user_can_see_event(sender_user, &body.room_id, &body.event_id) + .user_can_see_event(sender_user, room_id, event_id) .map(Ok); let (base_id, base_pdu, visible) = try_join3(base_id, base_pdu, visible).await?; - if base_pdu.room_id != body.room_id || base_pdu.event_id != body.event_id { + if base_pdu.room_id != *room_id || base_pdu.event_id != *event_id { return Err!(Request(NotFound("Base event not found."))); } if !visible { - return Err!(Request(Forbidden("You don't have 
permission to view this event."))); + debug_warn!(req_evt = ?event_id, ?base_id, ?room_id, "Event requested by {sender_user} but is not allowed to see it, returning 404"); + return Err!(Request(NotFound("Event not found."))); } let base_count = base_id.pdu_count(); diff --git a/src/api/client/state.rs b/src/api/client/state.rs index c92091eb..d04aac35 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -27,7 +27,7 @@ pub(crate) async fn send_state_event_for_key_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); Ok(send_state_event::v3::Response { event_id: send_state_event_for_key_helper( @@ -103,7 +103,7 @@ pub(crate) async fn get_state_events_for_key_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if !services .rooms @@ -111,7 +111,9 @@ pub(crate) async fn get_state_events_for_key_route( .user_can_see_state_events(sender_user, &body.room_id) .await { - return Err!(Request(Forbidden("You don't have permission to view the room state."))); + return Err!(Request(NotFound(debug_warn!( + "You don't have permission to view the room state." + )))); } let event = services @@ -316,14 +318,14 @@ async fn allowed_to_send_state_event( services.rooms.alias.resolve_alias(&alias, None).await?; if alias_room_id != room_id { - return Err!(Request(Forbidden( + return Err!(Request(Unknown( "Room alias {alias} does not belong to room {room_id}" ))); } } }, | Err(e) => { - return Err!(Request(BadJson(debug_warn!( + return Err!(Request(InvalidParam(debug_warn!( "Room canonical alias event is invalid: {e}" )))); }, From 0e2ca7d7192684a945ac49aa53066c488dd40886 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 9 Mar 2025 21:55:07 -0400 Subject: [PATCH 236/328] implement disable TLS validation config option Signed-off-by: June Clementine Strawberry --- nix/pkgs/complement/config.toml | 2 ++ src/core/config/check.rs | 4 ++++ src/core/config/mod.rs | 12 +++++++++++- src/service/client/mod.rs | 3 ++- 4 files changed, 19 insertions(+), 2 deletions(-) diff --git a/nix/pkgs/complement/config.toml b/nix/pkgs/complement/config.toml index 99c151c5..4d7637db 100644 --- a/nix/pkgs/complement/config.toml +++ b/nix/pkgs/complement/config.toml @@ -32,6 +32,8 @@ allow_legacy_media = true startup_netburst = true startup_netburst_keep = -1 +allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure = true + # valgrind makes things so slow dns_timeout = 60 dns_attempts = 20 diff --git a/src/core/config/check.rs b/src/core/config/check.rs index 98223be4..f9d51eeb 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -28,6 +28,10 @@ pub fn check(config: &Config) -> Result { warn!("Note: conduwuit was built without optimisations (i.e. 
debug build)"); } + if config.allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure { + warn!("\n\nWARNING: \n\nTLS CERTIFICATE VALIDATION IS DISABLED, THIS IS HIGHLY INSECURE AND SHOULD NOT BE USED IN PRODUCTION.\n\n"); + } + warn_deprecated(config); warn_unknown_key(config); diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index e69a56b9..6b669ad3 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -52,7 +52,7 @@ use crate::{Result, err, error::Error, utils::sys}; ### For more information, see: ### https://conduwuit.puppyirl.gay/configuration.html "#, - ignore = "catchall well_known tls blurhashing" + ignore = "catchall well_known tls blurhashing allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure" )] pub struct Config { /// The server_name is the pretty name of this server. It is used as a @@ -1806,6 +1806,16 @@ pub struct Config { #[serde(default = "true_fn")] pub config_reload_signal: bool, + /// Toggles ignore checking/validating TLS certificates + /// + /// This applies to everything, including URL previews, federation requests, + /// etc. This is a hidden argument that should NOT be used in production as + /// it is highly insecure and I will personally yell at you if I catch you + /// using this. + #[serde(default)] + pub allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure: + bool, + // external structure; separate section #[serde(default)] pub blurhashing: BlurhashConfig, diff --git a/src/service/client/mod.rs b/src/service/client/mod.rs index d5008491..d51e5721 100644 --- a/src/service/client/mod.rs +++ b/src/service/client/mod.rs @@ -128,7 +128,8 @@ fn base(config: &Config) -> Result { .pool_max_idle_per_host(config.request_idle_per_host.into()) .user_agent(conduwuit::version::user_agent()) .redirect(redirect::Policy::limited(6)) - .connection_verbose(true); + .danger_accept_invalid_certs(config.allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure) + .connection_verbose(cfg!(debug_assertions)); #[cfg(feature = "gzip_compression")] { From df1edcf498ac58e27e6ff261b0d53a773d82f69f Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 10 Mar 2025 10:32:11 -0400 Subject: [PATCH 237/328] adjust complement cert generation Signed-off-by: June Clementine Strawberry --- bin/complement | 1 + nix/pkgs/complement/config.toml | 2 -- nix/pkgs/complement/default.nix | 22 +++++++------------ nix/pkgs/complement/private_key.key | 28 +++++++++++++++++++++++++ nix/pkgs/complement/signing_request.csr | 16 ++++++++++++++ 5 files changed, 53 insertions(+), 16 deletions(-) create mode 100644 nix/pkgs/complement/private_key.key create mode 100644 nix/pkgs/complement/signing_request.csr diff --git a/bin/complement b/bin/complement index 89521796..92539f97 100755 --- a/bin/complement +++ b/bin/complement @@ -40,6 +40,7 @@ if [ ! 
-f "complement_oci_image.tar.gz" ]; then # if using macOS, use linux-complement #bin/nix-build-and-cache just .#linux-complement bin/nix-build-and-cache just .#complement + #nix build -L .#complement echo "complement conduwuit image tar.gz built at \"result\"" diff --git a/nix/pkgs/complement/config.toml b/nix/pkgs/complement/config.toml index 4d7637db..759f8d78 100644 --- a/nix/pkgs/complement/config.toml +++ b/nix/pkgs/complement/config.toml @@ -47,6 +47,4 @@ sender_idle_timeout = 300 sender_retry_backoff_limit = 300 [global.tls] -certs = "/certificate.crt" dual_protocol = true -key = "/private_key.key" diff --git a/nix/pkgs/complement/default.nix b/nix/pkgs/complement/default.nix index d9af0779..bbd1bd74 100644 --- a/nix/pkgs/complement/default.nix +++ b/nix/pkgs/complement/default.nix @@ -42,25 +42,18 @@ let start = writeShellScriptBin "start" '' set -euxo pipefail - ${lib.getExe openssl} genrsa -out private_key.key 2048 - ${lib.getExe openssl} req \ - -new \ - -sha256 \ - -key private_key.key \ - -subj "/C=US/ST=CA/O=MyOrg, Inc./CN=$SERVER_NAME" \ - -out signing_request.csr - cp ${./v3.ext} v3.ext - echo "DNS.1 = $SERVER_NAME" >> v3.ext + cp ${./v3.ext} /complement/v3.ext + echo "DNS.1 = $SERVER_NAME" >> /complement/v3.ext echo "IP.1 = $(${lib.getExe gawk} 'END{print $1}' /etc/hosts)" \ - >> v3.ext + >> /complement/v3.ext ${lib.getExe openssl} x509 \ -req \ - -extfile v3.ext \ - -in signing_request.csr \ + -extfile /complement/v3.ext \ + -in ${./signing_request.csr} \ -CA /complement/ca/ca.crt \ -CAkey /complement/ca/ca.key \ -CAcreateserial \ - -out certificate.crt \ + -out /complement/certificate.crt \ -days 1 \ -sha256 @@ -99,7 +92,8 @@ dockerTools.buildImage { else []; Env = [ - "SSL_CERT_FILE=/complement/ca/ca.crt" + "CONDUWUIT_TLS__KEY=${./private_key.key}" + "CONDUWUIT_TLS__CERTS=/complement/certificate.crt" "CONDUWUIT_CONFIG=${./config.toml}" "RUST_BACKTRACE=full" ]; diff --git a/nix/pkgs/complement/private_key.key b/nix/pkgs/complement/private_key.key new file mode 100644 index 00000000..5b9d4d4f --- /dev/null +++ b/nix/pkgs/complement/private_key.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDS/odmZivxajeb +iyT7SMuhXqnMm+hF+zEARLcbieem0wG4x7gi2S6WLf8DlifdXax6me13eYk4rBnT +LvGEvNNx0px5M54H+FVyoVa3c1tmA66WUcZjobafPGsDh5j+5qpScgWwjkMPGg1a +09CphCFswO4PpxUUORX/OTGj/rEKxximW6OtavBwaS9F7mqjXJK7lCrcZxKq5ucc +ebGMmCoO660hROSTBaFigdRTVicclk+NgYRrZyWbCiuXPjQ0jlOE2rcaDepqTUga +Qs/2tdT4kBzBH6kZOiQOIN/ddXaj032QXr1HQYfIJfJmiM6nmRob8nik5rpZdWNO +/Ncsro/fAgMBAAECggEAITCCkfv+a5I+vwvrPE/eIDso0JOxvNhfg+BLQVy3AMnu +WmeoMmshZeREWgcTrEGg8QQnk4Sdrjl8MnkO6sddJ2luza3t7OkGX+q7Hk5aETkB +DIo+f8ufU3sIhlydF3OnVSK0fGpUaBq8AQ6Soyeyrk3G5NVufmjgae5QPbDBnqUb +piOGyfcwagL4JtCbZsMk8AT7vQSynLm6zaWsVzWNd71jummLqtVV063K95J9PqVN +D8meEcP3WR5kQrvf+mgy9RVgWLRtVWN8OLZfJ9yrnl4Efj62elrldUj4jaCFezGQ +8f0W+d8jjt038qhmEdymw2MWQ+X/b0R79lJar1Up8QKBgQD1DtHxauhl+JUoI3y+ +3eboqXl7YPJt1/GTnChb4b6D1Z1hvLsOKUa7hjGEfruYGbsWXBCRMICdfzp+iWcq +/lEOp7/YU9OaW4lQMoG4sXMoBWd9uLgg0E+aH6VDJOBvxsfafqM4ufmtspzwEm90 +FU1cq6oImomFnPChSq4X+3+YpwKBgQDcalaK9llCcscWA8HAP8WVVNTjCOqiDp9q +td61E9IO/FIB/gW5y+JkaFRrA2CN1zY3s3K92uveLTNYTArecWlDcPNNFDuaYu2M +Roz4bC104HGh+zztJ0iPVzELL81Lgg6wHhLONN+eVi4gTftJxzJFXybyb+xVT25A +91ynKXB+CQKBgQC+Ub43MoI+/6pHvBfb3FbDByvz6D0flgBmVXb6tP3TQYmzKHJV +8zSd2wCGGC71V7Z3DRVIzVR1/SOetnPLbivhp+JUzfWfAcxI3pDksdvvjxLrDxTh +VycbWcxtsywjY0w/ou581eLVRcygnpC0pP6qJCAwAmUfwd0YRvmiYo6cLQKBgHIW +UIlJDdaJFmdctnLOD3VGHZMOUHRlYTqYvJe5lKbRD5mcZFZRI/OY1Ok3LEj+tj+K 
+kL+YizHK76KqaY3N4hBYbHbfHCLDRfWvptQHGlg+vFJ9eoG+LZ6UIPyLV5XX0cZz +KoS1dXG9Zc6uznzXsDucDsq6B/f4TzctUjXsCyARAoGAOKb4HtuNyYAW0jUlujR7 +IMHwUesOGlhSXqFtP9aTvk6qJgvV0+3CKcWEb4y02g+uYftP8BLNbJbIt9qOqLYh +tOVyzCoamAi8araAhjA0w4dXvqDCDK7k/gZFkojmKQtRijoxTHnWcDc3vAjYCgaM +9MVtdgSkuh2gwkD/mMoAJXM= +-----END PRIVATE KEY----- diff --git a/nix/pkgs/complement/signing_request.csr b/nix/pkgs/complement/signing_request.csr new file mode 100644 index 00000000..707e73b4 --- /dev/null +++ b/nix/pkgs/complement/signing_request.csr @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICkTCCAXkCAQAwTDELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRYwFAYDVQQK +DA13b29mZXJzLCBpbmMuMRgwFgYDVQQDDA9jb21wbGVtZW50LW9ubHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDS/odmZivxajebiyT7SMuhXqnMm+hF ++zEARLcbieem0wG4x7gi2S6WLf8DlifdXax6me13eYk4rBnTLvGEvNNx0px5M54H ++FVyoVa3c1tmA66WUcZjobafPGsDh5j+5qpScgWwjkMPGg1a09CphCFswO4PpxUU +ORX/OTGj/rEKxximW6OtavBwaS9F7mqjXJK7lCrcZxKq5uccebGMmCoO660hROST +BaFigdRTVicclk+NgYRrZyWbCiuXPjQ0jlOE2rcaDepqTUgaQs/2tdT4kBzBH6kZ +OiQOIN/ddXaj032QXr1HQYfIJfJmiM6nmRob8nik5rpZdWNO/Ncsro/fAgMBAAGg +ADANBgkqhkiG9w0BAQsFAAOCAQEAjW+aD4E0phtRT5b2RyedY1uiSe7LQECsQnIO +wUSyGGG1GXYlJscyxxyzE9W9+QIALrxZkmc/+e02u+bFb1zQXW/uB/7u7FgXzrj6 +2YSDiWYXiYKvgGWEfCi3lpcTJK9x6WWkR+iREaoKRjcl0ynhhGuR7YwP38TNyu+z +FN6B1Lo398fvJkaTCiiHngWiwztXZ2d0MxkicuwZ1LJhIQA72OTl3QoRb5uiqbze +T9QJfU6W3v8cB8c8PuKMv5gl1QsGNtlfyQB56/X0cMxWl25vWXd2ankLkAGRTDJ8 +9YZHxP1ki4/yh75AknFq02nCOsmxYrAazCYgP2TzIPhQwBurKQ== +-----END CERTIFICATE REQUEST----- From 5ba0c02d526d77b9d983335af76585cd49be12c1 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 10 Mar 2025 12:29:54 -0400 Subject: [PATCH 238/328] bump ruwuma to fix a threads issue, fix more error codes, delete legacy sytest cruft Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +- Cargo.toml | 2 +- src/api/client/alias.rs | 2 +- src/api/client/state.rs | 12 +- tests/sytest/are-we-synapse-yet.list | 866 ----------------------- tests/sytest/are-we-synapse-yet.py | 266 ------- tests/sytest/show-expected-fail-tests.sh | 105 --- tests/sytest/sytest-blacklist | 7 - tests/sytest/sytest-whitelist | 516 -------------- 9 files changed, 22 insertions(+), 1776 deletions(-) delete mode 100644 tests/sytest/are-we-synapse-yet.list delete mode 100755 tests/sytest/are-we-synapse-yet.py delete mode 100755 tests/sytest/show-expected-fail-tests.sh delete mode 100644 tests/sytest/sytest-blacklist delete mode 100644 tests/sytest/sytest-whitelist diff --git a/Cargo.lock b/Cargo.lock index f768eae1..65e8eca1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3507,7 +3507,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "assign", "js_int", @@ -3527,7 +3527,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "js_int", "ruma-common", @@ -3539,7 +3539,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "as_variant", "assign", @@ -3562,7 +3562,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "as_variant", "base64 0.22.1", @@ -3594,7 +3594,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3619,7 +3619,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "bytes", "headers", @@ -3641,7 +3641,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3650,7 +3650,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "js_int", "ruma-common", @@ -3660,7 +3660,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3675,7 +3675,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "js_int", "ruma-common", @@ -3687,7 +3687,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index 2bc1d20f..d611c08e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +rev = "f5ab6302aaa55a14827a9cb5b40e980dd135fe14" features = [ "compat", "rand", diff --git a/src/api/client/alias.rs b/src/api/client/alias.rs index 9cd7e0c5..319e5141 100644 --- a/src/api/client/alias.rs +++ b/src/api/client/alias.rs @@ -92,7 +92,7 @@ pub(crate) async fn get_alias_route( let Ok((room_id, servers)) = services.rooms.alias.resolve_alias(&room_alias, None).await else { - return Err!(Request(Unknown("Room with alias not found."))); + return Err!(Request(NotFound("Room with alias not found."))); }; let servers = room_available_servers(&services, &room_id, &room_alias, servers).await; diff --git a/src/api/client/state.rs b/src/api/client/state.rs index d04aac35..db79735f 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -314,11 +314,17 @@ async fn allowed_to_send_state_event( } for alias in aliases { - let (alias_room_id, _servers) = - services.rooms.alias.resolve_alias(&alias, None).await?; + let (alias_room_id, _servers) = services + .rooms + .alias + .resolve_alias(&alias, None) + .await + .map_err(|e| { + err!(Request(Unknown("Failed resolving alias \"{alias}\": {e}"))) + })?; if alias_room_id != room_id { - return Err!(Request(Unknown( + return Err!(Request(BadAlias( "Room alias {alias} does not belong to room {room_id}" ))); } diff --git a/tests/sytest/are-we-synapse-yet.list b/tests/sytest/are-we-synapse-yet.list deleted file mode 100644 index 99091989..00000000 --- a/tests/sytest/are-we-synapse-yet.list +++ /dev/null @@ -1,866 +0,0 @@ -reg GET /register yields a set of flows -reg POST /register can create a user -reg POST /register downcases capitals in usernames -reg POST /register returns the same device_id as that in the request -reg POST /register rejects registration of usernames with '!' -reg POST /register rejects registration of usernames with '"' -reg POST /register rejects registration of usernames with ':' -reg POST /register rejects registration of usernames with '?' 
-reg POST /register rejects registration of usernames with '\' -reg POST /register rejects registration of usernames with '@' -reg POST /register rejects registration of usernames with '[' -reg POST /register rejects registration of usernames with ']' -reg POST /register rejects registration of usernames with '{' -reg POST /register rejects registration of usernames with '|' -reg POST /register rejects registration of usernames with '}' -reg POST /register rejects registration of usernames with '£' -reg POST /register rejects registration of usernames with 'é' -reg POST /register rejects registration of usernames with '\n' -reg POST /register rejects registration of usernames with ''' -reg POST /r0/admin/register with shared secret -reg POST /r0/admin/register admin with shared secret -reg POST /r0/admin/register with shared secret downcases capitals -reg POST /r0/admin/register with shared secret disallows symbols -reg POST rejects invalid utf-8 in JSON -log GET /login yields a set of flows -log POST /login can log in as a user -log POST /login returns the same device_id as that in the request -log POST /login can log in as a user with just the local part of the id -log POST /login as non-existing user is rejected -log POST /login wrong password is rejected -log Interactive authentication types include SSO -log Can perform interactive authentication with SSO -log The user must be consistent through an interactive authentication session with SSO -log The operation must be consistent through an interactive authentication session -v1s GET /events initially -v1s GET /initialSync initially -csa Version responds 200 OK with valid structure -pro PUT /profile/:user_id/displayname sets my name -pro GET /profile/:user_id/displayname publicly accessible -pro PUT /profile/:user_id/avatar_url sets my avatar -pro GET /profile/:user_id/avatar_url publicly accessible -dev GET /device/{deviceId} -dev GET /device/{deviceId} gives a 404 for unknown devices -dev GET /devices -dev PUT /device/{deviceId} updates device fields -dev PUT /device/{deviceId} gives a 404 for unknown devices -dev DELETE /device/{deviceId} -dev DELETE /device/{deviceId} requires UI auth user to match device owner -dev DELETE /device/{deviceId} with no body gives a 401 -dev The deleted device must be consistent through an interactive auth session -dev Users receive device_list updates for their own devices -pre GET /presence/:user_id/status fetches initial status -pre PUT /presence/:user_id/status updates my presence -crm POST /createRoom makes a public room -crm POST /createRoom makes a private room -crm POST /createRoom makes a private room with invites -crm POST /createRoom makes a room with a name -crm POST /createRoom makes a room with a topic -syn Can /sync newly created room -crm POST /createRoom creates a room with the given version -crm POST /createRoom rejects attempts to create rooms with numeric versions -crm POST /createRoom rejects attempts to create rooms with unknown versions -crm POST /createRoom ignores attempts to set the room version via creation_content -mem GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership -mem GET /rooms/:room_id/state/m.room.member/:user_id?format=event fetches my membership event -rst GET /rooms/:room_id/state/m.room.power_levels fetches powerlevels -mem GET /rooms/:room_id/joined_members fetches my membership -v1s GET /rooms/:room_id/initialSync fetches initial sync state -pub GET /publicRooms lists newly-created room -ali GET /directory/room/:room_alias yields room ID 
-mem GET /joined_rooms lists newly-created room -rst POST /rooms/:room_id/state/m.room.name sets name -rst GET /rooms/:room_id/state/m.room.name gets name -rst POST /rooms/:room_id/state/m.room.topic sets topic -rst GET /rooms/:room_id/state/m.room.topic gets topic -rst GET /rooms/:room_id/state fetches entire room state -crm POST /createRoom with creation content -ali PUT /directory/room/:room_alias creates alias -nsp GET /rooms/:room_id/aliases lists aliases -jon POST /rooms/:room_id/join can join a room -jon POST /join/:room_alias can join a room -jon POST /join/:room_id can join a room -jon POST /join/:room_id can join a room with custom content -jon POST /join/:room_alias can join a room with custom content -lev POST /rooms/:room_id/leave can leave a room -inv POST /rooms/:room_id/invite can send an invite -ban POST /rooms/:room_id/ban can ban a user -snd POST /rooms/:room_id/send/:event_type sends a message -snd PUT /rooms/:room_id/send/:event_type/:txn_id sends a message -snd PUT /rooms/:room_id/send/:event_type/:txn_id deduplicates the same txn id -get GET /rooms/:room_id/messages returns a message -get GET /rooms/:room_id/messages lazy loads members correctly -typ PUT /rooms/:room_id/typing/:user_id sets typing notification -typ Typing notifications don't leak (3 subtests) -rst GET /rooms/:room_id/state/m.room.power_levels can fetch levels -rst PUT /rooms/:room_id/state/m.room.power_levels can set levels -rst PUT power_levels should not explode if the old power levels were empty -rst Both GET and PUT work -rct POST /rooms/:room_id/receipt can create receipts -red POST /rooms/:room_id/read_markers can create read marker -med POST /media/r0/upload can create an upload -med GET /media/r0/download can fetch the value again -cap GET /capabilities is present and well formed for registered user -cap GET /r0/capabilities is not public -reg Register with a recaptcha -reg registration is idempotent, without username specified -reg registration is idempotent, with username specified -reg registration remembers parameters -reg registration accepts non-ascii passwords -reg registration with inhibit_login inhibits login -reg User signups are forbidden from starting with '_' -reg Can register using an email address -log Can login with 3pid and password using m.login.password -log login types include SSO -log /login/cas/redirect redirects if the old m.login.cas login type is listed -log Can login with new user via CAS -lox Can logout current device -lox Can logout all devices -lox Request to logout with invalid an access token is rejected -lox Request to logout without an access token is rejected -log After changing password, can't log in with old password -log After changing password, can log in with new password -log After changing password, existing session still works -log After changing password, a different session no longer works by default -log After changing password, different sessions can optionally be kept -psh Pushers created with a different access token are deleted on password change -psh Pushers created with a the same access token are not deleted on password change -acc Can deactivate account -acc Can't deactivate account with wrong password -acc After deactivating account, can't log in with password -acc After deactivating account, can't log in with an email -v1s initialSync sees my presence status -pre Presence change reports an event to myself -pre Friends presence changes reports events -crm Room creation reports m.room.create to myself -crm Room creation reports 
m.room.member to myself -rst Setting room topic reports m.room.topic to myself -v1s Global initialSync -v1s Global initialSync with limit=0 gives no messages -v1s Room initialSync -v1s Room initialSync with limit=0 gives no messages -rst Setting state twice is idempotent -jon Joining room twice is idempotent -syn New room members see their own join event -v1s New room members see existing users' presence in room initialSync -syn Existing members see new members' join events -syn Existing members see new members' presence -v1s All room members see all room members' presence in global initialSync -f,jon Remote users can join room by alias -syn New room members see their own join event -v1s New room members see existing members' presence in room initialSync -syn Existing members see new members' join events -syn Existing members see new member's presence -v1s New room members see first user's profile information in global initialSync -v1s New room members see first user's profile information in per-room initialSync -f,jon Remote users may not join unfederated rooms -syn Local room members see posted message events -v1s Fetching eventstream a second time doesn't yield the message again -syn Local non-members don't see posted message events -get Local room members can get room messages -f,syn Remote room members also see posted message events -f,get Remote room members can get room messages -get Message history can be paginated -f,get Message history can be paginated over federation -eph Ephemeral messages received from clients are correctly expired -ali Room aliases can contain Unicode -f,ali Remote room alias queries can handle Unicode -ali Canonical alias can be set -ali Canonical alias can include alt_aliases -ali Regular users can add and delete aliases in the default room configuration -ali Regular users can add and delete aliases when m.room.aliases is restricted -ali Deleting a non-existent alias should return a 404 -ali Users can't delete other's aliases -ali Users with sufficient power-level can delete other's aliases -ali Can delete canonical alias -ali Alias creators can delete alias with no ops -ali Alias creators can delete canonical alias with no ops -ali Only room members can list aliases of a room -inv Can invite users to invite-only rooms -inv Uninvited users cannot join the room -inv Invited user can reject invite -f,inv Invited user can reject invite over federation -f,inv Invited user can reject invite over federation several times -inv Invited user can reject invite for empty room -f,inv Invited user can reject invite over federation for empty room -inv Invited user can reject local invite after originator leaves -inv Invited user can see room metadata -f,inv Remote invited user can see room metadata -inv Users cannot invite themselves to a room -inv Users cannot invite a user that is already in the room -ban Banned user is kicked and may not rejoin until unbanned -f,ban Remote banned user is kicked and may not rejoin until unbanned -ban 'ban' event respects room powerlevel -plv setting 'm.room.name' respects room powerlevel -plv setting 'm.room.power_levels' respects room powerlevel (2 subtests) -plv Unprivileged users can set m.room.topic if it only needs level 0 -plv Users cannot set ban powerlevel higher than their own (2 subtests) -plv Users cannot set kick powerlevel higher than their own (2 subtests) -plv Users cannot set redact powerlevel higher than their own (2 subtests) -v1s Check that event streams started after a client joined a room work (SYT-1) -v1s Event 
stream catches up fully after many messages -xxx POST /rooms/:room_id/redact/:event_id as power user redacts message -xxx POST /rooms/:room_id/redact/:event_id as original message sender redacts message -xxx POST /rooms/:room_id/redact/:event_id as random user does not redact message -xxx POST /redact disallows redaction of event in different room -xxx Redaction of a redaction redacts the redaction reason -v1s A departed room is still included in /initialSync (SPEC-216) -v1s Can get rooms/{roomId}/initialSync for a departed room (SPEC-216) -rst Can get rooms/{roomId}/state for a departed room (SPEC-216) -mem Can get rooms/{roomId}/members for a departed room (SPEC-216) -get Can get rooms/{roomId}/messages for a departed room (SPEC-216) -rst Can get 'm.room.name' state for a departed room (SPEC-216) -syn Getting messages going forward is limited for a departed room (SPEC-216) -3pd Can invite existing 3pid -3pd Can invite existing 3pid with no ops into a private room -3pd Can invite existing 3pid in createRoom -3pd Can invite unbound 3pid -f,3pd Can invite unbound 3pid over federation -3pd Can invite unbound 3pid with no ops into a private room -f,3pd Can invite unbound 3pid over federation with no ops into a private room -f,3pd Can invite unbound 3pid over federation with users from both servers -3pd Can accept unbound 3pid invite after inviter leaves -3pd Can accept third party invite with /join -3pd 3pid invite join with wrong but valid signature are rejected -3pd 3pid invite join valid signature but revoked keys are rejected -3pd 3pid invite join valid signature but unreachable ID server are rejected -gst Guest user cannot call /events globally -gst Guest users can join guest_access rooms -gst Guest users can send messages to guest_access rooms if joined -gst Guest user calling /events doesn't tightloop -gst Guest users are kicked from guest_access rooms on revocation of guest_access -gst Guest user can set display names -gst Guest users are kicked from guest_access rooms on revocation of guest_access over federation -gst Guest user can upgrade to fully featured user -gst Guest user cannot upgrade other users -pub GET /publicRooms lists rooms -pub GET /publicRooms includes avatar URLs -gst Guest users can accept invites to private rooms over federation -gst Guest users denied access over federation if guest access prohibited -mem Room members can override their displayname on a room-specific basis -mem Room members can join a room with an overridden displayname -mem Users cannot kick users from a room they are not in -mem Users cannot kick users who have already left a room -typ Typing notification sent to local room members -f,typ Typing notifications also sent to remote room members -typ Typing can be explicitly stopped -rct Read receipts are visible to /initialSync -rct Read receipts are sent as events -rct Receipts must be m.read -pro displayname updates affect room member events -pro avatar_url updates affect room member events -gst m.room.history_visibility == "world_readable" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "shared" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "invited" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "joined" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "default" allows/forbids appropriately for Guest users -gst Guest non-joined user cannot call /events on shared room -gst Guest non-joined user cannot call /events on 
invited room -gst Guest non-joined user cannot call /events on joined room -gst Guest non-joined user cannot call /events on default room -gst Guest non-joined user can call /events on world_readable room -gst Guest non-joined users can get state for world_readable rooms -gst Guest non-joined users can get individual state for world_readable rooms -gst Guest non-joined users cannot room initalSync for non-world_readable rooms -gst Guest non-joined users can room initialSync for world_readable rooms -gst Guest non-joined users can get individual state for world_readable rooms after leaving -gst Guest non-joined users cannot send messages to guest_access rooms if not joined -gst Guest users can sync from world_readable guest_access rooms if joined -gst Guest users can sync from shared guest_access rooms if joined -gst Guest users can sync from invited guest_access rooms if joined -gst Guest users can sync from joined guest_access rooms if joined -gst Guest users can sync from default guest_access rooms if joined -ath m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users -ath m.room.history_visibility == "shared" allows/forbids appropriately for Real users -ath m.room.history_visibility == "invited" allows/forbids appropriately for Real users -ath m.room.history_visibility == "joined" allows/forbids appropriately for Real users -ath m.room.history_visibility == "default" allows/forbids appropriately for Real users -ath Real non-joined user cannot call /events on shared room -ath Real non-joined user cannot call /events on invited room -ath Real non-joined user cannot call /events on joined room -ath Real non-joined user cannot call /events on default room -ath Real non-joined user can call /events on world_readable room -ath Real non-joined users can get state for world_readable rooms -ath Real non-joined users can get individual state for world_readable rooms -ath Real non-joined users cannot room initalSync for non-world_readable rooms -ath Real non-joined users can room initialSync for world_readable rooms -ath Real non-joined users can get individual state for world_readable rooms after leaving -ath Real non-joined users cannot send messages to guest_access rooms if not joined -ath Real users can sync from world_readable guest_access rooms if joined -ath Real users can sync from shared guest_access rooms if joined -ath Real users can sync from invited guest_access rooms if joined -ath Real users can sync from joined guest_access rooms if joined -ath Real users can sync from default guest_access rooms if joined -ath Only see history_visibility changes on boundaries -f,ath Backfill works correctly with history visibility set to joined -fgt Forgotten room messages cannot be paginated -fgt Forgetting room does not show up in v2 /sync -fgt Can forget room you've been kicked from -fgt Can't forget room you're still in -fgt Can re-join room if re-invited -ath Only original members of the room can see messages from erased users -mem /joined_rooms returns only joined rooms -mem /joined_members return joined members -ctx /context/ on joined room works -ctx /context/ on non world readable room does not work -ctx /context/ returns correct number of events -ctx /context/ with lazy_load_members filter works -get /event/ on joined room works -get /event/ on non world readable room does not work -get /event/ does not allow access to events before the user joined -mem Can get rooms/{roomId}/members -mem Can get rooms/{roomId}/members at a given point -mem Can filter 
rooms/{roomId}/members -upg /upgrade creates a new room -upg /upgrade should preserve room visibility for public rooms -upg /upgrade should preserve room visibility for private rooms -upg /upgrade copies >100 power levels to the new room -upg /upgrade copies the power levels to the new room -upg /upgrade preserves the power level of the upgrading user in old and new rooms -upg /upgrade copies important state to the new room -upg /upgrade copies ban events to the new room -upg local user has push rules copied to upgraded room -f,upg remote user has push rules copied to upgraded room -upg /upgrade moves aliases to the new room -upg /upgrade moves remote aliases to the new room -upg /upgrade preserves direct room state -upg /upgrade preserves room federation ability -upg /upgrade restricts power levels in the old room -upg /upgrade restricts power levels in the old room when the old PLs are unusual -upg /upgrade to an unknown version is rejected -upg /upgrade is rejected if the user can't send state events -upg /upgrade of a bogus room fails gracefully -upg Cannot send tombstone event that points to the same room -f,upg Local and remote users' homeservers remove a room from their public directory on upgrade -rst Name/topic keys are correct -f,pub Can get remote public room list -pub Can paginate public room list -pub Can search public room list -syn Can create filter -syn Can download filter -syn Can sync -syn Can sync a joined room -syn Full state sync includes joined rooms -syn Newly joined room is included in an incremental sync -syn Newly joined room has correct timeline in incremental sync -syn Newly joined room includes presence in incremental sync -syn Get presence for newly joined members in incremental sync -syn Can sync a room with a single message -syn Can sync a room with a message with a transaction id -syn A message sent after an initial sync appears in the timeline of an incremental sync. -syn A filtered timeline reaches its limit -syn Syncing a new room with a large timeline limit isn't limited -syn A full_state incremental update returns only recent timeline -syn A prev_batch token can be used in the v1 messages API -syn A next_batch token can be used in the v1 messages API -syn User sees their own presence in a sync -syn User is offline if they set_presence=offline in their sync -syn User sees updates to presence from other users in the incremental sync. 
-syn State is included in the timeline in the initial sync -f,syn State from remote users is included in the state in the initial sync -syn Changes to state are included in an incremental sync -syn Changes to state are included in an gapped incremental sync -f,syn State from remote users is included in the timeline in an incremental sync -syn A full_state incremental update returns all state -syn When user joins a room the state is included in the next sync -syn A change to displayname should not result in a full state sync -syn A change to displayname should appear in incremental /sync -syn When user joins a room the state is included in a gapped sync -syn When user joins and leaves a room in the same batch, the full state is still included in the next sync -syn Current state appears in timeline in private history -syn Current state appears in timeline in private history with many messages before -syn Current state appears in timeline in private history with many messages after -syn Rooms a user is invited to appear in an initial sync -syn Rooms a user is invited to appear in an incremental sync -syn Newly joined room is included in an incremental sync after invite -syn Sync can be polled for updates -syn Sync is woken up for leaves -syn Left rooms appear in the leave section of sync -syn Newly left rooms appear in the leave section of incremental sync -syn We should see our own leave event, even if history_visibility is restricted (SYN-662) -syn We should see our own leave event when rejecting an invite, even if history_visibility is restricted (riot-web/3462) -syn Newly left rooms appear in the leave section of gapped sync -syn Previously left rooms don't appear in the leave section of sync -syn Left rooms appear in the leave section of full state sync -syn Archived rooms only contain history from before the user left -syn Banned rooms appear in the leave section of sync -syn Newly banned rooms appear in the leave section of incremental sync -syn Newly banned rooms appear in the leave section of incremental sync -syn Typing events appear in initial sync -syn Typing events appear in incremental sync -syn Typing events appear in gapped sync -syn Read receipts appear in initial v2 /sync -syn New read receipts appear in incremental v2 /sync -syn Can pass a JSON filter as a query parameter -syn Can request federation format via the filter -syn Read markers appear in incremental v2 /sync -syn Read markers appear in initial v2 /sync -syn Read markers can be updated -syn Lazy loading parameters in the filter are strictly boolean -syn The only membership state included in an initial sync is for all the senders in the timeline -syn The only membership state included in an incremental sync is for senders in the timeline -syn The only membership state included in a gapped incremental sync is for senders in the timeline -syn Gapped incremental syncs include all state changes -syn Old leaves are present in gapped incremental syncs -syn Leaves are present in non-gapped incremental syncs -syn Old members are included in gappy incr LL sync if they start speaking -syn Members from the gap are included in gappy incr LL sync -syn We don't send redundant membership state across incremental syncs by default -syn We do send redundant membership state across incremental syncs if asked -syn Unnamed room comes with a name summary -syn Named room comes with just joined member count summary -syn Room summary only has 5 heroes -syn Room summary counts change when membership changes -rmv User can create and 
send/receive messages in a room with version 1 -rmv User can create and send/receive messages in a room with version 1 (2 subtests) -rmv local user can join room with version 1 -rmv User can invite local user to room with version 1 -rmv remote user can join room with version 1 -rmv User can invite remote user to room with version 1 -rmv Remote user can backfill in a room with version 1 -rmv Can reject invites over federation for rooms with version 1 -rmv Can receive redactions from regular users over federation in room version 1 -rmv User can create and send/receive messages in a room with version 2 -rmv User can create and send/receive messages in a room with version 2 (2 subtests) -rmv local user can join room with version 2 -rmv User can invite local user to room with version 2 -rmv remote user can join room with version 2 -rmv User can invite remote user to room with version 2 -rmv Remote user can backfill in a room with version 2 -rmv Can reject invites over federation for rooms with version 2 -rmv Can receive redactions from regular users over federation in room version 2 -rmv User can create and send/receive messages in a room with version 3 -rmv User can create and send/receive messages in a room with version 3 (2 subtests) -rmv local user can join room with version 3 -rmv User can invite local user to room with version 3 -rmv remote user can join room with version 3 -rmv User can invite remote user to room with version 3 -rmv Remote user can backfill in a room with version 3 -rmv Can reject invites over federation for rooms with version 3 -rmv Can receive redactions from regular users over federation in room version 3 -rmv User can create and send/receive messages in a room with version 4 -rmv User can create and send/receive messages in a room with version 4 (2 subtests) -rmv local user can join room with version 4 -rmv User can invite local user to room with version 4 -rmv remote user can join room with version 4 -rmv User can invite remote user to room with version 4 -rmv Remote user can backfill in a room with version 4 -rmv Can reject invites over federation for rooms with version 4 -rmv Can receive redactions from regular users over federation in room version 4 -rmv User can create and send/receive messages in a room with version 5 -rmv User can create and send/receive messages in a room with version 5 (2 subtests) -rmv local user can join room with version 5 -rmv User can invite local user to room with version 5 -rmv remote user can join room with version 5 -rmv User can invite remote user to room with version 5 -rmv Remote user can backfill in a room with version 5 -rmv Can reject invites over federation for rooms with version 5 -rmv Can receive redactions from regular users over federation in room version 5 -rmv User can create and send/receive messages in a room with version 6 -rmv User can create and send/receive messages in a room with version 6 (2 subtests) -rmv local user can join room with version 6 -rmv User can invite local user to room with version 6 -rmv remote user can join room with version 6 -rmv User can invite remote user to room with version 6 -rmv Remote user can backfill in a room with version 6 -rmv Can reject invites over federation for rooms with version 6 -rmv Can receive redactions from regular users over federation in room version 6 -rmv Inbound federation rejects invites which include invalid JSON for room version 6 -rmv Outbound federation rejects invite response which include invalid JSON for room version 6 -rmv Inbound federation rejects invite 
rejections which include invalid JSON for room version 6 -rmv Server rejects invalid JSON in a version 6 room -pre Presence changes are reported to local room members -f,pre Presence changes are also reported to remote room members -pre Presence changes to UNAVAILABLE are reported to local room members -f,pre Presence changes to UNAVAILABLE are reported to remote room members -v1s Newly created users see their own presence in /initialSync (SYT-34) -dvk Can upload device keys -dvk Should reject keys claiming to belong to a different user -dvk Can query device keys using POST -dvk Can query specific device keys using POST -dvk query for user with no keys returns empty key dict -dvk Can claim one time key using POST -f,dvk Can query remote device keys using POST -f,dvk Can claim remote one time key using POST -dvk Local device key changes appear in v2 /sync -dvk Local new device changes appear in v2 /sync -dvk Local delete device changes appear in v2 /sync -dvk Local update device changes appear in v2 /sync -dvk Can query remote device keys using POST after notification -f,dev Device deletion propagates over federation -f,dev If remote user leaves room, changes device and rejoins we see update in sync -f,dev If remote user leaves room we no longer receive device updates -dvk Local device key changes appear in /keys/changes -dvk New users appear in /keys/changes -f,dvk If remote user leaves room, changes device and rejoins we see update in /keys/changes -dvk Get left notifs in sync and /keys/changes when other user leaves -dvk Get left notifs for other users in sync and /keys/changes when user leaves -f,dvk If user leaves room, remote user changes device and rejoins we see update in /sync and /keys/changes -dkb Can create backup version -dkb Can update backup version -dkb Responds correctly when backup is empty -dkb Can backup keys -dkb Can update keys with better versions -dkb Will not update keys with worse versions -dkb Will not back up to an old backup version -dkb Can delete backup -dkb Deleted & recreated backups are empty -dkb Can create more than 10 backup versions -xsk Can upload self-signing keys -xsk Fails to upload self-signing keys with no auth -xsk Fails to upload self-signing key without master key -xsk Changing master key notifies local users -xsk Changing user-signing key notifies local users -f,xsk can fetch self-signing keys over federation -f,xsk uploading self-signing key notifies over federation -f,xsk uploading signed devices gets propagated over federation -tag Can add tag -tag Can remove tag -tag Can list tags for a room -v1s Tags appear in the v1 /events stream -v1s Tags appear in the v1 /initalSync -v1s Tags appear in the v1 room initial sync -tag Tags appear in an initial v2 /sync -tag Newly updated tags appear in an incremental v2 /sync -tag Deleted tags appear in an incremental v2 /sync -tag local user has tags copied to the new room -f,tag remote user has tags copied to the new room -sch Can search for an event by body -sch Can get context around search results -sch Can back-paginate search results -sch Search works across an upgraded room and its predecessor -sch Search results with rank ordering do not include redacted events -sch Search results with recent ordering do not include redacted events -acc Can add account data -acc Can add account data to room -acc Can get account data without syncing -acc Can get room account data without syncing -v1s Latest account data comes down in /initialSync -v1s Latest account data comes down in room initialSync -v1s Account 
data appears in v1 /events stream -v1s Room account data appears in v1 /events stream -acc Latest account data appears in v2 /sync -acc New account data appears in incremental v2 /sync -oid Can generate a openid access_token that can be exchanged for information about a user -oid Invalid openid access tokens are rejected -oid Requests to userinfo without access tokens are rejected -std Can send a message directly to a device using PUT /sendToDevice -std Can recv a device message using /sync -std Can recv device messages until they are acknowledged -std Device messages with the same txn_id are deduplicated -std Device messages wake up /sync -std Can recv device messages over federation -fsd Device messages over federation wake up /sync -std Can send messages with a wildcard device id -std Can send messages with a wildcard device id to two devices -std Wildcard device messages wake up /sync -fsd Wildcard device messages over federation wake up /sync -adm /whois -nsp /purge_history -nsp /purge_history by ts -nsp Can backfill purged history -nsp Shutdown room -ign Ignore user in existing room -ign Ignore invite in full sync -ign Ignore invite in incremental sync -fky Checking local federation server -fky Federation key API allows unsigned requests for keys -fky Federation key API can act as a notary server via a GET request -fky Federation key API can act as a notary server via a POST request -fky Key notary server should return an expired key if it can't find any others -fky Key notary server must not overwrite a valid key with a spurious result from the origin server -fqu Non-numeric ports in server names are rejected -fqu Outbound federation can query profile data -fqu Inbound federation can query profile data -fqu Outbound federation can query room alias directory -fqu Inbound federation can query room alias directory -fsj Outbound federation can query v1 /send_join -fsj Outbound federation can query v2 /send_join -fmj Outbound federation passes make_join failures through to the client -fsj Inbound federation can receive v1 /send_join -fsj Inbound federation can receive v2 /send_join -fmj Inbound /v1/make_join rejects remote attempts to join local users to rooms -fsj Inbound /v1/send_join rejects incorrectly-signed joins -fsj Inbound /v1/send_join rejects joins from other servers -fau Inbound federation rejects remote attempts to kick local users to rooms -frv Inbound federation rejects attempts to join v1 rooms from servers without v1 support -frv Inbound federation rejects attempts to join v2 rooms from servers lacking version support -frv Inbound federation rejects attempts to join v2 rooms from servers only supporting v1 -frv Inbound federation accepts attempts to join v2 rooms from servers with support -frv Outbound federation correctly handles unsupported room versions -frv A pair of servers can establish a join in a v2 room -fsj Outbound federation rejects send_join responses with no m.room.create event -frv Outbound federation rejects m.room.create events with an unknown room version -fsj Event with an invalid signature in the send_join response should not cause room join to fail -fsj Inbound: send_join rejects invalid JSON for room version 6 -fed Outbound federation can send events -fed Inbound federation can receive events -fed Inbound federation can receive redacted events -fed Ephemeral messages received from servers are correctly expired -fed Events whose auth_events are in the wrong room do not mess up the room state -fed Inbound federation can return events -fed Inbound 
federation redacts events from erased users -fme Outbound federation can request missing events -fme Inbound federation can return missing events for world_readable visibility -fme Inbound federation can return missing events for shared visibility -fme Inbound federation can return missing events for invite visibility -fme Inbound federation can return missing events for joined visibility -fme outliers whose auth_events are in a different room are correctly rejected -fbk Outbound federation can backfill events -fbk Inbound federation can backfill events -fbk Backfill checks the events requested belong to the room -fbk Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination -fiv Outbound federation can send invites via v1 API -fiv Outbound federation can send invites via v2 API -fiv Inbound federation can receive invites via v1 API -fiv Inbound federation can receive invites via v2 API -fiv Inbound federation can receive invite and reject when remote replies with a 403 -fiv Inbound federation can receive invite and reject when remote replies with a 500 -fiv Inbound federation can receive invite and reject when remote is unreachable -fiv Inbound federation rejects invites which are not signed by the sender -fiv Inbound federation can receive invite rejections -fiv Inbound federation rejects incorrectly-signed invite rejections -fsl Inbound /v1/send_leave rejects leaves from other servers -fst Inbound federation can get state for a room -fst Inbound federation of state requires event_id as a mandatory paramater -fst Inbound federation can get state_ids for a room -fst Inbound federation of state_ids requires event_id as a mandatory paramater -fst Federation rejects inbound events where the prev_events cannot be found -fst Room state at a rejected message event is the same as its predecessor -fst Room state at a rejected state event is the same as its predecessor -fst Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state -fst Federation handles empty auth_events in state_ids sanely -fst Getting state checks the events requested belong to the room -fst Getting state IDs checks the events requested belong to the room -fst Should not be able to take over the room by pretending there is no PL event -fpb Inbound federation can get public room list -fed Outbound federation sends receipts -fed Inbound federation rejects receipts from wrong remote -fed Inbound federation ignores redactions from invalid servers room > v3 -fed An event which redacts an event in a different room should be ignored -fed An event which redacts itself should be ignored -fed A pair of events which redact each other should be ignored -fdk Local device key changes get to remote servers -fdk Server correctly handles incoming m.device_list_update -fdk Server correctly resyncs when client query keys and there is no remote cache -fdk Server correctly resyncs when server leaves and rejoins a room -fdk Local device key changes get to remote servers with correct prev_id -fdk Device list doesn't change if remote server is down -fdk If a device list update goes missing, the server resyncs on the next one -fst Name/topic keys are correct -fau Remote servers cannot set power levels in rooms without existing powerlevels -fau Remote servers should reject attempts by non-creators to set the power levels -fau Inbound federation rejects typing notifications from wrong remote -fau Users cannot set notifications powerlevel higher than their own -fed 
Forward extremities remain so even after the next events are populated as outliers -fau Banned servers cannot send events -fau Banned servers cannot /make_join -fau Banned servers cannot /send_join -fau Banned servers cannot /make_leave -fau Banned servers cannot /send_leave -fau Banned servers cannot /invite -fau Banned servers cannot get room state -fau Banned servers cannot get room state ids -fau Banned servers cannot backfill -fau Banned servers cannot /event_auth -fau Banned servers cannot get missing events -fau Server correctly handles transactions that break edu limits -fau Inbound federation correctly soft fails events -fau Inbound federation accepts a second soft-failed event -fau Inbound federation correctly handles soft failed events as extremities -med Can upload with Unicode file name -med Can download with Unicode file name locally -f,med Can download with Unicode file name over federation -med Alternative server names do not cause a routing loop -med Can download specifying a different Unicode file name -med Can upload without a file name -med Can download without a file name locally -f,med Can download without a file name over federation -med Can upload with ASCII file name -med Can download file 'ascii' -med Can download file 'name with spaces' -med Can download file 'name;with;semicolons' -med Can download specifying a different ASCII file name -med Can send image in room message -med Can fetch images in room -med POSTed media can be thumbnailed -f,med Remote media can be thumbnailed -med Test URL preview -med Can read configuration endpoint -nsp Can quarantine media in rooms -udr User appears in user directory -udr User in private room doesn't appear in user directory -udr User joining then leaving public room appears and dissappears from directory -udr Users appear/disappear from directory when join_rules are changed -udr Users appear/disappear from directory when history_visibility are changed -udr Users stay in directory when join_rules are changed but history_visibility is world_readable -f,udr User in remote room doesn't appear in user directory after server left room -udr User directory correctly update on display name change -udr User in shared private room does appear in user directory -udr User in shared private room does appear in user directory until leave -udr User in dir while user still shares private rooms -nsp Create group -nsp Add group rooms -nsp Remove group rooms -nsp Get local group profile -nsp Get local group users -nsp Add/remove local group rooms -nsp Get local group summary -nsp Get remote group profile -nsp Get remote group users -nsp Add/remove remote group rooms -nsp Get remote group summary -nsp Add local group users -nsp Remove self from local group -nsp Remove other from local group -nsp Add remote group users -nsp Remove self from remote group -nsp Listing invited users of a remote group when not a member returns a 403 -nsp Add group category -nsp Remove group category -nsp Get group categories -nsp Add group role -nsp Remove group role -nsp Get group roles -nsp Add room to group summary -nsp Adding room to group summary keeps room_id when fetching rooms in group -nsp Adding multiple rooms to group summary have correct order -nsp Remove room from group summary -nsp Add room to group summary with category -nsp Remove room from group summary with category -nsp Add user to group summary -nsp Adding multiple users to group summary have correct order -nsp Remove user from group summary -nsp Add user to group summary with role -nsp Remove 
user from group summary with role -nsp Local group invites come down sync -nsp Group creator sees group in sync -nsp Group creator sees group in initial sync -nsp Get/set local group publicity -nsp Bulk get group publicity -nsp Joinability comes down summary -nsp Set group joinable and join it -nsp Group is not joinable by default -nsp Group is joinable over federation -nsp Room is transitioned on local and remote groups upon room upgrade -3pd Can bind 3PID via home server -3pd Can bind and unbind 3PID via homeserver -3pd Can unbind 3PID via homeserver when bound out of band -3pd 3PIDs are unbound after account deactivation -3pd Can bind and unbind 3PID via /unbind by specifying the identity server -3pd Can bind and unbind 3PID via /unbind without specifying the identity server -app AS can create a user -app AS can create a user with an underscore -app AS can create a user with inhibit_login -app AS cannot create users outside its own namespace -app Regular users cannot register within the AS namespace -app AS can make room aliases -app Regular users cannot create room aliases within the AS namespace -app AS-ghosted users can use rooms via AS -app AS-ghosted users can use rooms themselves -app Ghost user must register before joining room -app AS can set avatar for ghosted users -app AS can set displayname for ghosted users -app AS can't set displayname for random users -app Inviting an AS-hosted user asks the AS server -app Accesing an AS-hosted room alias asks the AS server -app Events in rooms with AS-hosted room aliases are sent to AS server -app AS user (not ghost) can join room without registering -app AS user (not ghost) can join room without registering, with user_id query param -app HS provides query metadata -app HS can provide query metadata on a single protocol -app HS will proxy request for 3PU mapping -app HS will proxy request for 3PL mapping -app AS can publish rooms in their own list -app AS and main public room lists are separate -app AS can deactivate a user -psh Test that a message is pushed -psh Invites are pushed -psh Rooms with names are correctly named in pushed -psh Rooms with canonical alias are correctly named in pushed -psh Rooms with many users are correctly pushed -psh Don't get pushed for rooms you've muted -psh Rejected events are not pushed -psh Can add global push rule for room -psh Can add global push rule for sender -psh Can add global push rule for content -psh Can add global push rule for override -psh Can add global push rule for underride -psh Can add global push rule for content -psh New rules appear before old rules by default -psh Can add global push rule before an existing rule -psh Can add global push rule after an existing rule -psh Can delete a push rule -psh Can disable a push rule -psh Adding the same push rule twice is idempotent -psh Messages that notify from another user increment unread notification count -psh Messages that highlight from another user increment unread highlight count -psh Can change the actions of default rules -psh Changing the actions of an unknown default rule fails with 404 -psh Can change the actions of a user specified rule -psh Changing the actions of an unknown rule fails with 404 -psh Can fetch a user's pushers -psh Push rules come down in an initial /sync -psh Adding a push rule wakes up an incremental /sync -psh Disabling a push rule wakes up an incremental /sync -psh Enabling a push rule wakes up an incremental /sync -psh Setting actions for a push rule wakes up an incremental /sync -psh Can enable/disable 
default rules -psh Enabling an unknown default rule fails with 404 -psh Test that rejected pushers are removed. -psh Notifications can be viewed with GET /notifications -psh Trying to add push rule with no scope fails with 400 -psh Trying to add push rule with invalid scope fails with 400 -psh Trying to add push rule with missing template fails with 400 -psh Trying to add push rule with missing rule_id fails with 400 -psh Trying to add push rule with empty rule_id fails with 400 -psh Trying to add push rule with invalid template fails with 400 -psh Trying to add push rule with rule_id with slashes fails with 400 -psh Trying to add push rule with override rule without conditions fails with 400 -psh Trying to add push rule with underride rule without conditions fails with 400 -psh Trying to add push rule with condition without kind fails with 400 -psh Trying to add push rule with content rule without pattern fails with 400 -psh Trying to add push rule with no actions fails with 400 -psh Trying to add push rule with invalid action fails with 400 -psh Trying to add push rule with invalid attr fails with 400 -psh Trying to add push rule with invalid value for enabled fails with 400 -psh Trying to get push rules with no trailing slash fails with 400 -psh Trying to get push rules with scope without trailing slash fails with 400 -psh Trying to get push rules with template without tailing slash fails with 400 -psh Trying to get push rules with unknown scope fails with 400 -psh Trying to get push rules with unknown template fails with 400 -psh Trying to get push rules with unknown attribute fails with 400 -psh Trying to get push rules with unknown rule_id fails with 404 -psh Rooms with names are correctly named in pushes -v1s GET /initialSync with non-numeric 'limit' -v1s GET /events with non-numeric 'limit' -v1s GET /events with negative 'limit' -v1s GET /events with non-numeric 'timeout' -ath Event size limits -syn Check creating invalid filters returns 4xx -f,pre New federated private chats get full presence information (SYN-115) -pre Left room members do not cause problems for presence -crm Rooms can be created with an initial invite list (SYN-205) (1 subtests) -typ Typing notifications don't leak -ban Non-present room members cannot ban others -psh Getting push rules doesn't corrupt the cache SYN-390 -inv Test that we can be reinvited to a room we created -syn Multiple calls to /sync should not cause 500 errors -gst Guest user can call /events on another world_readable room (SYN-606) -gst Real user can call /events on another world_readable room (SYN-606) -gst Events come down the correct room -pub Asking for a remote rooms list, but supplying the local server's name, returns the local rooms list -std Can send a to-device message to two users which both receive it using /sync -fme Outbound federation will ignore a missing event with bad JSON for room version 6 -fbk Outbound federation rejects backfill containing invalid JSON for events in room version 6 -jso Invalid JSON integers -jso Invalid JSON floats -jso Invalid JSON special values -inv Can invite users to invite-only rooms (2 subtests) -plv setting 'm.room.name' respects room powerlevel (2 subtests) -psh Messages that notify from another user increment notification_count -psh Messages that org.matrix.msc2625.mark_unread from another user increment org.matrix.msc2625.unread_count -dvk Can claim one time key using POST (2 subtests) -fdk Can query remote device keys using POST (1 subtests) -fdk Can claim remote one time key using POST (2 
subtests) -fmj Inbound /make_join rejects attempts to join rooms where all users have left \ No newline at end of file diff --git a/tests/sytest/are-we-synapse-yet.py b/tests/sytest/are-we-synapse-yet.py deleted file mode 100755 index 3d21fa41..00000000 --- a/tests/sytest/are-we-synapse-yet.py +++ /dev/null @@ -1,266 +0,0 @@ -#!/usr/bin/env python3 - -from __future__ import division -import argparse -import re -import sys - -# Usage: $ ./are-we-synapse-yet.py [-v] results.tap -# This script scans a results.tap file from Dendrite's CI process and spits out -# a rating of how close we are to Synapse parity, based purely on SyTests. -# The main complexity is grouping tests sensibly into features like 'Registration' -# and 'Federation'. Then it just checks the ones which are passing and calculates -# percentages for each group. Produces results like: -# -# Client-Server APIs: 29% (196/666 tests) -# ------------------- -# Registration : 62% (20/32 tests) -# Login : 7% (1/15 tests) -# V1 CS APIs : 10% (3/30 tests) -# ... -# -# or in verbose mode: -# -# Client-Server APIs: 29% (196/666 tests) -# ------------------- -# Registration : 62% (20/32 tests) -# ✓ GET /register yields a set of flows -# ✓ POST /register can create a user -# ✓ POST /register downcases capitals in usernames -# ... -# -# You can also tack `-v` on to see exactly which tests each category falls under. - -test_mappings = { - "nsp": "Non-Spec API", - "unk": "Unknown API (no group specified)", - "app": "Application Services API", - "f": "Federation", # flag to mark test involves federation - - "federation_apis": { - "fky": "Key API", - "fsj": "send_join API", - "fmj": "make_join API", - "fsl": "send_leave API", - "fiv": "Invite API", - "fqu": "Query API", - "frv": "room versions", - "fau": "Auth", - "fbk": "Backfill API", - "fme": "get_missing_events API", - "fst": "State APIs", - "fpb": "Public Room API", - "fdk": "Device Key APIs", - "fed": "Federation API", - "fsd": "Send-to-Device APIs", - }, - - "client_apis": { - "reg": "Registration", - "log": "Login", - "lox": "Logout", - "v1s": "V1 CS APIs", - "csa": "Misc CS APIs", - "pro": "Profile", - "dev": "Devices", - "dvk": "Device Keys", - "dkb": "Device Key Backup", - "xsk": "Cross-signing Keys", - "pre": "Presence", - "crm": "Create Room", - "syn": "Sync API", - "rmv": "Room Versions", - "rst": "Room State APIs", - "pub": "Public Room APIs", - "mem": "Room Membership", - "ali": "Room Aliases", - "jon": "Joining Rooms", - "lev": "Leaving Rooms", - "inv": "Inviting users to Rooms", - "ban": "Banning users", - "snd": "Sending events", - "get": "Getting events for Rooms", - "rct": "Receipts", - "red": "Read markers", - "med": "Media APIs", - "cap": "Capabilities API", - "typ": "Typing API", - "psh": "Push APIs", - "acc": "Account APIs", - "eph": "Ephemeral Events", - "plv": "Power Levels", - "xxx": "Redaction", - "3pd": "Third-Party ID APIs", - "gst": "Guest APIs", - "ath": "Room Auth", - "fgt": "Forget APIs", - "ctx": "Context APIs", - "upg": "Room Upgrade APIs", - "tag": "Tagging APIs", - "sch": "Search APIs", - "oid": "OpenID API", - "std": "Send-to-Device APIs", - "adm": "Server Admin API", - "ign": "Ignore Users", - "udr": "User Directory APIs", - "jso": "Enforced canonical JSON", - }, -} - -# optional 'not ' with test number then anything but '#' -re_testname = re.compile(r"^(not )?ok [0-9]+ ([^#]+)") - -# Parses lines like the following: -# -# SUCCESS: ok 3 POST /register downcases capitals in usernames -# FAIL: not ok 54 (expected fail) POST /createRoom creates a room with 
the given version -# SKIP: ok 821 Multiple calls to /sync should not cause 500 errors # skip lack of can_post_room_receipts -# EXPECT FAIL: not ok 822 (expected fail) Guest user can call /events on another world_readable room (SYN-606) # TODO expected fail -# -# Only SUCCESS lines are treated as success, the rest are not implemented. -# -# Returns a dict like: -# { name: "...", ok: True } -def parse_test_line(line): - if not line.startswith("ok ") and not line.startswith("not ok "): - return - re_match = re_testname.match(line) - test_name = re_match.groups()[1].replace("(expected fail) ", "").strip() - test_pass = False - if line.startswith("ok ") and not "# skip " in line: - test_pass = True - return { - "name": test_name, - "ok": test_pass, - } - -# Prints the stats for a complete section. -# header_name => "Client-Server APIs" -# gid_to_tests => { gid: { : True|False }} -# gid_to_name => { gid: "Group Name" } -# verbose => True|False -# Produces: -# Client-Server APIs: 29% (196/666 tests) -# ------------------- -# Registration : 62% (20/32 tests) -# Login : 7% (1/15 tests) -# V1 CS APIs : 10% (3/30 tests) -# ... -# or in verbose mode: -# Client-Server APIs: 29% (196/666 tests) -# ------------------- -# Registration : 62% (20/32 tests) -# ✓ GET /register yields a set of flows -# ✓ POST /register can create a user -# ✓ POST /register downcases capitals in usernames -# ... -def print_stats(header_name, gid_to_tests, gid_to_name, verbose): - subsections = [] # Registration: 100% (13/13 tests) - subsection_test_names = {} # 'subsection name': ["✓ Test 1", "✓ Test 2", "× Test 3"] - total_passing = 0 - total_tests = 0 - for gid, tests in gid_to_tests.items(): - group_total = len(tests) - if group_total == 0: - continue - group_passing = 0 - test_names_and_marks = [] - for name, passing in tests.items(): - if passing: - group_passing += 1 - test_names_and_marks.append(f"{'✓' if passing else '×'} {name}") - - total_tests += group_total - total_passing += group_passing - pct = "{0:.0f}%".format(group_passing/group_total * 100) - line = "%s: %s (%d/%d tests)" % (gid_to_name[gid].ljust(25, ' '), pct.rjust(4, ' '), group_passing, group_total) - subsections.append(line) - subsection_test_names[line] = test_names_and_marks - - pct = "{0:.0f}%".format(total_passing/total_tests * 100) - print("%s: %s (%d/%d tests)" % (header_name, pct, total_passing, total_tests)) - print("-" * (len(header_name)+1)) - for line in subsections: - print(" %s" % (line,)) - if verbose: - for test_name_and_pass_mark in subsection_test_names[line]: - print(" %s" % (test_name_and_pass_mark,)) - print("") - print("") - -def main(results_tap_path, verbose): - # Load up test mappings - test_name_to_group_id = {} - fed_tests = set() - client_tests = set() - with open("./are-we-synapse-yet.list", "r") as f: - for line in f.readlines(): - test_name = " ".join(line.split(" ")[1:]).strip() - groups = line.split(" ")[0].split(",") - for gid in groups: - if gid == "f" or gid in test_mappings["federation_apis"]: - fed_tests.add(test_name) - else: - client_tests.add(test_name) - if gid == "f": - continue # we expect another group ID - test_name_to_group_id[test_name] = gid - - # parse results.tap - summary = { - "client": { - # gid: { - # test_name: OK - # } - }, - "federation": { - # gid: { - # test_name: OK - # } - }, - "appservice": { - "app": {}, - }, - "nonspec": { - "nsp": {}, - "unk": {} - }, - } - with open(results_tap_path, "r") as f: - for line in f.readlines(): - test_result = parse_test_line(line) - if not test_result: - 
continue - name = test_result["name"] - group_id = test_name_to_group_id.get(name) - if not group_id: - summary["nonspec"]["unk"][name] = test_result["ok"] - if group_id == "nsp": - summary["nonspec"]["nsp"][name] = test_result["ok"] - elif group_id == "app": - summary["appservice"]["app"][name] = test_result["ok"] - elif group_id in test_mappings["federation_apis"]: - group = summary["federation"].get(group_id, {}) - group[name] = test_result["ok"] - summary["federation"][group_id] = group - elif group_id in test_mappings["client_apis"]: - group = summary["client"].get(group_id, {}) - group[name] = test_result["ok"] - summary["client"][group_id] = group - - print("Are We Synapse Yet?") - print("===================") - print("") - print_stats("Non-Spec APIs", summary["nonspec"], test_mappings, verbose) - print_stats("Client-Server APIs", summary["client"], test_mappings["client_apis"], verbose) - print_stats("Federation APIs", summary["federation"], test_mappings["federation_apis"], verbose) - print_stats("Application Services APIs", summary["appservice"], test_mappings, verbose) - - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument("tap_file", help="path to results.tap") - parser.add_argument("-v", action="store_true", help="show individual test names in output") - args = parser.parse_args() - main(args.tap_file, args.v) \ No newline at end of file diff --git a/tests/sytest/show-expected-fail-tests.sh b/tests/sytest/show-expected-fail-tests.sh deleted file mode 100755 index 320d4ebd..00000000 --- a/tests/sytest/show-expected-fail-tests.sh +++ /dev/null @@ -1,105 +0,0 @@ -#! /bin/bash -# -# Parses a results.tap file from SyTest output and a file containing test names (a test whitelist) -# and checks whether a test name that exists in the whitelist (that should pass), failed or not. -# -# An optional blacklist file can be added, also containing test names, where if a test name is -# present, the script will not error even if the test is in the whitelist file and failed -# -# For each of these files, lines starting with '#' are ignored. -# -# Usage ./show-expected-fail-tests.sh results.tap whitelist [blacklist] - -results_file=$1 -whitelist_file=$2 -blacklist_file=$3 - -fail_build=0 - -if [ $# -lt 2 ]; then - echo "Usage: $0 results.tap whitelist [blacklist]" - exit 1 -fi - -if [ ! -f "$results_file" ]; then - echo "ERROR: Specified results file '${results_file}' doesn't exist." - fail_build=1 -fi - -if [ ! -f "$whitelist_file" ]; then - echo "ERROR: Specified test whitelist '${whitelist_file}' doesn't exist." - fail_build=1 -fi - -blacklisted_tests=() - -# Check if a blacklist file was provided -if [ $# -eq 3 ]; then - # Read test blacklist file - if [ ! -f "$blacklist_file" ]; then - echo "ERROR: Specified test blacklist file '${blacklist_file}' doesn't exist." - fail_build=1 - fi - - # Read each line, ignoring those that start with '#' - blacklisted_tests="" - search_non_comments=$(grep -v '^#' ${blacklist_file}) - while read -r line ; do - # Record the blacklisted test name - blacklisted_tests+=("${line}") - done <<< "${search_non_comments}" # This allows us to edit blacklisted_tests in the while loop -fi - -[ "$fail_build" = 0 ] || exit 1 - -passed_but_expected_fail=$(grep ' # TODO passed but expected fail' ${results_file} | sed -E 's/^ok [0-9]+ (\(expected fail\) )?//' | sed -E 's/( \([0-9]+ subtests\))? 
# TODO passed but expected fail$//') -tests_to_add="" -already_in_whitelist="" - -while read -r test_name; do - # Ignore empty lines - [ "${test_name}" = "" ] && continue - - grep "^${test_name}" "${whitelist_file}" > /dev/null 2>&1 - if [ "$?" != "0" ]; then - # Check if this test name is blacklisted - if printf '%s\n' "${blacklisted_tests[@]}" | grep -q -P "^${test_name}$"; then - # Don't notify about this test - continue - fi - - # Append this test_name to the existing list - tests_to_add="${tests_to_add}${test_name}\n" - fail_build=1 - else - already_in_whitelist="${already_in_whitelist}${test_name}\n" - fi -done <<< "${passed_but_expected_fail}" - -# TODO: Check that the same test doesn't exist in both the whitelist and blacklist -# TODO: Check that the same test doesn't appear twice in the whitelist|blacklist - -# Trim test output strings -tests_to_add=$(IFS=$'\n' echo "${tests_to_add[*]%%'\n'}") -already_in_whitelist=$(IFS=$'\n' echo "${already_in_whitelist[*]%%'\n'}") - -# Format output with markdown for buildkite annotation rendering purposes -if [ -n "${tests_to_add}" ] && [ -n "${already_in_whitelist}" ]; then - echo "### 📜 SyTest Whitelist Maintenance" -fi - -if [ -n "${tests_to_add}" ]; then - echo "**ERROR**: The following tests passed but are not present in \`$2\`. Please append them to the file:" - echo "\`\`\`" - echo -e "${tests_to_add}" - echo "\`\`\`" -fi - -if [ -n "${already_in_whitelist}" ]; then - echo "**WARN**: Tests in the whitelist still marked as **expected fail**:" - echo "\`\`\`" - echo -e "${already_in_whitelist}" - echo "\`\`\`" -fi - -exit ${fail_build} diff --git a/tests/sytest/sytest-blacklist b/tests/sytest/sytest-blacklist deleted file mode 100644 index 009de225..00000000 --- a/tests/sytest/sytest-blacklist +++ /dev/null @@ -1,7 +0,0 @@ -# This test checks for a room-alias key in the response which is not in the spec, we must add it back in whitelist when https://github.com/matrix-org/sytest/pull/880 is merged -POST /createRoom makes a public room -# These fails because they use a endpoint which is not in the spec, we must add them back in whitelist when https://github.com/matrix-org/sytest/issues/878 is closed -POST /createRoom makes a room with a name -POST /createRoom makes a room with a topic -Can /sync newly created room -POST /createRoom ignores attempts to set the room version via creation_content \ No newline at end of file diff --git a/tests/sytest/sytest-whitelist b/tests/sytest/sytest-whitelist deleted file mode 100644 index 1c969dba..00000000 --- a/tests/sytest/sytest-whitelist +++ /dev/null @@ -1,516 +0,0 @@ -/event/ does not allow access to events before the user joined -/event/ on joined room works -/event/ on non world readable room does not work -/joined_members return joined members -/joined_rooms returns only joined rooms -/whois -3pid invite join valid signature but revoked keys are rejected -3pid invite join valid signature but unreachable ID server are rejected -3pid invite join with wrong but valid signature are rejected -A change to displayname should appear in incremental /sync -A full_state incremental update returns all state -A full_state incremental update returns only recent timeline -A message sent after an initial sync appears in the timeline of an incremental sync. 
-A next_batch token can be used in the v1 messages API -A pair of events which redact each other should be ignored -A pair of servers can establish a join in a v2 room -A prev_batch token can be used in the v1 messages API -AS can create a user -AS can create a user with an underscore -AS can create a user with inhibit_login -AS can set avatar for ghosted users -AS can set displayname for ghosted users -AS can't set displayname for random users -AS cannot create users outside its own namespace -AS user (not ghost) can join room without registering -AS user (not ghost) can join room without registering, with user_id query param -After changing password, a different session no longer works by default -After changing password, can log in with new password -After changing password, can't log in with old password -After changing password, different sessions can optionally be kept -After changing password, existing session still works -After deactivating account, can't log in with an email -After deactivating account, can't log in with password -Alias creators can delete alias with no ops -Alias creators can delete canonical alias with no ops -Alternative server names do not cause a routing loop -An event which redacts an event in a different room should be ignored -An event which redacts itself should be ignored -Asking for a remote rooms list, but supplying the local server's name, returns the local rooms list -Backfill checks the events requested belong to the room -Backfill works correctly with history visibility set to joined -Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination -Banned servers cannot /event_auth -Banned servers cannot /invite -Banned servers cannot /make_join -Banned servers cannot /make_leave -Banned servers cannot /send_join -Banned servers cannot /send_leave -Banned servers cannot backfill -Banned servers cannot get missing events -Banned servers cannot get room state -Banned servers cannot get room state ids -Banned servers cannot send events -Banned user is kicked and may not rejoin until unbanned -Both GET and PUT work -Can /sync newly created room -Can add account data -Can add account data to room -Can add tag -Can claim one time key using POST -Can claim remote one time key using POST -Can create filter -Can deactivate account -Can delete canonical alias -Can download file 'ascii' -Can download file 'name with spaces' -Can download file 'name;with;semicolons' -Can download filter -Can download specifying a different ASCII file name -Can download specifying a different Unicode file name -Can download with Unicode file name locally -Can download with Unicode file name over federation -Can download without a file name locally -Can download without a file name over federation -Can forget room you've been kicked from -Can get 'm.room.name' state for a departed room (SPEC-216) -Can get account data without syncing -Can get remote public room list -Can get room account data without syncing -Can get rooms/{roomId}/members -Can get rooms/{roomId}/members for a departed room (SPEC-216) -Can get rooms/{roomId}/state for a departed room (SPEC-216) -Can invite users to invite-only rooms -Can list tags for a room -Can logout all devices -Can logout current device -Can paginate public room list -Can pass a JSON filter as a query parameter -Can query device keys using POST -Can query remote device keys using POST -Can query specific device keys using POST -Can re-join room if re-invited -Can read configuration endpoint -Can receive 
redactions from regular users over federation in room version 1 -Can receive redactions from regular users over federation in room version 2 -Can receive redactions from regular users over federation in room version 3 -Can receive redactions from regular users over federation in room version 4 -Can receive redactions from regular users over federation in room version 5 -Can receive redactions from regular users over federation in room version 6 -Can recv a device message using /sync -Can recv a device message using /sync -Can recv device messages over federation -Can recv device messages until they are acknowledged -Can recv device messages until they are acknowledged -Can reject invites over federation for rooms with version 1 -Can reject invites over federation for rooms with version 2 -Can reject invites over federation for rooms with version 3 -Can reject invites over federation for rooms with version 4 -Can reject invites over federation for rooms with version 5 -Can reject invites over federation for rooms with version 6 -Can remove tag -Can search public room list -Can send a message directly to a device using PUT /sendToDevice -Can send a message directly to a device using PUT /sendToDevice -Can send a to-device message to two users which both receive it using /sync -Can send image in room message -Can send messages with a wildcard device id -Can send messages with a wildcard device id -Can send messages with a wildcard device id to two devices -Can send messages with a wildcard device id to two devices -Can sync -Can sync a joined room -Can sync a room with a message with a transaction id -Can sync a room with a single message -Can upload device keys -Can upload with ASCII file name -Can upload with Unicode file name -Can upload without a file name -Can't deactivate account with wrong password -Can't forget room you're still in -Changes to state are included in an gapped incremental sync -Changes to state are included in an incremental sync -Changing the actions of an unknown default rule fails with 404 -Changing the actions of an unknown rule fails with 404 -Checking local federation server -Creators can delete alias -Current state appears in timeline in private history -Current state appears in timeline in private history with many messages before -DELETE /device/{deviceId} -DELETE /device/{deviceId} requires UI auth user to match device owner -DELETE /device/{deviceId} with no body gives a 401 -Deleted tags appear in an incremental v2 /sync -Deleting a non-existent alias should return a 404 -Device list doesn't change if remote server is down -Device messages over federation wake up /sync -Device messages wake up /sync -Device messages wake up /sync -Device messages with the same txn_id are deduplicated -Device messages with the same txn_id are deduplicated -Enabling an unknown default rule fails with 404 -Event size limits -Event with an invalid signature in the send_join response should not cause room join to fail -Events come down the correct room -Events whose auth_events are in the wrong room do not mess up the room state -Existing members see new members' join events -Federation key API allows unsigned requests for keys -Federation key API can act as a notary server via a GET request -Federation key API can act as a notary server via a POST request -Federation rejects inbound events where the prev_events cannot be found -Fetching eventstream a second time doesn't yield the message again -Forgetting room does not show up in v2 /sync -Full state sync includes joined rooms 
-GET /capabilities is present and well formed for registered user -GET /device/{deviceId} -GET /device/{deviceId} gives a 404 for unknown devices -GET /devices -GET /directory/room/:room_alias yields room ID -GET /events initially -GET /events with negative 'limit' -GET /events with non-numeric 'limit' -GET /events with non-numeric 'timeout' -GET /initialSync initially -GET /joined_rooms lists newly-created room -GET /login yields a set of flows -GET /media/r0/download can fetch the value again -GET /profile/:user_id/avatar_url publicly accessible -GET /profile/:user_id/displayname publicly accessible -GET /publicRooms includes avatar URLs -GET /publicRooms lists newly-created room -GET /publicRooms lists rooms -GET /r0/capabilities is not public -GET /register yields a set of flows -GET /rooms/:room_id/joined_members fetches my membership -GET /rooms/:room_id/messages returns a message -GET /rooms/:room_id/state fetches entire room state -GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership -GET /rooms/:room_id/state/m.room.member/:user_id?format=event fetches my membership event -GET /rooms/:room_id/state/m.room.name gets name -GET /rooms/:room_id/state/m.room.power_levels can fetch levels -GET /rooms/:room_id/state/m.room.power_levels fetches powerlevels -GET /rooms/:room_id/state/m.room.topic gets topic -Get left notifs for other users in sync and /keys/changes when user leaves -Getting messages going forward is limited for a departed room (SPEC-216) -Getting push rules doesn't corrupt the cache SYN-390 -Getting state IDs checks the events requested belong to the room -Getting state checks the events requested belong to the room -Ghost user must register before joining room -Guest non-joined user cannot call /events on default room -Guest non-joined user cannot call /events on invited room -Guest non-joined user cannot call /events on joined room -Guest non-joined user cannot call /events on shared room -Guest non-joined users can get individual state for world_readable rooms -Guest non-joined users can get individual state for world_readable rooms after leaving -Guest non-joined users can get state for world_readable rooms -Guest non-joined users cannot room initalSync for non-world_readable rooms -Guest non-joined users cannot send messages to guest_access rooms if not joined -Guest user can set display names -Guest user cannot call /events globally -Guest user cannot upgrade other users -Guest users can accept invites to private rooms over federation -Guest users can join guest_access rooms -Guest users can send messages to guest_access rooms if joined -If a device list update goes missing, the server resyncs on the next one -If remote user leaves room we no longer receive device updates -If remote user leaves room, changes device and rejoins we see update in /keys/changes -If remote user leaves room, changes device and rejoins we see update in sync -Inbound /make_join rejects attempts to join rooms where all users have left -Inbound /v1/make_join rejects remote attempts to join local users to rooms -Inbound /v1/send_join rejects incorrectly-signed joins -Inbound /v1/send_join rejects joins from other servers -Inbound /v1/send_leave rejects leaves from other servers -Inbound federation accepts a second soft-failed event -Inbound federation accepts attempts to join v2 rooms from servers with support -Inbound federation can backfill events -Inbound federation can get public room list -Inbound federation can get state for a room -Inbound federation can get state_ids 
for a room -Inbound federation can query profile data -Inbound federation can query room alias directory -Inbound federation can receive events -Inbound federation can receive invites via v1 API -Inbound federation can receive invites via v2 API -Inbound federation can receive redacted events -Inbound federation can receive v1 /send_join -Inbound federation can receive v2 /send_join -Inbound federation can return events -Inbound federation can return missing events for invite visibility -Inbound federation can return missing events for world_readable visibility -Inbound federation correctly soft fails events -Inbound federation of state requires event_id as a mandatory paramater -Inbound federation of state_ids requires event_id as a mandatory paramater -Inbound federation rejects attempts to join v1 rooms from servers without v1 support -Inbound federation rejects attempts to join v2 rooms from servers lacking version support -Inbound federation rejects attempts to join v2 rooms from servers only supporting v1 -Inbound federation rejects invite rejections which include invalid JSON for room version 6 -Inbound federation rejects invites which include invalid JSON for room version 6 -Inbound federation rejects receipts from wrong remote -Inbound federation rejects remote attempts to join local users to rooms -Inbound federation rejects remote attempts to kick local users to rooms -Inbound federation rejects typing notifications from wrong remote -Inbound: send_join rejects invalid JSON for room version 6 -Invalid JSON floats -Invalid JSON integers -Invalid JSON special values -Invited user can reject invite -Invited user can reject invite over federation -Invited user can reject invite over federation for empty room -Invited user can reject invite over federation several times -Invited user can see room metadata -Inviting an AS-hosted user asks the AS server -Lazy loading parameters in the filter are strictly boolean -Left rooms appear in the leave section of full state sync -Local delete device changes appear in v2 /sync -Local device key changes appear in /keys/changes -Local device key changes appear in v2 /sync -Local device key changes get to remote servers -Local new device changes appear in v2 /sync -Local non-members don't see posted message events -Local room members can get room messages -Local room members see posted message events -Local update device changes appear in v2 /sync -Local users can peek by room alias -Local users can peek into world_readable rooms by room ID -Message history can be paginated -Message history can be paginated over federation -Name/topic keys are correct -New account data appears in incremental v2 /sync -New read receipts appear in incremental v2 /sync -New room members see their own join event -New users appear in /keys/changes -Newly banned rooms appear in the leave section of incremental sync -Newly joined room is included in an incremental sync -Newly joined room is included in an incremental sync after invite -Newly left rooms appear in the leave section of gapped sync -Newly left rooms appear in the leave section of incremental sync -Newly updated tags appear in an incremental v2 /sync -Non-numeric ports in server names are rejected -Outbound federation can backfill events -Outbound federation can query profile data -Outbound federation can query room alias directory -Outbound federation can query v1 /send_join -Outbound federation can query v2 /send_join -Outbound federation can request missing events -Outbound federation can send events 
-Outbound federation can send invites via v1 API -Outbound federation can send invites via v2 API -Outbound federation can send room-join requests -Outbound federation correctly handles unsupported room versions -Outbound federation passes make_join failures through to the client -Outbound federation rejects backfill containing invalid JSON for events in room version 6 -Outbound federation rejects m.room.create events with an unknown room version -Outbound federation rejects send_join responses with no m.room.create event -Outbound federation sends receipts -Outbound federation will ignore a missing event with bad JSON for room version 6 -POST /createRoom creates a room with the given version -POST /createRoom ignores attempts to set the room version via creation_content -POST /createRoom makes a private room -POST /createRoom makes a private room with invites -POST /createRoom makes a public room -POST /createRoom makes a room with a name -POST /createRoom makes a room with a topic -POST /createRoom rejects attempts to create rooms with numeric versions -POST /createRoom rejects attempts to create rooms with unknown versions -POST /createRoom with creation content -POST /join/:room_alias can join a room -POST /join/:room_alias can join a room with custom content -POST /join/:room_id can join a room -POST /join/:room_id can join a room with custom content -POST /login as non-existing user is rejected -POST /login can log in as a user -POST /login can log in as a user with just the local part of the id -POST /login returns the same device_id as that in the request -POST /login wrong password is rejected -POST /media/r0/upload can create an upload -POST /redact disallows redaction of event in different room -POST /register allows registration of usernames with '-' -POST /register allows registration of usernames with '.' -POST /register allows registration of usernames with '/' -POST /register allows registration of usernames with '3' -POST /register allows registration of usernames with '=' -POST /register allows registration of usernames with '_' -POST /register allows registration of usernames with 'q' -POST /register can create a user -POST /register downcases capitals in usernames -POST /register rejects registration of usernames with '!' -POST /register rejects registration of usernames with '"' -POST /register rejects registration of usernames with ''' -POST /register rejects registration of usernames with ':' -POST /register rejects registration of usernames with '?' 
-POST /register rejects registration of usernames with '@' -POST /register rejects registration of usernames with '[' -POST /register rejects registration of usernames with '\' -POST /register rejects registration of usernames with '\n' -POST /register rejects registration of usernames with ']' -POST /register rejects registration of usernames with '{' -POST /register rejects registration of usernames with '|' -POST /register rejects registration of usernames with '}' -POST /register rejects registration of usernames with '£' -POST /register rejects registration of usernames with 'é' -POST /register returns the same device_id as that in the request -POST /rooms/:room_id/ban can ban a user -POST /rooms/:room_id/invite can send an invite -POST /rooms/:room_id/join can join a room -POST /rooms/:room_id/leave can leave a room -POST /rooms/:room_id/read_markers can create read marker -POST /rooms/:room_id/receipt can create receipts -POST /rooms/:room_id/redact/:event_id as original message sender redacts message -POST /rooms/:room_id/redact/:event_id as power user redacts message -POST /rooms/:room_id/redact/:event_id as random user does not redact message -POST /rooms/:room_id/send/:event_type sends a message -POST /rooms/:room_id/state/m.room.name sets name -POST /rooms/:room_id/state/m.room.topic sets topic -POST /rooms/:room_id/upgrade can upgrade a room version -POST rejects invalid utf-8 in JSON -POSTed media can be thumbnailed -PUT /device/{deviceId} gives a 404 for unknown devices -PUT /device/{deviceId} updates device fields -PUT /directory/room/:room_alias creates alias -PUT /profile/:user_id/avatar_url sets my avatar -PUT /profile/:user_id/displayname sets my name -PUT /rooms/:room_id/send/:event_type/:txn_id deduplicates the same txn id -PUT /rooms/:room_id/send/:event_type/:txn_id sends a message -PUT /rooms/:room_id/state/m.room.power_levels can set levels -PUT /rooms/:room_id/typing/:user_id sets typing notification -PUT power_levels should not explode if the old power levels were empty -Peeked rooms only turn up in the sync for the device who peeked them -Previously left rooms don't appear in the leave section of sync -Push rules come down in an initial /sync -Read markers appear in incremental v2 /sync -Read markers appear in initial v2 /sync -Read markers can be updated -Read receipts appear in initial v2 /sync -Real non-joined user cannot call /events on default room -Real non-joined user cannot call /events on invited room -Real non-joined user cannot call /events on joined room -Real non-joined user cannot call /events on shared room -Real non-joined users can get individual state for world_readable rooms -Real non-joined users can get individual state for world_readable rooms after leaving -Real non-joined users can get state for world_readable rooms -Real non-joined users cannot room initalSync for non-world_readable rooms -Real non-joined users cannot send messages to guest_access rooms if not joined -Receipts must be m.read -Redaction of a redaction redacts the redaction reason -Regular users can add and delete aliases in the default room configuration -Regular users can add and delete aliases when m.room.aliases is restricted -Regular users cannot create room aliases within the AS namespace -Regular users cannot register within the AS namespace -Remote media can be thumbnailed -Remote room alias queries can handle Unicode -Remote room members also see posted message events -Remote room members can get room messages -Remote user can backfill in a room with version 1 
-Remote user can backfill in a room with version 2 -Remote user can backfill in a room with version 3 -Remote user can backfill in a room with version 4 -Remote user can backfill in a room with version 5 -Remote user can backfill in a room with version 6 -Remote users can join room by alias -Remote users may not join unfederated rooms -Request to logout with invalid an access token is rejected -Request to logout without an access token is rejected -Room aliases can contain Unicode -Room creation reports m.room.create to myself -Room creation reports m.room.member to myself -Room members can join a room with an overridden displayname -Room members can override their displayname on a room-specific basis -Room state at a rejected message event is the same as its predecessor -Room state at a rejected state event is the same as its predecessor -Rooms a user is invited to appear in an incremental sync -Rooms a user is invited to appear in an initial sync -Rooms can be created with an initial invite list (SYN-205) -Server correctly handles incoming m.device_list_update -Server correctly handles transactions that break edu limits -Server correctly resyncs when client query keys and there is no remote cache -Server correctly resyncs when server leaves and rejoins a room -Server rejects invalid JSON in a version 6 room -Setting room topic reports m.room.topic to myself -Should not be able to take over the room by pretending there is no PL event -Should reject keys claiming to belong to a different user -State from remote users is included in the state in the initial sync -State from remote users is included in the timeline in an incremental sync -State is included in the timeline in the initial sync -Sync can be polled for updates -Sync is woken up for leaves -Syncing a new room with a large timeline limit isn't limited -Tags appear in an initial v2 /sync -Trying to get push rules with unknown rule_id fails with 404 -Typing can be explicitly stopped -Typing events appear in gapped sync -Typing events appear in incremental sync -Typing events appear in initial sync -Typing notification sent to local room members -Typing notifications also sent to remote room members -Typing notifications don't leak -Uninvited users cannot join the room -Unprivileged users can set m.room.topic if it only needs level 0 -User appears in user directory -User in private room doesn't appear in user directory -User joining then leaving public room appears and dissappears from directory -User in shared private room does appear in user directory until leave -User can create and send/receive messages in a room with version 1 -User can create and send/receive messages in a room with version 2 -User can create and send/receive messages in a room with version 3 -User can create and send/receive messages in a room with version 4 -User can create and send/receive messages in a room with version 5 -User can create and send/receive messages in a room with version 6 -User can invite local user to room with version 1 -User can invite local user to room with version 2 -User can invite local user to room with version 3 -User can invite local user to room with version 4 -User can invite local user to room with version 5 -User can invite local user to room with version 6 -User can invite remote user to room with version 1 -User can invite remote user to room with version 2 -User can invite remote user to room with version 3 -User can invite remote user to room with version 4 -User can invite remote user to room with version 5 -User can 
invite remote user to room with version 6 -User directory correctly update on display name change -User in dir while user still shares private rooms -User in shared private room does appear in user directory -User is offline if they set_presence=offline in their sync -User signups are forbidden from starting with '_' -Users can't delete other's aliases -Users cannot invite a user that is already in the room -Users cannot invite themselves to a room -Users cannot kick users from a room they are not in -Users cannot kick users who have already left a room -Users cannot set ban powerlevel higher than their own -Users cannot set kick powerlevel higher than their own -Users cannot set notifications powerlevel higher than their own -Users cannot set redact powerlevel higher than their own -Users receive device_list updates for their own devices -Users with sufficient power-level can delete other's aliases -Version responds 200 OK with valid structure -We can't peek into rooms with invited history_visibility -We can't peek into rooms with joined history_visibility -We can't peek into rooms with shared history_visibility -We don't send redundant membership state across incremental syncs by default -We should see our own leave event when rejecting an invite, even if history_visibility is restricted (riot-web/3462) -We should see our own leave event, even if history_visibility is restricted (SYN-662) -Wildcard device messages over federation wake up /sync -Wildcard device messages wake up /sync -Wildcard device messages wake up /sync -avatar_url updates affect room member events -displayname updates affect room member events -local user can join room with version 1 -local user can join room with version 2 -local user can join room with version 3 -local user can join room with version 4 -local user can join room with version 5 -local user can join room with version 6 -m.room.history_visibility == "joined" allows/forbids appropriately for Guest users -m.room.history_visibility == "joined" allows/forbids appropriately for Real users -m.room.history_visibility == "world_readable" allows/forbids appropriately for Guest users -m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users -query for user with no keys returns empty key dict -remote user can join room with version 1 -remote user can join room with version 2 -remote user can join room with version 3 -remote user can join room with version 4 -remote user can join room with version 5 -remote user can join room with version 6 -setting 'm.room.name' respects room powerlevel -setting 'm.room.power_levels' respects room powerlevel -Federation publicRoom Name/topic keys are correct From e704bbaf1166d0082a7aac27fdbd72e37d8fd664 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 10 Mar 2025 12:30:39 -0400 Subject: [PATCH 239/328] update complement test results Signed-off-by: June Clementine Strawberry --- tests/test_results/complement/test_results.jsonl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 7b06510b..5fb850f1 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -534,10 +534,10 @@ {"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel"} {"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_accepts_present_aliases"} 
{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_accepts_present_alt_aliases"} -{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alias_pointing_to_different_local_room"} -{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alt_alias_pointing_to_different_local_room"} -{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases"} -{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases#01"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alias_pointing_to_different_local_room"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alt_alias_pointing_to_different_local_room"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases#01"} {"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_missing_aliases"} {"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_missing_aliases#01"} {"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_setting_rejects_deleted_aliases"} From 889fb3cf262d433bf2da461a7482a3e7400fc41f Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 10 Mar 2025 13:36:56 -0400 Subject: [PATCH 240/328] add download-artifact pattern for OCI images only Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cd7d2484..3fd834e0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -550,6 +550,8 @@ jobs: - name: Download artifacts uses: actions/download-artifact@v4 + with: + pattern: "oci*" - name: Move OCI images into position run: | From 56dba8acb7b873c890313991630ebd23bbb47376 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 10 Mar 2025 17:15:21 -0400 Subject: [PATCH 241/328] misc docs updates Signed-off-by: June Clementine Strawberry --- README.md | 62 +++-- arch/conduwuit.service | 1 + book.toml | 5 +- debian/conduwuit.service | 16 +- docs/SUMMARY.md | 1 - docs/assets/conduwuit_logo.svg | 36 +++ docs/assets/gay dog anarchists.png | Bin 0 -> 11533 bytes docs/deploying/generic.md | 22 -- docs/development/hot_reload.md | 3 + docs/development/testing.md | 19 +- docs/differences.md | 379 ----------------------------- docs/introduction.md | 4 - 12 files changed, 107 insertions(+), 441 deletions(-) create mode 100644 docs/assets/conduwuit_logo.svg create mode 100644 docs/assets/gay dog anarchists.png delete mode 100644 docs/differences.md diff --git a/README.md b/README.md index 13a1c67f..d8f99d45 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,16 @@ # conduwuit -[![conduwuit main room](https://img.shields.io/matrix/conduwuit%3Apuppygock.gay?server_fqdn=matrix.transfem.dev&style=flat&logo=matrix&logoColor=%23f5b3ff&label=%23conduwuit%3Apuppygock.gay&color=%23f652ff)](https://matrix.to/#/#conduwuit:puppygock.gay) [![conduwuit space](https://img.shields.io/matrix/conduwuit-space%3Apuppygock.gay?server_fqdn=matrix.transfem.dev&style=flat&logo=matrix&logoColor=%23f5b3ff&label=%23conduwuit-space%3Apuppygock.gay&color=%23f652ff)](https://matrix.to/#/#conduwuit-space:puppygock.gay) [![CI and 
Artifacts](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml) +[![conduwuit main room](https://img.shields.io/matrix/conduwuit%3Apuppygock.gay?server_fqdn=matrix.transfem.dev&style=flat&logo=matrix&logoColor=%23f5b3ff&label=%23conduwuit%3Apuppygock.gay&color=%23f652ff)](https://matrix.to/#/#conduwuit:puppygock.gay) [![conduwuit space](https://img.shields.io/matrix/conduwuit-space%3Apuppygock.gay?server_fqdn=matrix.transfem.dev&style=flat&logo=matrix&logoColor=%23f5b3ff&label=%23conduwuit-space%3Apuppygock.gay&color=%23f652ff)](https://matrix.to/#/#conduwuit-space:puppygock.gay) + +[![CI and Artifacts](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml) + +![GitHub Repo stars](https://img.shields.io/github/stars/girlbossceo/conduwuit?style=flat&color=%23fcba03&link=https%3A%2F%2Fgithub.com%2Fgirlbossceo%2Fconduwuit) ![GitHub commit activity](https://img.shields.io/github/commit-activity/m/girlbossceo/conduwuit?style=flat&color=%2303fcb1&link=https%3A%2F%2Fgithub.com%2Fgirlbossceo%2Fconduwuit%2Fpulse%2Fmonthly) ![GitHub Created At](https://img.shields.io/github/created-at/girlbossceo/conduwuit) ![GitHub Sponsors](https://img.shields.io/github/sponsors/girlbossceo?color=%23fc03ba&link=https%3A%2F%2Fgithub.com%2Fsponsors%2Fgirlbossceo) ![GitHub License](https://img.shields.io/github/license/girlbossceo/conduwuit) + + + +![Docker Image Size (tag)](https://img.shields.io/docker/image-size/girlbossceo/conduwuit/latest?label=image%20size%20(latest)&link=https%3A%2F%2Fhub.docker.com%2Frepository%2Fdocker%2Fgirlbossceo%2Fconduwuit%2Ftags%3Fname%3Dlatest) ![Docker Image Size (tag)](https://img.shields.io/docker/image-size/girlbossceo/conduwuit/main?label=image%20size%20(main)&link=https%3A%2F%2Fhub.docker.com%2Frepository%2Fdocker%2Fgirlbossceo%2Fconduwuit%2Ftags%3Fname%3Dmain) + + @@ -53,6 +63,19 @@ A lot of critical stability and performance issues have been fixed, and a lot of necessary groundwork has finished; making this project way better than it was back in the start at ~early 2024. +#### Where is the differences page? + +conduwuit historically had a "differences" page that listed each and every single +different thing about conduwuit from Conduit, as a way to promote and advertise +conduwuit by showing significant amounts of work done. While this was feasible to +maintain back when the project was new in early-2024, this became impossible +very quickly and has unfortunately became heavily outdated, missing tons of things, etc. + +It's difficult to list out what we do differently, what are our notable features, etc +when there's so many things and features and bug fixes and performance optimisations, +the list goes on. We simply recommend folks to just try out conduwuit, or ask us +what features you are looking for and if they're implemented in conduwuit. + #### How is conduwuit funded? Is conduwuit sustainable? conduwuit has no external funding. This is made possible purely in my freetime with @@ -64,17 +87,15 @@ and we have no plans in stopping or slowing down any time soon! #### Can I migrate or switch from Conduit? -conduwuit is a complete drop-in replacement for Conduit. As long as you are using RocksDB, -the only "migration" you need to do is replace the binary or container image. There -is no harm or additional steps required for using conduwuit. 
See the -[Migrating from Conduit](https://conduwuit.puppyirl.gay/deploying/generic.html#migrating-from-conduit) section -on the generic deploying guide. +conduwuit had drop-in migration/replacement support for Conduit for about 12 months before +bugs somewhere along the line broke it. Maintaining this has been difficult and +the majority of Conduit users have already migrated, additionally debugging Conduit +is not one of our interests, and so Conduit migration no longer works. We also +feel that 12 months has been plenty of time for people to seamlessly migrate. -Note that as of conduwuit version 0.5.0, backwards compatibility with Conduit is -no longer supported. We only support migrating *from* Conduit, not back to -Conduit like before. If you are truly finding yourself wanting to migrate back -to Conduit, we would appreciate all your feedback and if we can assist with -any issues or concerns. +If you are a Conduit user looking to migrate, you will have to wipe and reset +your database. We may fix seamless migration support at some point, but it's not an interest +from us. #### Can I migrate from Synapse or Dendrite? @@ -98,9 +119,10 @@ is the official project Matrix room. You can get support here, ask questions or concerns, get assistance setting up conduwuit, etc. This room should stay relevant and focused on conduwuit. An offtopic general -chatter room can be found there as well. +chatter room can be found in the room topic there as well. + +Please keep the issue trackers focused on *actual* bug reports and enhancement requests. -Please keep the issue trackers focused on bug reports and enhancement requests. General support is extremely difficult to be offered over an issue tracker, and simple questions should be asked directly in an interactive platform like our Matrix room above as they can turn into a relevant discussion and/or may not be @@ -108,24 +130,34 @@ simple to answer. If you're not sure, just ask in the Matrix room. If you have a bug or feature to request: [Open an issue on GitHub](https://github.com/girlbossceo/conduwuit/issues/new) +If you need to contact the primary maintainer, my contact methods are on my website: https://girlboss.ceo + #### Donate conduwuit development is purely made possible by myself and contributors. I do not get paid to work on this, and I work on it in my free time. Donations are heavily appreciated! 💜🥺 -- Liberapay (preferred): -- GitHub Sponsors (preferred): +- Liberapay: +- GitHub Sponsors: - Ko-fi: I do not and will not accept cryptocurrency donations, including things related. +Note that donations will NOT guarantee you or give you any kind of tangible product, +feature prioritisation, etc. By donating, you are agreeing that conduwuit is NOT +going to provide you any goods or services as part of your donation, and this +donation is purely a generous donation. We will not provide things like paid +personal/direct support, feature request priority, merchandise, etc. + #### Logo Original repo and Matrix room picture was from bran (<3). Current banner image and logo is directly from [this cohost post](https://web.archive.org/web/20241126004041/https://cohost.org/RatBaby/post/1028290-finally-a-flag-for). +An SVG logo made by [@nktnet1](https://github.com/nktnet1) is available here: + #### Is it conduwuit or Conduwuit? Both, but I prefer conduwuit. 
diff --git a/arch/conduwuit.service b/arch/conduwuit.service index fa3616d8..4f45ddc0 100644 --- a/arch/conduwuit.service +++ b/arch/conduwuit.service @@ -4,6 +4,7 @@ Wants=network-online.target After=network-online.target Documentation=https://conduwuit.puppyirl.gay/ RequiresMountsFor=/var/lib/private/conduwuit +Alias=matrix-conduwuit.service [Service] DynamicUser=yes diff --git a/book.toml b/book.toml index 1d32c766..7eb1983b 100644 --- a/book.toml +++ b/book.toml @@ -13,12 +13,15 @@ create-missing = true extra-watch-dirs = ["debian", "docs"] [rust] -edition = "2021" +edition = "2024" [output.html] git-repository-url = "https://github.com/girlbossceo/conduwuit" edit-url-template = "https://github.com/girlbossceo/conduwuit/edit/main/{path}" git-repository-icon = "fa-github-square" +[output.html.redirect] +"/differences.html" = "https://conduwuit.puppyirl.gay/#where-is-the-differences-page" + [output.html.search] limit-results = 15 diff --git a/debian/conduwuit.service b/debian/conduwuit.service index 4d6f4eef..a079499e 100644 --- a/debian/conduwuit.service +++ b/debian/conduwuit.service @@ -2,26 +2,14 @@ Description=conduwuit Matrix homeserver Wants=network-online.target After=network-online.target +Alias=matrix-conduwuit.service Documentation=https://conduwuit.puppyirl.gay/ [Service] DynamicUser=yes User=conduwuit Group=conduwuit -Type=notify-reload -ReloadSignal=SIGUSR1 - -TTYPath=/dev/tty25 -DeviceAllow=char-tty -StandardInput=tty-force -StandardOutput=tty -StandardError=journal+console -TTYReset=yes -# uncomment to allow buffer to be cleared every restart -TTYVTDisallocate=no - -TTYColumns=120 -TTYRows=40 +Type=notify Environment="CONDUWUIT_CONFIG=/etc/conduwuit/conduwuit.toml" diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 8e07adc2..ad0f8135 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -1,7 +1,6 @@ # Summary - [Introduction](introduction.md) -- [Differences from upstream Conduit](differences.md) - [Configuration](configuration.md) - [Examples](configuration/examples.md) - [Deploying](deploying.md) diff --git a/docs/assets/conduwuit_logo.svg b/docs/assets/conduwuit_logo.svg new file mode 100644 index 00000000..9be5b453 --- /dev/null +++ b/docs/assets/conduwuit_logo.svg @@ -0,0 +1,36 @@ + + + + + + diff --git a/docs/assets/gay dog anarchists.png b/docs/assets/gay dog anarchists.png new file mode 100644 index 0000000000000000000000000000000000000000..871cf302a8f27d6ed02c983241c7c5ca6029ce99 GIT binary patch literal 11533 zcmd^l`9IX(`}fP1$`YwavWDy=`&QWrW#7i2ELoBoM$9m6mK0^*DTYe6gc=O8y~~!7 zEg>WF%962;{XXOK{@nNd{TseNJs;;>*LAMtoa=S2nRCTlF*js8%zGGuAU0zoJxd5W z&+`LHQqE{g$ zsAMIhX*sTTt?=_k=lJ&2!rtB+im94gKC-TBk0rx*^;sl%-pN zcQ9Dowdtoc#jYa_}hWPfyC&r%KM(}?T&jK9%m+P$?GL3a90I(p2*^u#~RjDwq+Touy; z;fx$mt`-BXf1@X&&klkNw|XLS_vEl_(5-?k>Siv);EEu&rvk7EZ=-?!?o&Cs1C2hc zlcqSj@}nM@L-LTV8y}LM6?c4yM!K+ENCu>@A7JWHSLnPylE%SXtwUtwfx1G^(%@;R zDh$8(W%@C#>aYQn9a3 z(ik6o+jmshXWx6eRhpQ5tr|^>eC6GJDE_z)`5XI?skYCOcy}LSKkmbW4H{e-JxWve z$}gR!&brE)rcTmv-i72e~Kn{>zy8r(L9OfVWY7nZ4K3n`%)Gu zPiS*o9yGqiGyAdDytp3_Lti198-_k2`*1^aAH2nB!0aor4?a@+a6@Jvy!ZW%_fepY z;=L7VaKuMxAG}p)5LUIn!&=|tXj%&f_I-P?L-S_NUYBO1!2l;MYM&hUV^N+%BW+(Z zqnS)~L+pd6KMg{v+i0}6=fJv+(IG52F2?GH%j<{8tYk5vsTHt4_|L&8%MbJp>{PUJOM zgT9fjWw3Nbi|*%DvaiWzDYSTyq9lVwLLtwn-^HsEJSz`xF5&CO^I3GwK^5EH|C9A*TO1F&;W4 zLi8~nH^!N9B3BFkiZ(_@mt~O9T&{{Ob_jUuP&p^LxO(L@(-0%Y2DzC-?}wvi=x&zM zqbx98B3LAyx;Sw%h!2s^g_iSq79LMn(jiOQ$YEOi3X2K?qJ{hw->CE-t!E6#rNGf=bKaJJR~8JT9{D z4OoNyD-)%vZbgfE21hk98V|ztb0JT{TkPex$x@=&-*mQSn5lxlT8)om$_iFSneAmL 
zB~KK#1|2L+UsF)Bpo4|p+f4l==wP9HFP_Z?QXXNeuuQvP`8xPEfqd6< z0NWIOm_ zdNLr}sb2#fEKq2QdHOI-8h^)A43N=qLHZ*=MymUQY=^1^vz7aSG>EyG2QW#B6k`RX zhvHDvpo0afK9jB952XE#f18T}vM=eg6_6gvL9KyghpNwJYl99JXe6mSmkX>?Vj}+4 z7z8aCuQ - -See the `[global.well_known]` config section, or configure your web server -appropriately to send the delegation responses. - ## Adding a conduwuit user While conduwuit can run as any user it is better to use dedicated users for diff --git a/docs/development/hot_reload.md b/docs/development/hot_reload.md index 018eb4b3..65fd4adf 100644 --- a/docs/development/hot_reload.md +++ b/docs/development/hot_reload.md @@ -1,5 +1,8 @@ # Hot Reloading ("Live" Development) +Note that hot reloading has not been refactored in quite a while and is not +guaranteed to work at this time. + ### Summary When developing in debug-builds with the nightly toolchain, conduwuit is modular diff --git a/docs/development/testing.md b/docs/development/testing.md index 2d421767..a577698a 100644 --- a/docs/development/testing.md +++ b/docs/development/testing.md @@ -5,12 +5,11 @@ Have a look at [Complement's repository][complement] for an explanation of what it is. -To test against Complement, with Nix (or [Lix](https://lix.systems) and direnv -installed and set up, you can: +To test against Complement, with Nix (or [Lix](https://lix.systems) and +[direnv installed and set up][direnv] (run `direnv allow` after setting up the hook), you can: -* Run `./bin/complement "$COMPLEMENT_SRC" ./path/to/logs.jsonl -./path/to/results.jsonl` to build a Complement image, run the tests, and output -the logs and results to the specified paths. This will also output the OCI image +* Run `./bin/complement "$COMPLEMENT_SRC"` to build a Complement image, run +the tests, and output the logs and results to the specified paths. This will also output the OCI image at `result` * Run `nix build .#complement` from the root of the repository to just build a Complement OCI image outputted to `result` (it's a `.tar.gz` file) @@ -18,5 +17,15 @@ Complement OCI image outputted to `result` (it's a `.tar.gz` file) output from the commit/revision you want to test (e.g. from main) [here][ci-workflows] +If you want to use your own prebuilt OCI image (such as from our CI) without needing +Nix installed, put the image at `complement_oci_image.tar.gz` in the root of the repo +and run the script. + +If you're on macOS and need to build an image, run `nix build .#linux-complement`. + +We have a Complement fork as some tests have needed to be fixed. This can be found +at: + [ci-workflows]: https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml?query=event%3Apush+is%3Asuccess+actor%3Agirlbossceo [complement]: https://github.com/matrix-org/complement +[direnv]: https://direnv.net/docs/hook.html diff --git a/docs/differences.md b/docs/differences.md deleted file mode 100644 index 18ea7a1f..00000000 --- a/docs/differences.md +++ /dev/null @@ -1,379 +0,0 @@ -#### **Note: This list may not up to date. There are rapidly more and more -improvements, fixes, changes, etc being made that it is becoming more difficult -to maintain this list. I recommend that you give conduwuit a try and see the -differences for yourself. If you have any concerns, feel free to join the -conduwuit Matrix room and ask any pre-usage questions.** - -### list of features, bug fixes, etc that conduwuit does that Conduit does not - -Outgoing typing indicators, outgoing read receipts, **and** outgoing presence! 
- -## Performance - -- Concurrency support for individual homeserver key fetching for faster remote -room joins and room joins that will error less frequently -- Send `Cache-Control` response header with `immutable` and 1 year cache length -for all media requests (download and thumbnail) to instruct clients to cache -media, and reduce server load from media requests that could be otherwise cached -- Add feature flags and config options to enable/build with zstd, brotli, and/or -gzip HTTP body compression (response and request) -- Eliminate all usage of the thread-blocking `getaddrinfo(3)` call upon DNS -queries, significantly improving federation latency/ping and cache DNS results -(NXDOMAINs, successful queries, etc) using hickory-dns / hickory-resolver -- Enable HTTP/2 support on all requests -- Vastly improve RocksDB default settings to use new features that help with -performance significantly, uses settings tailored to SSDs, various ways to tweak -RocksDB, and a conduwuit setting to tell RocksDB to use settings that are -tailored to HDDs or slow spinning rust storage or buggy filesystems. -- Implement database flush and cleanup conduwuit operations when using RocksDB -- Implement RocksDB write buffer corking and coalescing in database write-heavy -areas -- Perform connection pooling and keepalives where necessary to significantly -improve federation performance and latency -- Various config options to tweak connection pooling, request timeouts, -connection timeouts, DNS timeouts and settings, etc with good defaults which -also help huge with performance via reusing connections and retrying where -needed -- Properly get and use the amount of parallelism / tokio workers -- Implement building conduwuit with jemalloc (which extends to the RocksDB -jemalloc feature for maximum gains) or hardened_malloc light variant, and -io_uring support, and produce CI builds with jemalloc and io_uring by default -for performance (Nix doesn't seem to build -[hardened_malloc-rs](https://github.com/girlbossceo/hardened_malloc-rs) -properly) -- Add support for caching DNS results with hickory-dns / hickory-resolver in -conduwuit (not a replacement for a proper resolver cache, but still far better -than nothing), also properly falls back on TCP for UDP errors or if a SRV -response is too large -- Add config option for using DNS over TCP, and config option for controlling -A/AAAA record lookup strategy (e.g. 
don't query AAAA records if you don't have -IPv6 connectivity) -- Overall significant database, Client-Server, and federation performance and -latency improvements (check out the ping room leaderboards if you don't believe -me :>) -- Add config options for RocksDB compression and bottommost compression, -including choosing the algorithm and compression level -- Use [loole](https://github.com/mahdi-shojaee/loole) MPSC channels instead of -tokio MPSC channels for huge performance boosts in sending channels (mainly -relevant for federation) and presence channels -- Use `tracing`/`log`'s `release_max_level_info` feature to improve performance, -build speeds, binary size, and CPU usage in release builds by avoid compiling -debug/trace log level macros that users will generally never use (can be -disabled with a build-time feature flag) -- Remove some unnecessary checks on EDU handling for incoming transactions, -effectively speeding them up -- Simplify, dedupe, etc huge chunks of the codebase, including some that were -unnecessary overhead, binary bloats, or preventing compiler/linker optimisations -- Implement zero-copy RocksDB database accessors, substantially improving -performance caused by unnecessary memory allocations - -## General Fixes/Features - -- Add legacy Element client hack fixing password changes and deactivations on -legacy Element Android/iOS due to usage of an unspecced `user` field for UIAA -- Raise and improve all the various request timeouts making some things like -room joins and client bugs error less or none at all than they should, and make -them all user configurable -- Add missing `reason` field to user ban events (`/ban`) -- Safer and cleaner shutdowns across incoming/outgoing requests (graceful -shutdown) and the database -- Stop sending `make_join` requests on room joins if 15 servers respond with -`M_UNSUPPORTED_ROOM_VERSION` or `M_INVALID_ROOM_VERSION` -- Stop sending `make_join` requests if 50 servers cannot provide `make_join` for -us -- Respect *most* client parameters for `/media/` requests (`allow_redirect` -still needs work) -- Return joined member count of rooms for push rules/conditions instead of a -hardcoded value of 10 -- Make `CONDUIT_CONFIG` optional, relevant for container users that configure -only by environment variables and no longer need to set `CONDUIT_CONFIG` to an -empty string. -- Allow HEAD and PATCH (MSC4138) HTTP requests in CORS for clients (despite not -being explicity mentioned in Matrix spec, HTTP spec says all HEAD requests need -to behave the same as GET requests, Synapse supports HEAD requests) -- Fix using conduwuit with flake-compat on NixOS -- Resolve and remove some "features" from upstream that result in concurrency -hazards, exponential backoff issues, or arbitrary performance limiters -- Find more servers for outbound federation `/hierarchy` requests instead of -just the room ID server name -- Support for suggesting servers to join through at -`/_matrix/client/v3/directory/room/{roomAlias}` -- Support for suggesting servers to join through us at -`/_matrix/federation/v1/query/directory` -- Misc edge-case search fixes (e.g. potentially missing some events) -- Misc `/sync` fixes (e.g. 
returning unnecessary data or incorrect/invalid -responses) -- Add `replaces_state` and `prev_sender` in `unsigned` for state event changes -which primarily makes Element's "See history" button on a state event functional -- Fix Conduit not allowing incoming federation requests for various world -readable rooms -- Fix Conduit not respecting the client-requested file name on media requests -- Prevent sending junk / non-membership events to `/send_join` and `/send_leave` -endpoints -- Only allow the requested membership type on `/send_join` and `/send_leave` -endpoints (e.g. don't allow leave memberships on join endpoints) -- Prevent state key impersonation on `/send_join` and `/send_leave` endpoints -- Validate `X-Matrix` origin and request body `"origin"` field on incoming -transactions -- Add `GET /_matrix/client/v1/register/m.login.registration_token/validity` -endpoint -- Explicitly define support for sliding sync at `/_matrix/client/versions` -(`org.matrix.msc3575`) -- Fix seeing empty status messages on user presences - -## Moderation - -- (Also see [Admin Room](#admin-room) for all the admin commands pertaining to -moderation, there's a lot!) -- Add support for room banning/blocking by ID using admin command -- Add support for serving `support` well-known from `[global.well_known]` -(MSC1929) (`/.well-known/matrix/support`) -- Config option to forbid publishing rooms to the room directory -(`lockdown_public_room_directory`) except for admins -- Admin commands to delete room aliases and unpublish rooms from our room -directory -- For all -[`/report`](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3roomsroomidreporteventid) -requests: check if the reported event ID belongs to the reported room ID, raise -report reasoning character limit to 750, fix broken formatting, make a small -delayed random response per spec suggestion on privacy, and check if the sender -user is in the reported room. -- Support blocking servers from downloading remote media from, returning a 404 -- Don't allow `m.call.invite` events to be sent in public rooms (prevents -calling the entire room) -- On new public room creations, only allow moderators to send `m.call.invite`, -`org.matrix.msc3401.call`, and `org.matrix.msc3401.call.member` events to -prevent unprivileged users from calling the entire room -- Add support for a "global ACLs" feature (`forbidden_remote_server_names`) that -blocks inbound remote room invites, room joins by room ID on server name, room -joins by room alias on server name, incoming federated joins, and incoming -federated room directory requests. This is very helpful for blocking servers -that are purely toxic/bad and serve no value in allowing our users to suffer -from things like room invite spam or such. Please note that this is not a -substitute for room ACLs. -- Add support for a config option to forbid our local users from sending -federated room directory requests for -(`forbidden_remote_room_directory_server_names`). Similar to above, useful for -blocking servers that help prevent our users from wandering into bad areas of -Matrix via room directories of those malicious servers. 
-- Add config option for auto remediating/deactivating local non-admin users who -attempt to join bad/forbidden rooms (`auto_deactivate_banned_room_attempts`) -- Deactivating users will remove their profile picture, blurhash, display name, -and leave all rooms by default just like Synapse and for additional privacy -- Reject some EDUs from ACL'd users such as read receipts and typing indicators - -## Privacy/Security - -- Add config option for device name federation with a privacy-friendly default -(disabled) -- Add config option for requiring authentication to the `/publicRooms` endpoint -(room directory) with a default enabled for privacy -- Add config option for federating `/publicRooms` endpoint (room directory) to -other servers with a default disabled for privacy -- Uses proper `argon2` crate by RustCrypto instead of questionable `rust-argon2` -crate -- Generate passwords with 25 characters instead of 15 -- Config option `ip_range_denylist` to support refusing to send requests -(typically federation) to specific IP ranges, typically RFC 1918, non-routable, -testnet, etc addresses like Synapse for security (note: this is not a guaranteed -protection, and you should be using a firewall with zones if you want guaranteed -protection as doing this on the application level is prone to bypasses). -- Config option to block non-admin users from sending room invites or receiving -remote room invites. Admin users are still allowed. -- Config option to disable incoming and/or outgoing remote read receipts -- Config option to disable incoming and/or outgoing remote typing indicators -- Config option to disable incoming, outgoing, and/or local presence and for -timing out remote users -- Sanitise file names for the `Content-Disposition` header for all media -requests (thumbnails, downloads, uploads) -- Media repository on handling `Content-Disposition` and `Content-Type` is fully -spec compliant and secured -- Send secure default HTTP headers such as a strong restrictive CSP (see -MSC4149), deny iframes, disable `X-XSS-Protection`, disable interest cohort in -`Permission-Policy`, etc to mitigate any potential attack surface such as from -untrusted media - -## Administration/Logging - -- Commandline argument to specify the path to a config file instead of relying -on `CONDUIT_CONFIG` -- Revamped admin room infrastructure and commands -- Substantially clean up, improve, and fix logging (less noisy dead server -logging, registration attempts, more useful troubleshooting logging, proper -error propagation, etc) -- Configurable RocksDB logging (`LOG` files) with proper defaults (rotate, max -size, verbosity, etc) to stop LOG files from accumulating so much -- Explicit startup error if your configuration allows open registration without -a token or such like Synapse with a way to bypass it if needed -- Replace the lightning bolt emoji option with support for setting any arbitrary -text (e.g. 
another emoji) to suffix to all new user registrations, with a -conduwuit default of "🏳️‍⚧️" -- Implement config option to auto join rooms upon registration -- Warn on unknown config options specified -- Add `/_conduwuit/server_version` route to return the version of conduwuit -without relying on the federation API `/_matrix/federation/v1/version` -- Add `/_conduwuit/local_user_count` route to return the amount of registered -active local users on your homeserver *if federation is enabled* -- Add configurable RocksDB recovery modes to aid in recovering corrupted RocksDB -databases -- Support config options via `CONDUWUIT_` prefix and accessing non-global struct -config options with the `__` split (e.g. `CONDUWUIT_WELL_KNOWN__SERVER`) -- Add support for listening on multiple TCP ports and multiple addresses -- **Opt-in** Sentry.io telemetry and metrics, mainly used for crash reporting -- Log the client IP on various requests such as registrations, banned room join -attempts, logins, deactivations, federation transactions, etc -- Fix Conduit dropping some remote server federation response errors - -## Maintenance/Stability - -- GitLab CI ported to GitHub Actions -- Add support for the Matrix spec compliance test suite -[Complement](https://github.com/matrix-org/complement/) via the Nix flake and -various other fixes for it -- Implement running and diff'ing Complement results in CI and error if any -mismatch occurs to prevent large cases of conduwuit regressions -- Repo is (officially) mirrored to GitHub, GitLab, git.gay, git.girlcock.ceo, -sourcehut, and Codeberg (see README.md for their links) -- Docker container images published to GitLab Container Registry, GitHub -Container Registry, and Dockerhub -- Extensively revamp the example config to be extremely helpful and useful to -both new users and power users -- Fixed every single clippy (default lints) and rustc warnings, including some -that were performance related or potential safety issues / unsoundness -- Add a **lot** of other clippy and rustc lints and a rustfmt.toml file -- Repo uses [Renovate](https://docs.renovatebot.com/) and keeps ALL -dependencies as up to date as possible -- Purge unmaintained/irrelevant/broken database backends (heed, sled, persy) and -other unnecessary code or overhead -- webp support for images -- Add cargo audit support to CI -- Add documentation lints via lychee and markdownlint-cli to CI -- CI tests for all sorts of feature matrixes (jemalloc, non-defaullt, all -features, etc) -- Add static and dynamic linking smoke tests in CI to prevent any potential -linking regressions for Complement, static binaries, Nix devshells, etc -- Add timestamp by commit date when building OCI images for keeping image build -reproducibility and still have a meaningful "last modified date" for OCI image -- Add timestamp by commit date via `SOURCE_DATE_EPOCH` for Debian packages -- Startup check if conduwuit running in a container and is listening on -127.0.0.1 (generally containers are using NAT networking and 0.0.0.0 is the -intended listening address) -- Add a panic catcher layer to return panic messages in HTTP responses if a -panic occurs -- Add full compatibility support for SHA256 media file names instead of base64 -file names to overcome filesystem file name length limitations (OS error file -name too long) while still retaining upstream database compatibility -- Remove SQLite support due to being very poor performance, difficult to -maintain against RocksDB, and is a blocker to significantly improved database -code - 
-## Admin Room - -- Add support for a console CLI interface that can issue admin commands and -output them in your terminal -- Add support for an admin-user-only commandline admin room interface that can -be issued in any room with the `\\!admin` or `\!admin` prefix and returns the -response as yourself in the same room -- Add admin commands for uptime, server startup, server shutdown, and server -restart -- Fix admin room handler to not panic/crash if the admin room command response -fails (e.g. too large message) -- Add command to dynamically change conduwuit's tracing log level filter on the -fly -- Add admin command to fetch a server's `/.well-known/matrix/support` file -- Add debug admin command to force update user device lists (could potentially -resolve some E2EE flukes) -- Implement **RocksDB online backups**, listing RocksDB backups, and listing -database file counts all via admin commands -- Add various database visibility commands such as being able to query the -getters and iterators used in conduwuit, a very helpful online debugging utility -- Forbid the admin room from being made public or world readable history -- Add `!admin` as a way to call the admin bot -- Extend clear cache admin command to support clearing more caches such as DNS -and TLS name overrides -- Admin debug command to send a federation request/ping to a server's -`/_matrix/federation/v1/version` endpoint and measures the latency it took -- Add admin command to bulk delete media via a codeblock list of MXC URLs. -- Add admin command to delete both the thumbnail and media MXC URLs from an -event ID (e.g. from an abuse report) -- Add admin command to list all the rooms a local user is joined in -- Add admin command to list joined members in a room -- Add admin command to view the room topic of a room -- Add admin command to delete all remote media in the past X minutes as a form -of deleting media that you don't want on your server that a remote user posted -in a room, a `--force` flag to ignore errors, and support for reading `last -modified time` instead of `creation time` for filesystems that don't support -file created metadata -- Add admin command to return a room's full/complete state -- Admin debug command to fetch a PDU from a remote server and inserts it into -our database/timeline as backfill -- Add admin command to delete media via a specific MXC. This deletes the MXC -from our database, and the file locally. 
-- Add admin commands for banning (blocking) room IDs from our local users -joining (admins are always allowed) and evicts all our local users from that -room, in addition to bulk room banning support, and blocks room invites (remote -and local) to the banned room, as a moderation feature -- Add admin commands to output jemalloc memory stats and memory usage -- Add admin command to get rooms a *remote* user shares with us -- Add debug admin commands to get the earliest and latest PDU in a room -- Add debug admin command to echo a message -- Add admin command to insert rooms tags for a user, most useful for inserting -the `m.server_notice` tag on your admin room to make it "persistent" in the -"System Alerts" section of Element -- Add experimental admin debug command for Dendrite's `AdminDownloadState` -(`/admin/downloadState/{serverName}/{roomID}`) admin API endpoint to download -and use a remote server's room state in the room -- Disable URL previews by default in the admin room due to various command -outputs having "URLs" in them that clients may needlessly render/request -- Extend memory usage admin server command to support showing memory allocator -stats such as jemalloc's -- Add admin debug command to see memory allocator's full extended debug -statistics such as jemalloc's - -## Misc - -- Add guest support for accessing TURN servers via `turn_allow_guests` like -Synapse -- Support for creating rooms with custom room IDs like Maunium Synapse -(`room_id` request body field to `/createRoom`) -- Query parameter `?format=event|content` for returning either the room state -event's content (default) for the full room state event on -`/_matrix/client/v3/rooms/{roomId}/state/{eventType}[/{stateKey}]` requests (see -) -- Send a User-Agent on all of our requests -- Send `avatar_url` on invite room membership events/changes -- Support sending [`well_known` response to client login -responses](https://spec.matrix.org/v1.10/client-server-api/#post_matrixclientv3login) -if using config option `[well_known.client]` -- Implement `include_state` search criteria support for `/search` requests -(response now can include room states) -- Declare various missing Matrix versions and features at -`/_matrix/client/versions` -- Implement legacy Matrix `/v1/` media endpoints that some clients and servers -may still call -- Config option to change Conduit's behaviour of homeserver key fetching -(`query_trusted_key_servers_first`). This option sets whether conduwuit will -query trusted notary key servers first before the individual homeserver(s), or -vice versa which may help in joining certain rooms. -- Implement unstable MSC2666 support for querying mutual rooms with a user -- Implement unstable MSC3266 room summary API support -- Implement unstable MSC4125 support for specifying servers to join via on -federated invites -- Make conduwuit build and be functional under Nix + macOS -- Log out all sessions after unsetting the emergency password -- Assume well-knowns are broken if they exceed past 12288 characters. 
-- Add support for listening on both HTTP and HTTPS if using direct TLS with -conduwuit for usecases such as Complement -- Add config option for disabling RocksDB Direct IO if needed -- Add various documentation on maintaining conduwuit, using RocksDB online -backups, some troubleshooting, using admin commands, moderation documentation, -etc -- (Developers): Add support for [hot reloadable/"live" modular -development](development/hot_reload.md) -- (Developers): Add support for tokio-console -- (Developers): Add support for tracing flame graphs -- No cryptocurrency donations allowed, conduwuit is fully maintained by -independent queer maintainers, and with a strong priority on inclusitivity and -comfort for protected groups 🏳️‍⚧️ -- [Add a community Code of Conduct for all conduwuit community spaces, primarily -the Matrix space](https://conduwuit.puppyirl.gay/conduwuit_coc.html) diff --git a/docs/introduction.md b/docs/introduction.md index 9db76681..9d3a294a 100644 --- a/docs/introduction.md +++ b/docs/introduction.md @@ -4,10 +4,6 @@ {{#include ../README.md:body}} -#### What's different about your fork than upstream Conduit? - -See the [differences](differences.md) page - #### How can I deploy my own? - [Deployment options](deploying.md) From 1e23c95ec6e059c5d9b2b0083868596f1d38f5aa Mon Sep 17 00:00:00 2001 From: Tamara Schmitz <15906939+tamara-schmitz@users.noreply.github.com> Date: Mon, 10 Mar 2025 21:27:53 +0000 Subject: [PATCH 242/328] docs: refactor reverse proxy setup sections (#701) --- docs/deploying/generic.md | 59 ++++++++++++++++++++------------------- 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index 88ba01d5..a07da560 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -145,25 +145,32 @@ sudo chmod 700 /var/lib/conduwuit/ ## Setting up the Reverse Proxy -Refer to the documentation or various guides online of your chosen reverse proxy -software. There are many examples of basic Apache/Nginx reverse proxy setups -out there. +We recommend Caddy as a reverse proxy, as it is trivial to use, handling TLS certificates, reverse proxy headers, etc transparently with proper defaults. +For other software, please refer to their respective documentation or online guides. -A [Caddy](https://caddyserver.com/) example will be provided as this -is the recommended reverse proxy for new users and is very trivial to use -(handles TLS, reverse proxy headers, etc transparently with proper defaults). +### Caddy -Lighttpd is not supported as it seems to mess with the `X-Matrix` Authorization -header, making federation non-functional. If a workaround is found, feel free to share to get it added to the documentation here. +After installing Caddy via your preferred method, create `/etc/caddy/conf.d/conduwuit_caddyfile` +and enter this (substitute for your server name). -If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent this (note that Apache isn't very good as a general reverse proxy and we discourage the usage of it if you can). +```caddyfile +your.server.name, your.server.name:8448 { + # TCP reverse_proxy + reverse_proxy 127.0.0.1:6167 + # UNIX socket + #reverse_proxy unix//run/conduwuit/conduwuit.sock +} +``` -If using Nginx, you need to give conduwuit the request URI using `$request_uri`, or like so: -- `proxy_pass http://127.0.0.1:6167$request_uri;` -- `proxy_pass http://127.0.0.1:6167;` +That's it! Just start and enable the service and you're set. 
-Nginx users need to increase `client_max_body_size` (default is 1M) to match -`max_request_size` defined in conduwuit.toml. +```bash +sudo systemctl enable --now caddy +``` + +### Other Reverse Proxies + +As we would prefer our users to use Caddy, we will not provide configuration files for other proxys. You will need to reverse proxy everything under following routes: - `/_matrix/` - core Matrix C-S and S-S APIs @@ -186,25 +193,19 @@ Examples of delegation: - - -### Caddy +For Apache and Nginx there are many examples available online. -Create `/etc/caddy/conf.d/conduwuit_caddyfile` and enter this (substitute for -your server name). +Lighttpd is not supported as it seems to mess with the `X-Matrix` Authorization +header, making federation non-functional. If a workaround is found, feel free to share to get it added to the documentation here. -```caddyfile -your.server.name, your.server.name:8448 { - # TCP reverse_proxy - reverse_proxy 127.0.0.1:6167 - # UNIX socket - #reverse_proxy unix//run/conduwuit/conduwuit.sock -} -``` +If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent httpd from messing with the `X-Matrix` header (note that Apache isn't very good as a general reverse proxy and we discourage the usage of it if you can). -That's it! Just start and enable the service and you're set. +If using Nginx, you need to give conduwuit the request URI using `$request_uri`, or like so: +- `proxy_pass http://127.0.0.1:6167$request_uri;` +- `proxy_pass http://127.0.0.1:6167;` -```bash -sudo systemctl enable --now caddy -``` +Nginx users need to increase `client_max_body_size` (default is 1M) to match +`max_request_size` defined in conduwuit.toml. ## You're done From 1366a3092f5be044fbe39225dd606ef3445899d5 Mon Sep 17 00:00:00 2001 From: Ginger <75683114+gingershaped@users.noreply.github.com> Date: Mon, 10 Mar 2025 17:28:19 -0400 Subject: [PATCH 243/328] Check the `room_types` filter when searching for local public rooms (#698) --- src/api/client/directory.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 88f0e668..7ce32e4c 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -15,7 +15,7 @@ use ruma::{ }, federation, }, - directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork}, + directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork, RoomTypeFilter}, events::{ StateEventType, room::{ @@ -289,6 +289,9 @@ pub(crate) async fn get_public_rooms_filtered_helper( .map(ToOwned::to_owned) .then(|room_id| public_rooms_chunk(services, room_id)) .filter_map(|chunk| async move { + if !filter.room_types.is_empty() && !filter.room_types.contains(&RoomTypeFilter::from(chunk.room_type.clone())) { + return None; + } if let Some(query) = filter.generic_search_term.as_ref().map(|q| q.to_lowercase()) { if let Some(name) = &chunk.name { if name.as_str().to_lowercase().contains(&query) { From c4b05e77f3dd66636e26b64f8f4852703816c399 Mon Sep 17 00:00:00 2001 From: Odd Eivind Ebbesen Date: Mon, 10 Mar 2025 22:28:29 +0100 Subject: [PATCH 244/328] Fix up wording in the doc comments for admin media deletion (#694) --- src/admin/media/mod.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/admin/media/mod.rs b/src/admin/media/mod.rs index d212aab4..405c26d5 100644 --- a/src/admin/media/mod.rs +++ b/src/admin/media/mod.rs @@ -27,18 +27,18 @@ pub(super) enum MediaCommand { DeleteList, /// - Deletes all remote (and optionally 
local) media created before or - /// after \[duration] time using filesystem metadata first created at - /// date, or fallback to last modified date. This will always ignore - /// errors by default. + /// after [duration] time using filesystem metadata first created at date, + /// or fallback to last modified date. This will always ignore errors by + /// default. DeletePastRemoteMedia { /// - The relative time (e.g. 30s, 5m, 7d) within which to search duration: String, - /// - Only delete media created more recently than \[duration] ago + /// - Only delete media created before [duration] ago #[arg(long, short)] before: bool, - /// - Only delete media created after \[duration] ago + /// - Only delete media created after [duration] ago #[arg(long, short)] after: bool, From 3104586884b0027a1404bfe1986d569ff9e492d4 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 18:05:36 -0400 Subject: [PATCH 245/328] bump tracing-subscriber, allowlist cargo-doc lint in admin room Signed-off-by: June Clementine Strawberry --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 10 +++++----- src/admin/media/mod.rs | 1 + 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 65e8eca1..22d93237 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -440,7 +440,7 @@ dependencies = [ "bitflags 2.9.0", "cexpr", "clang-sys", - "itertools 0.12.1", + "itertools 0.13.0", "proc-macro2", "quote", "regex", @@ -2382,7 +2382,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -4833,7 +4833,7 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" version = "0.1.41" -source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" +source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "log", "pin-project-lite", @@ -4844,7 +4844,7 @@ dependencies = [ [[package]] name = "tracing-attributes" version = "0.1.28" -source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" +source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "proc-macro2", "quote", @@ -4854,7 +4854,7 @@ dependencies = [ [[package]] name = "tracing-core" version = "0.1.33" -source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" +source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "once_cell", "valuable", @@ -4874,7 +4874,7 @@ dependencies = [ [[package]] name = "tracing-log" version = "0.2.0" -source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" +source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "log", "once_cell", @@ -4901,8 +4901,8 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" -source = 
"git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" +version = "0.3.19" +source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "matchers", "nu-ansi-term", diff --git a/Cargo.toml b/Cargo.toml index d611c08e..1528349c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -210,7 +210,7 @@ default-features = false version = "0.1.41" default-features = false [workspace.dependencies.tracing-subscriber] -version = "=0.3.18" +version = "0.3.19" default-features = false features = ["env-filter", "std", "tracing", "tracing-log", "ansi", "fmt"] [workspace.dependencies.tracing-core] @@ -541,16 +541,16 @@ version = "1.0.2" # https://github.com/girlbossceo/tracing/commit/b348dca742af641c47bc390261f60711c2af573c [patch.crates-io.tracing-subscriber] git = "https://github.com/girlbossceo/tracing" -rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" +rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" [patch.crates-io.tracing] git = "https://github.com/girlbossceo/tracing" -rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" +rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" [patch.crates-io.tracing-core] git = "https://github.com/girlbossceo/tracing" -rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" +rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" [patch.crates-io.tracing-log] git = "https://github.com/girlbossceo/tracing" -rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" +rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" # adds a tab completion callback: https://github.com/girlbossceo/rustyline-async/commit/de26100b0db03e419a3d8e1dd26895d170d1fe50 # adds event for CTRL+\: https://github.com/girlbossceo/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b diff --git a/src/admin/media/mod.rs b/src/admin/media/mod.rs index 405c26d5..641834b2 100644 --- a/src/admin/media/mod.rs +++ b/src/admin/media/mod.rs @@ -1,3 +1,4 @@ +#![allow(rustdoc::broken_intra_doc_links)] mod commands; use clap::Subcommand; From 7f95eef9abf86298a25fd0bd410835084742eaae Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 21:01:20 -0400 Subject: [PATCH 246/328] bump ruwuma Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 22d93237..c93716f9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3507,7 +3507,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "assign", "js_int", @@ -3527,7 +3527,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "js_int", "ruma-common", @@ -3539,7 +3539,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "as_variant", "assign", @@ -3562,7 +3562,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "as_variant", "base64 0.22.1", @@ -3594,7 +3594,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3619,7 +3619,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "bytes", "headers", @@ -3641,7 +3641,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3650,7 +3650,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "js_int", "ruma-common", @@ -3660,7 +3660,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3675,7 +3675,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "js_int", "ruma-common", @@ -3687,7 +3687,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index 1528349c..c09cdaea 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +rev = "69133fd53ca063552788c8dfbaf5e01c98dec3e7" features = [ "compat", "rand", From ae818d5b25977a6c4543bca16b78af6f2fa0cca7 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 21:08:41 -0400 Subject: [PATCH 247/328] remove most of cargo test from engage as crane does that but with more caching Signed-off-by: June Clementine Strawberry --- engage.toml | 36 ------------------------------------ 1 file changed, 36 deletions(-) diff --git a/engage.toml b/engage.toml index 0a857b5a..210bafd5 100644 --- a/engage.toml +++ b/engage.toml @@ -161,24 +161,6 @@ name = "markdownlint" group = "lints" script = "markdownlint docs *.md || true" # TODO: fix the ton of markdown lints so we can drop `|| true` -[[task]] -name = "cargo/all" -group = "tests" -script = """ -env DIRENV_DEVSHELL=all-features \ - direnv exec . \ - cargo test \ - --workspace \ - --locked \ - --profile test \ - --all-targets \ - --no-fail-fast \ - --all-features \ - --color=always \ - -- \ - --color=always -""" - [[task]] name = "cargo/default" group = "tests" @@ -196,24 +178,6 @@ env DIRENV_DEVSHELL=default \ --color=always """ -[[task]] -name = "cargo/no-features" -group = "tests" -script = """ -env DIRENV_DEVSHELL=no-features \ - direnv exec . \ - cargo test \ - --workspace \ - --locked \ - --profile test \ - --all-targets \ - --no-fail-fast \ - --no-default-features \ - --color=always \ - -- \ - --color=always -""" - # Checks if the generated example config differs from the checked in repo's # example config. [[task]] From e920c44cb488d398bc57fe4ce7fdffb3ded5038a Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 21:15:11 -0400 Subject: [PATCH 248/328] ignore humantime dep as tracing console-subscriber uses it (somewhere) Signed-off-by: June Clementine Strawberry --- .cargo/audit.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cargo/audit.toml b/.cargo/audit.toml index bf44fbd6..37148cfb 100644 --- a/.cargo/audit.toml +++ b/.cargo/audit.toml @@ -1,5 +1,5 @@ [advisories] -ignore = ["RUSTSEC-2024-0436"] # advisory IDs to ignore e.g. ["RUSTSEC-2019-0001", ...] +ignore = ["RUSTSEC-2024-0436", "RUSTSEC-2025-0014"] # advisory IDs to ignore e.g. ["RUSTSEC-2019-0001", ...] 
informational_warnings = [] # warn for categories of informational advisories severity_threshold = "none" # CVSS severity ("none", "low", "medium", "high", "critical") From 0877f294393954bbe49279456f012e1fbb604f78 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 22:21:53 -0400 Subject: [PATCH 249/328] respect membership filters on /members Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +++++++------- Cargo.toml | 2 +- src/api/client/membership.rs | 56 ++++++++++++++++++++++++++++++++++-- 3 files changed, 66 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c93716f9..ab155fd0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3507,7 +3507,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "assign", "js_int", @@ -3527,7 +3527,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "js_int", "ruma-common", @@ -3539,7 +3539,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "as_variant", "assign", @@ -3562,7 +3562,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "as_variant", "base64 0.22.1", @@ -3594,7 +3594,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3619,7 +3619,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "bytes", "headers", @@ -3641,7 +3641,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3650,7 +3650,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "js_int", "ruma-common", @@ -3660,7 +3660,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3675,7 +3675,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "js_int", "ruma-common", @@ -3687,7 +3687,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index c09cdaea..2bf30d61 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "69133fd53ca063552788c8dfbaf5e01c98dec3e7" +rev = "24d018a0015bb85489ae84564701a49a643bcc57" features = [ "compat", "rand", diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 3f77e69e..11395e83 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -25,8 +25,9 @@ use ruma::{ error::ErrorKind, knock::knock_room, membership::{ - ThirdPartySigned, ban_user, forget_room, get_member_events, invite_user, - join_room_by_id, join_room_by_id_or_alias, + ThirdPartySigned, ban_user, forget_room, + get_member_events::{self, v3::MembershipEventFilter}, + invite_user, join_room_by_id, join_room_by_id_or_alias, joined_members::{self, v3::RoomMember}, joined_rooms, kick_user, leave_room, unban_user, }, @@ -768,6 +769,54 @@ pub(crate) async fn joined_rooms_route( }) } +fn membership_filter( + pdu: PduEvent, + for_membership: Option<&MembershipEventFilter>, + not_membership: Option<&MembershipEventFilter>, +) -> Option { + let membership_state_filter = match for_membership { + | Some(MembershipEventFilter::Ban) => MembershipState::Ban, + | Some(MembershipEventFilter::Invite) => MembershipState::Invite, + | Some(MembershipEventFilter::Knock) => MembershipState::Knock, + | Some(MembershipEventFilter::Leave) => MembershipState::Leave, + | Some(_) | None => MembershipState::Join, + }; + + let not_membership_state_filter = match not_membership { + | 
Some(MembershipEventFilter::Ban) => MembershipState::Ban, + | Some(MembershipEventFilter::Invite) => MembershipState::Invite, + | Some(MembershipEventFilter::Join) => MembershipState::Join, + | Some(MembershipEventFilter::Knock) => MembershipState::Knock, + | Some(_) | None => MembershipState::Leave, + }; + + let evt_membership = pdu.get_content::().ok()?.membership; + + if for_membership.is_some() && not_membership.is_some() { + if membership_state_filter != evt_membership + || not_membership_state_filter == evt_membership + { + None + } else { + Some(pdu) + } + } else if for_membership.is_some() && not_membership.is_none() { + if membership_state_filter != evt_membership { + None + } else { + Some(pdu) + } + } else if not_membership.is_some() && for_membership.is_none() { + if not_membership_state_filter == evt_membership { + None + } else { + Some(pdu) + } + } else { + Some(pdu) + } +} + /// # `POST /_matrix/client/r0/rooms/{roomId}/members` /// /// Lists all joined users in a room (TODO: at a specific point in time, with a @@ -779,6 +828,8 @@ pub(crate) async fn get_member_events_route( body: Ruma, ) -> Result { let sender_user = body.sender_user(); + let membership = body.membership.as_ref(); + let not_membership = body.not_membership.as_ref(); if !services .rooms @@ -797,6 +848,7 @@ pub(crate) async fn get_member_events_route( .ready_filter_map(Result::ok) .ready_filter(|((ty, _), _)| *ty == StateEventType::RoomMember) .map(at!(1)) + .ready_filter_map(|pdu| membership_filter(pdu, membership, not_membership)) .map(PduEvent::into_member_event) .collect() .await, From 1d1ccec532bf3eaebf499d3ff4c9f7a24369c389 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 23:05:56 -0400 Subject: [PATCH 250/328] fix some nightly clippy lints Signed-off-by: June Clementine Strawberry --- Cargo.toml | 3 +++ clippy.toml | 3 ++- src/admin/processor.rs | 8 +++++--- src/api/client/account.rs | 4 ++-- src/api/client/state.rs | 2 +- src/core/utils/string.rs | 1 + 6 files changed, 14 insertions(+), 7 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 2bf30d61..fd477850 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -841,6 +841,9 @@ unused_crate_dependencies = "allow" unsafe_code = "allow" variant_size_differences = "allow" +# we check nightly clippy lints +unknown_lints = "allow" + ####################################### # # Clippy lints diff --git a/clippy.toml b/clippy.toml index 42427101..863759aa 100644 --- a/clippy.toml +++ b/clippy.toml @@ -2,9 +2,10 @@ array-size-threshold = 4096 cognitive-complexity-threshold = 94 # TODO reduce me ALARA excessive-nesting-threshold = 11 # TODO reduce me to 4 or 5 future-size-threshold = 7745 # TODO reduce me ALARA -stack-size-threshold = 196608 # reduce me ALARA +stack-size-threshold = 196608 # TODO reduce me ALARA too-many-lines-threshold = 780 # TODO reduce me to <= 100 type-complexity-threshold = 250 # reduce me to ~200 +large-error-threshold = 256 # TODO reduce me ALARA disallowed-macros = [ { path = "log::error", reason = "use conduwuit_core::error" }, diff --git a/src/admin/processor.rs b/src/admin/processor.rs index 77a60959..53a15098 100644 --- a/src/admin/processor.rs +++ b/src/admin/processor.rs @@ -91,6 +91,7 @@ async fn process_command(services: Arc, input: &CommandInput) -> Proce } } +#[allow(clippy::result_large_err)] fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult { let link = "Please submit a [bug report](https://github.com/girlbossceo/conduwuit/issues/new). 
🥺"; @@ -100,7 +101,7 @@ fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult { Err(reply(content, command.reply_id.as_deref())) } -// Parse and process a message from the admin room +/// Parse and process a message from the admin room async fn process( context: &Command<'_>, command: AdminCommand, @@ -164,7 +165,8 @@ fn capture_create(context: &Command<'_>) -> (Arc, Arc>) { (capture, logs) } -// Parse chat messages from the admin room into an AdminCommand object +/// Parse chat messages from the admin room into an AdminCommand object +#[allow(clippy::result_large_err)] fn parse<'a>( services: &Arc, input: &'a CommandInput, @@ -232,7 +234,7 @@ fn complete_command(mut cmd: clap::Command, line: &str) -> String { ret.join(" ") } -// Parse chat messages from the admin room into an AdminCommand object +/// Parse chat messages from the admin room into an AdminCommand object fn parse_line(command_line: &str) -> Vec { let mut argv = command_line .split_whitespace() diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 2b8209d4..32438098 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -109,7 +109,7 @@ pub(crate) async fn get_register_available_route( if !info.is_user_match(&user_id) { return Err!(Request(Exclusive("Username is not in an appservice namespace."))); } - }; + } if services.appservice.is_exclusive_user_id(&user_id).await { return Err!(Request(Exclusive("Username is reserved by an appservice."))); @@ -159,7 +159,7 @@ pub(crate) async fn register_route( | (None, _) => { info!(%is_guest, "Rejecting registration attempt as registration is disabled"); }, - }; + } return Err!(Request(Forbidden("Registration has been disabled."))); } diff --git a/src/api/client/state.rs b/src/api/client/state.rs index db79735f..9563c26d 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -254,7 +254,7 @@ async fn allowed_to_send_state_event( "Room server ACL event is invalid: {e}" )))); }, - }; + } }, | StateEventType::RoomEncryption => // Forbid m.room.encryption if encryption is disabled diff --git a/src/core/utils/string.rs b/src/core/utils/string.rs index 9340d009..d8fa3f95 100644 --- a/src/core/utils/string.rs +++ b/src/core/utils/string.rs @@ -60,6 +60,7 @@ pub fn camel_to_snake_string(s: &str) -> String { } #[inline] +#[allow(clippy::unbuffered_bytes)] // these are allocated string utilities, not file I/O utils pub fn camel_to_snake_case(output: &mut O, input: I) -> Result<()> where I: std::io::Read, From 5dea52f0f87dc640274e0f3ecb38b96ac9293f44 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 23:45:53 -0400 Subject: [PATCH 251/328] stop doing complement cert gen and just use self-signed cert Signed-off-by: June Clementine Strawberry --- bin/complement | 2 +- flake.lock | 6 +++--- nix/pkgs/complement/certificate.crt | 21 +++++++++++++++++++ nix/pkgs/complement/default.nix | 19 +---------------- nix/pkgs/complement/signing_request.csr | 28 ++++++++++++------------- nix/pkgs/complement/v3.ext | 6 ++++++ 6 files changed, 46 insertions(+), 36 deletions(-) create mode 100644 nix/pkgs/complement/certificate.crt diff --git a/bin/complement b/bin/complement index 92539f97..3aa5a6f5 100755 --- a/bin/complement +++ b/bin/complement @@ -68,7 +68,7 @@ set +o pipefail env \ -C "$COMPLEMENT_SRC" \ COMPLEMENT_BASE_IMAGE="$COMPLEMENT_BASE_IMAGE" \ - go test -tags="conduwuit_blacklist" -timeout 1h -json ./tests/... | tee "$LOG_FILE" + go test -tags="conduwuit_blacklist" -v -timeout 1h -json ./tests/... 
| tee "$LOG_FILE" set -o pipefail # Post-process the results into an easy-to-compare format, sorted by Test name for reproducible results diff --git a/flake.lock b/flake.lock index 03fc205c..63cc2787 100644 --- a/flake.lock +++ b/flake.lock @@ -80,11 +80,11 @@ "complement": { "flake": false, "locked": { - "lastModified": 1741378155, - "narHash": "sha256-rJSfqf3q4oWxcAwENtAowLZeCi8lktwKVH9XQvvZR64=", + "lastModified": 1741757487, + "narHash": "sha256-Fkx/krwI3h6wJ6Mj199KlXUNJNEwl7h1pR4/d2ncmKw=", "owner": "girlbossceo", "repo": "complement", - "rev": "1502a00d8551d0f6e8954a23e43868877c3e57d9", + "rev": "40982a261cfc36650f74967f99fb1a049b13e065", "type": "github" }, "original": { diff --git a/nix/pkgs/complement/certificate.crt b/nix/pkgs/complement/certificate.crt new file mode 100644 index 00000000..5dd4fdea --- /dev/null +++ b/nix/pkgs/complement/certificate.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDfzCCAmegAwIBAgIUcrZdSPmCh33Evys/U6mTPpShqdcwDQYJKoZIhvcNAQEL +BQAwPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQKDAx3b29mZXJz +IGluYy4xDDAKBgNVBAMMA2hzMTAgFw0yNTAzMTMxMjU4NTFaGA8yMDUyMDcyODEy +NTg1MVowPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQKDAx3b29m +ZXJzIGluYy4xDDAKBgNVBAMMA2hzMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBANL+h2ZmK/FqN5uLJPtIy6Feqcyb6EX7MQBEtxuJ56bTAbjHuCLZLpYt +/wOWJ91drHqZ7Xd5iTisGdMu8YS803HSnHkzngf4VXKhVrdzW2YDrpZRxmOhtp88 +awOHmP7mqlJyBbCOQw8aDVrT0KmEIWzA7g+nFRQ5Ff85MaP+sQrHGKZbo61q8HBp +L0XuaqNckruUKtxnEqrm5xx5sYyYKg7rrSFE5JMFoWKB1FNWJxyWT42BhGtnJZsK +K5c+NDSOU4TatxoN6mpNSBpCz/a11PiQHMEfqRk6JA4g3911dqPTfZBevUdBh8gl +8maIzqeZGhvyeKTmull1Y0781yyuj98CAwEAAaNxMG8wCQYDVR0TBAIwADALBgNV +HQ8EBAMCBPAwNgYDVR0RBC8wLYIRKi5kb2NrZXIuaW50ZXJuYWyCA2hzMYIDaHMy +ggNoczOCA2hzNIcEfwAAATAdBgNVHQ4EFgQUr4VYrmW1d+vjBTJewvy7fJYhLDYw +DQYJKoZIhvcNAQELBQADggEBADkYqkjNYxjWX8hUUAmFHNdCwzT1CpYe/5qzLiyJ +irDSdMlC5g6QqMUSrpu7nZxo1lRe1dXGroFVfWpoDxyCjSQhplQZgtYqtyLfOIx+ +HQ7cPE/tUU/KsTGc0aL61cETB6u8fj+rQKUGdfbSlm0Rpu4v0gC8RnDj06X/hZ7e +VkWU+dOBzxlqHuLlwFFtVDgCyyTatIROx5V+GpMHrVqBPO7HcHhwqZ30k2kMM8J3 +y1CWaliQM85jqtSZV+yUHKQV8EksSowCFJuguf+Ahz0i0/koaI3i8m4MRN/1j13d +jbTaX5a11Ynm3A27jioZdtMRty6AJ88oCp18jxVzqTxNNO4= +-----END CERTIFICATE----- diff --git a/nix/pkgs/complement/default.nix b/nix/pkgs/complement/default.nix index bbd1bd74..9b010e14 100644 --- a/nix/pkgs/complement/default.nix +++ b/nix/pkgs/complement/default.nix @@ -3,10 +3,8 @@ , buildEnv , coreutils , dockerTools -, gawk , lib , main -, openssl , stdenv , tini , writeShellScriptBin @@ -42,21 +40,6 @@ let start = writeShellScriptBin "start" '' set -euxo pipefail - cp ${./v3.ext} /complement/v3.ext - echo "DNS.1 = $SERVER_NAME" >> /complement/v3.ext - echo "IP.1 = $(${lib.getExe gawk} 'END{print $1}' /etc/hosts)" \ - >> /complement/v3.ext - ${lib.getExe openssl} x509 \ - -req \ - -extfile /complement/v3.ext \ - -in ${./signing_request.csr} \ - -CA /complement/ca/ca.crt \ - -CAkey /complement/ca/ca.key \ - -CAcreateserial \ - -out /complement/certificate.crt \ - -days 1 \ - -sha256 - ${lib.getExe' coreutils "env"} \ CONDUWUIT_SERVER_NAME="$SERVER_NAME" \ ${lib.getExe main'} @@ -93,7 +76,7 @@ dockerTools.buildImage { Env = [ "CONDUWUIT_TLS__KEY=${./private_key.key}" - "CONDUWUIT_TLS__CERTS=/complement/certificate.crt" + "CONDUWUIT_TLS__CERTS=${./certificate.crt}" "CONDUWUIT_CONFIG=${./config.toml}" "RUST_BACKTRACE=full" ]; diff --git a/nix/pkgs/complement/signing_request.csr b/nix/pkgs/complement/signing_request.csr index 707e73b4..e2aa658e 100644 --- a/nix/pkgs/complement/signing_request.csr +++ 
b/nix/pkgs/complement/signing_request.csr @@ -1,16 +1,16 @@ -----BEGIN CERTIFICATE REQUEST----- -MIICkTCCAXkCAQAwTDELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRYwFAYDVQQK -DA13b29mZXJzLCBpbmMuMRgwFgYDVQQDDA9jb21wbGVtZW50LW9ubHkwggEiMA0G -CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDS/odmZivxajebiyT7SMuhXqnMm+hF -+zEARLcbieem0wG4x7gi2S6WLf8DlifdXax6me13eYk4rBnTLvGEvNNx0px5M54H -+FVyoVa3c1tmA66WUcZjobafPGsDh5j+5qpScgWwjkMPGg1a09CphCFswO4PpxUU -ORX/OTGj/rEKxximW6OtavBwaS9F7mqjXJK7lCrcZxKq5uccebGMmCoO660hROST -BaFigdRTVicclk+NgYRrZyWbCiuXPjQ0jlOE2rcaDepqTUgaQs/2tdT4kBzBH6kZ -OiQOIN/ddXaj032QXr1HQYfIJfJmiM6nmRob8nik5rpZdWNO/Ncsro/fAgMBAAGg -ADANBgkqhkiG9w0BAQsFAAOCAQEAjW+aD4E0phtRT5b2RyedY1uiSe7LQECsQnIO -wUSyGGG1GXYlJscyxxyzE9W9+QIALrxZkmc/+e02u+bFb1zQXW/uB/7u7FgXzrj6 -2YSDiWYXiYKvgGWEfCi3lpcTJK9x6WWkR+iREaoKRjcl0ynhhGuR7YwP38TNyu+z -FN6B1Lo398fvJkaTCiiHngWiwztXZ2d0MxkicuwZ1LJhIQA72OTl3QoRb5uiqbze -T9QJfU6W3v8cB8c8PuKMv5gl1QsGNtlfyQB56/X0cMxWl25vWXd2ankLkAGRTDJ8 -9YZHxP1ki4/yh75AknFq02nCOsmxYrAazCYgP2TzIPhQwBurKQ== +MIIChDCCAWwCAQAwPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQK +DAx3b29mZXJzIGluYy4xDDAKBgNVBAMMA2hzMTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBANL+h2ZmK/FqN5uLJPtIy6Feqcyb6EX7MQBEtxuJ56bTAbjH +uCLZLpYt/wOWJ91drHqZ7Xd5iTisGdMu8YS803HSnHkzngf4VXKhVrdzW2YDrpZR +xmOhtp88awOHmP7mqlJyBbCOQw8aDVrT0KmEIWzA7g+nFRQ5Ff85MaP+sQrHGKZb +o61q8HBpL0XuaqNckruUKtxnEqrm5xx5sYyYKg7rrSFE5JMFoWKB1FNWJxyWT42B +hGtnJZsKK5c+NDSOU4TatxoN6mpNSBpCz/a11PiQHMEfqRk6JA4g3911dqPTfZBe +vUdBh8gl8maIzqeZGhvyeKTmull1Y0781yyuj98CAwEAAaAAMA0GCSqGSIb3DQEB +CwUAA4IBAQDR/gjfxN0IID1MidyhZB4qpdWn3m6qZnEQqoTyHHdWalbfNXcALC79 +ffS+Smx40N5hEPvqy6euR89N5YuYvt8Hs+j7aWNBn7Wus5Favixcm2JcfCTJn2R3 +r8FefuSs2xGkoyGsPFFcXE13SP/9zrZiwvOgSIuTdz/Pbh6GtEx7aV4DqHJsrXnb +XuPxpQleoBqKvQgSlmaEBsJg13TQB+Fl2foBVUtqAFDQiv+RIuircf0yesMCKJaK +MPH4Oo+r3pR8lI8ewfJPreRhCoV+XrGYMubaakz003TJ1xlOW8M+N9a6eFyMVh76 +U1nY/KP8Ua6Lgaj9PRz7JCRzNoshZID/ -----END CERTIFICATE REQUEST----- diff --git a/nix/pkgs/complement/v3.ext b/nix/pkgs/complement/v3.ext index 6083d960..0deaa48a 100644 --- a/nix/pkgs/complement/v3.ext +++ b/nix/pkgs/complement/v3.ext @@ -4,3 +4,9 @@ keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment subjectAltName = @alt_names [alt_names] +DNS.1 = *.docker.internal +DNS.2 = hs1 +DNS.3 = hs2 +DNS.4 = hs3 +DNS.5 = hs4 +IP.1 = 127.0.0.1 From 258b399de93e74b00695ab42697dc31f5a49aa81 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 13 Mar 2025 10:52:13 -0400 Subject: [PATCH 252/328] bump ruwuma Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- flake.lock | 6 +++--- nix/pkgs/complement/config.toml | 6 +++--- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ab155fd0..c28f4eab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3507,7 +3507,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "assign", "js_int", @@ -3527,7 +3527,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "js_int", "ruma-common", @@ -3539,7 +3539,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "as_variant", "assign", @@ -3562,7 +3562,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "as_variant", "base64 0.22.1", @@ -3594,7 +3594,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3619,7 +3619,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "bytes", "headers", @@ -3641,7 +3641,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3650,7 +3650,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "js_int", "ruma-common", @@ -3660,7 +3660,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3675,7 +3675,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "js_int", "ruma-common", @@ -3687,7 
+3687,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index fd477850..db55b9b8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "24d018a0015bb85489ae84564701a49a643bcc57" +rev = "d197318a2507d38ffe6ee524d0d52728ca72538a" features = [ "compat", "rand", diff --git a/flake.lock b/flake.lock index 63cc2787..1f87b9b6 100644 --- a/flake.lock +++ b/flake.lock @@ -80,11 +80,11 @@ "complement": { "flake": false, "locked": { - "lastModified": 1741757487, - "narHash": "sha256-Fkx/krwI3h6wJ6Mj199KlXUNJNEwl7h1pR4/d2ncmKw=", + "lastModified": 1741891349, + "narHash": "sha256-YvrzOWcX7DH1drp5SGa+E/fc7wN3hqFtPbqPjZpOu1Q=", "owner": "girlbossceo", "repo": "complement", - "rev": "40982a261cfc36650f74967f99fb1a049b13e065", + "rev": "e587b3df569cba411aeac7c20b6366d03c143745", "type": "github" }, "original": { diff --git a/nix/pkgs/complement/config.toml b/nix/pkgs/complement/config.toml index 759f8d78..7f4ecef7 100644 --- a/nix/pkgs/complement/config.toml +++ b/nix/pkgs/complement/config.toml @@ -6,7 +6,7 @@ allow_public_room_directory_over_federation = true allow_public_room_directory_without_auth = true allow_registration = true database_path = "/database" -log = "trace,h2=warn,hyper=warn" +log = "trace,h2=debug,hyper=debug" port = [8008, 8448] trusted_servers = [] only_query_trusted_key_servers = false @@ -19,11 +19,11 @@ url_preview_domain_explicit_denylist = ["*"] media_compat_file_link = false media_startup_check = true prune_missing_media = true -log_colors = false +log_colors = true admin_room_notices = false allow_check_for_updates = false intentionally_unknown_config_option_for_testing = true -rocksdb_log_level = "debug" +rocksdb_log_level = "info" rocksdb_max_log_files = 1 rocksdb_recovery_mode = 0 rocksdb_paranoid_file_checks = true From 6c29792b3d9dfe1e65c5d3545296d431e058e375 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 22:21:42 -0400 Subject: [PATCH 253/328] respect include_leave syncv3 filter Signed-off-by: June Clementine Strawberry --- src/api/client/sync/v3.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index fb59837b..70c4c6a7 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -219,6 +219,7 @@ pub(crate) async fn build_sync_events( sender_user, next_batch, full_state, + filter.room.include_leave, &filter, ) .map_ok(move |left_room| (room_id, left_room)) @@ -412,6 +413,7 @@ async fn handle_left_room( sender_user: &UserId, next_batch: u64, full_state: bool, + include_leave: bool, filter: &FilterDefinition, ) -> Result> { let left_count = services @@ -540,6 +542,10 @@ async fn handle_left_room( continue; }; + if !include_leave && pdu.sender == sender_user { + continue; + } + left_state_events.push(pdu.to_sync_state_event()); } } From ee3c585555a80c037bdaa861beeecbf6e19a7f04 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Fri, 14 Mar 2025 15:57:18 -0400 Subject: [PATCH 254/328] skip a few flakey complement tests 
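The `include_leave` handling that the sync v3 patch above adds to `handle_left_room` reduces to a small predicate: a left room's state events are dropped from the response when the filter did not set `room.include_leave` and the event was sent by the syncing user themself. A minimal sketch of that decision, using a hypothetical helper rather than conduwuit's actual function:

```rust
/// Hypothetical stand-in for the check added in `handle_left_room`:
/// keep an event unless the filter omitted `include_leave` and the
/// event was sent by the user who is syncing.
fn keep_left_state_event(include_leave: bool, event_sender: &str, sync_user: &str) -> bool {
    include_leave || event_sender != sync_user
}

fn main() {
    // Default filter (include_leave = false): the user's own events in a
    // left room are filtered out of the returned state.
    assert!(!keep_left_state_event(false, "@alice:example.org", "@alice:example.org"));
    // Events sent by other users are still returned.
    assert!(keep_left_state_event(false, "@bob:example.org", "@alice:example.org"));
    // With include_leave set, everything is kept.
    assert!(keep_left_state_event(true, "@alice:example.org", "@alice:example.org"));
}
```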
Signed-off-by: June Clementine Strawberry --- bin/complement | 4 +- .../complement/test_results.jsonl | 82 ++----------------- 2 files changed, 8 insertions(+), 78 deletions(-) diff --git a/bin/complement b/bin/complement index 3aa5a6f5..c437503e 100755 --- a/bin/complement +++ b/bin/complement @@ -18,7 +18,7 @@ RESULTS_FILE="${3:-complement_test_results.jsonl}" COMPLEMENT_BASE_IMAGE="${COMPLEMENT_BASE_IMAGE:-complement-conduwuit:main}" # Complement tests that are skipped due to flakiness/reliability issues or we don't implement such features and won't for a long time -#SKIPPED_COMPLEMENT_TESTS='-skip=TestPartialStateJoin.*' +SKIPPED_COMPLEMENT_TESTS='TestPartialStateJoin.*|TestRoomDeleteAlias/Parallel/Regular_users_can_add_and_delete_aliases_when_m.*|TestRoomDeleteAlias/Parallel/Can_delete_canonical_alias|TestUnbanViaInvite.*|TestRoomState/Parallel/GET_/publicRooms_lists.*"|TestRoomDeleteAlias/Parallel/Users_with_sufficient_power-level_can_delete_other.*' # $COMPLEMENT_SRC needs to be a directory to Complement source code if [ -f "$COMPLEMENT_SRC" ]; then @@ -68,7 +68,7 @@ set +o pipefail env \ -C "$COMPLEMENT_SRC" \ COMPLEMENT_BASE_IMAGE="$COMPLEMENT_BASE_IMAGE" \ - go test -tags="conduwuit_blacklist" -v -timeout 1h -json ./tests/... | tee "$LOG_FILE" + go test -tags="conduwuit_blacklist" -skip="$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests/... | tee "$LOG_FILE" set -o pipefail # Post-process the results into an easy-to-compare format, sorted by Test name for reproducible results diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 5fb850f1..6b5f670e 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -174,10 +174,10 @@ {"Action":"pass","Test":"TestFilter"} {"Action":"fail","Test":"TestFilterMessagesByRelType"} {"Action":"pass","Test":"TestGappedSyncLeaveSection"} -{"Action":"fail","Test":"TestGetFilteredRoomMembers"} -{"Action":"fail","Test":"TestGetFilteredRoomMembers/membership/join"} -{"Action":"fail","Test":"TestGetFilteredRoomMembers/membership/leave"} -{"Action":"fail","Test":"TestGetFilteredRoomMembers/not_membership"} +{"Action":"pass","Test":"TestGetFilteredRoomMembers"} +{"Action":"pass","Test":"TestGetFilteredRoomMembers/membership/join"} +{"Action":"pass","Test":"TestGetFilteredRoomMembers/membership/leave"} +{"Action":"pass","Test":"TestGetFilteredRoomMembers/not_membership"} {"Action":"fail","Test":"TestGetMissingEventsGapFilling"} {"Action":"pass","Test":"TestGetRoomMembers"} {"Action":"fail","Test":"TestGetRoomMembersAtPoint"} @@ -360,72 +360,6 @@ {"Action":"pass","Test":"TestOutboundFederationProfile"} {"Action":"pass","Test":"TestOutboundFederationProfile/Outbound_federation_can_query_profile_data"} {"Action":"pass","Test":"TestOutboundFederationSend"} -{"Action":"fail","Test":"TestPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanFastJoinDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanLazyLoadingSyncDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveDeviceListUpdateDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsWithHalfMissingGrandparentsDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsWithHalfMissingParentsDuringPartialStateJoin"} 
-{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsWithMissingParentsDuringPartialStateJoin"} -{"Action":"skip","Test":"TestPartialStateJoin/CanReceivePresenceDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveReceiptDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveSigningKeyUpdateDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveToDeviceDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveTypingDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanSendEventsDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/Can_change_display_name_during_partial_state_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking"} -{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_for_user_incorrectly_believed_to_be_in_room"} -{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_when_failing_to_complete_partial_state_join"} -{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_when_leaving_partial_state_room"} -{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_when_new_member_leaves_partial_state_room"} -{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracked_for_new_members_in_partial_state_room"} -{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_pre-existing_members_in_partial_state_room"} -{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_user_incorrectly_believed_to_be_in_room_when_they_join_another_shared_room_before_partial_state_join_completes"} -{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_user_incorrectly_believed_to_be_in_room_when_they_rejoin_after_partial_state_join_completes"} -{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_user_incorrectly_believed_to_be_in_room_when_they_rejoin_before_partial_state_join_completes"} -{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_when_pre-existing_members_in_partial_state_room_join_another_shared_room"} -{"Action":"fail","Test":"TestPartialStateJoin/EagerIncrementalSyncDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/EagerInitialSyncDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/EagerLongPollingSyncWokenWhenResyncCompletes"} -{"Action":"fail","Test":"TestPartialStateJoin/GappySyncAfterPartialStateSynced"} -{"Action":"fail","Test":"TestPartialStateJoin/Lazy-loading_gappy_sync_includes_remote_memberships_during_partial_state_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Lazy-loading_incremental_sync_includes_remote_memberships_during_partial_state_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Lazy-loading_initial_sync_includes_remote_memberships_during_partial_state_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync"} -{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/can_be_triggered_by_remote_ban"} -{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/can_be_triggered_by_remote_kick"} -{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/does_not_wait_for_resync"} 
-{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/is_seen_after_the_resync"} -{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/succeeds,_then_another_user_can_join_without_resync_completing"} -{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/succeeds,_then_rejoin_succeeds_without_resync_completing"} -{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/works_after_a_second_partial_join"} -{"Action":"fail","Test":"TestPartialStateJoin/MembersRequestBlocksDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_no_longer_reach_departed_servers_after_partial_state_join_completes"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_all_servers_in_partial_state_rooms"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_absent_servers_once_partial_state_join_completes"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_absent_servers_once_partial_state_join_completes_even_though_remote_server_left_room"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_kicked_servers_once_partial_state_join_completes"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_kicked_servers_once_partial_state_join_completes_even_though_remote_server_left_room"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_newly_joined_servers_in_partial_state_rooms"} -{"Action":"fail","Test":"TestPartialStateJoin/PartialStateJoinContinuesAfterRestart"} -{"Action":"fail","Test":"TestPartialStateJoin/PartialStateJoinSyncsUsingOtherHomeservers"} -{"Action":"skip","Test":"TestPartialStateJoin/Purge_during_resync"} -{"Action":"fail","Test":"TestPartialStateJoin/Rejected_events_remain_rejected_after_resync"} -{"Action":"fail","Test":"TestPartialStateJoin/Rejects_make_join_during_partial_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Rejects_make_knock_during_partial_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Rejects_send_join_during_partial_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Rejects_send_knock_during_partial_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Resync_completes_even_when_events_arrive_before_their_prev_events"} -{"Action":"fail","Test":"TestPartialStateJoin/Room_aliases_can_be_added_and_deleted_during_a_resync"} -{"Action":"fail","Test":"TestPartialStateJoin/Room_aliases_can_be_added_and_queried_during_a_resync"} -{"Action":"skip","Test":"TestPartialStateJoin/Room_stats_are_correctly_updated_once_state_re-sync_completes"} -{"Action":"fail","Test":"TestPartialStateJoin/State_accepted_incorrectly"} -{"Action":"fail","Test":"TestPartialStateJoin/State_rejected_incorrectly"} -{"Action":"fail","Test":"TestPartialStateJoin/User_directory_is_correctly_updated_once_state_re-sync_completes"} -{"Action":"fail","Test":"TestPartialStateJoin/joined_members_blocks_during_partial_state_join"} {"Action":"fail","Test":"TestPollsLocalPushRules"} {"Action":"fail","Test":"TestPollsLocalPushRules/Polls_push_rules_are_correctly_presented_to_the_client"} {"Action":"pass","Test":"TestPowerLevels"} @@ -561,16 +495,13 @@ 
{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.member_to_myself"} {"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_room_topic_reports_m.room.topic_to_myself"} {"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_state_twice_is_idempotent"} -{"Action":"fail","Test":"TestRoomDeleteAlias"} -{"Action":"fail","Test":"TestRoomDeleteAlias/Parallel"} +{"Action":"pass","Test":"TestRoomDeleteAlias"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel"} {"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Alias_creators_can_delete_alias_with_no_ops"} {"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Alias_creators_can_delete_canonical_alias_with_no_ops"} -{"Action":"fail","Test":"TestRoomDeleteAlias/Parallel/Can_delete_canonical_alias"} {"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Deleting_a_non-existent_alias_should_return_a_404"} {"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Regular_users_can_add_and_delete_aliases_in_the_default_room_configuration"} -{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Regular_users_can_add_and_delete_aliases_when_m.room.aliases_is_restricted"} {"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Users_can't_delete_other's_aliases"} -{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Users_with_sufficient_power-level_can_delete_other's_aliases"} {"Action":"fail","Test":"TestRoomForget"} {"Action":"fail","Test":"TestRoomForget/Parallel"} {"Action":"pass","Test":"TestRoomForget/Parallel/Can't_forget_room_you're_still_in"} @@ -687,7 +618,6 @@ {"Action":"pass","Test":"TestTyping"} {"Action":"pass","Test":"TestTyping/Typing_can_be_explicitly_stopped"} {"Action":"pass","Test":"TestTyping/Typing_notification_sent_to_local_room_members"} -{"Action":"fail","Test":"TestUnbanViaInvite"} {"Action":"fail","Test":"TestUnknownEndpoints"} {"Action":"pass","Test":"TestUnknownEndpoints/Client-server_endpoints"} {"Action":"fail","Test":"TestUnknownEndpoints/Key_endpoints"} From 4518f554081532400bfae64b931cd135dbceb755 Mon Sep 17 00:00:00 2001 From: cy Date: Wed, 12 Mar 2025 20:46:14 -0400 Subject: [PATCH 255/328] guard against using someone else's access token in UIAA --- src/service/uiaa/mod.rs | 10 ++++++++-- tests/test_results/complement/test_results.jsonl | 2 +- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 39dd2b41..7803c736 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -4,7 +4,7 @@ use std::{ }; use conduwuit::{ - Error, Result, err, error, implement, utils, + Err, Error, Result, err, error, implement, utils, utils::{hash, string::EMPTY}, }; use database::{Deserialized, Json, Map}; @@ -150,12 +150,18 @@ pub async fn try_auth( )); }; - let user_id = UserId::parse_with_server_name( + let user_id_from_username = UserId::parse_with_server_name( username.clone(), self.services.globals.server_name(), ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid."))?; + // Check if the access token being used matches the credentials used for UIAA + if user_id.localpart() != user_id_from_username.localpart() { + return Err!(Request(Forbidden("User ID and access token mismatch."))); + } + let user_id = user_id_from_username; + // Check if password is correct if let Ok(hash) = self.services.users.password_hash(&user_id).await { let hash_matches = hash::verify_password(password, &hash).is_ok(); diff --git 
a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 6b5f670e..01d2ca4a 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -127,7 +127,7 @@ {"Action":"fail","Test":"TestDeviceListsUpdateOverFederationOnRoomJoin"} {"Action":"fail","Test":"TestDeviceManagement"} {"Action":"fail","Test":"TestDeviceManagement/DELETE_/device/{deviceId}"} -{"Action":"fail","Test":"TestDeviceManagement/DELETE_/device/{deviceId}_requires_UI_auth_user_to_match_device_owner"} +{"Action":"pass","Test":"TestDeviceManagement/DELETE_/device/{deviceId}_requires_UI_auth_user_to_match_device_owner"} {"Action":"pass","Test":"TestDeviceManagement/GET_/device/{deviceId}"} {"Action":"pass","Test":"TestDeviceManagement/GET_/device/{deviceId}_gives_a_404_for_unknown_devices"} {"Action":"pass","Test":"TestDeviceManagement/GET_/devices"} From 658c19d55eb5fdf30f27e189c414208e2eae6e24 Mon Sep 17 00:00:00 2001 From: cy Date: Fri, 14 Mar 2025 23:01:28 -0400 Subject: [PATCH 256/328] check if we already have a more preferable key backup before adding --- src/api/client/backup.rs | 81 ++++++++++++++++--- .../complement/test_results.jsonl | 8 +- 2 files changed, 76 insertions(+), 13 deletions(-) diff --git a/src/api/client/backup.rs b/src/api/client/backup.rs index 714e3f86..63c47e01 100644 --- a/src/api/client/backup.rs +++ b/src/api/client/backup.rs @@ -1,3 +1,5 @@ +use std::cmp::Ordering; + use axum::extract::State; use conduwuit::{Err, err}; use ruma::{ @@ -232,16 +234,77 @@ pub(crate) async fn add_backup_keys_for_session_route( ))); } - services + // Check if we already have a better key + let mut ok_to_replace = true; + if let Some(old_key) = &services .key_backups - .add_key( - body.sender_user(), - &body.version, - &body.room_id, - &body.session_id, - &body.session_data, - ) - .await?; + .get_session(body.sender_user(), &body.version, &body.room_id, &body.session_id) + .await + .ok() + { + let old_is_verified = old_key + .get_field::("is_verified")? + .unwrap_or_default(); + + let new_is_verified = body + .session_data + .get_field::("is_verified")? + .ok_or_else(|| err!(Request(BadJson("`is_verified` field should exist"))))?; + + // Prefer key that `is_verified` + if old_is_verified != new_is_verified { + if old_is_verified { + ok_to_replace = false; + } + } else { + // If both have same `is_verified`, prefer the one with lower + // `first_message_index` + let old_first_message_index = old_key + .get_field::("first_message_index")? + .unwrap_or(UInt::MAX); + + let new_first_message_index = body + .session_data + .get_field::("first_message_index")? + .ok_or_else(|| { + err!(Request(BadJson("`first_message_index` field should exist"))) + })?; + + ok_to_replace = match new_first_message_index.cmp(&old_first_message_index) { + | Ordering::Less => true, + | Ordering::Greater => false, + | Ordering::Equal => { + // If both have same `first_message_index`, prefer the one with lower + // `forwarded_count` + let old_forwarded_count = old_key + .get_field::("forwarded_count")? + .unwrap_or(UInt::MAX); + + let new_forwarded_count = body + .session_data + .get_field::("forwarded_count")? 
+ .ok_or_else(|| { + err!(Request(BadJson("`forwarded_count` field should exist"))) + })?; + + new_forwarded_count < old_forwarded_count + }, + }; + }; + } + + if ok_to_replace { + services + .key_backups + .add_key( + body.sender_user(), + &body.version, + &body.room_id, + &body.session_id, + &body.session_data, + ) + .await?; + } Ok(add_backup_keys_for_session::v3::Response { count: services diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 01d2ca4a..97170a5c 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -134,10 +134,10 @@ {"Action":"pass","Test":"TestDeviceManagement/PUT_/device/{deviceId}_gives_a_404_for_unknown_devices"} {"Action":"pass","Test":"TestDeviceManagement/PUT_/device/{deviceId}_updates_device_fields"} {"Action":"pass","Test":"TestDisplayNameUpdate"} -{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules"} -{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel"} -{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:false_firstMessageIndex:10_forwardedCount:5}"} -{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:true_firstMessageIndex:10_forwardedCount:5}"} +{"Action":"pass","Test":"TestE2EKeyBackupReplaceRoomKeyRules"} +{"Action":"pass","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel"} +{"Action":"pass","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:false_firstMessageIndex:10_forwardedCount:5}"} +{"Action":"pass","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:true_firstMessageIndex:10_forwardedCount:5}"} {"Action":"pass","Test":"TestEvent"} {"Action":"pass","Test":"TestEvent/Parallel"} {"Action":"pass","Test":"TestEvent/Parallel/Large_Event"} From 7bf92c8a3710eeff229bd86bc81a89daa94b66d5 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 17 Mar 2025 22:50:29 -0400 Subject: [PATCH 257/328] replace unnecessary check when updating device keys Signed-off-by: June Clementine Strawberry --- src/api/client/backup.rs | 2 +- src/api/client/keys.rs | 20 ++++++++++++++++---- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/src/api/client/backup.rs b/src/api/client/backup.rs index 63c47e01..83955fea 100644 --- a/src/api/client/backup.rs +++ b/src/api/client/backup.rs @@ -290,7 +290,7 @@ pub(crate) async fn add_backup_keys_for_session_route( new_forwarded_count < old_forwarded_count }, }; - }; + } } if ok_to_replace { diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 9cd50e85..f50d7afa 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -80,14 +80,26 @@ pub(crate) async fn upload_keys_route( ))); } - // TODO: merge this and the existing event? 
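The replacement policy that the key backup patch above applies in `add_backup_keys_for_session_route` can be read as a three-step ordering: prefer a key from a verified device, then the lower `first_message_index`, then the lower `forwarded_count`. Below is a simplified sketch of those rules under the assumption that all three fields are present (the handler itself falls back to `UInt::MAX` for missing old values); `BackupKeyMeta` and `should_replace` are hypothetical names for illustration, not the handler's types:

```rust
/// Hypothetical mirror of the backup-key replacement rules; the real
/// handler reads these fields out of the uploaded `session_data` JSON.
#[derive(Clone, Copy)]
struct BackupKeyMeta {
    is_verified: bool,
    first_message_index: u64,
    forwarded_count: u64,
}

/// Returns true when `new` should overwrite `old`.
fn should_replace(old: BackupKeyMeta, new: BackupKeyMeta) -> bool {
    if old.is_verified != new.is_verified {
        // Rule 1: a key uploaded from a verified device wins.
        return new.is_verified;
    }
    if old.first_message_index != new.first_message_index {
        // Rule 2: prefer the key that can decrypt from an earlier message.
        return new.first_message_index < old.first_message_index;
    }
    // Rule 3: prefer the key that has been forwarded fewer times.
    new.forwarded_count < old.forwarded_count
}

fn main() {
    let old = BackupKeyMeta { is_verified: false, first_message_index: 10, forwarded_count: 5 };

    let verified = BackupKeyMeta { is_verified: true, ..old };
    assert!(should_replace(old, verified));

    let earlier = BackupKeyMeta { first_message_index: 3, ..old };
    assert!(should_replace(old, earlier));

    let more_forwarded = BackupKeyMeta { forwarded_count: 9, ..old };
    assert!(!should_replace(old, more_forwarded));
}
```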
- // This check is needed to assure that signatures are kept - if services + if let Ok(existing_keys) = services .users .get_device_keys(sender_user, sender_device) .await - .is_err() { + if existing_keys.json().get() == device_keys.json().get() { + debug!( + ?sender_user, + ?sender_device, + ?device_keys, + "Ignoring user uploaded keys as they are an exact copy already in the \ + database" + ); + } else { + services + .users + .add_device_keys(sender_user, sender_device, device_keys) + .await; + } + } else { services .users .add_device_keys(sender_user, sender_device, device_keys) From 33c5afe050491988ee8224af25b9b06e892f4b50 Mon Sep 17 00:00:00 2001 From: cy Date: Wed, 19 Mar 2025 20:55:14 -0400 Subject: [PATCH 258/328] delete pushers created with different access token on password change --- src/api/client/account.rs | 23 ++++++++++++- src/api/client/push.rs | 2 +- src/database/maps.rs | 4 +++ src/service/pusher/mod.rs | 34 +++++++++++++------ .../complement/test_results.jsonl | 4 +-- 5 files changed, 53 insertions(+), 14 deletions(-) diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 32438098..5dd622d7 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -4,7 +4,8 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ Err, Error, PduBuilder, Result, debug_info, err, error, info, is_equal_to, utils, - utils::ReadyExt, warn, + utils::{ReadyExt, stream::BroadbandExt}, + warn, }; use futures::{FutureExt, StreamExt}; use register::RegistrationKind; @@ -627,6 +628,26 @@ pub(crate) async fn change_password_route( .ready_filter(|id| *id != sender_device) .for_each(|id| services.users.remove_device(sender_user, id)) .await; + + // Remove all pushers except the ones associated with this session + services + .pusher + .get_pushkeys(sender_user) + .map(ToOwned::to_owned) + .broad_filter_map(|pushkey| async move { + services + .pusher + .get_pusher_device(&pushkey) + .await + .ok() + .filter(|pusher_device| pusher_device != sender_device) + .is_some() + .then_some(pushkey) + }) + .for_each(|pushkey| async move { + services.pusher.delete_pusher(sender_user, &pushkey).await; + }) + .await; } info!("User {sender_user} changed their password."); diff --git a/src/api/client/push.rs b/src/api/client/push.rs index 384b9dbc..cc1d3be2 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -503,7 +503,7 @@ pub(crate) async fn set_pushers_route( services .pusher - .set_pusher(sender_user, &body.action) + .set_pusher(sender_user, body.sender_device(), &body.action) .await?; Ok(set_pusher::v3::Response::new()) diff --git a/src/database/maps.rs b/src/database/maps.rs index 138bb038..1da9acc0 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -219,6 +219,10 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "senderkey_pusher", ..descriptor::RANDOM_SMALL }, + Descriptor { + name: "pushkey_deviceid", + ..descriptor::RANDOM_SMALL + }, Descriptor { name: "server_signingkeys", ..descriptor::RANDOM diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 2b269b3d..27490fb8 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -10,7 +10,7 @@ use database::{Deserialized, Ignore, Interfix, Json, Map}; use futures::{Stream, StreamExt}; use ipaddress::IPAddress; use ruma::{ - RoomId, UInt, UserId, + DeviceId, OwnedDeviceId, RoomId, UInt, UserId, api::{ IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, client::push::{Pusher, PusherKind, set_pusher}, @@ -48,6 +48,7 @@ struct 
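// A note on the `upload_keys_route` change above (a reading of the diff, not
// an authoritative rationale): rather than "insert only when no keys exist",
// the handler now skips the write only when the uploaded `device_keys` match
// the stored raw JSON exactly, and otherwise overwrites. Repeated identical
// uploads therefore cannot clobber signatures attached to the stored copy
// later, while genuinely changed keys are still persisted.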
Services { struct Data { senderkey_pusher: Arc, + pushkey_deviceid: Arc, } impl crate::Service for Service { @@ -55,6 +56,7 @@ impl crate::Service for Service { Ok(Arc::new(Self { db: Data { senderkey_pusher: args.db["senderkey_pusher"].clone(), + pushkey_deviceid: args.db["pushkey_deviceid"].clone(), }, services: Services { globals: args.depend::("globals"), @@ -75,6 +77,7 @@ impl Service { pub async fn set_pusher( &self, sender: &UserId, + sender_device: &DeviceId, pusher: &set_pusher::v3::PusherAction, ) -> Result { match pusher { @@ -123,24 +126,35 @@ impl Service { } } - let key = (sender, data.pusher.ids.pushkey.as_str()); + let pushkey = data.pusher.ids.pushkey.as_str(); + let key = (sender, pushkey); self.db.senderkey_pusher.put(key, Json(pusher)); + self.db.pushkey_deviceid.insert(pushkey, sender_device); }, | set_pusher::v3::PusherAction::Delete(ids) => { - let key = (sender, ids.pushkey.as_str()); - self.db.senderkey_pusher.del(key); - - self.services - .sending - .cleanup_events(None, Some(sender), Some(ids.pushkey.as_str())) - .await - .ok(); + self.delete_pusher(sender, ids.pushkey.as_str()).await; }, } Ok(()) } + pub async fn delete_pusher(&self, sender: &UserId, pushkey: &str) { + let key = (sender, pushkey); + self.db.senderkey_pusher.del(key); + self.db.pushkey_deviceid.remove(pushkey); + + self.services + .sending + .cleanup_events(None, Some(sender), Some(pushkey)) + .await + .ok(); + } + + pub async fn get_pusher_device(&self, pushkey: &str) -> Result { + self.db.pushkey_deviceid.get(pushkey).await.deserialized() + } + pub async fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result { let senderkey = (sender, pushkey); self.db diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 97170a5c..ac2733f8 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -69,8 +69,8 @@ {"Action":"pass","Test":"TestChangePassword/After_changing_password,_can_log_in_with_new_password"} {"Action":"pass","Test":"TestChangePassword/After_changing_password,_different_sessions_can_optionally_be_kept"} {"Action":"pass","Test":"TestChangePassword/After_changing_password,_existing_session_still_works"} -{"Action":"fail","Test":"TestChangePasswordPushers"} -{"Action":"fail","Test":"TestChangePasswordPushers/Pushers_created_with_a_different_access_token_are_deleted_on_password_change"} +{"Action":"pass","Test":"TestChangePasswordPushers"} +{"Action":"pass","Test":"TestChangePasswordPushers/Pushers_created_with_a_different_access_token_are_deleted_on_password_change"} {"Action":"pass","Test":"TestChangePasswordPushers/Pushers_created_with_the_same_access_token_are_not_deleted_on_password_change"} {"Action":"fail","Test":"TestClientSpacesSummary"} {"Action":"fail","Test":"TestClientSpacesSummary/max_depth"} From 07ec9d6d852a8ebb623c96b580af36e0d0d11697 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 26 Mar 2025 01:32:45 +0000 Subject: [PATCH 259/328] re-sort pushkey_deviceid (33c5afe050) Signed-off-by: Jason Volk --- src/database/maps.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/database/maps.rs b/src/database/maps.rs index 1da9acc0..311c629f 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -121,14 +121,18 @@ pub(super) static MAPS: &[Descriptor] = &[ index_size: 512, ..descriptor::SEQUENTIAL }, - Descriptor { - name: "presenceid_presence", - ..descriptor::SEQUENTIAL_SMALL - }, Descriptor { name: 
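// A sketch of the pusher data flow introduced above (names as in the diff,
// control flow paraphrased):
//
//   set_pusher(user, device, Post)  =>  senderkey_pusher[(user, pushkey)] = pusher
//                                       pushkey_deviceid[pushkey]         = device
//   password change                 =>  for each pushkey of user:
//                                           if get_pusher_device(pushkey) != current device {
//                                               delete_pusher(user, pushkey)
//                                           }
//
// so pushers created by other sessions are dropped while the current session's
// pusher survives, matching the Complement TestChangePasswordPushers results
// flipped to pass in this patch.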
"publicroomids", ..descriptor::RANDOM_SMALL }, + Descriptor { + name: "pushkey_deviceid", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "presenceid_presence", + ..descriptor::SEQUENTIAL_SMALL + }, Descriptor { name: "readreceiptid_readreceipt", ..descriptor::RANDOM @@ -219,10 +223,6 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "senderkey_pusher", ..descriptor::RANDOM_SMALL }, - Descriptor { - name: "pushkey_deviceid", - ..descriptor::RANDOM_SMALL - }, Descriptor { name: "server_signingkeys", ..descriptor::RANDOM From aa4d2e236330693c61d5cb116b4c438b15431aec Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 9 Mar 2025 03:14:00 +0000 Subject: [PATCH 260/328] fix unused import without feature jemalloc_conf fix span passed by value Signed-off-by: Jason Volk --- src/core/alloc/je.rs | 3 +-- src/router/request.rs | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 6870c1c0..51caf3a3 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -8,7 +8,6 @@ use std::{ }; use arrayvec::ArrayVec; -use const_str::concat_bytes; use tikv_jemalloc_ctl as mallctl; use tikv_jemalloc_sys as ffi; use tikv_jemallocator as jemalloc; @@ -20,7 +19,7 @@ use crate::{ #[cfg(feature = "jemalloc_conf")] #[unsafe(no_mangle)] -pub static malloc_conf: &[u8] = concat_bytes!( +pub static malloc_conf: &[u8] = const_str::concat_bytes!( "lg_extent_max_active_fit:4", ",oversize_threshold:16777216", ",tcache_max:2097152", diff --git a/src/router/request.rs b/src/router/request.rs index 00769b3f..dba90324 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -37,7 +37,7 @@ pub(crate) async fn handle( let parent = Span::current(); let task = services.server.runtime().spawn(async move { tokio::select! { - response = execute(&services_, req, next, parent) => response, + response = execute(&services_, req, next, &parent) => response, response = services_.server.until_shutdown() .then(|()| { let timeout = services_.server.config.client_shutdown_timeout; @@ -79,7 +79,7 @@ async fn execute( services: &Arc, req: http::Request, next: axum::middleware::Next, - parent: Span, + parent: &Span, ) -> Response { #[cfg(debug_assertions)] conduwuit::defer! 
{{ From 7294368015025ae4d7677c28837d3ac0a79539e6 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 12 Mar 2025 23:10:38 +0000 Subject: [PATCH 261/328] parallelize IO for PublicRoomsChunk vector Signed-off-by: Jason Volk --- src/api/client/directory.rs | 118 +++++++++++++++++++----------------- 1 file changed, 64 insertions(+), 54 deletions(-) diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 7ce32e4c..80b314b9 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -1,7 +1,17 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, Error, Result, info, warn}; -use futures::{StreamExt, TryFutureExt}; +use conduwuit::{ + Err, Error, Result, info, + utils::{ + TryFutureExtExt, + stream::{ReadyExt, WidebandExt}, + }, + warn, +}; +use futures::{ + FutureExt, StreamExt, TryFutureExt, + future::{join, join4, join5}, +}; use ruma::{ OwnedRoomId, RoomId, ServerName, UInt, UserId, api::{ @@ -287,8 +297,8 @@ pub(crate) async fn get_public_rooms_filtered_helper( .directory .public_rooms() .map(ToOwned::to_owned) - .then(|room_id| public_rooms_chunk(services, room_id)) - .filter_map(|chunk| async move { + .wide_then(|room_id| public_rooms_chunk(services, room_id)) + .ready_filter_map(|chunk| { if !filter.room_types.is_empty() && !filter.room_types.contains(&RoomTypeFilter::from(chunk.room_type.clone())) { return None; } @@ -394,60 +404,60 @@ async fn user_can_publish_room( } async fn public_rooms_chunk(services: &Services, room_id: OwnedRoomId) -> PublicRoomsChunk { + let name = services.rooms.state_accessor.get_name(&room_id).ok(); + + let room_type = services.rooms.state_accessor.get_room_type(&room_id).ok(); + + let canonical_alias = services + .rooms + .state_accessor + .get_canonical_alias(&room_id) + .ok(); + + let avatar_url = services.rooms.state_accessor.get_avatar(&room_id); + + let topic = services.rooms.state_accessor.get_room_topic(&room_id).ok(); + + let world_readable = services.rooms.state_accessor.is_world_readable(&room_id); + + let join_rule = services + .rooms + .state_accessor + .room_state_get_content(&room_id, &StateEventType::RoomJoinRules, "") + .map_ok(|c: RoomJoinRulesEventContent| match c.join_rule { + | JoinRule::Public => PublicRoomJoinRule::Public, + | JoinRule::Knock => "knock".into(), + | JoinRule::KnockRestricted(_) => "knock_restricted".into(), + | _ => "invite".into(), + }); + + let guest_can_join = services.rooms.state_accessor.guest_can_join(&room_id); + + let num_joined_members = services.rooms.state_cache.room_joined_count(&room_id); + + let ( + (avatar_url, canonical_alias, guest_can_join, join_rule, name), + (num_joined_members, room_type, topic, world_readable), + ) = join( + join5(avatar_url, canonical_alias, guest_can_join, join_rule, name), + join4(num_joined_members, room_type, topic, world_readable), + ) + .boxed() + .await; + PublicRoomsChunk { - canonical_alias: services - .rooms - .state_accessor - .get_canonical_alias(&room_id) - .await - .ok(), - name: services.rooms.state_accessor.get_name(&room_id).await.ok(), - num_joined_members: services - .rooms - .state_cache - .room_joined_count(&room_id) - .await + avatar_url: avatar_url.into_option().unwrap_or_default().url, + canonical_alias, + guest_can_join, + join_rule: join_rule.unwrap_or_default(), + name, + num_joined_members: num_joined_members .unwrap_or(0) .try_into() .expect("joined count overflows ruma UInt"), - topic: services - .rooms - .state_accessor - .get_room_topic(&room_id) - .await - .ok(), - 
world_readable: services - .rooms - .state_accessor - .is_world_readable(&room_id) - .await, - guest_can_join: services.rooms.state_accessor.guest_can_join(&room_id).await, - avatar_url: services - .rooms - .state_accessor - .get_avatar(&room_id) - .await - .into_option() - .unwrap_or_default() - .url, - join_rule: services - .rooms - .state_accessor - .room_state_get_content(&room_id, &StateEventType::RoomJoinRules, "") - .map_ok(|c: RoomJoinRulesEventContent| match c.join_rule { - | JoinRule::Public => PublicRoomJoinRule::Public, - | JoinRule::Knock => "knock".into(), - | JoinRule::KnockRestricted(_) => "knock_restricted".into(), - | _ => "invite".into(), - }) - .await - .unwrap_or_default(), - room_type: services - .rooms - .state_accessor - .get_room_type(&room_id) - .await - .ok(), room_id, + room_type, + topic, + world_readable, } } From a57336ec1388ab26a692cf26768474bc3069df75 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 14 Mar 2025 06:54:08 +0000 Subject: [PATCH 262/328] assume canonical order in db serialization test Signed-off-by: Jason Volk --- src/database/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database/tests.rs b/src/database/tests.rs index 140bc56d..1446a1fc 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -152,8 +152,8 @@ fn ser_json_macro() { let content = serde_json::to_value(content).expect("failed to serialize content"); let sender: &UserId = "@foo:example.com".try_into().unwrap(); let serialized = serialize_to_vec(Json(json!({ - "sender": sender, "content": content, + "sender": sender, }))) .expect("failed to serialize value"); From 17003ba773228055de107f9d8baf1b2848d86c1f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 15 Mar 2025 01:23:54 +0000 Subject: [PATCH 263/328] add FIFO compaction for persistent-cache descriptor; comments/cleanup Signed-off-by: Jason Volk --- src/database/engine/cf_opts.rs | 14 ++++++++--- src/database/engine/descriptor.rs | 39 ++++++++++++++++++++++++------- src/database/engine/open.rs | 6 ++--- src/database/maps.rs | 4 ++-- 4 files changed, 46 insertions(+), 17 deletions(-) diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index 5ddb9473..7ceec722 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -1,8 +1,8 @@ use conduwuit::{Config, Result, err, utils::math::Expected}; use rocksdb::{ BlockBasedIndexType, BlockBasedOptions, BlockBasedPinningTier, Cache, - DBCompressionType as CompressionType, DataBlockIndexType, LruCacheOptions, Options, - UniversalCompactOptions, UniversalCompactionStopStyle, + DBCompressionType as CompressionType, DataBlockIndexType, FifoCompactOptions, + LruCacheOptions, Options, UniversalCompactOptions, UniversalCompactionStopStyle, }; use super::descriptor::{CacheDisp, Descriptor}; @@ -16,7 +16,7 @@ pub(super) const SENTINEL_COMPRESSION_LEVEL: i32 = 32767; pub(crate) fn cf_options(ctx: &Context, opts: Options, desc: &Descriptor) -> Result { let cache = get_cache(ctx, desc); let config = &ctx.server.config; - descriptor_cf_options(opts, desc.clone(), config, cache.as_ref()) + descriptor_cf_options(opts, *desc, config, cache.as_ref()) } fn descriptor_cf_options( @@ -46,6 +46,7 @@ fn descriptor_cf_options( opts.set_compaction_style(desc.compaction); opts.set_compaction_pri(desc.compaction_pri); opts.set_universal_compaction_options(&uc_options(&desc)); + opts.set_fifo_compaction_options(&fifo_options(&desc)); let compression_shape: Vec<_> = desc .compression_shape @@ -142,6 +143,13 @@ fn 
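// A sketch of the concurrency pattern used in `public_rooms_chunk` above (the
// helper below is illustrative, not project code): the per-room state lookups
// are started together and awaited as one group instead of one after another,
// e.g. with `futures::future::join`:
//
//   use futures::future::join;
//
//   async fn chunk_fields() -> (Option<String>, u64) {
//       let name = async { Some("room".to_owned()) }; // placeholder futures
//       let joined = async { 42_u64 };
//       join(name, joined).await
//   }
//
// `join4`/`join5` extend the same idea to more futures, and the outer stream
// switches to `wide_then` so several rooms are resolved concurrently as well.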
set_compression(desc: &mut Descriptor, config: &Config) { } } +fn fifo_options(desc: &Descriptor) -> FifoCompactOptions { + let mut opts = FifoCompactOptions::default(); + opts.set_max_table_files_size(desc.limit_size); + + opts +} + fn uc_options(desc: &Descriptor) -> UniversalCompactOptions { let mut opts = UniversalCompactOptions::default(); opts.set_stop_style(UniversalCompactionStopStyle::Total); diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index 816555d2..2274da9c 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -6,14 +6,8 @@ use rocksdb::{ use super::cf_opts::SENTINEL_COMPRESSION_LEVEL; +/// Column Descriptor #[derive(Debug, Clone, Copy)] -pub(crate) enum CacheDisp { - Unique, - Shared, - SharedWith(&'static str), -} - -#[derive(Debug, Clone)] pub(crate) struct Descriptor { pub(crate) name: &'static str, pub(crate) dropped: bool, @@ -30,6 +24,7 @@ pub(crate) struct Descriptor { pub(crate) file_shape: i32, pub(crate) level0_width: i32, pub(crate) merge_width: (i32, i32), + pub(crate) limit_size: u64, pub(crate) ttl: u64, pub(crate) compaction: CompactionStyle, pub(crate) compaction_pri: CompactionPri, @@ -46,7 +41,16 @@ pub(crate) struct Descriptor { pub(crate) auto_readahead_max: usize, } -pub(crate) static BASE: Descriptor = Descriptor { +/// Cache Disposition +#[derive(Debug, Clone, Copy)] +pub(crate) enum CacheDisp { + Unique, + Shared, + SharedWith(&'static str), +} + +/// Base descriptor supplying common defaults to all derived descriptors. +static BASE: Descriptor = Descriptor { name: EMPTY, dropped: false, cache_disp: CacheDisp::Shared, @@ -62,6 +66,7 @@ pub(crate) static BASE: Descriptor = Descriptor { file_shape: 2, level0_width: 2, merge_width: (2, 16), + limit_size: 0, ttl: 60 * 60 * 24 * 21, compaction: CompactionStyle::Level, compaction_pri: CompactionPri::MinOverlappingRatio, @@ -78,6 +83,10 @@ pub(crate) static BASE: Descriptor = Descriptor { auto_readahead_max: 1024 * 1024 * 2, }; +/// Tombstone descriptor for columns which have been or will be deleted. +pub(crate) static DROPPED: Descriptor = Descriptor { dropped: true, ..BASE }; + +/// Descriptor for large datasets with random updates across the keyspace. pub(crate) static RANDOM: Descriptor = Descriptor { compaction_pri: CompactionPri::OldestSmallestSeqFirst, write_size: 1024 * 1024 * 32, @@ -88,6 +97,7 @@ pub(crate) static RANDOM: Descriptor = Descriptor { ..BASE }; +/// Descriptor for large datasets with updates to the end of the keyspace. pub(crate) static SEQUENTIAL: Descriptor = Descriptor { compaction_pri: CompactionPri::OldestLargestSeqFirst, write_size: 1024 * 1024 * 64, @@ -101,6 +111,7 @@ pub(crate) static SEQUENTIAL: Descriptor = Descriptor { ..BASE }; +/// Descriptor for small datasets with random updates across the keyspace. pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { compaction: CompactionStyle::Universal, write_size: 1024 * 1024 * 16, @@ -117,6 +128,7 @@ pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { ..RANDOM }; +/// Descriptor for small datasets with updates to the end of the keyspace. pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { compaction: CompactionStyle::Universal, write_size: 1024 * 1024 * 16, @@ -132,3 +144,14 @@ pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { compressed_index: false, ..SEQUENTIAL }; + +/// Descriptor for small persistent caches with random updates. Oldest entries +/// are deleted after limit_size reached. 
+pub(crate) static RANDOM_SMALL_CACHE: Descriptor = Descriptor { + compaction: CompactionStyle::Fifo, + cache_disp: CacheDisp::Unique, + limit_size: 1024 * 1024 * 64, + ttl: 60 * 60 * 24 * 14, + file_shape: 2, + ..RANDOM_SMALL +}; diff --git a/src/database/engine/open.rs b/src/database/engine/open.rs index 24010c3a..84e59a6a 100644 --- a/src/database/engine/open.rs +++ b/src/database/engine/open.rs @@ -101,13 +101,11 @@ fn configure_cfds( debug!("Creating new column {name:?} not previously found in existing database."); }); - let missing_descriptors = missing - .clone() - .map(|_| Descriptor { dropped: true, ..descriptor::BASE }); + let missing_descriptors = missing.clone().map(|_| descriptor::DROPPED); let cfopts: Vec<_> = desc .iter() - .cloned() + .copied() .chain(missing_descriptors) .map(|ref desc| cf_options(ctx, db_opts.clone(), desc)) .collect::>()?; diff --git a/src/database/maps.rs b/src/database/maps.rs index 311c629f..19f9ced4 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -233,7 +233,7 @@ pub(super) static MAPS: &[Descriptor] = &[ }, Descriptor { name: "servername_destination", - ..descriptor::RANDOM_SMALL + ..descriptor::RANDOM_SMALL_CACHE }, Descriptor { name: "servername_educount", @@ -241,7 +241,7 @@ pub(super) static MAPS: &[Descriptor] = &[ }, Descriptor { name: "servername_override", - ..descriptor::RANDOM_SMALL + ..descriptor::RANDOM_SMALL_CACHE }, Descriptor { name: "servernameevent_data", From d8ea8b378cf2ee9ff7644fdb6c5a33d05923a51d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 15 Mar 2025 02:35:10 +0000 Subject: [PATCH 264/328] add Map::clear() to db interface Signed-off-by: Jason Volk --- src/database/map.rs | 1 + src/database/map/clear.rs | 30 ++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) create mode 100644 src/database/map/clear.rs diff --git a/src/database/map.rs b/src/database/map.rs index c5a908ba..ed38e1fc 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -1,3 +1,4 @@ +mod clear; pub mod compact; mod contains; mod count; diff --git a/src/database/map/clear.rs b/src/database/map/clear.rs new file mode 100644 index 00000000..321ec79c --- /dev/null +++ b/src/database/map/clear.rs @@ -0,0 +1,30 @@ +use std::sync::Arc; + +use conduwuit::{ + Result, implement, + utils::stream::{ReadyExt, TryIgnore}, +}; +use futures::{Stream, TryStreamExt}; + +use crate::keyval::Key; + +/// Delete all data stored in this map. !!! USE WITH CAUTION !!! +/// +/// See for_clear() with additional details. +#[implement(super::Map)] +#[tracing::instrument(level = "trace")] +pub async fn clear(self: &Arc) { + self.for_clear().ignore_err().ready_for_each(|_| ()).await; +} + +/// Delete all data stored in this map. !!! USE WITH CAUTION !!! +/// +/// Provides stream of keys undergoing deletion along with any errors. +/// +/// Note this operation applies to a snapshot of the data when invoked. +/// Additional data written during or after this call may be missed. 
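// Background on the descriptor above, stated from general RocksDB behaviour
// rather than anything specific to this codebase: FIFO compaction treats the
// column as a bounded log, and once the total SST size exceeds `limit_size`
// (wired into `set_max_table_files_size`), the oldest files are dropped
// wholesale. That suits throwaway lookup caches such as the resolver's
// `servername_destination` and `servername_override` columns, which this
// patch moves onto `RANDOM_SMALL_CACHE`.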
+#[implement(super::Map)] +#[tracing::instrument(level = "trace")] +pub fn for_clear(self: &Arc) -> impl Stream>> + Send { + self.raw_keys().inspect_ok(|key| self.remove(key)) +} From 9ce95a703038e8603da62f15516f205ca70ad962 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 15 Mar 2025 04:07:53 +0000 Subject: [PATCH 265/328] make service memory_usage()/clear_cache() async trait Signed-off-by: Jason Volk --- src/service/globals/mod.rs | 6 ++- src/service/rooms/event_handler/mod.rs | 4 +- src/service/rooms/spaces/mod.rs | 14 +++++- src/service/rooms/state/mod.rs | 4 +- src/service/rooms/state_accessor/mod.rs | 6 ++- src/service/rooms/state_compressor/mod.rs | 6 ++- src/service/rooms/timeline/mod.rs | 4 +- src/service/service.rs | 4 +- src/service/services.rs | 57 ++++++++++------------- 9 files changed, 61 insertions(+), 44 deletions(-) diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 74f83228..1dd7db8e 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -7,6 +7,7 @@ use std::{ time::Instant, }; +use async_trait::async_trait; use conduwuit::{Result, Server, error, utils::bytes::pretty}; use data::Data; use regex::RegexSet; @@ -27,6 +28,7 @@ pub struct Service { type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let db = Data::new(&args); @@ -73,7 +75,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { let (ber_count, ber_bytes) = self.bad_event_ratelimiter.read()?.iter().fold( (0_usize, 0_usize), |(mut count, mut bytes), (event_id, _)| { @@ -89,7 +91,7 @@ impl crate::Service for Service { Ok(()) } - fn clear_cache(&self) { + async fn clear_cache(&self) { self.bad_event_ratelimiter .write() .expect("locked for writing") diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index e9e79ce4..4944f3ec 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -17,6 +17,7 @@ use std::{ time::Instant, }; +use async_trait::async_trait; use conduwuit::{ Err, PduEvent, Result, RoomVersion, Server, utils::{MutexMap, TryFutureExtExt}, @@ -54,6 +55,7 @@ struct Services { type RoomMutexMap = MutexMap; type HandleTimeMap = HashMap; +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { @@ -79,7 +81,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result<()> { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { let mutex_federation = self.mutex_federation.len(); writeln!(out, "federation_mutex: {mutex_federation}")?; diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 1da38234..55897f9c 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -2,8 +2,9 @@ mod pagination_token; #[cfg(test)] mod tests; -use std::sync::Arc; +use std::{fmt::Write, sync::Arc}; +use async_trait::async_trait; use conduwuit::{ Err, Error, Result, implement, utils::{ @@ -70,6 +71,7 @@ pub enum Identifier<'a> { type Cache = LruCache>; +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let config = &args.server.config; @@ -90,6 +92,16 @@ impl crate::Service for Service { })) } + async fn memory_usage(&self, out: &mut (dyn 
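// An inference about the new trait signature here, not a quoted rationale:
// with `memory_usage()` as an async trait method the writer is captured by a
// boxed `Send` future and held across `.await` points, so the parameter
// tightens from `&mut dyn Write` to `&mut (dyn Write + Send)`; `clear_cache()`
// becomes async for the same reason, so implementations can await async locks
// (as the spaces cache does) instead of blocking.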
Write + Send)) -> Result { + let roomid_spacehierarchy_cache = self.roomid_spacehierarchy_cache.lock().await.len(); + + writeln!(out, "roomid_spacehierarchy_cache: {roomid_spacehierarchy_cache}")?; + + Ok(()) + } + + async fn clear_cache(&self) { self.roomid_spacehierarchy_cache.lock().await.clear(); } + fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 8683a3be..56955497 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,5 +1,6 @@ use std::{collections::HashMap, fmt::Write, iter::once, sync::Arc}; +use async_trait::async_trait; use conduwuit::{ PduEvent, Result, err, result::FlatOk, @@ -56,6 +57,7 @@ struct Data { type RoomMutexMap = MutexMap; pub type RoomMutexGuard = MutexMapGuard; +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { @@ -79,7 +81,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { let mutex = self.mutex.len(); writeln!(out, "state_mutex: {mutex}")?; diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 7004e35a..652fdbd7 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -8,6 +8,7 @@ use std::{ sync::{Arc, Mutex as StdMutex, Mutex}, }; +use async_trait::async_trait; use conduwuit::{ Result, err, utils, utils::math::{Expected, usize_from_f64}, @@ -57,6 +58,7 @@ struct Data { shorteventid_shortstatehash: Arc, } +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let config = &args.server.config; @@ -86,7 +88,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { use utils::bytes::pretty; let (svc_count, svc_bytes) = self.server_visibility_cache.lock()?.iter().fold( @@ -119,7 +121,7 @@ impl crate::Service for Service { Ok(()) } - fn clear_cache(&self) { + async fn clear_cache(&self) { self.server_visibility_cache.lock().expect("locked").clear(); self.user_visibility_cache.lock().expect("locked").clear(); } diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 305d3187..56a91d0e 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -5,6 +5,7 @@ use std::{ sync::{Arc, Mutex}, }; +use async_trait::async_trait; use conduwuit::{ Result, arrayvec::ArrayVec, @@ -65,6 +66,7 @@ type ParentStatesVec = Vec; pub type CompressedState = BTreeSet; pub type CompressedStateEvent = [u8; 2 * size_of::()]; +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let config = &args.server.config; @@ -82,7 +84,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { let (cache_len, ents) = { let cache = self.stateinfo_cache.lock().expect("locked"); let ents = cache.iter().map(at!(1)).flat_map(|vec| vec.iter()).fold( @@ -108,7 +110,7 @@ impl crate::Service for Service { Ok(()) } - fn clear_cache(&self) { self.stateinfo_cache.lock().expect("locked").clear(); } + async fn clear_cache(&self) { self.stateinfo_cache.lock().expect("locked").clear(); } fn name(&self) 
-> &str { crate::service::make_name(std::module_path!()) } } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 826a1dae..dc359d22 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -9,6 +9,7 @@ use std::{ sync::Arc, }; +use async_trait::async_trait; use conduwuit::{ Err, Error, Result, Server, at, debug, debug_warn, err, error, implement, info, pdu::{EventHash, PduBuilder, PduCount, PduEvent, gen_event_id}, @@ -109,6 +110,7 @@ struct Services { type RoomMutexMap = MutexMap; pub type RoomMutexGuard = MutexMapGuard; +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { @@ -142,7 +144,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result<()> { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { let mutex_insert = self.mutex_insert.len(); writeln!(out, "insert_mutex: {mutex_insert}")?; diff --git a/src/service/service.rs b/src/service/service.rs index 2907a562..574efd8f 100644 --- a/src/service/service.rs +++ b/src/service/service.rs @@ -31,10 +31,10 @@ pub(crate) trait Service: Any + Send + Sync { fn interrupt(&self) {} /// Clear any caches or similar runtime state. - fn clear_cache(&self) {} + async fn clear_cache(&self) {} /// Memory usage report in a markdown string. - fn memory_usage(&self, _out: &mut dyn Write) -> Result<()> { Ok(()) } + async fn memory_usage(&self, _out: &mut (dyn Write + Send)) -> Result { Ok(()) } /// Return the name of the service. /// i.e. `crate::service::make_name(std::module_path!())` diff --git a/src/service/services.rs b/src/service/services.rs index 269a1f87..dc390054 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -1,12 +1,12 @@ use std::{ any::Any, collections::BTreeMap, - fmt::Write, sync::{Arc, RwLock}, }; -use conduwuit::{Result, Server, debug, debug_info, info, trace}; +use conduwuit::{Result, Server, debug, debug_info, info, trace, utils::stream::IterStream}; use database::Database; +use futures::{Stream, StreamExt, TryStreamExt}; use tokio::sync::Mutex; use crate::{ @@ -171,40 +171,21 @@ impl Services { } pub async fn clear_cache(&self) { - for (service, ..) in self.service.read().expect("locked for reading").values() { - if let Some(service) = service.upgrade() { - service.clear_cache(); - } - } - - //TODO - self.rooms - .spaces - .roomid_spacehierarchy_cache - .lock() - .await - .clear(); + self.services() + .for_each(|service| async move { + service.clear_cache().await; + }) + .await; } pub async fn memory_usage(&self) -> Result { - let mut out = String::new(); - for (service, ..) 
in self.service.read().expect("locked for reading").values() { - if let Some(service) = service.upgrade() { - service.memory_usage(&mut out)?; - } - } - - //TODO - let roomid_spacehierarchy_cache = self - .rooms - .spaces - .roomid_spacehierarchy_cache - .lock() + self.services() + .map(Ok) + .try_fold(String::new(), |mut out, service| async move { + service.memory_usage(&mut out).await?; + Ok(out) + }) .await - .len(); - writeln!(out, "roomid_spacehierarchy_cache: {roomid_spacehierarchy_cache}")?; - - Ok(out) } fn interrupt(&self) { @@ -217,6 +198,18 @@ impl Services { } } + /// Iterate from snapshot of the services map + fn services(&self) -> impl Stream> + Send { + self.service + .read() + .expect("locked for reading") + .values() + .filter_map(|val| val.0.upgrade()) + .collect::>() + .into_iter() + .stream() + } + #[inline] pub fn try_get(&self, name: &str) -> Result> where From 8010505853c1c0a78254b0fd31e83d90baff7af3 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 15 Mar 2025 04:08:57 +0000 Subject: [PATCH 266/328] implement clear_cache() for resolver service Signed-off-by: Jason Volk --- src/service/resolver/cache.rs | 17 ++++++++++++++++- src/service/resolver/dns.rs | 4 ++++ src/service/resolver/mod.rs | 7 +++++++ 3 files changed, 27 insertions(+), 1 deletion(-) diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index 6b05c00c..cfea7187 100644 --- a/src/service/resolver/cache.rs +++ b/src/service/resolver/cache.rs @@ -7,7 +7,7 @@ use conduwuit::{ utils::{math::Expected, rand, stream::TryIgnore}, }; use database::{Cbor, Deserialized, Map}; -use futures::{Stream, StreamExt}; +use futures::{Stream, StreamExt, future::join}; use ruma::ServerName; use serde::{Deserialize, Serialize}; @@ -45,6 +45,21 @@ impl Cache { } } +#[implement(Cache)] +pub async fn clear(&self) { join(self.clear_destinations(), self.clear_overrides()).await; } + +#[implement(Cache)] +pub async fn clear_destinations(&self) { self.destinations.clear().await; } + +#[implement(Cache)] +pub async fn clear_overrides(&self) { self.overrides.clear().await; } + +#[implement(Cache)] +pub fn del_destination(&self, name: &ServerName) { self.destinations.remove(name); } + +#[implement(Cache)] +pub fn del_override(&self, name: &ServerName) { self.overrides.remove(name); } + #[implement(Cache)] pub fn set_destination(&self, name: &ServerName, dest: &CachedDest) { self.destinations.raw_put(name, Cbor(dest)); diff --git a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs index 98ad7e60..e4245a5b 100644 --- a/src/service/resolver/dns.rs +++ b/src/service/resolver/dns.rs @@ -78,6 +78,10 @@ impl Resolver { server: server.clone(), })) } + + /// Clear the in-memory hickory-dns caches + #[inline] + pub fn clear_cache(&self) { self.resolver.clear_cache(); } } impl Resolve for Resolver { diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs index 2ec9c0ef..246d6bc1 100644 --- a/src/service/resolver/mod.rs +++ b/src/service/resolver/mod.rs @@ -6,6 +6,7 @@ mod tests; use std::sync::Arc; +use async_trait::async_trait; use conduwuit::{Result, Server, arrayvec::ArrayString, utils::MutexMap}; use self::{cache::Cache, dns::Resolver}; @@ -26,6 +27,7 @@ struct Services { type Resolving = MutexMap; type NameBuf = ArrayString<256>; +#[async_trait] impl crate::Service for Service { #[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)] fn build(args: crate::Args<'_>) -> Result> { @@ -41,5 +43,10 @@ impl crate::Service for Service { })) } + async fn 
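// A sketch of the aggregation pattern adopted above (paraphrasing the diff,
// not new API): the hand-written per-service special cases are replaced by a
// snapshot stream over the registry, roughly
//
//   services()                                   // upgraded Arcs, snapshotted
//       .map(Ok)
//       .try_fold(String::new(), |mut out, s| async move {
//           s.memory_usage(&mut out).await?;     // each service appends a report
//           Ok(out)
//       })
//       .await
//
// and `clear_cache()` drives the same stream with a plain `for_each`.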
clear_cache(&self) { + self.resolver.clear_cache(); + self.cache.clear().await; + } + fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } From 23e3f6526fd0318525a4cd1fe065dcf7f1d56935 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 19 Mar 2025 03:49:12 +0000 Subject: [PATCH 267/328] split well_known resolver into unit Signed-off-by: Jason Volk --- src/service/resolver/actual.rs | 55 +++--------------------------- src/service/resolver/mod.rs | 2 ++ src/service/resolver/tests.rs | 2 -- src/service/resolver/well_known.rs | 49 ++++++++++++++++++++++++++ 4 files changed, 55 insertions(+), 53 deletions(-) create mode 100644 src/service/resolver/well_known.rs diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index b037cf77..1ad76f66 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -3,7 +3,7 @@ use std::{ net::{IpAddr, SocketAddr}, }; -use conduwuit::{Err, Result, debug, debug_error, debug_info, debug_warn, err, error, trace}; +use conduwuit::{Err, Result, debug, debug_info, err, error, trace}; use futures::{FutureExt, TryFutureExt}; use hickory_resolver::error::ResolveError; use ipaddress::IPAddress; @@ -72,6 +72,9 @@ impl super::Service { if let Some(pos) = dest.as_str().find(':') { self.actual_dest_2(dest, cache, pos).await? } else { + self.conditional_query_and_cache(dest.as_str(), 8448, true) + .await?; + self.services.server.check_running()?; match self.request_well_known(dest.as_str()).await? { | Some(delegated) => self.actual_dest_3(&mut host, cache, delegated).await?, @@ -243,56 +246,6 @@ impl super::Service { Ok(add_port_to_hostname(dest.as_str())) } - #[tracing::instrument(name = "well-known", level = "debug", skip(self, dest))] - async fn request_well_known(&self, dest: &str) -> Result> { - self.conditional_query_and_cache(dest, 8448, true).await?; - - self.services.server.check_running()?; - trace!("Requesting well known for {dest}"); - let response = self - .services - .client - .well_known - .get(format!("https://{dest}/.well-known/matrix/server")) - .send() - .await; - - trace!("response: {response:?}"); - if let Err(e) = &response { - debug!("error: {e:?}"); - return Ok(None); - } - - let response = response?; - if !response.status().is_success() { - debug!("response not 2XX"); - return Ok(None); - } - - let text = response.text().await?; - trace!("response text: {text:?}"); - if text.len() >= 12288 { - debug_warn!("response contains junk"); - return Ok(None); - } - - let body: serde_json::Value = serde_json::from_str(&text).unwrap_or_default(); - - let m_server = body - .get("m.server") - .unwrap_or(&serde_json::Value::Null) - .as_str() - .unwrap_or_default(); - - if ruma::identifiers_validation::server_name::validate(m_server).is_err() { - debug_error!("response content missing or invalid"); - return Ok(None); - } - - debug_info!("{dest:?} found at {m_server:?}"); - Ok(Some(m_server.to_owned())) - } - #[inline] async fn conditional_query_and_cache( &self, diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs index 246d6bc1..c513cec9 100644 --- a/src/service/resolver/mod.rs +++ b/src/service/resolver/mod.rs @@ -2,7 +2,9 @@ pub mod actual; pub mod cache; mod dns; pub mod fed; +#[cfg(test)] mod tests; +mod well_known; use std::sync::Arc; diff --git a/src/service/resolver/tests.rs b/src/service/resolver/tests.rs index 6e9d0e71..068e08bd 100644 --- a/src/service/resolver/tests.rs +++ b/src/service/resolver/tests.rs @@ -1,5 +1,3 @@ -#![cfg(test)] - use super::fed::{FedDest, 
add_port_to_hostname, get_ip_with_port}; #[test] diff --git a/src/service/resolver/well_known.rs b/src/service/resolver/well_known.rs new file mode 100644 index 00000000..68a8e620 --- /dev/null +++ b/src/service/resolver/well_known.rs @@ -0,0 +1,49 @@ +use conduwuit::{Result, debug, debug_error, debug_info, debug_warn, implement, trace}; + +#[implement(super::Service)] +#[tracing::instrument(name = "well-known", level = "debug", skip(self, dest))] +pub(super) async fn request_well_known(&self, dest: &str) -> Result> { + trace!("Requesting well known for {dest}"); + let response = self + .services + .client + .well_known + .get(format!("https://{dest}/.well-known/matrix/server")) + .send() + .await; + + trace!("response: {response:?}"); + if let Err(e) = &response { + debug!("error: {e:?}"); + return Ok(None); + } + + let response = response?; + if !response.status().is_success() { + debug!("response not 2XX"); + return Ok(None); + } + + let text = response.text().await?; + trace!("response text: {text:?}"); + if text.len() >= 12288 { + debug_warn!("response contains junk"); + return Ok(None); + } + + let body: serde_json::Value = serde_json::from_str(&text).unwrap_or_default(); + + let m_server = body + .get("m.server") + .unwrap_or(&serde_json::Value::Null) + .as_str() + .unwrap_or_default(); + + if ruma::identifiers_validation::server_name::validate(m_server).is_err() { + debug_error!("response content missing or invalid"); + return Ok(None); + } + + debug_info!("{dest:?} found at {m_server:?}"); + Ok(Some(m_server.to_owned())) +} From d1b82ea2253179836cf7400f70960d583b25af50 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 21 Mar 2025 08:10:44 +0000 Subject: [PATCH 268/328] use #[ignore] for todo'ed tests Signed-off-by: Jason Volk --- src/database/tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/database/tests.rs b/src/database/tests.rs index 1446a1fc..c1a9f47c 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -325,8 +325,8 @@ fn ser_array() { assert_eq!(&s, &v, "vec serialization does not match"); } -#[cfg(todo)] #[test] +#[ignore] fn de_array() { let a: u64 = 123_456; let b: u64 = 987_654; @@ -357,8 +357,8 @@ fn de_array() { assert_eq!(vec[1], b, "deserialized vec [1] does not match"); } -#[cfg(todo)] #[test] +#[ignore] fn de_complex() { type Key<'a> = (&'a UserId, ArrayVec, &'a RoomId); From 9d0ce3965ea655943304b41ca679507b850130d3 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 22 Mar 2025 07:09:11 +0000 Subject: [PATCH 269/328] fix lints Signed-off-by: Jason Volk --- src/api/client/context.rs | 2 +- src/core/error/response.rs | 2 +- src/service/media/preview.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/api/client/context.rs b/src/api/client/context.rs index cb95dfef..b109711e 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -105,7 +105,7 @@ pub(crate) async fn get_context_route( .collect(); let (base_event, events_before, events_after): (_, Vec<_>, Vec<_>) = - join3(base_event, events_before, events_after).await; + join3(base_event, events_before, events_after).boxed().await; let lazy_loading_context = lazy_loading::Context { user_id: sender_user, diff --git a/src/core/error/response.rs b/src/core/error/response.rs index 00ade5ae..ae6fce62 100644 --- a/src/core/error/response.rs +++ b/src/core/error/response.rs @@ -86,7 +86,7 @@ pub(super) fn bad_request_code(kind: &ErrorKind) -> StatusCode { pub(super) fn ruma_error_message(error: &ruma::api::client::error::Error) -> 
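// A sketch of the delegation exchange handled by `request_well_known()` above,
// assuming the standard Matrix server-discovery document; the hostnames are
// placeholders:
//
//   GET https://example.org/.well-known/matrix/server
//   -> { "m.server": "matrix.example.org:443" }
//
// Only a 2xx response shorter than 12 KiB whose `m.server` value is a valid
// server name is accepted; anything else returns `None` and the caller falls
// back to the remaining resolution steps.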
String { if let ErrorBody::Standard { message, .. } = &error.body { - return message.to_string(); + return message.clone(); } format!("{error}") diff --git a/src/service/media/preview.rs b/src/service/media/preview.rs index ba5be7d4..91660a58 100644 --- a/src/service/media/preview.rs +++ b/src/service/media/preview.rs @@ -256,7 +256,7 @@ pub fn url_preview_allowed(&self, url: &Url) -> bool { if allowlist_url_contains .iter() - .any(|url_s| url.to_string().contains(&url_s.to_string())) + .any(|url_s| url.to_string().contains(url_s)) { debug!("URL {} is allowed by url_preview_url_contains_allowlist (check 4/4)", &host); return true; From 07ba00f74e2dfea314d0e5236f0415b2de6d543c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 26 Mar 2025 04:40:38 +0000 Subject: [PATCH 270/328] abstract raw query command iterations Signed-off-by: Jason Volk --- src/admin/query/raw.rs | 141 ++++++++++------------------------------- 1 file changed, 35 insertions(+), 106 deletions(-) diff --git a/src/admin/query/raw.rs b/src/admin/query/raw.rs index 23f11cc8..c503eee5 100644 --- a/src/admin/query/raw.rs +++ b/src/admin/query/raw.rs @@ -1,15 +1,16 @@ -use std::{borrow::Cow, collections::BTreeMap, ops::Deref}; +use std::{borrow::Cow, collections::BTreeMap, ops::Deref, sync::Arc}; use clap::Subcommand; use conduwuit::{ Err, Result, apply, at, is_zero, utils::{ - IterStream, - stream::{ReadyExt, TryIgnore, TryParallelExt}, + stream::{IterStream, ReadyExt, TryIgnore, TryParallelExt}, string::EMPTY, }, }; -use futures::{FutureExt, StreamExt, TryStreamExt}; +use conduwuit_database::Map; +use conduwuit_service::Services; +use futures::{FutureExt, Stream, StreamExt, TryStreamExt}; use ruma::events::room::message::RoomMessageEventContent; use tokio::time::Instant; @@ -172,22 +173,18 @@ pub(super) async fn compact( ) -> Result { use conduwuit_database::compact::Options; - let default_all_maps = map - .is_none() - .then(|| { - self.services - .db - .keys() - .map(Deref::deref) - .map(ToOwned::to_owned) - }) - .into_iter() - .flatten(); + let default_all_maps: Option<_> = map.is_none().then(|| { + self.services + .db + .keys() + .map(Deref::deref) + .map(ToOwned::to_owned) + }); let maps: Vec<_> = map .unwrap_or_default() .into_iter() - .chain(default_all_maps) + .chain(default_all_maps.into_iter().flatten()) .map(|map| self.services.db.get(&map)) .filter_map(Result::ok) .cloned() @@ -237,25 +234,8 @@ pub(super) async fn raw_count( ) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); - let default_all_maps = map - .is_none() - .then(|| self.services.db.keys().map(Deref::deref)) - .into_iter() - .flatten(); - - let maps: Vec<_> = map - .iter() - .map(String::as_str) - .chain(default_all_maps) - .map(|map| self.services.db.get(map)) - .filter_map(Result::ok) - .cloned() - .collect(); - let timer = Instant::now(); - let count = maps - .iter() - .stream() + let count = with_maps_or(map.as_deref(), self.services) .then(|map| map.raw_count_prefix(&prefix)) .ready_fold(0_usize, usize::saturating_add) .await; @@ -300,25 +280,8 @@ pub(super) async fn raw_keys_sizes( ) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); - let default_all_maps = map - .is_none() - .then(|| self.services.db.keys().map(Deref::deref)) - .into_iter() - .flatten(); - - let maps: Vec<_> = map - .iter() - .map(String::as_str) - .chain(default_all_maps) - .map(|map| self.services.db.get(map)) - .filter_map(Result::ok) - .cloned() - .collect(); - let timer = Instant::now(); - let result = maps - .iter() - .stream() + let result = 
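// A note on the repeated shape of these admin commands (describing this patch,
// not upstream): the copy-pasted "use the named column, or fall back to every
// column" blocks are folded into the `with_maps_or(map, services)` helper
// added at the bottom of this file, which yields a stream of `&Arc<Map>` that
// each command then chains `.map()`, `.flatten()` and fold operations onto.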
with_maps_or(map.as_deref(), self.services) .map(|map| map.raw_keys_prefix(&prefix)) .flatten() .ignore_err() @@ -345,25 +308,8 @@ pub(super) async fn raw_keys_total( ) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); - let default_all_maps = map - .is_none() - .then(|| self.services.db.keys().map(Deref::deref)) - .into_iter() - .flatten(); - - let maps: Vec<_> = map - .iter() - .map(String::as_str) - .chain(default_all_maps) - .map(|map| self.services.db.get(map)) - .filter_map(Result::ok) - .cloned() - .collect(); - let timer = Instant::now(); - let result = maps - .iter() - .stream() + let result = with_maps_or(map.as_deref(), self.services) .map(|map| map.raw_keys_prefix(&prefix)) .flatten() .ignore_err() @@ -387,25 +333,8 @@ pub(super) async fn raw_vals_sizes( ) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); - let default_all_maps = map - .is_none() - .then(|| self.services.db.keys().map(Deref::deref)) - .into_iter() - .flatten(); - - let maps: Vec<_> = map - .iter() - .map(String::as_str) - .chain(default_all_maps) - .map(|map| self.services.db.get(map)) - .filter_map(Result::ok) - .cloned() - .collect(); - let timer = Instant::now(); - let result = maps - .iter() - .stream() + let result = with_maps_or(map.as_deref(), self.services) .map(|map| map.raw_stream_prefix(&prefix)) .flatten() .ignore_err() @@ -433,25 +362,8 @@ pub(super) async fn raw_vals_total( ) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); - let default_all_maps = map - .is_none() - .then(|| self.services.db.keys().map(Deref::deref)) - .into_iter() - .flatten(); - - let maps: Vec<_> = map - .iter() - .map(String::as_str) - .chain(default_all_maps) - .map(|map| self.services.db.get(map)) - .filter_map(Result::ok) - .cloned() - .collect(); - let timer = Instant::now(); - let result = maps - .iter() - .stream() + let result = with_maps_or(map.as_deref(), self.services) .map(|map| map.raw_stream_prefix(&prefix)) .flatten() .ignore_err() @@ -573,3 +485,20 @@ pub(super) async fn raw_maps(&self) -> Result { Ok(RoomMessageEventContent::notice_markdown(format!("{list:#?}"))) } + +fn with_maps_or<'a>( + map: Option<&'a str>, + services: &'a Services, +) -> impl Stream> + Send + 'a { + let default_all_maps = map + .is_none() + .then(|| services.db.keys().map(Deref::deref)) + .into_iter() + .flatten(); + + map.into_iter() + .chain(default_all_maps) + .map(|map| services.db.get(map)) + .filter_map(Result::ok) + .stream() +} From dfe058a244ad7592114c86d504fb6fed744ad524 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 27 Mar 2025 01:08:42 +0000 Subject: [PATCH 271/328] default config item to 'none' when zstd_compression not featured Signed-off-by: Jason Volk --- src/core/config/mod.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 6b669ad3..52df19ac 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -2158,7 +2158,12 @@ fn default_rocksdb_max_log_file_size() -> usize { fn default_rocksdb_parallelism_threads() -> usize { 0 } -fn default_rocksdb_compression_algo() -> String { "zstd".to_owned() } +fn default_rocksdb_compression_algo() -> String { + cfg!(feature = "zstd_compression") + .then_some("zstd") + .unwrap_or("none") + .to_owned() +} /// Default RocksDB compression level is 32767, which is internally read by /// RocksDB as the default magic number and translated to the library's default From c99f5770a01ebae978461605c0f6eb954f7bad1b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 27 Mar 
2025 04:07:24 +0000 Subject: [PATCH 272/328] mark get_summary_and_children_federation Send Signed-off-by: Jason Volk --- src/service/rooms/spaces/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 55897f9c..af597445 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -238,7 +238,7 @@ async fn get_summary_and_children_federation( fn get_stripped_space_child_events<'a>( &'a self, room_id: &'a RoomId, -) -> impl Stream> + 'a { +) -> impl Stream> + Send + 'a { self.services .state .get_room_shortstatehash(room_id) From 7f448d88a430cc2869fe9ab366fd29b3fddb0f13 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 27 Mar 2025 03:34:33 +0000 Subject: [PATCH 273/328] use qualified crate names from within workspace Signed-off-by: Jason Volk --- src/main/clap.rs | 9 +++++++-- src/main/logging.rs | 2 +- src/main/main.rs | 4 +--- src/main/mods.rs | 8 ++++---- src/main/restart.rs | 2 +- src/main/runtime.rs | 11 ++++++----- src/main/sentry.rs | 4 ++-- src/main/server.rs | 10 +++++----- src/main/signal.rs | 2 +- 9 files changed, 28 insertions(+), 24 deletions(-) diff --git a/src/main/clap.rs b/src/main/clap.rs index c7f33bfe..35a7ea41 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -3,7 +3,7 @@ use std::path::PathBuf; use clap::{ArgAction, Parser}; -use conduwuit::{ +use conduwuit_core::{ Err, Result, config::{Figment, FigmentValue}, err, toml, @@ -12,7 +12,12 @@ use conduwuit::{ /// Commandline arguments #[derive(Parser, Debug)] -#[clap(version = conduwuit::version(), about, long_about = None, name = "conduwuit")] +#[clap( + about, + long_about = None, + name = "conduwuit", + version = conduwuit_core::version(), +)] pub(crate) struct Args { #[arg(short, long)] /// Path to the config TOML file (optional) diff --git a/src/main/logging.rs b/src/main/logging.rs index 7ce86d56..eeeda127 100644 --- a/src/main/logging.rs +++ b/src/main/logging.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{ +use conduwuit_core::{ Result, config::Config, debug_warn, err, diff --git a/src/main/main.rs b/src/main/main.rs index 2bfc3c06..fbc63b17 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -7,11 +7,9 @@ mod sentry; mod server; mod signal; -extern crate conduwuit_core as conduwuit; - use std::sync::{Arc, atomic::Ordering}; -use conduwuit::{Error, Result, debug_info, error, rustc_flags_capture}; +use conduwuit_core::{Error, Result, debug_info, error, rustc_flags_capture}; use server::Server; rustc_flags_capture! {} diff --git a/src/main/mods.rs b/src/main/mods.rs index 6dc79b2f..d585a381 100644 --- a/src/main/mods.rs +++ b/src/main/mods.rs @@ -9,13 +9,13 @@ use std::{ sync::{Arc, atomic::Ordering}, }; -use conduwuit::{Error, Result, debug, error, mods}; +use conduwuit_core::{Error, Result, debug, error, mods}; use conduwuit_service::Services; use crate::Server; type StartFuncResult = Pin>> + Send>>; -type StartFuncProto = fn(&Arc) -> StartFuncResult; +type StartFuncProto = fn(&Arc) -> StartFuncResult; type RunFuncResult = Pin> + Send>>; type RunFuncProto = fn(&Arc) -> RunFuncResult; @@ -34,8 +34,8 @@ const MODULE_NAMES: &[&str] = &[ ]; #[cfg(panic_trap)] -conduwuit::mod_init! {{ - conduwuit::debug::set_panic_trap(); +conduwuit_core::mod_init! 
{{ + conduwuit_core::debug::set_panic_trap(); }} pub(crate) async fn run(server: &Arc, starts: bool) -> Result<(bool, bool), Error> { diff --git a/src/main/restart.rs b/src/main/restart.rs index e6f45b82..b9d1dc94 100644 --- a/src/main/restart.rs +++ b/src/main/restart.rs @@ -2,7 +2,7 @@ use std::{env, os::unix::process::CommandExt, process::Command}; -use conduwuit::{debug, info, utils}; +use conduwuit_core::{debug, info, utils}; #[cold] pub(super) fn restart() -> ! { diff --git a/src/main/runtime.rs b/src/main/runtime.rs index b3174e9c..b1657289 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -9,8 +9,8 @@ use std::{ }; #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] -use conduwuit::result::LogDebugErr; -use conduwuit::{ +use conduwuit_core::result::LogDebugErr; +use conduwuit_core::{ Result, is_true, utils::sys::compute::{nth_core_available, set_affinity}, }; @@ -122,7 +122,7 @@ fn set_worker_affinity() { #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] fn set_worker_mallctl(id: usize) { - use conduwuit::alloc::je::{ + use conduwuit_core::alloc::je::{ is_affine_arena, this_thread::{set_arena, set_muzzy_decay}, }; @@ -135,7 +135,8 @@ fn set_worker_mallctl(id: usize) { .get() .expect("GC_MUZZY initialized by runtime::new()"); - let muzzy_auto_disable = conduwuit::utils::available_parallelism() >= DISABLE_MUZZY_THRESHOLD; + let muzzy_auto_disable = + conduwuit_core::utils::available_parallelism() >= DISABLE_MUZZY_THRESHOLD; if matches!(muzzy_option, Some(false) | None if muzzy_auto_disable) { set_muzzy_decay(-1).log_debug_err().ok(); } @@ -188,7 +189,7 @@ fn thread_park() { fn gc_on_park() { #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] - conduwuit::alloc::je::this_thread::decay() + conduwuit_core::alloc::je::this_thread::decay() .log_debug_err() .ok(); } diff --git a/src/main/sentry.rs b/src/main/sentry.rs index 1ea1f3ae..68f12eb7 100644 --- a/src/main/sentry.rs +++ b/src/main/sentry.rs @@ -5,7 +5,7 @@ use std::{ sync::{Arc, OnceLock}, }; -use conduwuit::{config::Config, debug, trace}; +use conduwuit_core::{config::Config, debug, trace}; use sentry::{ Breadcrumb, ClientOptions, Level, types::{ @@ -43,7 +43,7 @@ fn options(config: &Config) -> ClientOptions { traces_sample_rate: config.sentry_traces_sample_rate, debug: cfg!(debug_assertions), release: sentry::release_name!(), - user_agent: conduwuit::version::user_agent().into(), + user_agent: conduwuit_core::version::user_agent().into(), attach_stacktrace: config.sentry_attach_stacktrace, before_send: Some(Arc::new(before_send)), before_breadcrumb: Some(Arc::new(before_breadcrumb)), diff --git a/src/main/server.rs b/src/main/server.rs index 44ca69b0..8f697ca4 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -1,6 +1,6 @@ use std::{path::PathBuf, sync::Arc}; -use conduwuit::{ +use conduwuit_core::{ Error, Result, config::Config, info, @@ -14,7 +14,7 @@ use crate::{clap::Args, logging::TracingFlameGuard}; /// Server runtime state; complete pub(crate) struct Server { /// Server runtime state; public portion - pub(crate) server: Arc, + pub(crate) server: Arc, pub(crate) services: Mutex>>, @@ -25,7 +25,7 @@ pub(crate) struct Server { #[cfg(all(conduwuit_mods, feature = "conduwuit_mods"))] // Module instances; TODO: move to mods::loaded mgmt vector - pub(crate) mods: tokio::sync::RwLock>, + pub(crate) mods: tokio::sync::RwLock>, } impl Server { @@ -66,11 +66,11 @@ impl Server { database_path = ?config.database_path, log_levels = %config.log, "{}", - conduwuit::version(), + 
conduwuit_core::version(), ); Ok(Arc::new(Self { - server: Arc::new(conduwuit::Server::new(config, runtime.cloned(), Log { + server: Arc::new(conduwuit_core::Server::new(config, runtime.cloned(), Log { reload: tracing_reload_handle, capture, })), diff --git a/src/main/signal.rs b/src/main/signal.rs index 343b95c9..a5d07774 100644 --- a/src/main/signal.rs +++ b/src/main/signal.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{debug_error, trace, warn}; +use conduwuit_core::{debug_error, trace, warn}; use tokio::signal; use super::server::Server; From b2bf35cfab8aac82e4cde1c7c5a7b6e713bba5db Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 28 Mar 2025 06:42:30 +0000 Subject: [PATCH 274/328] fix benches from state-res Signed-off-by: Jason Volk --- src/core/state_res/benches.rs | 672 ++++++++++++++++++++++++++ src/core/state_res/mod.rs | 3 + src/core/state_res/state_res_bench.rs | 648 ------------------------- 3 files changed, 675 insertions(+), 648 deletions(-) create mode 100644 src/core/state_res/benches.rs delete mode 100644 src/core/state_res/state_res_bench.rs diff --git a/src/core/state_res/benches.rs b/src/core/state_res/benches.rs new file mode 100644 index 00000000..7a1ae5bf --- /dev/null +++ b/src/core/state_res/benches.rs @@ -0,0 +1,672 @@ +#[cfg(conduwuit_bench)] +extern crate test; + +use std::{ + borrow::Borrow, + collections::{HashMap, HashSet}, + sync::{ + Arc, + atomic::{AtomicU64, Ordering::SeqCst}, + }, +}; + +use futures::{future, future::ready}; +use maplit::{btreemap, hashmap, hashset}; +use ruma::{ + EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, Signatures, UserId, + events::{ + StateEventType, TimelineEventType, + pdu::{EventHash, Pdu, RoomV3Pdu}, + room::{ + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + }, + }, + int, room_id, uint, user_id, +}; +use serde_json::{ + json, + value::{RawValue as RawJsonValue, to_raw_value as to_raw_json_value}, +}; + +use self::event::PduEvent; +use crate::state_res::{self as state_res, Error, Event, Result, StateMap}; + +static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); + +#[cfg(conduwuit_bench)] +#[cfg_attr(conduwuit_bench, bench)] +fn lexico_topo_sort(c: &mut test::Bencher) { + let graph = hashmap! { + event_id("l") => hashset![event_id("o")], + event_id("m") => hashset![event_id("n"), event_id("o")], + event_id("n") => hashset![event_id("o")], + event_id("o") => hashset![], // "o" has zero outgoing edges but 4 incoming edges + event_id("p") => hashset![event_id("o")], + }; + + c.iter(|| { + let _ = state_res::lexicographical_topological_sort(&graph, &|_| { + future::ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) + }); + }); +} + +#[cfg(conduwuit_bench)] +#[cfg_attr(conduwuit_bench, bench)] +fn resolution_shallow_auth_chain(c: &mut test::Bencher) { + let parallel_fetches = 32; + let mut store = TestStore(hashmap! 
{}); + + // build up the DAG + let (state_at_bob, state_at_charlie, _) = store.set_up(); + + c.iter(|| async { + let ev_map = store.0.clone(); + let state_sets = [&state_at_bob, &state_at_charlie]; + let fetch = |id: OwnedEventId| ready(ev_map.get(&id).map(Arc::clone)); + let exists = |id: OwnedEventId| ready(ev_map.get(&id).is_some()); + let auth_chain_sets: Vec> = state_sets + .iter() + .map(|map| { + store + .auth_event_ids(room_id(), map.values().cloned().collect()) + .unwrap() + }) + .collect(); + + let _ = match state_res::resolve( + &RoomVersionId::V6, + state_sets.into_iter(), + &auth_chain_sets, + &fetch, + &exists, + parallel_fetches, + ) + .await + { + | Ok(state) => state, + | Err(e) => panic!("{e}"), + }; + }); +} + +#[cfg(conduwuit_bench)] +#[cfg_attr(conduwuit_bench, bench)] +fn resolve_deeper_event_set(c: &mut test::Bencher) { + let parallel_fetches = 32; + let mut inner = INITIAL_EVENTS(); + let ban = BAN_STATE_SET(); + + inner.extend(ban); + let store = TestStore(inner.clone()); + + let state_set_a = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("MB")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| { + ( + (ev.event_type().clone().into(), ev.state_key().unwrap().into()), + ev.event_id().to_owned(), + ) + }) + .collect::>(); + + let state_set_b = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("IME")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| { + ( + (ev.event_type().clone().into(), ev.state_key().unwrap().into()), + ev.event_id().to_owned(), + ) + }) + .collect::>(); + + c.iter(|| async { + let state_sets = [&state_set_a, &state_set_b]; + let auth_chain_sets: Vec> = state_sets + .iter() + .map(|map| { + store + .auth_event_ids(room_id(), map.values().cloned().collect()) + .unwrap() + }) + .collect(); + + let fetch = |id: OwnedEventId| ready(inner.get(&id).map(Arc::clone)); + let exists = |id: OwnedEventId| ready(inner.get(&id).is_some()); + let _ = match state_res::resolve( + &RoomVersionId::V6, + state_sets.into_iter(), + &auth_chain_sets, + &fetch, + &exists, + parallel_fetches, + ) + .await + { + | Ok(state) => state, + | Err(_) => panic!("resolution failed during benchmarking"), + }; + }); +} + +//*///////////////////////////////////////////////////////////////////// +// +// IMPLEMENTATION DETAILS AHEAD +// +/////////////////////////////////////////////////////////////////////*/ +struct TestStore(HashMap>); + +#[allow(unused)] +impl TestStore { + fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> Result> { + self.0 + .get(event_id) + .map(Arc::clone) + .ok_or_else(|| Error::NotFound(format!("{} not found", event_id))) + } + + /// Returns the events that correspond to the `event_ids` sorted in the same + /// order. + fn get_events(&self, room_id: &RoomId, event_ids: &[OwnedEventId]) -> Result>> { + let mut events = vec![]; + for id in event_ids { + events.push(self.get_event(room_id, id)?); + } + Ok(events) + } + + /// Returns a Vec of the related auth events to the given `event`. 
+ fn auth_event_ids(&self, room_id: &RoomId, event_ids: Vec) -> Result> { + let mut result = HashSet::new(); + let mut stack = event_ids; + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if result.contains(&ev_id) { + continue; + } + + result.insert(ev_id.clone()); + + let event = self.get_event(room_id, ev_id.borrow())?; + + stack.extend(event.auth_events().map(ToOwned::to_owned)); + } + + Ok(result) + } + + /// Returns a vector representing the difference in auth chains of the given + /// `events`. + fn auth_chain_diff( + &self, + room_id: &RoomId, + event_ids: Vec>, + ) -> Result> { + let mut auth_chain_sets = vec![]; + for ids in event_ids { + // TODO state store `auth_event_ids` returns self in the event ids list + // when an event returns `auth_event_ids` self is not contained + let chain = self + .auth_event_ids(room_id, ids)? + .into_iter() + .collect::>(); + auth_chain_sets.push(chain); + } + + if let Some(first) = auth_chain_sets.first().cloned() { + let common = auth_chain_sets + .iter() + .skip(1) + .fold(first, |a, b| a.intersection(b).cloned().collect::>()); + + Ok(auth_chain_sets + .into_iter() + .flatten() + .filter(|id| !common.contains(id.borrow())) + .collect()) + } else { + Ok(vec![]) + } + } +} + +impl TestStore { + #[allow(clippy::type_complexity)] + fn set_up( + &mut self, + ) -> (StateMap, StateMap, StateMap) { + let create_event = to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + ); + let cre = create_event.event_id().to_owned(); + self.0.insert(cre.clone(), Arc::clone(&create_event)); + + let alice_mem = to_pdu_event( + "IMA", + alice(), + TimelineEventType::RoomMember, + Some(alice().to_string().as_str()), + member_content_join(), + &[cre.clone()], + &[cre.clone()], + ); + self.0 + .insert(alice_mem.event_id().to_owned(), Arc::clone(&alice_mem)); + + let join_rules = to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &[cre.clone(), alice_mem.event_id().to_owned()], + &[alice_mem.event_id().to_owned()], + ); + self.0 + .insert(join_rules.event_id().to_owned(), join_rules.clone()); + + // Bob and Charlie join at the same time, so there is a fork + // this will be represented in the state_sets when we resolve + let bob_mem = to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + member_content_join(), + &[cre.clone(), join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0 + .insert(bob_mem.event_id().to_owned(), bob_mem.clone()); + + let charlie_mem = to_pdu_event( + "IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().to_string().as_str()), + member_content_join(), + &[cre, join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0 + .insert(charlie_mem.event_id().to_owned(), charlie_mem.clone()); + + let state_at_bob = [&create_event, &alice_mem, &join_rules, &bob_mem] + .iter() + .map(|ev| { + ( + (ev.event_type().clone().into(), ev.state_key().unwrap().into()), + ev.event_id().to_owned(), + ) + }) + .collect::>(); + + let state_at_charlie = [&create_event, &alice_mem, &join_rules, &charlie_mem] + .iter() + .map(|ev| { + ( + (ev.event_type().clone().into(), ev.state_key().unwrap().into()), + ev.event_id().to_owned(), + ) + }) + .collect::>(); + + let expected 
= [&create_event, &alice_mem, &join_rules, &bob_mem, &charlie_mem] + .iter() + .map(|ev| { + ( + (ev.event_type().clone().into(), ev.state_key().unwrap().into()), + ev.event_id().to_owned(), + ) + }) + .collect::>(); + + (state_at_bob, state_at_charlie, expected) + } +} + +fn event_id(id: &str) -> OwnedEventId { + if id.contains('$') { + return id.try_into().unwrap(); + } + format!("${}:foo", id).try_into().unwrap() +} + +fn alice() -> &'static UserId { user_id!("@alice:foo") } + +fn bob() -> &'static UserId { user_id!("@bob:foo") } + +fn charlie() -> &'static UserId { user_id!("@charlie:foo") } + +fn ella() -> &'static UserId { user_id!("@ella:foo") } + +fn room_id() -> &'static RoomId { room_id!("!test:foo") } + +fn member_content_ban() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Ban)).unwrap() +} + +fn member_content_join() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap() +} + +fn to_pdu_event( + id: &str, + sender: &UserId, + ev_type: TimelineEventType, + state_key: Option<&str>, + content: Box, + auth_events: &[S], + prev_events: &[S], +) -> Arc +where + S: AsRef, +{ + // We don't care if the addition happens in order just that it is atomic + // (each event has its own value) + let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); + let id = if id.contains('$') { + id.to_owned() + } else { + format!("${}:foo", id) + }; + let auth_events = auth_events + .iter() + .map(AsRef::as_ref) + .map(event_id) + .collect::>(); + let prev_events = prev_events + .iter() + .map(AsRef::as_ref) + .map(event_id) + .collect::>(); + + let state_key = state_key.map(ToOwned::to_owned); + Arc::new(PduEvent { + event_id: id.try_into().unwrap(), + rest: Pdu::RoomV3Pdu(RoomV3Pdu { + room_id: room_id().to_owned(), + sender: sender.to_owned(), + origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), + state_key, + kind: ev_type, + content, + redacts: None, + unsigned: btreemap! 
{}, + auth_events, + prev_events, + depth: uint!(0), + hashes: EventHash::new(String::new()), + signatures: Signatures::new(), + }), + }) +} + +// all graphs start with these input events +#[allow(non_snake_case)] +fn INITIAL_EVENTS() -> HashMap> { + vec![ + to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + ), + to_pdu_event( + "IMA", + alice(), + TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_join(), + &["CREATE"], + &["CREATE"], + ), + to_pdu_event( + "IPOWER", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100 } })).unwrap(), + &["CREATE", "IMA"], + &["IMA"], + ), + to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["IPOWER"], + ), + to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + member_content_join(), + &["CREATE", "IJR", "IPOWER"], + &["IJR"], + ), + to_pdu_event( + "IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().to_string().as_str()), + member_content_join(), + &["CREATE", "IJR", "IPOWER"], + &["IMB"], + ), + to_pdu_event::<&EventId>( + "START", + charlie(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + to_pdu_event::<&EventId>( + "END", + charlie(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + ] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +// all graphs start with these input events +#[allow(non_snake_case)] +fn BAN_STATE_SET() -> HashMap> { + vec![ + to_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], // auth_events + &["START"], // prev_events + ), + to_pdu_event( + "PB", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["END"], + ), + to_pdu_event( + "MB", + alice(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_ban(), + &["CREATE", "IMA", "PB"], + &["PA"], + ), + to_pdu_event( + "IME", + ella(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_join(), + &["CREATE", "IJR", "PA"], + &["MB"], + ), + ] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +/// Convenience trait for adding event type plus state key to state maps. 
+trait EventTypeExt { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String); +} + +impl EventTypeExt for &TimelineEventType { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + (self.to_string().into(), state_key.into()) + } +} + +mod event { + use ruma::{ + MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, + events::{TimelineEventType, pdu::Pdu}, + }; + use serde::{Deserialize, Serialize}; + use serde_json::value::RawValue as RawJsonValue; + + use super::Event; + + impl Event for PduEvent { + type Id = OwnedEventId; + + fn event_id(&self) -> &Self::Id { &self.event_id } + + fn room_id(&self) -> &RoomId { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.room_id, + | Pdu::RoomV3Pdu(ev) => &ev.room_id, + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn sender(&self) -> &UserId { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.sender, + | Pdu::RoomV3Pdu(ev) => &ev.sender, + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn event_type(&self) -> &TimelineEventType { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.kind, + | Pdu::RoomV3Pdu(ev) => &ev.kind, + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn content(&self) -> &RawJsonValue { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.content, + | Pdu::RoomV3Pdu(ev) => &ev.content, + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.origin_server_ts, + | Pdu::RoomV3Pdu(ev) => ev.origin_server_ts, + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn state_key(&self) -> Option<&str> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.state_key.as_deref(), + | Pdu::RoomV3Pdu(ev) => ev.state_key.as_deref(), + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn prev_events(&self) -> Box + Send + '_> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()), + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn auth_events(&self) -> Box + Send + '_> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()), + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn redacts(&self) -> Option<&Self::Id> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(), + | Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(), + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + } + + #[derive(Clone, Debug, Deserialize, Serialize)] + pub(crate) struct PduEvent { + pub(crate) event_id: OwnedEventId, + #[serde(flatten)] + pub(crate) rest: Pdu, + } +} diff --git a/src/core/state_res/mod.rs b/src/core/state_res/mod.rs index 6bff0cf8..2020d65c 100644 --- a/src/core/state_res/mod.rs +++ b/src/core/state_res/mod.rs @@ -9,6 +9,9 @@ mod state_event; #[cfg(test)] mod test_utils; +#[cfg(test)] +mod benches; + use std::{ borrow::Borrow, cmp::{Ordering, Reverse}, diff --git 
a/src/core/state_res/state_res_bench.rs b/src/core/state_res/state_res_bench.rs deleted file mode 100644 index a2bd2c23..00000000 --- a/src/core/state_res/state_res_bench.rs +++ /dev/null @@ -1,648 +0,0 @@ -// Because of criterion `cargo bench` works, -// but if you use `cargo bench -- --save-baseline ` -// or pass any other args to it, it fails with the error -// `cargo bench unknown option --save-baseline`. -// To pass args to criterion, use this form -// `cargo bench --bench -- --save-baseline `. - -#![allow(clippy::exhaustive_structs)] - -use std::{ - borrow::Borrow, - collections::{HashMap, HashSet}, - sync::{ - atomic::{AtomicU64, Ordering::SeqCst}, - Arc, - }, -}; - -use criterion::{criterion_group, criterion_main, Criterion}; -use event::PduEvent; -use futures::{future, future::ready}; -use ruma::{int, uint}; -use maplit::{btreemap, hashmap, hashset}; -use ruma::{ - room_id, user_id, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, - Signatures, UserId, -}; -use ruma::events::{ - pdu::{EventHash, Pdu, RoomV3Pdu}, - room::{ - join_rules::{JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, - }, - StateEventType, TimelineEventType, -}; -use conduwuit::state_res::{self as state_res, Error, Event, Result, StateMap}; -use serde_json::{ - json, - value::{to_raw_value as to_raw_json_value, RawValue as RawJsonValue}, -}; - -static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); - -fn lexico_topo_sort(c: &mut Criterion) { - c.bench_function("lexicographical topological sort", |b| { - let graph = hashmap! { - event_id("l") => hashset![event_id("o")], - event_id("m") => hashset![event_id("n"), event_id("o")], - event_id("n") => hashset![event_id("o")], - event_id("o") => hashset![], // "o" has zero outgoing edges but 4 incoming edges - event_id("p") => hashset![event_id("o")], - }; - b.iter(|| { - let _ = state_res::lexicographical_topological_sort(&graph, &|_| { - future::ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) - }); - }); - }); -} - -fn resolution_shallow_auth_chain(c: &mut Criterion) { - c.bench_function("resolve state of 5 events one fork", |b| { - let mut store = TestStore(hashmap! 
{}); - - // build up the DAG - let (state_at_bob, state_at_charlie, _) = store.set_up(); - - b.iter(|| async { - let ev_map = store.0.clone(); - let state_sets = [&state_at_bob, &state_at_charlie]; - let fetch = |id: OwnedEventId| ready(ev_map.get(&id).map(Arc::clone)); - let exists = |id: OwnedEventId| ready(ev_map.get(&id).is_some()); - let auth_chain_sets = state_sets - .iter() - .map(|map| { - store.auth_event_ids(room_id(), map.values().cloned().collect()).unwrap() - }) - .collect(); - - let _ = match state_res::resolve( - &RoomVersionId::V6, - state_sets.into_iter(), - &auth_chain_sets, - &fetch, - &exists, - ) - .await - { - Ok(state) => state, - Err(e) => panic!("{e}"), - }; - }); - }); -} - -fn resolve_deeper_event_set(c: &mut Criterion) { - c.bench_function("resolve state of 10 events 3 conflicting", |b| { - let mut inner = INITIAL_EVENTS(); - let ban = BAN_STATE_SET(); - - inner.extend(ban); - let store = TestStore(inner.clone()); - - let state_set_a = [ - inner.get(&event_id("CREATE")).unwrap(), - inner.get(&event_id("IJR")).unwrap(), - inner.get(&event_id("IMA")).unwrap(), - inner.get(&event_id("IMB")).unwrap(), - inner.get(&event_id("IMC")).unwrap(), - inner.get(&event_id("MB")).unwrap(), - inner.get(&event_id("PA")).unwrap(), - ] - .iter() - .map(|ev| { - (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id().to_owned()) - }) - .collect::>(); - - let state_set_b = [ - inner.get(&event_id("CREATE")).unwrap(), - inner.get(&event_id("IJR")).unwrap(), - inner.get(&event_id("IMA")).unwrap(), - inner.get(&event_id("IMB")).unwrap(), - inner.get(&event_id("IMC")).unwrap(), - inner.get(&event_id("IME")).unwrap(), - inner.get(&event_id("PA")).unwrap(), - ] - .iter() - .map(|ev| { - (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id().to_owned()) - }) - .collect::>(); - - b.iter(|| async { - let state_sets = [&state_set_a, &state_set_b]; - let auth_chain_sets = state_sets - .iter() - .map(|map| { - store.auth_event_ids(room_id(), map.values().cloned().collect()).unwrap() - }) - .collect(); - - let fetch = |id: OwnedEventId| ready(inner.get(&id).map(Arc::clone)); - let exists = |id: OwnedEventId| ready(inner.get(&id).is_some()); - let _ = match state_res::resolve( - &RoomVersionId::V6, - state_sets.into_iter(), - &auth_chain_sets, - &fetch, - &exists, - ) - .await - { - Ok(state) => state, - Err(_) => panic!("resolution failed during benchmarking"), - }; - }); - }); -} - -criterion_group!( - benches, - lexico_topo_sort, - resolution_shallow_auth_chain, - resolve_deeper_event_set -); - -criterion_main!(benches); - -//*///////////////////////////////////////////////////////////////////// -// -// IMPLEMENTATION DETAILS AHEAD -// -/////////////////////////////////////////////////////////////////////*/ -struct TestStore(HashMap>); - -#[allow(unused)] -impl TestStore { - fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> Result> { - self.0 - .get(event_id) - .map(Arc::clone) - .ok_or_else(|| Error::NotFound(format!("{} not found", event_id))) - } - - /// Returns the events that correspond to the `event_ids` sorted in the same order. - fn get_events(&self, room_id: &RoomId, event_ids: &[OwnedEventId]) -> Result>> { - let mut events = vec![]; - for id in event_ids { - events.push(self.get_event(room_id, id)?); - } - Ok(events) - } - - /// Returns a Vec of the related auth events to the given `event`. 
- fn auth_event_ids(&self, room_id: &RoomId, event_ids: Vec) -> Result> { - let mut result = HashSet::new(); - let mut stack = event_ids; - - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if result.contains(&ev_id) { - continue; - } - - result.insert(ev_id.clone()); - - let event = self.get_event(room_id, ev_id.borrow())?; - - stack.extend(event.auth_events().map(ToOwned::to_owned)); - } - - Ok(result) - } - - /// Returns a vector representing the difference in auth chains of the given `events`. - fn auth_chain_diff(&self, room_id: &RoomId, event_ids: Vec>) -> Result> { - let mut auth_chain_sets = vec![]; - for ids in event_ids { - // TODO state store `auth_event_ids` returns self in the event ids list - // when an event returns `auth_event_ids` self is not contained - let chain = self.auth_event_ids(room_id, ids)?.into_iter().collect::>(); - auth_chain_sets.push(chain); - } - - if let Some(first) = auth_chain_sets.first().cloned() { - let common = auth_chain_sets - .iter() - .skip(1) - .fold(first, |a, b| a.intersection(b).cloned().collect::>()); - - Ok(auth_chain_sets - .into_iter() - .flatten() - .filter(|id| !common.contains(id.borrow())) - .collect()) - } else { - Ok(vec![]) - } - } -} - -impl TestStore { - #[allow(clippy::type_complexity)] - fn set_up( - &mut self, - ) -> (StateMap, StateMap, StateMap) { - let create_event = to_pdu_event::<&EventId>( - "CREATE", - alice(), - TimelineEventType::RoomCreate, - Some(""), - to_raw_json_value(&json!({ "creator": alice() })).unwrap(), - &[], - &[], - ); - let cre = create_event.event_id().to_owned(); - self.0.insert(cre.clone(), Arc::clone(&create_event)); - - let alice_mem = to_pdu_event( - "IMA", - alice(), - TimelineEventType::RoomMember, - Some(alice().to_string().as_str()), - member_content_join(), - &[cre.clone()], - &[cre.clone()], - ); - self.0.insert(alice_mem.event_id().to_owned(), Arc::clone(&alice_mem)); - - let join_rules = to_pdu_event( - "IJR", - alice(), - TimelineEventType::RoomJoinRules, - Some(""), - to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), - &[cre.clone(), alice_mem.event_id().to_owned()], - &[alice_mem.event_id().to_owned()], - ); - self.0.insert(join_rules.event_id().to_owned(), join_rules.clone()); - - // Bob and Charlie join at the same time, so there is a fork - // this will be represented in the state_sets when we resolve - let bob_mem = to_pdu_event( - "IMB", - bob(), - TimelineEventType::RoomMember, - Some(bob().to_string().as_str()), - member_content_join(), - &[cre.clone(), join_rules.event_id().to_owned()], - &[join_rules.event_id().to_owned()], - ); - self.0.insert(bob_mem.event_id().to_owned(), bob_mem.clone()); - - let charlie_mem = to_pdu_event( - "IMC", - charlie(), - TimelineEventType::RoomMember, - Some(charlie().to_string().as_str()), - member_content_join(), - &[cre, join_rules.event_id().to_owned()], - &[join_rules.event_id().to_owned()], - ); - self.0.insert(charlie_mem.event_id().to_owned(), charlie_mem.clone()); - - let state_at_bob = [&create_event, &alice_mem, &join_rules, &bob_mem] - .iter() - .map(|e| { - (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) - }) - .collect::>(); - - let state_at_charlie = [&create_event, &alice_mem, &join_rules, &charlie_mem] - .iter() - .map(|e| { - (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) - }) - .collect::>(); - - let expected = [&create_event, &alice_mem, &join_rules, &bob_mem, &charlie_mem] - .iter() - 
.map(|e| { - (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) - }) - .collect::>(); - - (state_at_bob, state_at_charlie, expected) - } -} - -fn event_id(id: &str) -> OwnedEventId { - if id.contains('$') { - return id.try_into().unwrap(); - } - format!("${}:foo", id).try_into().unwrap() -} - -fn alice() -> &'static UserId { - user_id!("@alice:foo") -} - -fn bob() -> &'static UserId { - user_id!("@bob:foo") -} - -fn charlie() -> &'static UserId { - user_id!("@charlie:foo") -} - -fn ella() -> &'static UserId { - user_id!("@ella:foo") -} - -fn room_id() -> &'static RoomId { - room_id!("!test:foo") -} - -fn member_content_ban() -> Box { - to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Ban)).unwrap() -} - -fn member_content_join() -> Box { - to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap() -} - -fn to_pdu_event( - id: &str, - sender: &UserId, - ev_type: TimelineEventType, - state_key: Option<&str>, - content: Box, - auth_events: &[S], - prev_events: &[S], -) -> Arc -where - S: AsRef, -{ - // We don't care if the addition happens in order just that it is atomic - // (each event has its own value) - let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); - let id = if id.contains('$') { id.to_owned() } else { format!("${}:foo", id) }; - let auth_events = auth_events.iter().map(AsRef::as_ref).map(event_id).collect::>(); - let prev_events = prev_events.iter().map(AsRef::as_ref).map(event_id).collect::>(); - - let state_key = state_key.map(ToOwned::to_owned); - Arc::new(PduEvent { - event_id: id.try_into().unwrap(), - rest: Pdu::RoomV3Pdu(RoomV3Pdu { - room_id: room_id().to_owned(), - sender: sender.to_owned(), - origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), - state_key, - kind: ev_type, - content, - redacts: None, - unsigned: btreemap! 
{}, - auth_events, - prev_events, - depth: uint!(0), - hashes: EventHash::new(String::new()), - signatures: Signatures::new(), - }), - }) -} - -// all graphs start with these input events -#[allow(non_snake_case)] -fn INITIAL_EVENTS() -> HashMap> { - vec![ - to_pdu_event::<&EventId>( - "CREATE", - alice(), - TimelineEventType::RoomCreate, - Some(""), - to_raw_json_value(&json!({ "creator": alice() })).unwrap(), - &[], - &[], - ), - to_pdu_event( - "IMA", - alice(), - TimelineEventType::RoomMember, - Some(alice().as_str()), - member_content_join(), - &["CREATE"], - &["CREATE"], - ), - to_pdu_event( - "IPOWER", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100 } })).unwrap(), - &["CREATE", "IMA"], - &["IMA"], - ), - to_pdu_event( - "IJR", - alice(), - TimelineEventType::RoomJoinRules, - Some(""), - to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), - &["CREATE", "IMA", "IPOWER"], - &["IPOWER"], - ), - to_pdu_event( - "IMB", - bob(), - TimelineEventType::RoomMember, - Some(bob().to_string().as_str()), - member_content_join(), - &["CREATE", "IJR", "IPOWER"], - &["IJR"], - ), - to_pdu_event( - "IMC", - charlie(), - TimelineEventType::RoomMember, - Some(charlie().to_string().as_str()), - member_content_join(), - &["CREATE", "IJR", "IPOWER"], - &["IMB"], - ), - to_pdu_event::<&EventId>( - "START", - charlie(), - TimelineEventType::RoomTopic, - Some(""), - to_raw_json_value(&json!({})).unwrap(), - &[], - &[], - ), - to_pdu_event::<&EventId>( - "END", - charlie(), - TimelineEventType::RoomTopic, - Some(""), - to_raw_json_value(&json!({})).unwrap(), - &[], - &[], - ), - ] - .into_iter() - .map(|ev| (ev.event_id().to_owned(), ev)) - .collect() -} - -// all graphs start with these input events -#[allow(non_snake_case)] -fn BAN_STATE_SET() -> HashMap> { - vec![ - to_pdu_event( - "PA", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), - &["CREATE", "IMA", "IPOWER"], // auth_events - &["START"], // prev_events - ), - to_pdu_event( - "PB", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), - &["CREATE", "IMA", "IPOWER"], - &["END"], - ), - to_pdu_event( - "MB", - alice(), - TimelineEventType::RoomMember, - Some(ella().as_str()), - member_content_ban(), - &["CREATE", "IMA", "PB"], - &["PA"], - ), - to_pdu_event( - "IME", - ella(), - TimelineEventType::RoomMember, - Some(ella().as_str()), - member_content_join(), - &["CREATE", "IJR", "PA"], - &["MB"], - ), - ] - .into_iter() - .map(|ev| (ev.event_id().to_owned(), ev)) - .collect() -} - -/// Convenience trait for adding event type plus state key to state maps. 
-trait EventTypeExt { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String); -} - -impl EventTypeExt for &TimelineEventType { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { - (self.to_string().into(), state_key.into()) - } -} - -mod event { - use ruma_common::{MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId}; - use ruma_events::{pdu::Pdu, TimelineEventType}; - use ruma_state_res::Event; - use serde::{Deserialize, Serialize}; - use serde_json::value::RawValue as RawJsonValue; - - impl Event for PduEvent { - type Id = OwnedEventId; - - fn event_id(&self) -> &Self::Id { - &self.event_id - } - - fn room_id(&self) -> &RoomId { - match &self.rest { - Pdu::RoomV1Pdu(ev) => &ev.room_id, - Pdu::RoomV3Pdu(ev) => &ev.room_id, - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn sender(&self) -> &UserId { - match &self.rest { - Pdu::RoomV1Pdu(ev) => &ev.sender, - Pdu::RoomV3Pdu(ev) => &ev.sender, - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn event_type(&self) -> &TimelineEventType { - match &self.rest { - Pdu::RoomV1Pdu(ev) => &ev.kind, - Pdu::RoomV3Pdu(ev) => &ev.kind, - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn content(&self) -> &RawJsonValue { - match &self.rest { - Pdu::RoomV1Pdu(ev) => &ev.content, - Pdu::RoomV3Pdu(ev) => &ev.content, - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { - match &self.rest { - Pdu::RoomV1Pdu(ev) => ev.origin_server_ts, - Pdu::RoomV3Pdu(ev) => ev.origin_server_ts, - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn state_key(&self) -> Option<&str> { - match &self.rest { - Pdu::RoomV1Pdu(ev) => ev.state_key.as_deref(), - Pdu::RoomV3Pdu(ev) => ev.state_key.as_deref(), - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn prev_events(&self) -> Box + Send + '_> { - match &self.rest { - Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)), - Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()), - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn auth_events(&self) -> Box + Send + '_> { - match &self.rest { - Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)), - Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()), - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn redacts(&self) -> Option<&Self::Id> { - match &self.rest { - Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(), - Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(), - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - } - - #[derive(Clone, Debug, Deserialize, Serialize)] - pub(crate) struct PduEvent { - pub(crate) event_id: OwnedEventId, - #[serde(flatten)] - pub(crate) rest: Pdu, - } -} From 6365f1a887a02564237fd6176ee7e3d72480ffbf Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Fri, 28 Mar 2025 14:14:48 -0400 Subject: [PATCH 275/328] remove sccache from ci for now Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/.github/workflows/ci.yml 
b/.github/workflows/ci.yml index 3fd834e0..5043f23b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -21,16 +21,6 @@ concurrency: cancel-in-progress: true env: - # sccache only on main repo - SCCACHE_GHA_ENABLED: "${{ !startsWith(github.ref, 'refs/tags/') && (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'true' || 'false' }}" - RUSTC_WRAPPER: "${{ !startsWith(github.ref, 'refs/tags/') && (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'sccache' || '' }}" - SCCACHE_BUCKET: "${{ (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'sccache' || '' }}" - SCCACHE_S3_USE_SSL: ${{ vars.SCCACHE_S3_USE_SSL }} - SCCACHE_REGION: ${{ vars.SCCACHE_REGION }} - SCCACHE_ENDPOINT: ${{ vars.SCCACHE_ENDPOINT }} - SCCACHE_CACHE_MULTIARCH: ${{ vars.SCCACHE_CACHE_MULTIARCH }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} # Required to make some things output color TERM: ansi # Publishing to my nix binary cache @@ -123,13 +113,6 @@ jobs: bin/nix-build-and-cache just '.#devShells.x86_64-linux.all-features' bin/nix-build-and-cache just '.#devShells.x86_64-linux.dynamic' - # use sccache for Rust - - name: Run sccache-cache - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - #if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} - uses: mozilla-actions/sccache-action@main - # use rust-cache - uses: Swatinem/rust-cache@v2 # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting @@ -247,13 +230,6 @@ jobs: direnv allow nix develop .#all-features --command true --impure - # use sccache for Rust - - name: Run sccache-cache - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - #if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} - uses: mozilla-actions/sccache-action@main - # use rust-cache - uses: Swatinem/rust-cache@v2 # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting From 75b6daa67f31d29035113d217accc80505119e63 Mon Sep 17 00:00:00 2001 From: Ginger <75683114+gingershaped@users.noreply.github.com> Date: Fri, 28 Mar 2025 12:22:23 -0400 Subject: [PATCH 276/328] Fix off-by-one error when fetching room hierarchy --- src/api/client/space.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/client/space.rs b/src/api/client/space.rs index a667f852..a55c927d 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -159,7 +159,7 @@ where break; } - if parents.len() >= max_depth { + if parents.len() > max_depth { continue; } From 3e57b7d35d5bd6cfed5900b377f7c68970213518 Mon Sep 17 00:00:00 2001 From: Ginger <75683114+gingershaped@users.noreply.github.com> Date: Fri, 28 Mar 2025 12:58:18 -0400 Subject: [PATCH 277/328] Update expected test results --- tests/test_results/complement/test_results.jsonl | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index ac2733f8..c0e28750 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -73,7 +73,7 @@ {"Action":"pass","Test":"TestChangePasswordPushers/Pushers_created_with_a_different_access_token_are_deleted_on_password_change"} {"Action":"pass","Test":"TestChangePasswordPushers/Pushers_created_with_the_same_access_token_are_not_deleted_on_password_change"} {"Action":"fail","Test":"TestClientSpacesSummary"} -{"Action":"fail","Test":"TestClientSpacesSummary/max_depth"} +{"Action":"pass","Test":"TestClientSpacesSummary/max_depth"} {"Action":"fail","Test":"TestClientSpacesSummary/pagination"} {"Action":"fail","Test":"TestClientSpacesSummary/query_whole_graph"} {"Action":"fail","Test":"TestClientSpacesSummary/redact_link"} From 0e2009dbf5c3dfe1cfd1fd37078c74e871ffa5c6 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 28 Mar 2025 22:47:00 +0000 Subject: [PATCH 278/328] fix client hierarchy loop condition Signed-off-by: Jason Volk --- src/api/client/space.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/api/client/space.rs b/src/api/client/space.rs index a55c927d..567ac62f 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -155,10 +155,6 @@ where break; } - if children.is_empty() { - break; - } - if parents.len() > max_depth { continue; } From d0132706cd9b5bd0c6df5507cb42bcbade86f28b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 28 Mar 2025 23:34:42 +0000 Subject: [PATCH 279/328] add --read-only and --maintenance program option Signed-off-by: Jason Volk --- Cargo.toml | 1 + src/main/clap.rs | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index db55b9b8..8b49c3b8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -892,6 +892,7 @@ needless_continue = { level = "allow", priority = 1 } no_effect_underscore_binding = { level = "allow", priority = 1 } similar_names = { level = "allow", priority = 1 } single_match_else = { level = "allow", priority = 1 } +struct_excessive_bools = { level = "allow", priority = 1 } struct_field_names = { level = "allow", priority = 1 } unnecessary_wraps = { level = "allow", priority = 1 } unused_async = { level = "allow", priority = 1 } diff --git a/src/main/clap.rs b/src/main/clap.rs index 35a7ea41..707a1c76 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -27,6 +27,14 @@ pub(crate) struct Args { #[arg(long, short('O'))] pub(crate) option: Vec, + /// Run in a stricter read-only --maintenance mode. + #[arg(long)] + pub(crate) read_only: bool, + + /// Run in maintenance mode while refusing connections. + #[arg(long)] + pub(crate) maintenance: bool, + #[cfg(feature = "console")] /// Activate admin command console automatically after startup. #[arg(long, num_args(0))] @@ -121,6 +129,15 @@ pub(super) fn parse() -> Args { Args::parse() } /// Synthesize any command line options with configuration file options. pub(crate) fn update(mut config: Figment, args: &Args) -> Result { + if args.read_only { + config = config.join(("rocksdb_read_only", true)); + } + + if args.maintenance || args.read_only { + config = config.join(("startup_netburst", false)); + config = config.join(("listening", false)); + } + #[cfg(feature = "console")] // Indicate the admin console should be spawned automatically if the // configuration file hasn't already. 
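
[Editor's note] The clap.rs hunk above wires two new boolean program options into the configuration layer. The sketch below is a minimal, self-contained illustration of the clap-derive pattern that hunk relies on; it is not conduwuit's actual code. Assumptions: clap v4 with the `derive` feature (already used by src/main/clap.rs per the diff), a made-up binary name "example", and a stand-in `listening` variable in place of the real Figment config joins.

// Minimal sketch (assumptions noted above): plain `bool` fields with
// `#[arg(long)]` become presence flags (--read-only, --maintenance),
// and the parsed struct is inspected afterwards to let one flag imply
// the other's behavior, mirroring the patch's config overrides.
use clap::Parser;

#[derive(Parser, Debug)]
#[clap(name = "example", about = "flag-implication sketch", long_about = None)]
struct Args {
    /// Run in a stricter read-only maintenance mode.
    #[arg(long)]
    read_only: bool,

    /// Run in maintenance mode while refusing connections.
    #[arg(long)]
    maintenance: bool,
}

fn main() {
    let args = Args::parse();

    // Read-only is treated as the stricter form of maintenance mode, so
    // either flag turns listening off here (the real patch instead joins
    // ("listening", false) and ("startup_netburst", false) into the Figment).
    let listening = !(args.maintenance || args.read_only);

    println!(
        "read_only={} maintenance={} listening={listening}",
        args.read_only, args.maintenance
    );
}

Running `example --read-only` would print `read_only=true maintenance=false listening=false`, which matches the patch's intent that --read-only subsumes the maintenance behavior without requiring both flags.
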
From b03c493bf994449c8c5dd5b1122ab9c87a289df5 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 28 Mar 2025 20:33:38 +0000 Subject: [PATCH 280/328] add stub for database benches Signed-off-by: Jason Volk --- src/database/benches.rs | 17 +++++++++++++++++ src/database/mod.rs | 2 ++ 2 files changed, 19 insertions(+) create mode 100644 src/database/benches.rs diff --git a/src/database/benches.rs b/src/database/benches.rs new file mode 100644 index 00000000..56d1411c --- /dev/null +++ b/src/database/benches.rs @@ -0,0 +1,17 @@ +#[cfg(conduwuit_bench)] +extern crate test; + +#[cfg(conduwuit_bench)] +#[cfg_attr(conduwuit_bench, bench)] +fn ser_str(b: &mut test::Bencher) { + use conduwuit::ruma::{RoomId, UserId}; + + use crate::ser::serialize_to_vec; + + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + b.iter(|| { + let key = (user_id, room_id); + let _s = serialize_to_vec(key).expect("failed to serialize user_id"); + }); +} diff --git a/src/database/mod.rs b/src/database/mod.rs index 0481d1bd..1262a79a 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -5,6 +5,8 @@ conduwuit::mod_ctor! {} conduwuit::mod_dtor! {} conduwuit::rustc_flags_capture! {} +#[cfg(test)] +mod benches; mod cork; mod de; mod deserialized; From a93cb34dd6e10038d6504af209c78e4967467bcb Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 30 Mar 2025 02:48:08 +0000 Subject: [PATCH 281/328] disambiguate UInt/u64 type related in client/api/directory; use err macros. Signed-off-by: Jason Volk --- src/api/client/directory.rs | 86 +++++++++++++------------------------ 1 file changed, 30 insertions(+), 56 deletions(-) diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 80b314b9..f2f668c8 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -1,12 +1,13 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - Err, Error, Result, info, + Err, Result, err, info, utils::{ TryFutureExtExt, + math::Expected, + result::FlatOk, stream::{ReadyExt, WidebandExt}, }, - warn, }; use futures::{ FutureExt, StreamExt, TryFutureExt, @@ -20,7 +21,6 @@ use ruma::{ get_public_rooms, get_public_rooms_filtered, get_room_visibility, set_room_visibility, }, - error::ErrorKind, room, }, federation, @@ -71,11 +71,7 @@ pub(crate) async fn get_public_rooms_filtered_route( ) .await .map_err(|e| { - warn!(?body.server, "Failed to return /publicRooms: {e}"); - Error::BadRequest( - ErrorKind::Unknown, - "Failed to return the requested server's public room list.", - ) + err!(Request(Unknown(warn!(?body.server, "Failed to return /publicRooms: {e}")))) })?; Ok(response) @@ -113,11 +109,7 @@ pub(crate) async fn get_public_rooms_route( ) .await .map_err(|e| { - warn!(?body.server, "Failed to return /publicRooms: {e}"); - Error::BadRequest( - ErrorKind::Unknown, - "Failed to return the requested server's public room list.", - ) + err!(Request(Unknown(warn!(?body.server, "Failed to return /publicRooms: {e}")))) })?; Ok(get_public_rooms::v3::Response { @@ -137,7 +129,7 @@ pub(crate) async fn set_room_visibility_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if !services.rooms.metadata.exists(&body.room_id).await { // Return 404 if the room doesn't exist @@ -181,10 +173,9 @@ pub(crate) async fn set_room_visibility_route( .await; } - return 
Err(Error::BadRequest( - ErrorKind::forbidden(), + return Err!(Request(Forbidden( "Publishing rooms to the room directory is not allowed", - )); + ))); } services.rooms.directory.set_public(&body.room_id); @@ -202,10 +193,7 @@ pub(crate) async fn set_room_visibility_route( }, | room::Visibility::Private => services.rooms.directory.set_not_public(&body.room_id), | _ => { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Room visibility type is not supported.", - )); + return Err!(Request(InvalidParam("Room visibility type is not supported.",))); }, } @@ -221,7 +209,7 @@ pub(crate) async fn get_room_visibility_route( ) -> Result { if !services.rooms.metadata.exists(&body.room_id).await { // Return 404 if the room doesn't exist - return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found")); + return Err!(Request(NotFound("Room not found"))); } Ok(get_room_visibility::v3::Response { @@ -269,8 +257,8 @@ pub(crate) async fn get_public_rooms_filtered_helper( } // Use limit or else 10, with maximum 100 - let limit = limit.map_or(10, u64::from); - let mut num_since: u64 = 0; + let limit: usize = limit.map_or(10_u64, u64::from).try_into()?; + let mut num_since: usize = 0; if let Some(s) = &since { let mut characters = s.chars(); @@ -278,14 +266,14 @@ pub(crate) async fn get_public_rooms_filtered_helper( | Some('n') => false, | Some('p') => true, | _ => { - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token")); + return Err!(Request(InvalidParam("Invalid `since` token"))); }, }; num_since = characters .collect::() .parse() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token."))?; + .map_err(|_| err!(Request(InvalidParam("Invalid `since` token."))))?; if backwards { num_since = num_since.saturating_sub(limit); @@ -302,6 +290,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( if !filter.room_types.is_empty() && !filter.room_types.contains(&RoomTypeFilter::from(chunk.room_type.clone())) { return None; } + if let Some(query) = filter.generic_search_term.as_ref().map(|q| q.to_lowercase()) { if let Some(name) = &chunk.name { if name.as_str().to_lowercase().contains(&query) { @@ -333,40 +322,24 @@ pub(crate) async fn get_public_rooms_filtered_helper( all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); - let total_room_count_estimate = UInt::try_from(all_rooms.len()).unwrap_or_else(|_| uint!(0)); + let total_room_count_estimate = UInt::try_from(all_rooms.len()) + .unwrap_or_else(|_| uint!(0)) + .into(); - let chunk: Vec<_> = all_rooms - .into_iter() - .skip( - num_since - .try_into() - .expect("num_since should not be this high"), - ) - .take(limit.try_into().expect("limit should not be this high")) - .collect(); + let chunk: Vec<_> = all_rooms.into_iter().skip(num_since).take(limit).collect(); - let prev_batch = if num_since == 0 { - None - } else { - Some(format!("p{num_since}")) - }; + let prev_batch = num_since.ne(&0).then_some(format!("p{num_since}")); - let next_batch = if chunk.len() < limit.try_into().unwrap() { - None - } else { - Some(format!( - "n{}", - num_since - .checked_add(limit) - .expect("num_since and limit should not be that large") - )) - }; + let next_batch = chunk + .len() + .ge(&limit) + .then_some(format!("n{}", num_since.expected_add(limit))); Ok(get_public_rooms_filtered::v3::Response { chunk, prev_batch, next_batch, - total_room_count_estimate: Some(total_room_count_estimate), + total_room_count_estimate, }) } @@ -384,7 +357,7 @@ async fn user_can_publish_room( .await { | 
Ok(event) => serde_json::from_str(event.content.get()) - .map_err(|_| Error::bad_database("Invalid event content for m.room.power_levels")) + .map_err(|_| err!(Database("Invalid event content for m.room.power_levels"))) .map(|content: RoomPowerLevelsEventContent| { RoomPowerLevels::from(content) .user_can_send_state(user_id, StateEventType::RoomHistoryVisibility) @@ -452,9 +425,10 @@ async fn public_rooms_chunk(services: &Services, room_id: OwnedRoomId) -> Public join_rule: join_rule.unwrap_or_default(), name, num_joined_members: num_joined_members - .unwrap_or(0) - .try_into() - .expect("joined count overflows ruma UInt"), + .map(TryInto::try_into) + .map(Result::ok) + .flat_ok() + .unwrap_or_else(|| uint!(0)), room_id, room_type, topic, From 095734a8e7835abf793911ff24ddf0f55c89012f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 30 Mar 2025 03:01:58 +0000 Subject: [PATCH 282/328] bump tokio to 1.44.1 Signed-off-by: Jason Volk --- Cargo.lock | 559 +++++++++++++++++++++++++++-------------------------- Cargo.toml | 2 +- 2 files changed, 291 insertions(+), 270 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c28f4eab..8c0e797b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -55,9 +55,9 @@ checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anyhow" -version = "1.0.96" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b964d184e89d9b6b67dd2715bc8e74cf3107fb2b529990c90cf517326150bf4" +checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" [[package]] name = "arbitrary" @@ -79,7 +79,7 @@ checksum = "0ae92a5119aa49cdbcf6b9f893fe4e1d98b04ccbf82ee0584ad948a44a734dea" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -105,9 +105,9 @@ dependencies = [ [[package]] name = "as_variant" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f38fa22307249f86fb7fad906fcae77f2564caeb56d7209103c551cd1cf4798f" +checksum = "9dbc3a507a82b17ba0d98f6ce8fd6954ea0c8152e98009d36a40d8dcc8ce078a" [[package]] name = "assign" @@ -128,9 +128,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.20" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "310c9bcae737a48ef5cdee3174184e6d548b292739ede61a1f955ef76a738861" +checksum = "59a194f9d963d8099596278594b3107448656ba73831c9d8c783e613ce86da64" dependencies = [ "brotli", "flate2", @@ -161,18 +161,18 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] name = "async-trait" -version = "0.1.86" +version = "0.1.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "644dd749086bf3771a2fbc5f256fdb982d53f011c7d5d560304eafeecebce79d" +checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -221,27 +221,25 @@ dependencies = [ [[package]] name = "aws-lc-rs" -version = "1.12.5" +version = "1.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e4e8200b9a4a5801a769d50eeabc05670fec7e959a8cb7a63a93e4e519942ae" +checksum = "dabb68eb3a7aa08b46fddfd59a3d55c978243557a90ab804769f7e20e67d2b01" dependencies = [ "aws-lc-sys", - "paste", "zeroize", ] [[package]] name = "aws-lc-sys" -version = "0.26.0" +version = "0.27.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f9dd2e03ee80ca2822dd6ea431163d2ef259f2066a4d6ccaca6d9dcb386aa43" +checksum = "77926887776171ced7d662120a75998e444d3750c951abfe07f90da130514b1f" dependencies = [ "bindgen 0.69.5", "cc", "cmake", "dunce", "fs_extra", - "paste", ] [[package]] @@ -334,16 +332,15 @@ dependencies = [ [[package]] name = "axum-server" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56bac90848f6a9393ac03c63c640925c4b7c8ca21654de40d53f55964667c7d8" +checksum = "495c05f60d6df0093e8fb6e74aa5846a0ad06abaf96d76166283720bf740f8ab" dependencies = [ "arc-swap", "bytes", - "futures-util", + "fs-err", "http", "http-body", - "http-body-util", "hyper", "hyper-util", "pin-project-lite", @@ -352,7 +349,6 @@ dependencies = [ "rustls-pki-types", "tokio", "tokio-rustls", - "tower 0.4.13", "tower-service", ] @@ -404,9 +400,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.6.0" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" [[package]] name = "bindgen" @@ -427,7 +423,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.98", + "syn 2.0.100", "which", ] @@ -446,7 +442,7 @@ dependencies = [ "regex", "rustc-hash 2.1.1", "shlex", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -535,9 +531,9 @@ checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" [[package]] name = "bytemuck" -version = "1.21.0" +version = "1.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" +checksum = "b6b1fc10dbac614ebc03540c9dbd60e83887fda27794998c6528f1782047d540" [[package]] name = "byteorder" @@ -553,9 +549,9 @@ checksum = "8f1fe948ff07f4bd06c30984e69f5b4899c516a3ef74f34df92a2df2ab535495" [[package]] name = "bytes" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "bytesize" @@ -585,9 +581,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.16" +version = "1.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c" +checksum = "1fcb57c740ae1daf453ae85f16e37396f672b039e00d9d866e07ddb24e328e3a" dependencies = [ "jobserver", "libc", @@ -656,9 +652,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.31" +version = "4.5.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "027bb0d98429ae334a8698531da7077bdf906419543a35a55c2cb1b66437d767" +checksum = "e958897981290da2a852763fe9cdb89cd36977a5d729023127095fa94d95e2ff" dependencies = [ "clap_builder", "clap_derive", @@ -666,9 +662,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.31" +version = "4.5.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5589e0cba072e0f3d23791efac0fd8627b49c829c196a492e88168e6a669d863" +checksum = "83b0f35019843db2160b5bb19ae09b4e6411ac33fc6a712003c33e03090e2489" dependencies = [ "anstyle", "clap_lex", @@ -676,14 +672,14 @@ 
dependencies = [ [[package]] name = "clap_derive" -version = "4.5.28" +version = "4.5.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4ced95c6f4a675af3da73304b9ac4ed991640c36374e4b46795c49e17cf1ed" +checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -787,7 +783,7 @@ dependencies = [ "ipaddress", "itertools 0.13.0", "log", - "rand", + "rand 0.8.5", "reqwest", "ruma", "serde", @@ -830,7 +826,7 @@ dependencies = [ "maplit", "nix", "num-traits", - "rand", + "rand 0.8.5", "regex", "reqwest", "ring", @@ -842,7 +838,7 @@ dependencies = [ "serde_yaml", "smallstr", "smallvec", - "thiserror 2.0.11", + "thiserror 2.0.12", "tikv-jemalloc-ctl", "tikv-jemalloc-sys", "tikv-jemallocator", @@ -880,7 +876,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -937,7 +933,7 @@ dependencies = [ "log", "loole", "lru-cache", - "rand", + "rand 0.8.5", "regex", "reqwest", "ruma", @@ -1194,7 +1190,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" dependencies = [ "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -1221,7 +1217,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -1264,9 +1260,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.11" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "28cfac68e08048ae1883171632c2aef3ebc555621ae56fbccce1cbf22dd7f058" dependencies = [ "powerfmt", ] @@ -1290,7 +1286,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -1317,7 +1313,7 @@ checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ "curve25519-dalek", "ed25519", - "rand_core", + "rand_core 0.6.4", "serde", "sha2", "subtle", @@ -1326,9 +1322,9 @@ dependencies = [ [[package]] name = "either" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7914353092ddf589ad78f25c5c1c21b7f80b0ff8621e7c814c3485b5306da9d" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" dependencies = [ "serde", ] @@ -1342,7 +1338,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -1373,9 +1369,9 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" +checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" dependencies = [ "event-listener", "pin-project-lite", @@ -1472,6 +1468,16 @@ dependencies = [ "thiserror 1.0.69", ] +[[package]] +name = "fs-err" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f89bda4c2a21204059a977ed3bfe746677dfd137b83c339e702b0ac91d482aa" +dependencies = [ + "autocfg", + "tokio", +] + [[package]] name = "fs_extra" version = "1.3.0" @@ -1543,7 +1549,7 @@ checksum = 
"162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -1601,14 +1607,16 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" dependencies = [ "cfg-if", + "js-sys", "libc", - "wasi 0.13.3+wasi-0.2.2", - "windows-targets 0.52.6", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", + "wasm-bindgen", ] [[package]] @@ -1645,7 +1653,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.7.1", + "indexmap 2.8.0", "slab", "tokio", "tokio-util", @@ -1654,9 +1662,9 @@ dependencies = [ [[package]] name = "half" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +checksum = "7db2ff139bba50379da6aa0766b52fdcb62cb5b263009b09ed58ba604e14bbd1" dependencies = [ "cfg-if", "crunchy", @@ -1751,7 +1759,7 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand", + "rand 0.8.5", "thiserror 1.0.69", "tinyvec", "tokio", @@ -1772,7 +1780,7 @@ dependencies = [ "lru-cache", "once_cell", "parking_lot", - "rand", + "rand 0.8.5", "resolv-conf", "smallvec", "thiserror 1.0.69", @@ -1798,17 +1806,6 @@ dependencies = [ "windows-sys 0.59.0", ] -[[package]] -name = "hostname" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" -dependencies = [ - "libc", - "match_cfg", - "winapi", -] - [[package]] name = "hostname" version = "0.4.0" @@ -1831,14 +1828,14 @@ dependencies = [ "markup5ever", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] name = "http" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", "fnv", @@ -1866,12 +1863,12 @@ dependencies = [ [[package]] name = "http-body-util" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", - "futures-util", + "futures-core", "http", "http-body", "pin-project-lite", @@ -1879,9 +1876,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2d708df4e7140240a16cd6ab0ab65c972d7433ab77819ea693fde9c43811e2a" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" [[package]] name = "httpdate" @@ -1891,9 +1888,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "humantime" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" [[package]] name = "hyper" @@ -2009,9 +2006,9 @@ dependencies = [ [[package]] name = 
"icu_locid_transform_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" +checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" [[package]] name = "icu_normalizer" @@ -2033,9 +2030,9 @@ dependencies = [ [[package]] name = "icu_normalizer_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" +checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" [[package]] name = "icu_properties" @@ -2054,9 +2051,9 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" +checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" [[package]] name = "icu_provider" @@ -2083,7 +2080,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -2109,9 +2106,9 @@ dependencies = [ [[package]] name = "image" -version = "0.25.5" +version = "0.25.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd6f44aed642f18953a158afeb30206f4d50da59fbc66ecb53c66488de73563b" +checksum = "db35664ce6b9810857a38a906215e75a9c879f0696556a39f59c62829710251a" dependencies = [ "bytemuck", "byteorder-lite", @@ -2137,7 +2134,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b77d01e822461baa8409e156015a1d91735549f0f2c17691bd2d996bef238f7f" dependencies = [ "byteorder-lite", - "quick-error 2.0.1", + "quick-error", ] [[package]] @@ -2158,9 +2155,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.7.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" +checksum = "3954d50fe15b02142bf25d3b8bdadb634ec3948f103d04ffe3031bc8fe9d7058" dependencies = [ "equivalent", "hashbrown 0.15.2", @@ -2187,7 +2184,7 @@ checksum = "c34819042dc3d3971c46c2190835914dfbe0c3c13f61449b2997f4e9722dfa60" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -2251,9 +2248,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" @@ -2338,7 +2335,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -2361,9 +2358,9 @@ checksum = "03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8" [[package]] name = "libc" -version = "0.2.170" +version = "0.2.171" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" +checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" [[package]] name = "libfuzzer-sys" @@ -2387,9 +2384,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.21" +version = "1.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" +checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" dependencies = [ "cc", "pkg-config", @@ -2426,9 +2423,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.26" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" [[package]] name = "loole" @@ -2506,12 +2503,6 @@ dependencies = [ "xml5ever", ] -[[package]] -name = "match_cfg" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" - [[package]] name = "matchers" version = "0.1.0" @@ -2566,7 +2557,7 @@ checksum = "bd2209fff77f705b00c737016a48e73733d7fbccb8b007194db148f03561fb70" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -2713,7 +2704,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -2777,9 +2768,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.20.3" +version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "openssl-probe" @@ -2795,7 +2786,7 @@ checksum = "1e32339a5dc40459130b3bd269e9892439f55b33e772d2a9d402a789baaf4e8a" dependencies = [ "futures-core", "futures-sink", - "indexmap 2.7.1", + "indexmap 2.8.0", "js-sys", "once_cell", "pin-project-lite", @@ -2844,7 +2835,7 @@ dependencies = [ "opentelemetry", "ordered-float 4.6.0", "percent-encoding", - "rand", + "rand 0.8.5", "thiserror 1.0.69", "tokio", "tokio-stream", @@ -2921,7 +2912,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ "base64ct", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -2951,7 +2942,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -2986,7 +2977,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ "phf_shared", - "rand", + "rand 0.8.5", ] [[package]] @@ -3000,22 +2991,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.9" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfe2e71e1471fe07709406bf725f710b02927c9c54b2b5b2ec0e8087d97c327d" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.9" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6e859e6e5bd50440ab63c47e3ebabc90f26251f7c73c3d3e837b74a1cc3fa67" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -3042,9 +3033,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "png" @@ -3067,9 +3058,9 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ "zerocopy", ] @@ -3082,28 +3073,28 @@ checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" [[package]] name = "prettyplease" -version = "0.2.29" +version = "0.2.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" +checksum = "5316f57387668042f561aae71480de936257848f9c43ce528e311d89a07cadeb" dependencies = [ "proc-macro2", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] name = "proc-macro-crate" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" +checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" dependencies = [ "toml_edit", ] [[package]] name = "proc-macro2" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" +checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" dependencies = [ "unicode-ident", ] @@ -3116,7 +3107,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", "version_check", "yansi", ] @@ -3137,7 +3128,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a65f2e60fbf1063868558d69c6beacf412dc755f9fc020f514b7955fc914fe30" dependencies = [ "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -3160,7 +3151,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -3199,12 +3190,6 @@ dependencies = [ "bytemuck", ] -[[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - [[package]] name = "quick-error" version = "2.0.1" @@ -3213,37 +3198,39 @@ checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" [[package]] name = "quinn" -version = "0.11.6" +version = "0.11.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" +checksum = "c3bd15a6f2967aef83887dcb9fec0014580467e33720d073560cf015a5683012" dependencies = [ "bytes", + "cfg_aliases", "pin-project-lite", "quinn-proto", "quinn-udp", "rustc-hash 2.1.1", "rustls", "socket2", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "tracing", + "web-time 1.1.0", ] [[package]] name = "quinn-proto" -version = "0.11.9" +version = "0.11.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" +checksum = "b820744eb4dc9b57a3398183639c511b5a26d2ed702cedd3febaa1393caa22cc" dependencies = [ "bytes", - 
"getrandom 0.2.15", - "rand", + "getrandom 0.3.2", + "rand 0.9.0", "ring", "rustc-hash 2.1.1", "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.11", + "thiserror 2.0.12", "tinyvec", "tracing", "web-time 1.1.0", @@ -3251,9 +3238,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e46f3055866785f6b92bc6164b76be02ca8f2eb4b002c0354b28cf4c119e5944" +checksum = "541d0f57c6ec747a90738a52741d3221f7960e8ac2f0ff4b1a63680e033b4ab5" dependencies = [ "cfg_aliases", "libc", @@ -3265,13 +3252,19 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.38" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" dependencies = [ "proc-macro2", ] +[[package]] +name = "r-efi" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" + [[package]] name = "rand" version = "0.8.5" @@ -3279,8 +3272,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", + "zerocopy", ] [[package]] @@ -3290,7 +3294,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", ] [[package]] @@ -3302,6 +3316,15 @@ dependencies = [ "getrandom 0.2.15", ] +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.2", +] + [[package]] name = "rav1e" version = "0.7.1" @@ -3328,8 +3351,8 @@ dependencies = [ "once_cell", "paste", "profiling", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", "simd_helpers", "system-deps", "thiserror 1.0.69", @@ -3346,7 +3369,7 @@ dependencies = [ "avif-serialize", "imgref", "loop9", - "quick-error 2.0.1", + "quick-error", "rav1e", "rayon", "rgb", @@ -3374,9 +3397,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82b568323e98e49e2a0899dcee453dd679fae22d69adf9b11dd508d1549b7e2f" +checksum = "0b8c0c260b63a8219631167be35e6a988e9554dbd323f8bd08439c8ed1302bd1" dependencies = [ "bitflags 2.9.0", ] @@ -3476,12 +3499,11 @@ dependencies = [ [[package]] name = "resolv-conf" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" +checksum = "48375394603e3dd4b2d64371f7148fd8c7baa2680e28741f2cb8d23b59e3d4c4" dependencies = [ - "hostname 0.3.1", - "quick-error 1.2.3", + "hostname", ] [[package]] @@ -3492,9 +3514,9 @@ checksum = "57397d16646700483b67d2dd6511d79318f9d057fdbd21a4066aeac8b41d310a" [[package]] name = "ring" -version = "0.17.12" +version = "0.17.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed9b823fa29b721a59671b41d6b06e66b29e0628e207e8b1c3ceeda701ec928d" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", @@ -3554,7 +3576,7 @@ dependencies = [ "serde", "serde_html_form", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.12", "url", "web-time 1.1.0", ] @@ -3570,11 +3592,11 @@ dependencies = [ "form_urlencoded", "getrandom 0.2.15", "http", - "indexmap 2.7.1", + "indexmap 2.8.0", "js_int", "konst", "percent-encoding", - "rand", + "rand 0.8.5", "regex", "ruma-identifiers-validation", "ruma-macros", @@ -3582,7 +3604,7 @@ dependencies = [ "serde_html_form", "serde_json", "smallvec", - "thiserror 2.0.11", + "thiserror 2.0.12", "time", "tracing", "url", @@ -3597,7 +3619,7 @@ version = "0.28.1" source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "as_variant", - "indexmap 2.7.1", + "indexmap 2.8.0", "js_int", "js_option", "percent-encoding", @@ -3609,7 +3631,7 @@ dependencies = [ "serde", "serde_json", "smallvec", - "thiserror 2.0.11", + "thiserror 2.0.12", "tracing", "url", "web-time 1.1.0", @@ -3629,12 +3651,12 @@ dependencies = [ "js_int", "memchr", "mime", - "rand", + "rand 0.8.5", "ruma-common", "ruma-events", "serde", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.12", "tracing", ] @@ -3644,7 +3666,7 @@ version = "0.9.5" source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "js_int", - "thiserror 2.0.11", + "thiserror 2.0.12", ] [[package]] @@ -3668,7 +3690,7 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn 2.0.98", + "syn 2.0.100", "toml", ] @@ -3692,12 +3714,12 @@ dependencies = [ "base64 0.22.1", "ed25519-dalek", "pkcs8", - "rand", + "rand 0.8.5", "ruma-common", "serde_json", "sha2", "subslice", - "thiserror 2.0.11", + "thiserror 2.0.12", ] [[package]] @@ -3768,9 +3790,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.23" +version = "0.23.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" +checksum = "822ee9188ac4ec04a2f0531e55d035fb2de73f18b41a63c70c2712503b6fb13c" dependencies = [ "aws-lc-rs", "log", @@ -3814,9 +3836,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.8" +version = "0.103.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" +checksum = "fef8b8769aaccf73098557a87cd1816b4f9c7c16811c9c77142aa695c16f2c03" dependencies = [ "aws-lc-rs", "ring", @@ -3826,9 +3848,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" +checksum = 
"eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" [[package]] name = "rustyline-async" @@ -3840,16 +3862,16 @@ dependencies = [ "futures-util", "pin-project", "thingbuf", - "thiserror 2.0.11", + "thiserror 2.0.12", "unicode-segmentation", "unicode-width 0.2.0", ] [[package]] name = "ryu" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "sanitize-filename" @@ -3909,9 +3931,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.25" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" [[package]] name = "sentry" @@ -3953,7 +3975,7 @@ version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eba8754ec3b9279e00aa6d64916f211d44202370a1699afde1db2c16cbada089" dependencies = [ - "hostname 0.4.0", + "hostname", "libc", "os_info", "rustc_version", @@ -3968,7 +3990,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9f8b6dcd4fbae1e3e22b447f32670360b27e31b62ab040f7fb04e0f80c04d92" dependencies = [ "once_cell", - "rand", + "rand 0.8.5", "sentry-types", "serde", "serde_json", @@ -4039,7 +4061,7 @@ checksum = "a71ed3a389948a6a6d92b98e997a2723ca22f09660c5a7b7388ecd509a70a527" dependencies = [ "debugid", "hex", - "rand", + "rand 0.8.5", "serde", "serde_json", "thiserror 1.0.69", @@ -4050,22 +4072,22 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.218" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.218" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -4075,7 +4097,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4" dependencies = [ "form_urlencoded", - "indexmap 2.7.1", + "indexmap 2.8.0", "itoa", "ryu", "serde", @@ -4083,9 +4105,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.139" +version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44f86c3acccc9c65b153fe1b85a3be07fe5515274ec9f0653b4a0875731c72a6" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" dependencies = [ "itoa", "memchr", @@ -4095,9 +4117,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" +checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a" dependencies = [ "itoa", "serde", @@ -4140,7 +4162,7 @@ version = "0.9.34+deprecated" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.7.1", + "indexmap 2.8.0", "itoa", "ryu", "serde", @@ -4220,7 +4242,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -4306,9 +4328,9 @@ checksum = "f42444fea5b87a39db4218d9422087e66a85d0e7a0963a439b07bcdf91804006" [[package]] name = "string_cache" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "938d512196766101d333398efde81bc1f37b00cb42c2f8350e5df639f040bbbe" +checksum = "bf776ba3fa74f83bf4b63c3dcbbf82173db2632ed8452cb2d891d33f459de70f" dependencies = [ "new_debug_unreachable", "parking_lot", @@ -4357,9 +4379,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.98" +version = "2.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" +checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" dependencies = [ "proc-macro2", "quote", @@ -4383,7 +4405,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -4453,11 +4475,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" dependencies = [ - "thiserror-impl 2.0.11", + "thiserror-impl 2.0.12", ] [[package]] @@ -4468,18 +4490,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] name = "thiserror-impl" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -4555,9 +4577,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.37" +version = "0.3.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" +checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" dependencies = [ "deranged", "itoa", @@ -4570,15 +4592,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" +checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" [[package]] name = "time-macros" -version = "0.2.19" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" +checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" dependencies = [ "num-conv", "time-core", @@ -4596,9 +4618,9 @@ dependencies = [ [[package]] name = 
"tinyvec" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" +checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" dependencies = [ "tinyvec_macros", ] @@ -4611,9 +4633,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.43.0" +version = "1.44.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" +checksum = "f382da615b842244d4b8738c82ed1275e6c5dd90c459a30941cd07080b06c91a" dependencies = [ "backtrace", "bytes", @@ -4635,7 +4657,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -4685,9 +4707,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.13" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" +checksum = "6b9590b93e6fcc1739458317cccd391ad3955e2bde8913edf6f95f9e65a8f034" dependencies = [ "bytes", "futures-core", @@ -4723,7 +4745,7 @@ version = "0.22.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" dependencies = [ - "indexmap 2.7.1", + "indexmap 2.8.0", "serde", "serde_spanned", "toml_datetime", @@ -4771,7 +4793,7 @@ dependencies = [ "indexmap 1.9.3", "pin-project", "pin-project-lite", - "rand", + "rand 0.8.5", "slab", "tokio", "tokio-util", @@ -4835,7 +4857,6 @@ name = "tracing" version = "0.1.41" source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ - "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -4848,7 +4869,7 @@ source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -4969,9 +4990,9 @@ checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00e2473a93778eb0bad35909dff6a10d28e63f792f16ed15e404fca9d5eeedbe" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" [[package]] name = "unicode-segmentation" @@ -5056,11 +5077,11 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "uuid" -version = "1.15.1" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0f540e3240398cce6128b64ba83fdbdd86129c16a3aa1a3a252efd66eb3d587" +checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" dependencies = [ - "getrandom 0.3.1", + "getrandom 0.3.2", "serde", ] @@ -5116,9 +5137,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasi" -version = "0.13.3+wasi-0.2.2" +version = "0.14.2+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" dependencies = [ "wit-bindgen-rt", 
] @@ -5145,7 +5166,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", "wasm-bindgen-shared", ] @@ -5180,7 +5201,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5265,9 +5286,9 @@ dependencies = [ [[package]] name = "widestring" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" +checksum = "dd7cf3379ca1aac9eea11fba24fd7e315d621f8dfe35c8d7d2be8b793726e07d" [[package]] name = "wildmatch" @@ -5496,9 +5517,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7f4ea97f6f78012141bcdb6a216b2609f0979ada50b20ca5b52dde2eac2bb1" +checksum = "0e97b544156e9bebe1a0ffbc03484fc1ffe3100cbce3ffb17eac35f7cdd7ab36" dependencies = [ "memchr", ] @@ -5515,9 +5536,9 @@ dependencies = [ [[package]] name = "wit-bindgen-rt" -version = "0.33.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ "bitflags 2.9.0", ] @@ -5571,29 +5592,28 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", "synstructure", ] [[package]] name = "zerocopy" -version = "0.7.35" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879" dependencies = [ - "byteorder", "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.35" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -5613,7 +5633,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", "synstructure", ] @@ -5642,7 +5662,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -5656,19 +5676,20 @@ dependencies = [ [[package]] name = "zstd-safe" -version = "7.2.3" +version = "7.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3051792fbdc2e1e143244dc28c60f73d8470e93f3f9cbd0ead44da5ed802722" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.14+zstd.1.5.7" +version = "2.0.15+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb060d4926e4ac3a3ad15d864e99ceb5f343c6b34f5bd6d81ae6ed417311be5" +checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" dependencies = [ + "bindgen 0.71.1", "cc", "pkg-config", ] 
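The lockfile entries above record a routine dependency refresh; the Cargo.toml hunk that follows bumps the workspace tokio requirement from 1.42.0 to 1.44.1 to match the tokio entry already updated in Cargo.lock. A sketch of how such a refresh is typically produced with stock cargo tooling — the exact invocation is an assumption and is not recorded anywhere in this patch:

    # pin tokio to the exact version required by the workspace manifest
    cargo update -p tokio --precise 1.44.1
    # then refresh the remaining compatible bumps recorded in Cargo.lock above
    cargo update
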
diff --git a/Cargo.toml b/Cargo.toml index 8b49c3b8..ab7a935c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -242,7 +242,7 @@ default-features = false features = ["std", "async-await"] [workspace.dependencies.tokio] -version = "1.42.0" +version = "1.44.1" default-features = false features = [ "fs", From 5bf5afaec83d4e68cbfd5220cd760a7940e7dda5 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 30 Mar 2025 01:54:55 +0000 Subject: [PATCH 283/328] instrument tokio before/after poll hooks Signed-off-by: Jason Volk --- src/main/runtime.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/main/runtime.rs b/src/main/runtime.rs index b1657289..920476db 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -61,6 +61,8 @@ pub(super) fn new(args: &Args) -> Result { #[cfg(tokio_unstable)] builder .on_task_spawn(task_spawn) + .on_before_task_poll(task_enter) + .on_after_task_poll(task_leave) .on_task_terminate(task_terminate); #[cfg(tokio_unstable)] @@ -215,3 +217,25 @@ fn task_spawn(meta: &tokio::runtime::TaskMeta<'_>) {} ), )] fn task_terminate(meta: &tokio::runtime::TaskMeta<'_>) {} + +#[cfg(tokio_unstable)] +#[tracing::instrument( + name = "enter", + level = "trace", + skip_all, + fields( + id = %meta.id() + ), +)] +fn task_enter(meta: &tokio::runtime::TaskMeta<'_>) {} + +#[cfg(tokio_unstable)] +#[tracing::instrument( + name = "leave", + level = "trace", + skip_all, + fields( + id = %meta.id() + ), +)] +fn task_leave(meta: &tokio::runtime::TaskMeta<'_>) {} From dc6e9e74d9e9fb0bbdddb35c6b00d16544860095 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 30 Mar 2025 01:56:00 +0000 Subject: [PATCH 284/328] add spans for for jemalloc mallctl points Signed-off-by: Jason Volk --- src/core/alloc/je.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 51caf3a3..2424e99c 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -335,6 +335,12 @@ where Ok(res) } +#[tracing::instrument( + name = "get", + level = "trace" + skip_all, + fields(?key) +)] fn get(key: &Key) -> Result where T: Copy + Debug, @@ -346,6 +352,12 @@ where unsafe { mallctl::raw::read_mib(key.as_slice()) }.map_err(map_err) } +#[tracing::instrument( + name = "xchg", + level = "trace" + skip_all, + fields(?key, ?val) +)] fn xchg(key: &Key, val: T) -> Result where T: Copy + Debug, From bee4c6255a815a9c7bc577d7afa66f69e26ea735 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 30 Mar 2025 21:19:47 +0000 Subject: [PATCH 285/328] reorg PduEvent strip tools and callsites Signed-off-by: Jason Volk --- src/api/client/context.rs | 6 +- src/api/client/message.rs | 2 +- src/api/client/room/event.rs | 2 +- src/api/client/room/initial_sync.rs | 2 +- src/api/client/search.rs | 2 +- src/api/client/sync/v3.rs | 8 +- src/api/client/sync/v4.rs | 4 +- src/api/client/sync/v5.rs | 4 +- src/api/client/threads.rs | 2 +- src/core/pdu/strip.rs | 182 ++++++++++++++++++++-------- src/service/rooms/spaces/mod.rs | 5 +- src/service/rooms/state/mod.rs | 2 +- src/service/sending/sender.rs | 2 +- 13 files changed, 152 insertions(+), 71 deletions(-) diff --git a/src/api/client/context.rs b/src/api/client/context.rs index b109711e..1dda7b53 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -182,7 +182,7 @@ pub(crate) async fn get_context_route( .await; Ok(get_context::v3::Response { - event: base_event.map(at!(1)).as_ref().map(PduEvent::to_room_event), + event: base_event.map(at!(1)).map(PduEvent::into_room_event), start: events_before 
.last() @@ -201,13 +201,13 @@ pub(crate) async fn get_context_route( events_before: events_before .into_iter() .map(at!(1)) - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .collect(), events_after: events_after .into_iter() .map(at!(1)) - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .collect(), state, diff --git a/src/api/client/message.rs b/src/api/client/message.rs index c755cc47..03c7335a 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -157,7 +157,7 @@ pub(crate) async fn get_message_events_route( let chunk = events .into_iter() .map(at!(1)) - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .collect(); Ok(get_message_events::v3::Response { diff --git a/src/api/client/room/event.rs b/src/api/client/room/event.rs index 84b591cd..2b115b5c 100644 --- a/src/api/client/room/event.rs +++ b/src/api/client/room/event.rs @@ -40,5 +40,5 @@ pub(crate) async fn get_room_event_route( event.add_age().ok(); - Ok(get_room_event::v3::Response { event: event.to_room_event() }) + Ok(get_room_event::v3::Response { event: event.into_room_event() }) } diff --git a/src/api/client/room/initial_sync.rs b/src/api/client/room/initial_sync.rs index e4c76ae0..ca63610b 100644 --- a/src/api/client/room/initial_sync.rs +++ b/src/api/client/room/initial_sync.rs @@ -55,7 +55,7 @@ pub(crate) async fn room_initial_sync_route( chunk: events .into_iter() .map(at!(1)) - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .collect(), }; diff --git a/src/api/client/search.rs b/src/api/client/search.rs index f3366843..d66df881 100644 --- a/src/api/client/search.rs +++ b/src/api/client/search.rs @@ -143,7 +143,7 @@ async fn category_room_events( .map(at!(2)) .flatten() .stream() - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .map(|result| SearchResult { rank: None, result: Some(result), diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 70c4c6a7..a8248f95 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -461,7 +461,7 @@ async fn handle_left_room( events: Vec::new(), }, state: RoomState { - events: vec![event.to_sync_state_event()], + events: vec![event.into_sync_state_event()], }, })); } @@ -546,7 +546,7 @@ async fn handle_left_room( continue; } - left_state_events.push(pdu.to_sync_state_event()); + left_state_events.push(pdu.into_sync_state_event()); } } @@ -865,8 +865,8 @@ async fn load_joined_room( }, state: RoomState { events: state_events - .iter() - .map(PduEvent::to_sync_state_event) + .into_iter() + .map(PduEvent::into_sync_state_event) .collect(), }, ephemeral: Ephemeral { events: edus }, diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 5fdcbab8..7e902973 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -6,7 +6,7 @@ use std::{ use axum::extract::State; use conduwuit::{ - Error, PduCount, Result, debug, error, extract_variant, + Error, PduCount, PduEvent, Result, debug, error, extract_variant, utils::{ BoolExt, IterStream, ReadyExt, TryFutureExtExt, math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, @@ -634,7 +634,7 @@ pub(crate) async fn sync_events_v4_route( .state_accessor .room_state_get(room_id, &state.0, &state.1) .await - .map(|s| s.to_sync_state_event()) + .map(PduEvent::into_sync_state_event) .ok() }) .collect() diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index b4c1b815..48b41b21 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -6,7 +6,7 
@@ use std::{ use axum::extract::State; use conduwuit::{ - Error, Result, TypeStateKey, debug, error, extract_variant, trace, + Error, PduEvent, Result, TypeStateKey, debug, error, extract_variant, trace, utils::{ BoolExt, IterStream, ReadyExt, TryFutureExtExt, math::{ruma_from_usize, usize_from_ruma}, @@ -507,7 +507,7 @@ async fn process_rooms( .state_accessor .room_state_get(room_id, &state.0, &state.1) .await - .map(|s| s.to_sync_state_event()) + .map(PduEvent::into_sync_state_event) .ok() }) .collect() diff --git a/src/api/client/threads.rs b/src/api/client/threads.rs index d25e52c0..00bfe553 100644 --- a/src/api/client/threads.rs +++ b/src/api/client/threads.rs @@ -53,7 +53,7 @@ pub(crate) async fn get_threads_route( chunk: threads .into_iter() .map(at!(1)) - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .collect(), }) } diff --git a/src/core/pdu/strip.rs b/src/core/pdu/strip.rs index 4e7c5b83..3683caaa 100644 --- a/src/core/pdu/strip.rs +++ b/src/core/pdu/strip.rs @@ -10,35 +10,18 @@ use serde_json::{json, value::Value as JsonValue}; use crate::implement; -#[must_use] -#[implement(super::Pdu)] -pub fn to_sync_room_event(&self) -> Raw { - let (redacts, content) = self.copy_redacts(); - let mut json = json!({ - "content": content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - if let Some(state_key) = &self.state_key { - json["state_key"] = json!(state_key); - } - if let Some(redacts) = &redacts { - json["redacts"] = json!(redacts); - } - - serde_json::from_value(json).expect("Raw::from_value always works") -} - /// This only works for events that are also AnyRoomEvents. #[must_use] #[implement(super::Pdu)] -pub fn to_any_event(&self) -> Raw { +pub fn into_any_event(self) -> Raw { + serde_json::from_value(self.into_any_event_value()).expect("Raw::from_value always works") +} + +/// This only works for events that are also AnyRoomEvents. 
+#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_any_event_value(self) -> JsonValue { let (redacts, content) = self.copy_redacts(); let mut json = json!({ "content": content, @@ -59,12 +42,24 @@ pub fn to_any_event(&self) -> Raw { json["redacts"] = json!(redacts); } - serde_json::from_value(json).expect("Raw::from_value always works") + json } -#[must_use] #[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_room_event(self) -> Raw { self.to_room_event() } + +#[implement(super::Pdu)] +#[must_use] pub fn to_room_event(&self) -> Raw { + serde_json::from_value(self.to_room_event_value()).expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn to_room_event_value(&self) -> JsonValue { let (redacts, content) = self.copy_redacts(); let mut json = json!({ "content": content, @@ -85,12 +80,25 @@ pub fn to_room_event(&self) -> Raw { json["redacts"] = json!(redacts); } - serde_json::from_value(json).expect("Raw::from_value always works") + json } -#[must_use] #[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_message_like_event(self) -> Raw { self.to_message_like_event() } + +#[implement(super::Pdu)] +#[must_use] pub fn to_message_like_event(&self) -> Raw { + serde_json::from_value(self.to_message_like_event_value()) + .expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn to_message_like_event_value(&self) -> JsonValue { let (redacts, content) = self.copy_redacts(); let mut json = json!({ "content": content, @@ -111,11 +119,55 @@ pub fn to_message_like_event(&self) -> Raw { json["redacts"] = json!(redacts); } - serde_json::from_value(json).expect("Raw::from_value always works") + json } -#[must_use] #[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_sync_room_event(self) -> Raw { self.to_sync_room_event() } + +#[implement(super::Pdu)] +#[must_use] +pub fn to_sync_room_event(&self) -> Raw { + serde_json::from_value(self.to_sync_room_event_value()).expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn to_sync_room_event_value(&self) -> JsonValue { + let (redacts, content) = self.copy_redacts(); + let mut json = json!({ + "content": content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + }); + + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + if let Some(state_key) = &self.state_key { + json["state_key"] = json!(state_key); + } + if let Some(redacts) = &redacts { + json["redacts"] = json!(redacts); + } + + json +} + +#[implement(super::Pdu)] +#[must_use] +pub fn into_state_event(self) -> Raw { + serde_json::from_value(self.into_state_event_value()).expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] pub fn into_state_event_value(self) -> JsonValue { let mut json = json!({ "content": self.content, @@ -134,15 +186,17 @@ pub fn into_state_event_value(self) -> JsonValue { json } -#[must_use] #[implement(super::Pdu)] -pub fn into_state_event(self) -> Raw { - serde_json::from_value(self.into_state_event_value()).expect("Raw::from_value always works") +#[must_use] +pub fn into_sync_state_event(self) -> Raw { + serde_json::from_value(self.into_sync_state_event_value()) + .expect("Raw::from_value always works") } -#[must_use] #[implement(super::Pdu)] -pub fn to_sync_state_event(&self) -> Raw { +#[must_use] +#[inline] +pub fn into_sync_state_event_value(self) -> JsonValue { 
let mut json = json!({ "content": self.content, "type": self.kind, @@ -156,39 +210,65 @@ pub fn to_sync_state_event(&self) -> Raw { json["unsigned"] = json!(unsigned); } - serde_json::from_value(json).expect("Raw::from_value always works") + json } -#[must_use] #[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_stripped_state_event(self) -> Raw { + self.to_stripped_state_event() +} + +#[implement(super::Pdu)] +#[must_use] pub fn to_stripped_state_event(&self) -> Raw { - let json = json!({ + serde_json::from_value(self.to_stripped_state_event_value()) + .expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn to_stripped_state_event_value(&self) -> JsonValue { + json!({ "content": self.content, "type": self.kind, "sender": self.sender, "state_key": self.state_key, - }); - - serde_json::from_value(json).expect("Raw::from_value always works") + }) } -#[must_use] #[implement(super::Pdu)] -pub fn to_stripped_spacechild_state_event(&self) -> Raw { - let json = json!({ +#[must_use] +pub fn into_stripped_spacechild_state_event(self) -> Raw { + serde_json::from_value(self.into_stripped_spacechild_state_event_value()) + .expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_stripped_spacechild_state_event_value(self) -> JsonValue { + json!({ "content": self.content, "type": self.kind, "sender": self.sender, "state_key": self.state_key, "origin_server_ts": self.origin_server_ts, - }); - - serde_json::from_value(json).expect("Raw::from_value always works") + }) } -#[must_use] #[implement(super::Pdu)] +#[must_use] pub fn into_member_event(self) -> Raw> { + serde_json::from_value(self.into_member_event_value()).expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_member_event_value(self) -> JsonValue { let mut json = json!({ "content": self.content, "type": self.kind, @@ -204,5 +284,5 @@ pub fn into_member_event(self) -> Raw> { json["unsigned"] = json!(unsigned); } - serde_json::from_value(json).expect("Raw::from_value always works") + json } diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index af597445..a10fe7fc 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -6,7 +6,7 @@ use std::{fmt::Write, sync::Arc}; use async_trait::async_trait; use conduwuit::{ - Err, Error, Result, implement, + Err, Error, PduEvent, Result, implement, utils::{ IterStream, future::BoolExt, @@ -267,11 +267,12 @@ fn get_stripped_space_child_events<'a>( } if RoomId::parse(&state_key).is_ok() { - return Some(pdu.to_stripped_spacechild_state_event()); + return Some(pdu); } None }) + .map(PduEvent::into_stripped_spacechild_state_event) } /// Gets the summary of a space using either local or remote (federation) diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 56955497..803ba9d7 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -341,7 +341,7 @@ impl Service { .await .into_iter() .filter_map(Result::ok) - .map(|e| e.to_stripped_state_event()) + .map(PduEvent::into_stripped_state_event) .chain(once(event.to_stripped_state_event())) .collect() } diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 616f0846..fab02f6b 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -697,7 +697,7 @@ impl Service { match event { | SendingEvent::Pdu(pdu_id) => { if let Ok(pdu) = 
self.services.timeline.get_pdu_from_id(pdu_id).await { - pdu_jsons.push(pdu.to_room_event()); + pdu_jsons.push(pdu.into_room_event()); } }, | SendingEvent::Edu(edu) => From db99d3a001841db61bb79544912099b7346456b4 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 30 Mar 2025 01:58:14 +0000 Subject: [PATCH 286/328] remove recently-made-unnecessary unsafe block Signed-off-by: Jason Volk --- src/core/utils/sys/storage.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/core/utils/sys/storage.rs b/src/core/utils/sys/storage.rs index b11df7bb..452b04b2 100644 --- a/src/core/utils/sys/storage.rs +++ b/src/core/utils/sys/storage.rs @@ -123,10 +123,7 @@ pub fn dev_from_path(path: &Path) -> Result<(dev_t, dev_t)> { let stat = fs::metadata(path)?; let dev_id = stat.dev().try_into()?; - - // SAFETY: These functions may not need to be marked as unsafe. - // see: https://github.com/rust-lang/libc/issues/3759 - let (major, minor) = unsafe { (libc::major(dev_id), libc::minor(dev_id)) }; + let (major, minor) = (libc::major(dev_id), libc::minor(dev_id)); Ok((major.try_into()?, minor.try_into()?)) } From d60920c72890b7ebf70d47bfc37f4477fa9716aa Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 30 Mar 2025 22:59:29 +0000 Subject: [PATCH 287/328] workaround some large type name length issues Signed-off-by: Jason Volk --- src/api/mod.rs | 1 + src/core/error/err.rs | 1 + src/core/mod.rs | 2 ++ src/core/state_res/mod.rs | 4 ---- src/core/utils/mod.rs | 4 ++++ src/database/mod.rs | 2 ++ src/main/main.rs | 2 ++ src/router/mod.rs | 2 ++ src/service/mod.rs | 1 + src/service/rooms/event_handler/mod.rs | 15 +++--------- .../rooms/event_handler/resolve_state.rs | 6 +++-- src/service/rooms/spaces/mod.rs | 24 ++++++++++--------- .../rooms/state_accessor/room_state.rs | 4 ++-- src/service/rooms/state_accessor/state.rs | 5 +++- 14 files changed, 41 insertions(+), 32 deletions(-) diff --git a/src/api/mod.rs b/src/api/mod.rs index 8df17a59..090cf897 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1,3 +1,4 @@ +#![type_length_limit = "16384"] //TODO: reduce me #![allow(clippy::toplevel_ref_arg)] pub mod client; diff --git a/src/core/error/err.rs b/src/core/error/err.rs index 0962c4ee..9c24d3b4 100644 --- a/src/core/error/err.rs +++ b/src/core/error/err.rs @@ -136,6 +136,7 @@ macro_rules! err_log { } #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! 
err_lev { (debug_warn) => { if $crate::debug::logging() { diff --git a/src/core/mod.rs b/src/core/mod.rs index cd56774a..80ebbdcb 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -1,3 +1,5 @@ +#![type_length_limit = "12288"] + pub mod alloc; pub mod config; pub mod debug; diff --git a/src/core/state_res/mod.rs b/src/core/state_res/mod.rs index 2020d65c..1db92e59 100644 --- a/src/core/state_res/mod.rs +++ b/src/core/state_res/mod.rs @@ -149,7 +149,6 @@ where &event_fetch, parallel_fetches, ) - .boxed() .await?; debug!(count = sorted_control_levels.len(), "power events"); @@ -164,7 +163,6 @@ where &event_fetch, parallel_fetches, ) - .boxed() .await?; debug!(count = resolved_control.len(), "resolved power events"); @@ -192,7 +190,6 @@ where let sorted_left_events = mainline_sort(&events_to_resolve, power_event.cloned(), &event_fetch, parallel_fetches) - .boxed() .await?; trace!(list = ?sorted_left_events, "events left, sorted"); @@ -204,7 +201,6 @@ where &event_fetch, parallel_fetches, ) - .boxed() .await?; // Add unconflicted state to the resolved state diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 53460c59..7593990c 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -173,6 +173,7 @@ macro_rules! is_equal { /// Functor for |x| *x.$i #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! deref_at { ($idx:tt) => { |t| *t.$idx @@ -181,6 +182,7 @@ macro_rules! deref_at { /// Functor for |ref x| x.$i #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! ref_at { ($idx:tt) => { |ref t| &t.$idx @@ -189,6 +191,7 @@ macro_rules! ref_at { /// Functor for |&x| x.$i #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! val_at { ($idx:tt) => { |&t| t.$idx @@ -197,6 +200,7 @@ macro_rules! val_at { /// Functor for |x| x.$i #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! 
at { ($idx:tt) => { |t| t.$idx diff --git a/src/database/mod.rs b/src/database/mod.rs index 1262a79a..ffcefee9 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -1,3 +1,5 @@ +#![type_length_limit = "3072"] + extern crate conduwuit_core as conduwuit; extern crate rust_rocksdb as rocksdb; diff --git a/src/main/main.rs b/src/main/main.rs index fbc63b17..52f40384 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -1,3 +1,5 @@ +#![type_length_limit = "49152"] //TODO: reduce me + pub(crate) mod clap; mod logging; mod mods; diff --git a/src/router/mod.rs b/src/router/mod.rs index f64dcb67..7038c5df 100644 --- a/src/router/mod.rs +++ b/src/router/mod.rs @@ -1,3 +1,5 @@ +#![type_length_limit = "32768"] //TODO: reduce me + mod layers; mod request; mod router; diff --git a/src/service/mod.rs b/src/service/mod.rs index 0bde0255..8f4a84b0 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,3 +1,4 @@ +#![type_length_limit = "2048"] #![allow(refining_impl_trait)] mod manager; diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 4944f3ec..45675da8 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -18,11 +18,7 @@ use std::{ }; use async_trait::async_trait; -use conduwuit::{ - Err, PduEvent, Result, RoomVersion, Server, - utils::{MutexMap, TryFutureExtExt}, -}; -use futures::TryFutureExt; +use conduwuit::{Err, PduEvent, Result, RoomVersion, Server, utils::MutexMap}; use ruma::{ OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, events::room::create::RoomCreateEventContent, @@ -103,13 +99,8 @@ impl Service { self.services.timeline.pdu_exists(&event_id).await } - async fn event_fetch(&self, event_id: OwnedEventId) -> Option> { - self.services - .timeline - .get_pdu(&event_id) - .map_ok(Arc::new) - .ok() - .await + async fn event_fetch(&self, event_id: OwnedEventId) -> Option { + self.services.timeline.get_pdu(&event_id).await.ok() } } diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 9033c3a8..b3a7a71b 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -110,12 +110,14 @@ pub async fn state_resolution<'a, StateSets>( where StateSets: Iterator> + Clone + Send, { + let event_fetch = |event_id| self.event_fetch(event_id); + let event_exists = |event_id| self.event_exists(event_id); state_res::resolve( room_version, state_sets, auth_chain_sets, - &|event_id| self.event_fetch(event_id), - &|event_id| self.event_exists(event_id), + &event_fetch, + &event_exists, automatic_width(), ) .map_err(|e| err!(error!("State resolution failed: {e:?}"))) diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index a10fe7fc..da52e095 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -9,7 +9,7 @@ use conduwuit::{ Err, Error, PduEvent, Result, implement, utils::{ IterStream, - future::BoolExt, + future::{BoolExt, TryExtExt}, math::usize_from_f64, stream::{BroadbandExt, ReadyExt}, }, @@ -36,7 +36,7 @@ use ruma::{ use tokio::sync::{Mutex, MutexGuard}; pub use self::pagination_token::PaginationToken; -use crate::{Dep, conduwuit::utils::TryFutureExtExt, rooms, sending}; +use crate::{Dep, rooms, sending}; pub struct Service { services: Services, @@ -141,7 +141,8 @@ pub async fn get_summary_and_children_local( } let children_pdus: Vec<_> = self - .get_stripped_space_child_events(current_room) + 
.get_space_child_events(current_room) + .map(PduEvent::into_stripped_spacechild_state_event) .collect() .await; @@ -235,10 +236,10 @@ async fn get_summary_and_children_federation( /// Simply returns the stripped m.space.child events of a room #[implement(Service)] -fn get_stripped_space_child_events<'a>( +fn get_space_child_events<'a>( &'a self, room_id: &'a RoomId, -) -> impl Stream> + Send + 'a { +) -> impl Stream + Send + 'a { self.services .state .get_room_shortstatehash(room_id) @@ -246,6 +247,7 @@ fn get_stripped_space_child_events<'a>( self.services .state_accessor .state_keys_with_ids(current_shortstatehash, &StateEventType::SpaceChild) + .boxed() }) .map(Result::into_iter) .map(IterStream::stream) @@ -256,8 +258,8 @@ fn get_stripped_space_child_events<'a>( .timeline .get_pdu(&event_id) .map_ok(move |pdu| (state_key, pdu)) - .await .ok() + .await }) .ready_filter_map(move |(state_key, pdu)| { if let Ok(content) = pdu.get_content::() { @@ -266,13 +268,12 @@ fn get_stripped_space_child_events<'a>( } } - if RoomId::parse(&state_key).is_ok() { - return Some(pdu); + if RoomId::parse(&state_key).is_err() { + return None; } - None + Some(pdu) }) - .map(PduEvent::into_stripped_spacechild_state_event) } /// Gets the summary of a space using either local or remote (federation) @@ -501,7 +502,8 @@ async fn cache_insert( allowed_room_ids, room_id: room_id.clone(), children_state: self - .get_stripped_space_child_events(&room_id) + .get_space_child_events(&room_id) + .map(PduEvent::into_stripped_spacechild_state_event) .collect() .await, }; diff --git a/src/service/rooms/state_accessor/room_state.rs b/src/service/rooms/state_accessor/room_state.rs index ff26b33a..642cd5d2 100644 --- a/src/service/rooms/state_accessor/room_state.rs +++ b/src/service/rooms/state_accessor/room_state.rs @@ -31,7 +31,7 @@ pub fn room_state_full<'a>( self.services .state .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok)) + .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok).boxed()) .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) .try_flatten_stream() } @@ -46,7 +46,7 @@ pub fn room_state_full_pdus<'a>( self.services .state .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok)) + .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok).boxed()) .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) .try_flatten_stream() } diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs index 02a6194e..8f2dd76f 100644 --- a/src/service/rooms/state_accessor/state.rs +++ b/src/service/rooms/state_accessor/state.rs @@ -235,6 +235,7 @@ pub fn state_keys_with_shortids<'a>( .ignore_err() .unzip() .map(|(ssks, sids): (Vec, Vec)| (ssks, sids)) + .boxed() .shared(); let shortstatekeys = short_ids @@ -390,8 +391,10 @@ pub fn state_full_shortids( .map(parse_compressed_state_event) .collect() }) - .map_ok(|vec: Vec<_>| vec.into_iter().try_stream()) + .map_ok(Vec::into_iter) + .map_ok(IterStream::try_stream) .try_flatten_stream() + .boxed() } #[implement(super::Service)] From d3b65af6163baed6e6f55922235ccc9e9f5a4e98 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 31 Mar 2025 02:28:01 +0000 Subject: [PATCH 288/328] remove several services.globals config wrappers Signed-off-by: Jason Volk --- src/api/client/account.rs | 10 ++++----- src/api/client/membership.rs | 4 ++-- src/api/client/presence.rs | 42 
++++++++++------------------------- src/api/client/profile.rs | 4 ++-- src/api/client/read_marker.rs | 4 ++-- src/api/client/room/create.rs | 2 +- src/api/client/send.rs | 3 +-- src/api/client/sync/v3.rs | 6 ++--- src/api/client/typing.rs | 2 +- src/api/client/unstable.rs | 8 +++---- src/api/server/invite.rs | 3 +-- src/service/globals/mod.rs | 30 ------------------------- 12 files changed, 34 insertions(+), 84 deletions(-) diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 5dd622d7..efa8b142 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -146,7 +146,7 @@ pub(crate) async fn register_route( let is_guest = body.kind == RegistrationKind::Guest; let emergency_mode_enabled = services.config.emergency_password.is_some(); - if !services.globals.allow_registration() && body.appservice_info.is_none() { + if !services.config.allow_registration && body.appservice_info.is_none() { match (body.username.as_ref(), body.initial_device_display_name.as_ref()) { | (Some(username), Some(device_display_name)) => { info!(%is_guest, user = %username, device_name = %device_display_name, "Rejecting registration attempt as registration is disabled"); @@ -166,8 +166,8 @@ pub(crate) async fn register_route( } if is_guest - && (!services.globals.allow_guest_registration() - || (services.globals.allow_registration() + && (!services.config.allow_guest_registration + || (services.config.allow_registration && services.globals.registration_token.is_some())) { info!( @@ -441,7 +441,7 @@ pub(crate) async fn register_route( } // log in conduit admin channel if a guest registered - if body.appservice_info.is_none() && is_guest && services.globals.log_guest_registrations() { + if body.appservice_info.is_none() && is_guest && services.config.log_guest_registrations { debug_info!("New guest user \"{user_id}\" registered on this server."); if !device_display_name.is_empty() { @@ -490,7 +490,7 @@ pub(crate) async fn register_route( if body.appservice_info.is_none() && !services.server.config.auto_join_rooms.is_empty() - && (services.globals.allow_guests_auto_join_rooms() || !is_guest) + && (services.config.allow_guests_auto_join_rooms || !is_guest) { for room in &services.server.config.auto_join_rooms { let Ok(room_id) = services.rooms.alias.resolve(room).await else { diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 11395e83..315a363c 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -491,7 +491,7 @@ pub(crate) async fn invite_user_route( ) -> Result { let sender_user = body.sender_user(); - if !services.users.is_admin(sender_user).await && services.globals.block_non_admin_invites() { + if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites { info!( "User {sender_user} is not an admin and attempted to send an invite to room {}", &body.room_id @@ -1628,7 +1628,7 @@ pub(crate) async fn invite_helper( reason: Option, is_direct: bool, ) -> Result { - if !services.users.is_admin(sender_user).await && services.globals.block_non_admin_invites() { + if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites { info!( "User {sender_user} is not an admin and attempted to send an invite to room \ {room_id}" diff --git a/src/api/client/presence.rs b/src/api/client/presence.rs index 9b41a721..548e5cce 100644 --- a/src/api/client/presence.rs +++ b/src/api/client/presence.rs @@ -1,12 +1,10 @@ use std::time::Duration; use axum::extract::State; -use ruma::api::client::{ - 
error::ErrorKind, - presence::{get_presence, set_presence}, -}; +use conduwuit::{Err, Result}; +use ruma::api::client::presence::{get_presence, set_presence}; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `PUT /_matrix/client/r0/presence/{userId}/status` /// @@ -15,24 +13,17 @@ pub(crate) async fn set_presence_route( State(services): State, body: Ruma, ) -> Result { - if !services.globals.allow_local_presence() { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Presence is disabled on this server", - )); + if !services.config.allow_local_presence { + return Err!(Request(Forbidden("Presence is disabled on this server"))); } - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if sender_user != &body.user_id && body.appservice_info.is_none() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Not allowed to set presence of other users", - )); + if body.sender_user() != body.user_id && body.appservice_info.is_none() { + return Err!(Request(InvalidParam("Not allowed to set presence of other users"))); } services .presence - .set_presence(sender_user, &body.presence, None, None, body.status_msg.clone()) + .set_presence(body.sender_user(), &body.presence, None, None, body.status_msg.clone()) .await?; Ok(set_presence::v3::Response {}) @@ -47,21 +38,15 @@ pub(crate) async fn get_presence_route( State(services): State, body: Ruma, ) -> Result { - if !services.globals.allow_local_presence() { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Presence is disabled on this server", - )); + if !services.config.allow_local_presence { + return Err!(Request(Forbidden("Presence is disabled on this server",))); } - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut presence_event = None; - let has_shared_rooms = services .rooms .state_cache - .user_sees_user(sender_user, &body.user_id) + .user_sees_user(body.sender_user(), &body.user_id) .await; if has_shared_rooms { @@ -99,9 +84,6 @@ pub(crate) async fn get_presence_route( presence: presence.content.presence, }) }, - | _ => Err(Error::BadRequest( - ErrorKind::NotFound, - "Presence state for this user was not found", - )), + | _ => Err!(Request(NotFound("Presence state for this user was not found"))), } } diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index 12e5ebcc..5abe5b23 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -52,7 +52,7 @@ pub(crate) async fn set_displayname_route( update_displayname(&services, &body.user_id, body.displayname.clone(), &all_joined_rooms) .await; - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence @@ -147,7 +147,7 @@ pub(crate) async fn set_avatar_url_route( ) .await; - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs index 187616b4..b334e356 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -50,7 +50,7 @@ pub(crate) async fn set_read_marker_route( } // ping presence - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { services .presence .ping_presence(sender_user, &ruma::presence::PresenceState::Online) @@ -126,7 +126,7 @@ pub(crate) async fn create_receipt_route( } // ping presence - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { 
services .presence .ping_presence(sender_user, &ruma::presence::PresenceState::Online) diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index bb06e966..bdc5d5a5 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -372,7 +372,7 @@ pub(crate) async fn create_room_route( // Silently skip encryption events if they are not allowed if pdu_builder.event_type == TimelineEventType::RoomEncryption - && !services.globals.allow_encryption() + && !services.config.allow_encryption { continue; } diff --git a/src/api/client/send.rs b/src/api/client/send.rs index b01d1ed6..1af74f57 100644 --- a/src/api/client/send.rs +++ b/src/api/client/send.rs @@ -25,8 +25,7 @@ pub(crate) async fn send_message_event_route( let appservice_info = body.appservice_info.as_ref(); // Forbid m.room.encrypted if encryption is disabled - if MessageLikeEventType::RoomEncrypted == body.event_type - && !services.globals.allow_encryption() + if MessageLikeEventType::RoomEncrypted == body.event_type && !services.config.allow_encryption { return Err!(Request(Forbidden("Encryption has been disabled"))); } diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index a8248f95..530c1278 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -118,7 +118,7 @@ pub(crate) async fn sync_events_route( let (sender_user, sender_device) = body.sender(); // Presence update - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { services .presence .ping_presence(sender_user, &body.body.set_presence) @@ -279,8 +279,8 @@ pub(crate) async fn build_sync_events( }); let presence_updates: OptionFuture<_> = services - .globals - .allow_local_presence() + .config + .allow_local_presence .then(|| process_presence_updates(services, since, sender_user)) .into(); diff --git a/src/api/client/typing.rs b/src/api/client/typing.rs index ccfa7340..b02cc473 100644 --- a/src/api/client/typing.rs +++ b/src/api/client/typing.rs @@ -64,7 +64,7 @@ pub(crate) async fn create_typing_event_route( } // ping presence - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { services .presence .ping_presence(&body.user_id, &ruma::presence::PresenceState::Online) diff --git a/src/api/client/unstable.rs b/src/api/client/unstable.rs index 08da5a37..45ad103e 100644 --- a/src/api/client/unstable.rs +++ b/src/api/client/unstable.rs @@ -205,7 +205,7 @@ pub(crate) async fn delete_timezone_key_route( services.users.set_timezone(&body.user_id, None); - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence @@ -233,7 +233,7 @@ pub(crate) async fn set_timezone_key_route( services.users.set_timezone(&body.user_id, body.tz.clone()); - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence @@ -326,7 +326,7 @@ pub(crate) async fn set_profile_key_route( ); } - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence @@ -385,7 +385,7 @@ pub(crate) async fn delete_profile_key_route( .set_profile_key(&body.user_id, &body.key_name, None); } - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index 463cb9ab..f4cc6eb2 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -103,8 
+103,7 @@ pub(crate) async fn create_invite_route( return Err!(Request(Forbidden("This room is banned on this homeserver."))); } - if services.globals.block_non_admin_invites() && !services.users.is_admin(&invited_user).await - { + if services.config.block_non_admin_invites && !services.users.is_admin(&invited_user).await { return Err!(Request(Forbidden("This server does not allow room invites."))); } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 1dd7db8e..a7a9be9d 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -111,20 +111,6 @@ impl Service { #[inline] pub fn server_name(&self) -> &ServerName { self.server.name.as_ref() } - pub fn allow_registration(&self) -> bool { self.server.config.allow_registration } - - pub fn allow_guest_registration(&self) -> bool { self.server.config.allow_guest_registration } - - pub fn allow_guests_auto_join_rooms(&self) -> bool { - self.server.config.allow_guests_auto_join_rooms - } - - pub fn log_guest_registrations(&self) -> bool { self.server.config.log_guest_registrations } - - pub fn allow_encryption(&self) -> bool { self.server.config.allow_encryption } - - pub fn allow_federation(&self) -> bool { self.server.config.allow_federation } - pub fn allow_public_room_directory_over_federation(&self) -> bool { self.server .config @@ -183,22 +169,6 @@ impl Service { pub fn forbidden_usernames(&self) -> &RegexSet { &self.server.config.forbidden_usernames } - pub fn allow_local_presence(&self) -> bool { self.server.config.allow_local_presence } - - pub fn allow_incoming_presence(&self) -> bool { self.server.config.allow_incoming_presence } - - pub fn allow_outgoing_presence(&self) -> bool { self.server.config.allow_outgoing_presence } - - pub fn allow_incoming_read_receipts(&self) -> bool { - self.server.config.allow_incoming_read_receipts - } - - pub fn allow_outgoing_read_receipts(&self) -> bool { - self.server.config.allow_outgoing_read_receipts - } - - pub fn block_non_admin_invites(&self) -> bool { self.server.config.block_non_admin_invites } - /// checks if `user_id` is local to us via server_name comparison #[inline] pub fn user_is_local(&self, user_id: &UserId) -> bool { From 3f0f89cddb28041ddeec94d8c80410a04153235b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 31 Mar 2025 04:25:48 +0000 Subject: [PATCH 289/328] use async_trait without axum re-export Signed-off-by: Jason Volk --- Cargo.lock | 1 + src/api/Cargo.toml | 1 + src/api/router/args.rs | 3 ++- 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 8c0e797b..aa639b30 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -766,6 +766,7 @@ dependencies = [ name = "conduwuit_api" version = "0.5.0" dependencies = [ + "async-trait", "axum", "axum-client-ip", "axum-extra", diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 385e786f..7890561c 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -35,6 +35,7 @@ brotli_compression = [ ] [dependencies] +async-trait.workspace = true axum-client-ip.workspace = true axum-extra.workspace = true axum.workspace = true diff --git a/src/api/router/args.rs b/src/api/router/args.rs index 65a68fa4..26713dcc 100644 --- a/src/api/router/args.rs +++ b/src/api/router/args.rs @@ -1,6 +1,7 @@ use std::{mem, ops::Deref}; -use axum::{async_trait, body::Body, extract::FromRequest}; +use async_trait::async_trait; +use axum::{body::Body, extract::FromRequest}; use bytes::{BufMut, Bytes, BytesMut}; use conduwuit::{Error, Result, debug, debug_warn, err, trace, 
utils::string::EMPTY}; use ruma::{ From 5768ca844295d892cfdcc9c80c8a57ef71c0e30c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 31 Mar 2025 04:23:47 +0000 Subject: [PATCH 290/328] upgrade dependency ByteSize Signed-off-by: Jason Volk --- Cargo.lock | 103 ++++++++++++++++++---------------------- Cargo.toml | 2 +- src/core/utils/bytes.rs | 6 +-- 3 files changed, 49 insertions(+), 62 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index aa639b30..ab9af9e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -79,7 +79,7 @@ checksum = "0ae92a5119aa49cdbcf6b9f893fe4e1d98b04ccbf82ee0584ad948a44a734dea" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -161,7 +161,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -172,7 +172,7 @@ checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -423,7 +423,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.100", + "syn", "which", ] @@ -442,7 +442,7 @@ dependencies = [ "regex", "rustc-hash 2.1.1", "shlex", - "syn 2.0.100", + "syn", ] [[package]] @@ -555,9 +555,9 @@ checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "bytesize" -version = "1.3.2" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d2c12f985c78475a6b8d629afd0c360260ef34cfef52efccdcfd31972f81c2e" +checksum = "a3c8f83209414aacf0eeae3cf730b18d6981697fba62f200fcfb92b9f082acba" [[package]] name = "bzip2-sys" @@ -679,7 +679,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -877,7 +877,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -1062,9 +1062,9 @@ dependencies = [ [[package]] name = "crokey" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520e83558f4c008ac06fa6a86e5c1d4357be6f994cce7434463ebcdaadf47bb1" +checksum = "c5ff945e42bb93d29b10ba509970066a269903a932f0ea07d99d8621f97e90d7" dependencies = [ "crokey-proc_macros", "crossterm", @@ -1075,15 +1075,15 @@ dependencies = [ [[package]] name = "crokey-proc_macros" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "370956e708a1ce65fe4ac5bb7185791e0ece7485087f17736d54a23a0895049f" +checksum = "665f2180fd82d0ba2bf3deb45fafabb18f23451024ff71ee47f6bfdfb4bbe09e" dependencies = [ "crossterm", "proc-macro2", "quote", "strict", - "syn 1.0.109", + "syn", ] [[package]] @@ -1191,7 +1191,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" dependencies = [ "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -1218,7 +1218,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -1287,7 +1287,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -1339,7 +1339,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -1550,7 +1550,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -1829,7 +1829,7 @@ dependencies = [ "markup5ever", "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -2081,7 +2081,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -2185,7 +2185,7 @@ checksum = "c34819042dc3d3971c46c2190835914dfbe0c3c13f61449b2997f4e9722dfa60" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -2336,7 +2336,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.100", + "syn", ] [[package]] @@ -2558,7 +2558,7 @@ checksum = "bd2209fff77f705b00c737016a48e73733d7fbccb8b007194db148f03561fb70" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -2705,7 +2705,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -2943,7 +2943,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -3007,7 +3007,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -3079,7 +3079,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5316f57387668042f561aae71480de936257848f9c43ce528e311d89a07cadeb" dependencies = [ "proc-macro2", - "syn 2.0.100", + "syn", ] [[package]] @@ -3108,7 +3108,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", "version_check", "yansi", ] @@ -3129,7 +3129,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a65f2e60fbf1063868558d69c6beacf412dc755f9fc020f514b7955fc914fe30" dependencies = [ "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -3152,7 +3152,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -3691,7 +3691,7 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn 2.0.100", + "syn", "toml", ] @@ -4088,7 +4088,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -4297,9 +4297,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" +checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" dependencies = [ "libc", "windows-sys 0.52.0", @@ -4367,17 +4367,6 @@ version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - [[package]] name = "syn" version = "2.0.100" @@ -4406,7 +4395,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -4491,7 +4480,7 @@ checksum = 
"4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -4502,7 +4491,7 @@ checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -4658,7 +4647,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -4870,7 +4859,7 @@ source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -5167,7 +5156,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.100", + "syn", "wasm-bindgen-shared", ] @@ -5202,7 +5191,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5593,7 +5582,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", "synstructure", ] @@ -5614,7 +5603,7 @@ checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -5634,7 +5623,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", "synstructure", ] @@ -5663,7 +5652,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index ab7a935c..e6751acf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -510,7 +510,7 @@ version = "1.0.37" version = "1.0.89" [workspace.dependencies.bytesize] -version = "1.3.2" +version = "2.0.1" [workspace.dependencies.core_affinity] version = "0.8.1" diff --git a/src/core/utils/bytes.rs b/src/core/utils/bytes.rs index 04101be4..507b9b9a 100644 --- a/src/core/utils/bytes.rs +++ b/src/core/utils/bytes.rs @@ -17,15 +17,13 @@ pub fn from_str(str: &str) -> Result { Ok(bytes) } -/// Output a human-readable size string w/ si-unit suffix +/// Output a human-readable size string w/ iec-unit suffix #[inline] #[must_use] pub fn pretty(bytes: usize) -> String { - const SI_UNITS: bool = true; - let bytes: u64 = bytes.try_into().expect("failed to convert usize to u64"); - bytesize::to_string(bytes, SI_UNITS) + ByteSize::b(bytes).display().iec().to_string() } #[inline] From bee1f896243f9fafc588b98f43412637f6a5dd90 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 31 Mar 2025 05:03:15 +0000 Subject: [PATCH 291/328] bump dependencies Signed-off-by: Jason Volk --- Cargo.lock | 136 +++++++++++++++++++++++++++++------------- Cargo.toml | 48 +++++++-------- src/core/Cargo.toml | 1 + src/core/error/mod.rs | 2 + 4 files changed, 121 insertions(+), 66 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ab9af9e8..fb19dfdb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -731,8 +731,8 @@ dependencies = [ "opentelemetry-jaeger", "opentelemetry_sdk", "sentry", - "sentry-tower", - "sentry-tracing", + "sentry-tower 0.35.0", + "sentry-tracing 0.35.0", "tokio", "tokio-metrics", "tracing", @@ -782,7 +782,7 @@ dependencies = [ "http-body-util", "hyper", "ipaddress", - "itertools 0.13.0", + "itertools 0.14.0", "log", "rand 0.8.5", "reqwest", @@ -802,6 
+802,7 @@ dependencies = [ "argon2", "arrayvec", "axum", + "axum-extra", "bytes", "bytesize", "cargo_toml", @@ -820,7 +821,7 @@ dependencies = [ "http", "http-body-util", "ipaddress", - "itertools 0.13.0", + "itertools 0.14.0", "libc", "libloading", "log", @@ -874,7 +875,7 @@ dependencies = [ name = "conduwuit_macros" version = "0.5.0" dependencies = [ - "itertools 0.13.0", + "itertools 0.14.0", "proc-macro2", "quote", "syn", @@ -904,8 +905,8 @@ dependencies = [ "rustls", "sd-notify", "sentry", - "sentry-tower", - "sentry-tracing", + "sentry-tower 0.35.0", + "sentry-tracing 0.35.0", "serde_json", "tokio", "tower 0.5.2", @@ -930,7 +931,7 @@ dependencies = [ "http", "image", "ipaddress", - "itertools 0.13.0", + "itertools 0.14.0", "log", "loole", "lru-cache", @@ -997,9 +998,9 @@ checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const-str" -version = "0.5.7" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3618cccc083bb987a415d85c02ca6c9994ea5b44731ec28b9ecf09658655fba9" +checksum = "9e991226a70654b49d34de5ed064885f0bef0348a8e70018b8ff1ac80aa984a2" [[package]] name = "const_panic" @@ -1948,9 +1949,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.8" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", @@ -1961,7 +1962,6 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", - "tower 0.4.13", "tower-service", "tracing", ] @@ -2543,18 +2543,18 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "minicbor" -version = "0.25.1" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0452a60c1863c1f50b5f77cd295e8d2786849f35883f0b9e18e7e6e1b5691b0" +checksum = "1936e27fffe7d8557c060eb82cb71668608cd1a5fb56b63e66d22ae8d7564321" dependencies = [ "minicbor-derive", ] [[package]] name = "minicbor-derive" -version = "0.15.3" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd2209fff77f705b00c737016a48e73733d7fbccb8b007194db148f03561fb70" +checksum = "a9882ef5c56df184b8ffc107fc6c61e33ee3a654b021961d790a78571bb9d67a" dependencies = [ "proc-macro2", "quote", @@ -2563,9 +2563,9 @@ dependencies = [ [[package]] name = "minicbor-serde" -version = "0.3.2" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "becf18ac384ecf6f53b2db3b1549eebff664c67ecf259ae99be5912193291686" +checksum = "54e45e8beeefea1b8b6f52fa188a5b6ea3746c2885606af8d4d8bf31cee633fb" dependencies = [ "minicbor", "serde", @@ -3938,21 +3938,21 @@ checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" [[package]] name = "sentry" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "016958f51b96861dead7c1e02290f138411d05e94fad175c8636a835dee6e51e" +checksum = "3a7332159e544e34db06b251b1eda5e546bd90285c3f58d9c8ff8450b484e0da" dependencies = [ "httpdate", "reqwest", "rustls", "sentry-backtrace", "sentry-contexts", - "sentry-core", + "sentry-core 0.36.0", "sentry-debug-images", "sentry-log", "sentry-panic", - "sentry-tower", - "sentry-tracing", + "sentry-tower 0.36.0", + "sentry-tracing 0.36.0", "tokio", "ureq", "webpki-roots", @@ -3960,27 
+3960,27 @@ dependencies = [ [[package]] name = "sentry-backtrace" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e57712c24e99252ef175b4b06c485294f10ad6bc5b5e1567ff3803ee7a0b7d3f" +checksum = "565ec31ad37bab8e6d9f289f34913ed8768347b133706192f10606dabd5c6bc4" dependencies = [ "backtrace", "once_cell", "regex", - "sentry-core", + "sentry-core 0.36.0", ] [[package]] name = "sentry-contexts" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eba8754ec3b9279e00aa6d64916f211d44202370a1699afde1db2c16cbada089" +checksum = "e860275f25f27e8c0c7726ce116c7d5c928c5bba2ee73306e52b20a752298ea6" dependencies = [ "hostname", "libc", "os_info", "rustc_version", - "sentry-core", + "sentry-core 0.36.0", "uname", ] @@ -3992,40 +3992,53 @@ checksum = "f9f8b6dcd4fbae1e3e22b447f32670360b27e31b62ab040f7fb04e0f80c04d92" dependencies = [ "once_cell", "rand 0.8.5", - "sentry-types", + "sentry-types 0.35.0", + "serde", + "serde_json", +] + +[[package]] +name = "sentry-core" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "653942e6141f16651273159f4b8b1eaeedf37a7554c00cd798953e64b8a9bf72" +dependencies = [ + "once_cell", + "rand 0.8.5", + "sentry-types 0.36.0", "serde", "serde_json", ] [[package]] name = "sentry-debug-images" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8982a69133d3f5e4efdbfa0776937fca43c3a2e275a8fe184f50b1b0aa92e07c" +checksum = "2a60bc2154e6df59beed0ac13d58f8dfaf5ad20a88548a53e29e4d92e8e835c2" dependencies = [ "findshlibs", "once_cell", - "sentry-core", + "sentry-core 0.36.0", ] [[package]] name = "sentry-log" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efcbfbb74628eaef033c1154d4bb082437c7592ce2282c7c5ccb455c4c97a06d" +checksum = "1c96d796cba1b3a0793e7f53edc420c61f9419fba8fb34ad5519f5c7d01af6b2" dependencies = [ "log", - "sentry-core", + "sentry-core 0.36.0", ] [[package]] name = "sentry-panic" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de296dae6f01e931b65071ee5fe28d66a27909857f744018f107ed15fd1f6b25" +checksum = "105e3a956c8aa9dab1e4087b1657b03271bfc49d838c6ae9bfc7c58c802fd0ef" dependencies = [ "sentry-backtrace", - "sentry-core", + "sentry-core 0.36.0", ] [[package]] @@ -4033,10 +4046,21 @@ name = "sentry-tower" version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcdaf9b1939589476bd57751d12a9653bbfe356610fc476d03d7683189183ab7" +dependencies = [ + "sentry-core 0.35.0", + "tower-layer", + "tower-service", +] + +[[package]] +name = "sentry-tower" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "082f781dfc504d984e16d99f8dbf94d6ee4762dd0fc28de25713d0f900a8164d" dependencies = [ "http", "pin-project", - "sentry-core", + "sentry-core 0.36.0", "tower-layer", "tower-service", "url", @@ -4047,9 +4071,20 @@ name = "sentry-tracing" version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "263f73c757ed7915d3e1e34625eae18cad498a95b4261603d4ce3f87b159a6f0" +dependencies = [ + "sentry-core 0.35.0", + "tracing-core", + "tracing-subscriber", +] + +[[package]] +name = "sentry-tracing" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"64e75c831b4d8b34a5aec1f65f67c5d46a26c7c5d3c7abd8b5ef430796900cf8" dependencies = [ "sentry-backtrace", - "sentry-core", + "sentry-core 0.36.0", "tracing-core", "tracing-subscriber", ] @@ -4071,6 +4106,23 @@ dependencies = [ "uuid", ] +[[package]] +name = "sentry-types" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d4203359e60724aa05cf2385aaf5d4f147e837185d7dd2b9ccf1ee77f4420c8" +dependencies = [ + "debugid", + "hex", + "rand 0.8.5", + "serde", + "serde_json", + "thiserror 1.0.69", + "time", + "url", + "uuid", +] + [[package]] name = "serde" version = "1.0.219" diff --git a/Cargo.toml b/Cargo.toml index e6751acf..ba706656 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,11 +27,11 @@ version = "0.5.0" name = "conduwuit" [workspace.dependencies.arrayvec] -version = "0.7.4" +version = "0.7.6" features = ["serde"] [workspace.dependencies.smallvec] -version = "1.13.2" +version = "1.14.0" features = [ "const_generics", "const_new", @@ -45,7 +45,7 @@ version = "0.3" features = ["ffi", "std", "union"] [workspace.dependencies.const-str] -version = "0.5.7" +version = "0.6.2" [workspace.dependencies.ctor] version = "0.2.9" @@ -81,13 +81,13 @@ version = "0.8.5" # Used for the http request / response body type for Ruma endpoints used with reqwest [workspace.dependencies.bytes] -version = "1.9.0" +version = "1.10.1" [workspace.dependencies.http-body-util] -version = "0.1.2" +version = "0.1.3" [workspace.dependencies.http] -version = "1.2.0" +version = "1.3.1" [workspace.dependencies.regex] version = "1.11.1" @@ -111,7 +111,7 @@ default-features = false features = ["typed-header", "tracing"] [workspace.dependencies.axum-server] -version = "0.7.1" +version = "0.7.2" default-features = false # to listen on both HTTP and HTTPS if listening on TLS dierctly from conduwuit for complement or sytest @@ -122,7 +122,7 @@ version = "0.7" version = "0.6.1" [workspace.dependencies.tower] -version = "0.5.1" +version = "0.5.2" default-features = false features = ["util"] @@ -156,12 +156,12 @@ features = [ ] [workspace.dependencies.serde] -version = "1.0.216" +version = "1.0.219" default-features = false features = ["rc"] [workspace.dependencies.serde_json] -version = "1.0.133" +version = "1.0.140" default-features = false features = ["raw_value"] @@ -237,7 +237,7 @@ features = [ ] [workspace.dependencies.futures] -version = "0.3.30" +version = "0.3.31" default-features = false features = ["std", "async-await"] @@ -275,7 +275,7 @@ features = ["alloc", "std"] default-features = false [workspace.dependencies.hyper] -version = "1.5.1" +version = "1.6.0" default-features = false features = [ "server", @@ -285,7 +285,7 @@ features = [ [workspace.dependencies.hyper-util] # hyper-util >=0.1.9 seems to have DNS issues -version = "=0.1.8" +version = "0.1.10" default-features = false features = [ "server-auto", @@ -295,7 +295,7 @@ features = [ # to support multiple variations of setting a config option [workspace.dependencies.either] -version = "1.13.0" +version = "1.15.0" default-features = false features = ["serde"] @@ -311,7 +311,7 @@ default-features = false # Used for conduwuit::Error type [workspace.dependencies.thiserror] -version = "2.0.7" +version = "2.0.12" default-features = false # Used when hashing the state @@ -321,7 +321,7 @@ default-features = false # Used to make working with iterators easier, was already a transitive depdendency [workspace.dependencies.itertools] -version = "0.13.0" +version = "0.14.0" # to parse user-friendly time durations in admin commands 
#TODO: overlaps chrono? @@ -337,7 +337,7 @@ version = "0.4.0" version = "2.3.1" [workspace.dependencies.async-trait] -version = "0.1.83" +version = "0.1.88" [workspace.dependencies.lru-cache] version = "0.1.2" @@ -423,7 +423,7 @@ features = ["rt-tokio"] # optional sentry metrics for crash/panic reporting [workspace.dependencies.sentry] -version = "0.35.0" +version = "0.36.0" default-features = false features = [ "backtrace", @@ -499,18 +499,18 @@ default-features = false version = "0.1" [workspace.dependencies.syn] -version = "2.0.90" +version = "2.0" default-features = false features = ["full", "extra-traits"] [workspace.dependencies.quote] -version = "1.0.37" +version = "1.0" [workspace.dependencies.proc-macro2] -version = "1.0.89" +version = "1.0" [workspace.dependencies.bytesize] -version = "2.0.1" +version = "2.0" [workspace.dependencies.core_affinity] version = "0.8.1" @@ -522,11 +522,11 @@ version = "0.2" version = "0.2" [workspace.dependencies.minicbor] -version = "0.25.1" +version = "0.26.3" features = ["std"] [workspace.dependencies.minicbor-serde] -version = "0.3.2" +version = "0.4.1" features = ["std"] [workspace.dependencies.maplit] diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index b40dd3ad..4848e742 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -59,6 +59,7 @@ conduwuit_mods = [ argon2.workspace = true arrayvec.workspace = true axum.workspace = true +axum-extra.workspace = true bytes.workspace = true bytesize.workspace = true cargo_toml.workspace = true diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index 02ab6fa3..e46edf09 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -81,6 +81,8 @@ pub enum Error { #[error("Tracing reload error: {0}")] TracingReload(#[from] tracing_subscriber::reload::Error), #[error(transparent)] + TypedHeader(#[from] axum_extra::typed_header::TypedHeaderRejection), + #[error(transparent)] Yaml(#[from] serde_yaml::Error), // ruma/conduwuit From 0f81c1e1ccdcb0c5c6d5a27e82f16eb37b1e61c8 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 1 Apr 2025 02:14:51 +0000 Subject: [PATCH 292/328] revert hyper-util upgrade due to continued DNS issues Signed-off-by: Jason Volk --- Cargo.lock | 5 +++-- Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fb19dfdb..77d03506 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1949,9 +1949,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.10" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" +checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" dependencies = [ "bytes", "futures-channel", @@ -1962,6 +1962,7 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", + "tower 0.4.13", "tower-service", "tracing", ] diff --git a/Cargo.toml b/Cargo.toml index ba706656..62bbaf16 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -285,7 +285,7 @@ features = [ [workspace.dependencies.hyper-util] # hyper-util >=0.1.9 seems to have DNS issues -version = "0.1.10" +version = "=0.1.8" default-features = false features = [ "server-auto", From 1b71b99c514f69bdd2fbcdb7996dcc00860d2057 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 2 Apr 2025 10:49:38 -0400 Subject: [PATCH 293/328] fix weird issue with acl c2s check Signed-off-by: June Clementine Strawberry --- src/api/client/state.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git 
a/src/api/client/state.rs b/src/api/client/state.rs index 9563c26d..23583356 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -229,6 +229,9 @@ async fn allowed_to_send_state_event( if acl_content.deny.contains(&String::from("*")) && !acl_content.is_allowed(services.globals.server_name()) + && !acl_content + .allow + .contains(&services.globals.server_name().to_string()) { return Err!(Request(BadJson(debug_warn!( ?room_id, @@ -240,6 +243,9 @@ async fn allowed_to_send_state_event( if !acl_content.allow.contains(&String::from("*")) && !acl_content.is_allowed(services.globals.server_name()) + && !acl_content + .allow + .contains(&services.globals.server_name().to_string()) { return Err!(Request(BadJson(debug_warn!( ?room_id, From ea246d91d975a89a947c35260a4d50684fd2913b Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 2 Apr 2025 22:38:47 -0400 Subject: [PATCH 294/328] remove pointless and buggy *_visibility in-memory caches Signed-off-by: June Clementine Strawberry --- conduwuit-example.toml | 8 --- src/core/config/mod.rs | 12 ---- src/service/rooms/state_accessor/mod.rs | 68 ++----------------- .../rooms/state_accessor/server_can.rs | 22 +----- src/service/rooms/state_accessor/user_can.rs | 22 +----- 5 files changed, 10 insertions(+), 122 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 15e6dd37..75ecddab 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -195,14 +195,6 @@ # #servernameevent_data_cache_capacity = varies by system -# This item is undocumented. Please contribute documentation for it. -# -#server_visibility_cache_capacity = varies by system - -# This item is undocumented. Please contribute documentation for it. -# -#user_visibility_cache_capacity = varies by system - # This item is undocumented. Please contribute documentation for it. 
# #stateinfo_cache_capacity = varies by system diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 52df19ac..7be140a5 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -252,14 +252,6 @@ pub struct Config { #[serde(default = "default_servernameevent_data_cache_capacity")] pub servernameevent_data_cache_capacity: u32, - /// default: varies by system - #[serde(default = "default_server_visibility_cache_capacity")] - pub server_visibility_cache_capacity: u32, - - /// default: varies by system - #[serde(default = "default_user_visibility_cache_capacity")] - pub user_visibility_cache_capacity: u32, - /// default: varies by system #[serde(default = "default_stateinfo_cache_capacity")] pub stateinfo_cache_capacity: u32, @@ -2035,10 +2027,6 @@ fn default_servernameevent_data_cache_capacity() -> u32 { parallelism_scaled_u32(100_000).saturating_add(500_000) } -fn default_server_visibility_cache_capacity() -> u32 { parallelism_scaled_u32(500) } - -fn default_user_visibility_cache_capacity() -> u32 { parallelism_scaled_u32(1000) } - fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(100) } fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(1000) } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 652fdbd7..b57465ce 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -3,21 +3,13 @@ mod server_can; mod state; mod user_can; -use std::{ - fmt::Write, - sync::{Arc, Mutex as StdMutex, Mutex}, -}; +use std::sync::Arc; use async_trait::async_trait; -use conduwuit::{ - Result, err, utils, - utils::math::{Expected, usize_from_f64}, -}; +use conduwuit::{Result, err}; use database::Map; -use lru_cache::LruCache; use ruma::{ - EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, OwnedRoomId, OwnedServerName, - OwnedUserId, RoomId, UserId, + EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, OwnedRoomId, RoomId, UserId, events::{ StateEventType, room::{ @@ -37,11 +29,9 @@ use ruma::{ space::SpaceRoomJoinRule, }; -use crate::{Dep, rooms, rooms::short::ShortStateHash}; +use crate::{Dep, rooms}; pub struct Service { - pub server_visibility_cache: Mutex>, - pub user_visibility_cache: Mutex>, services: Services, db: Data, } @@ -61,19 +51,7 @@ struct Data { #[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { - let config = &args.server.config; - let server_visibility_cache_capacity = - f64::from(config.server_visibility_cache_capacity) * config.cache_capacity_modifier; - let user_visibility_cache_capacity = - f64::from(config.user_visibility_cache_capacity) * config.cache_capacity_modifier; - Ok(Arc::new(Self { - server_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64( - server_visibility_cache_capacity, - )?)), - user_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64( - user_visibility_cache_capacity, - )?)), services: Services { state_cache: args.depend::("rooms::state_cache"), timeline: args.depend::("rooms::timeline"), @@ -88,44 +66,6 @@ impl crate::Service for Service { })) } - async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { - use utils::bytes::pretty; - - let (svc_count, svc_bytes) = self.server_visibility_cache.lock()?.iter().fold( - (0_usize, 0_usize), - |(count, bytes), (key, _)| { - ( - count.expected_add(1), - bytes - .expected_add(key.0.capacity()) - .expected_add(size_of_val(&key.1)), - ) - }, - ); - - let (uvc_count, uvc_bytes) = 
self.user_visibility_cache.lock()?.iter().fold( - (0_usize, 0_usize), - |(count, bytes), (key, _)| { - ( - count.expected_add(1), - bytes - .expected_add(key.0.capacity()) - .expected_add(size_of_val(&key.1)), - ) - }, - ); - - writeln!(out, "server_visibility_cache: {svc_count} ({})", pretty(svc_bytes))?; - writeln!(out, "user_visibility_cache: {uvc_count} ({})", pretty(uvc_bytes))?; - - Ok(()) - } - - async fn clear_cache(&self) { - self.server_visibility_cache.lock().expect("locked").clear(); - self.user_visibility_cache.lock().expect("locked").clear(); - } - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } diff --git a/src/service/rooms/state_accessor/server_can.rs b/src/service/rooms/state_accessor/server_can.rs index 2e8f3325..7d1b197f 100644 --- a/src/service/rooms/state_accessor/server_can.rs +++ b/src/service/rooms/state_accessor/server_can.rs @@ -1,4 +1,4 @@ -use conduwuit::{error, implement, utils::stream::ReadyExt}; +use conduwuit::{debug_info, implement, utils::stream::ReadyExt}; use futures::StreamExt; use ruma::{ EventId, RoomId, ServerName, @@ -22,15 +22,6 @@ pub async fn server_can_see_event( return true; }; - if let Some(visibility) = self - .server_visibility_cache - .lock() - .expect("locked") - .get_mut(&(origin.to_owned(), shortstatehash)) - { - return *visibility; - } - let history_visibility = self .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") .await @@ -44,7 +35,7 @@ pub async fn server_can_see_event( .room_members(room_id) .ready_filter(|member| member.server_name() == origin); - let visibility = match history_visibility { + match history_visibility { | HistoryVisibility::WorldReadable | HistoryVisibility::Shared => true, | HistoryVisibility::Invited => { // Allow if any member on requesting server was AT LEAST invited, else deny @@ -62,12 +53,5 @@ pub async fn server_can_see_event( error!("Unknown history visibility {history_visibility}"); false }, - }; - - self.server_visibility_cache - .lock() - .expect("locked") - .insert((origin.to_owned(), shortstatehash), visibility); - - visibility + } } diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs index c30e1da8..32a766a8 100644 --- a/src/service/rooms/state_accessor/user_can.rs +++ b/src/service/rooms/state_accessor/user_can.rs @@ -1,4 +1,4 @@ -use conduwuit::{Err, Error, Result, error, implement, pdu::PduBuilder}; +use conduwuit::{Err, Error, Result, debug_info, implement, pdu::PduBuilder}; use ruma::{ EventId, RoomId, UserId, events::{ @@ -98,15 +98,6 @@ pub async fn user_can_see_event( return true; }; - if let Some(visibility) = self - .user_visibility_cache - .lock() - .expect("locked") - .get_mut(&(user_id.to_owned(), shortstatehash)) - { - return *visibility; - } - let currently_member = self.services.state_cache.is_joined(user_id, room_id).await; let history_visibility = self @@ -116,7 +107,7 @@ pub async fn user_can_see_event( c.history_visibility }); - let visibility = match history_visibility { + match history_visibility { | HistoryVisibility::WorldReadable => true, | HistoryVisibility::Shared => currently_member, | HistoryVisibility::Invited => { @@ -131,14 +122,7 @@ pub async fn user_can_see_event( error!("Unknown history visibility {history_visibility}"); false }, - }; - - self.user_visibility_cache - .lock() - .expect("locked") - .insert((user_id.to_owned(), shortstatehash), visibility); - - visibility + } } /// Whether a user is allowed to see an event, based on From 
74012c5289831c16976fc283a4233bfb6b49ce8b Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 2 Apr 2025 22:44:44 -0400 Subject: [PATCH 295/328] significantly improve get_missing_events fed code Signed-off-by: June Clementine Strawberry --- src/api/server/backfill.rs | 12 ++- src/api/server/get_missing_events.rs | 111 ++++++++++++++------------- 2 files changed, 65 insertions(+), 58 deletions(-) diff --git a/src/api/server/backfill.rs b/src/api/server/backfill.rs index 5c875807..3cfbcedc 100644 --- a/src/api/server/backfill.rs +++ b/src/api/server/backfill.rs @@ -6,11 +6,17 @@ use conduwuit::{ utils::{IterStream, ReadyExt, stream::TryTools}, }; use futures::{FutureExt, StreamExt, TryStreamExt}; -use ruma::{MilliSecondsSinceUnixEpoch, api::federation::backfill::get_backfill, uint}; +use ruma::{MilliSecondsSinceUnixEpoch, api::federation::backfill::get_backfill}; use super::AccessCheck; use crate::Ruma; +/// arbitrary number but synapse's is 100 and we can handle lots of these +/// anyways +const LIMIT_MAX: usize = 150; +/// no spec defined number but we can handle a lot of these +const LIMIT_DEFAULT: usize = 50; + /// # `GET /_matrix/federation/v1/backfill/` /// /// Retrieves events from before the sender joined the room, if the room's @@ -30,9 +36,9 @@ pub(crate) async fn get_backfill_route( let limit = body .limit - .min(uint!(100)) .try_into() - .expect("UInt could not be converted to usize"); + .unwrap_or(LIMIT_DEFAULT) + .min(LIMIT_MAX); let from = body .v diff --git a/src/api/server/get_missing_events.rs b/src/api/server/get_missing_events.rs index 3d0bbb07..d72918fa 100644 --- a/src/api/server/get_missing_events.rs +++ b/src/api/server/get_missing_events.rs @@ -1,13 +1,19 @@ use axum::extract::State; -use conduwuit::{Error, Result}; -use ruma::{ - CanonicalJsonValue, EventId, RoomId, - api::{client::error::ErrorKind, federation::event::get_missing_events}, +use conduwuit::{ + Result, debug, debug_info, debug_warn, + utils::{self}, + warn, }; +use ruma::api::federation::event::get_missing_events; use super::AccessCheck; use crate::Ruma; +/// arbitrary number but synapse's is 20 and we can handle lots of these anyways +const LIMIT_MAX: usize = 50; +/// spec says default is 10 +const LIMIT_DEFAULT: usize = 10; + /// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` /// /// Retrieves events that the sender is missing. @@ -24,7 +30,11 @@ pub(crate) async fn get_missing_events_route( .check() .await?; - let limit = body.limit.try_into()?; + let limit = body + .limit + .try_into() + .unwrap_or(LIMIT_DEFAULT) + .min(LIMIT_MAX); let mut queued_events = body.latest_events.clone(); // the vec will never have more entries the limit @@ -32,60 +42,51 @@ pub(crate) async fn get_missing_events_route( let mut i: usize = 0; while i < queued_events.len() && events.len() < limit { - if let Ok(pdu) = services + let Ok(pdu) = services.rooms.timeline.get_pdu(&queued_events[i]).await else { + debug_info!(?body.origin, "Event {} does not exist locally, skipping", &queued_events[i]); + i = i.saturating_add(1); + continue; + }; + + if pdu.room_id != body.room_id { + warn!(?body.origin, + "Got an event for the wrong room in database. Found {:?} in {:?}, server requested events in {:?}. 
Skipping.", + pdu.event_id, pdu.room_id, body.room_id + ); + i = i.saturating_add(1); + continue; + } + + if body.earliest_events.contains(&queued_events[i]) { + i = i.saturating_add(1); + continue; + } + + if !services .rooms - .timeline - .get_pdu_json(&queued_events[i]) + .state_accessor + .server_can_see_event(body.origin(), &body.room_id, &queued_events[i]) .await { - let room_id_str = pdu - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database."))?; - - let event_room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room_id in event in database."))?; - - if event_room_id != body.room_id { - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event from wrong room.")); - } - - if body.earliest_events.contains(&queued_events[i]) { - i = i.saturating_add(1); - continue; - } - - if !services - .rooms - .state_accessor - .server_can_see_event(body.origin(), &body.room_id, &queued_events[i]) - .await - { - i = i.saturating_add(1); - continue; - } - - let prev_events = pdu - .get("prev_events") - .and_then(CanonicalJsonValue::as_array) - .unwrap_or_default(); - - queued_events.extend( - prev_events - .iter() - .map(<&EventId>::try_from) - .filter_map(Result::ok) - .map(ToOwned::to_owned), - ); - - events.push( - services - .sending - .convert_to_outgoing_federation_event(pdu) - .await, - ); + debug!(?body.origin, "Server cannot see {:?} in {:?}, skipping", pdu.event_id, pdu.room_id); + i = i.saturating_add(1); + continue; } - i = i.saturating_add(1); + + let Ok(pdu_json) = utils::to_canonical_object(&pdu) else { + debug_warn!(?body.origin, "Failed to convert PDU in database to canonical JSON: {pdu:?}"); + i = i.saturating_add(1); + continue; + }; + + queued_events.extend(pdu.prev_events.iter().map(ToOwned::to_owned)); + + events.push( + services + .sending + .convert_to_outgoing_federation_event(pdu_json) + .await, + ); } Ok(get_missing_events::v1::Response { events }) From 1036f8dfa8fabb9642b9638b54381e00016eef9c Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 2 Apr 2025 22:46:01 -0400 Subject: [PATCH 296/328] default shared history vis on unknown visibilities, drop needless error log Signed-off-by: June Clementine Strawberry --- src/service/rooms/state_accessor/server_can.rs | 4 ++-- src/service/rooms/state_accessor/user_can.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/service/rooms/state_accessor/server_can.rs b/src/service/rooms/state_accessor/server_can.rs index 7d1b197f..c946fbfd 100644 --- a/src/service/rooms/state_accessor/server_can.rs +++ b/src/service/rooms/state_accessor/server_can.rs @@ -50,8 +50,8 @@ pub async fn server_can_see_event( .await }, | _ => { - error!("Unknown history visibility {history_visibility}"); - false + debug_info!(%room_id, "Unknown history visibility, defaulting to shared: {history_visibility:?}"); + true }, } } diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs index 32a766a8..aa54407b 100644 --- a/src/service/rooms/state_accessor/user_can.rs +++ b/src/service/rooms/state_accessor/user_can.rs @@ -119,8 +119,8 @@ pub async fn user_can_see_event( self.user_was_joined(shortstatehash, user_id).await }, | _ => { - error!("Unknown history visibility {history_visibility}"); - false + debug_info!(%room_id, "Unknown history visibility, defaulting to shared: {history_visibility:?}"); + currently_member }, } } From 0e0b8cc4032732378966f07b38b97af89788e399 Mon Sep 17 
00:00:00 2001 From: June Clementine Strawberry Date: Wed, 2 Apr 2025 22:51:17 -0400 Subject: [PATCH 297/328] fixup+update msc3266, add fed support, parallelise IO Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +- Cargo.toml | 2 +- src/api/client/room/mod.rs | 9 +- src/api/client/room/summary.rs | 308 ++++++++++++++++++++++++ src/api/client/room/upgrade.rs | 2 +- src/api/client/unstable.rs | 138 +---------- src/service/rooms/spaces/mod.rs | 54 +++-- src/service/rooms/state_accessor/mod.rs | 28 ++- 8 files changed, 389 insertions(+), 174 deletions(-) create mode 100644 src/api/client/room/summary.rs diff --git a/Cargo.lock b/Cargo.lock index 77d03506..a53258bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3531,7 +3531,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "assign", "js_int", @@ -3551,7 +3551,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "js_int", "ruma-common", @@ -3563,7 +3563,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "as_variant", "assign", @@ -3586,7 +3586,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "as_variant", "base64 0.22.1", @@ -3618,7 +3618,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "as_variant", "indexmap 2.8.0", @@ -3643,7 +3643,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "bytes", "headers", @@ -3665,7 +3665,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3674,7 +3674,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "js_int", "ruma-common", @@ -3684,7 +3684,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3699,7 +3699,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "js_int", "ruma-common", @@ -3711,7 +3711,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index 62bbaf16..940ece86 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "d197318a2507d38ffe6ee524d0d52728ca72538a" +rev = "ea1278657125e9414caada074e8c172bc252fb1c" features = [ "compat", "rand", diff --git a/src/api/client/room/mod.rs b/src/api/client/room/mod.rs index 16fcadab..86d68f7e 100644 --- a/src/api/client/room/mod.rs +++ b/src/api/client/room/mod.rs @@ -2,9 +2,14 @@ mod aliases; mod create; mod event; mod initial_sync; +mod summary; mod upgrade; pub(crate) use self::{ - aliases::get_room_aliases_route, create::create_room_route, event::get_room_event_route, - initial_sync::room_initial_sync_route, upgrade::upgrade_room_route, + aliases::get_room_aliases_route, + create::create_room_route, + event::get_room_event_route, + initial_sync::room_initial_sync_route, + summary::{get_room_summary, get_room_summary_legacy}, + upgrade::upgrade_room_route, }; diff --git a/src/api/client/room/summary.rs b/src/api/client/room/summary.rs new file mode 100644 index 00000000..34820e83 --- /dev/null +++ b/src/api/client/room/summary.rs @@ -0,0 +1,308 @@ +use axum::extract::State; +use axum_client_ip::InsecureClientIp; +use conduwuit::{ + Err, Result, debug_warn, + utils::{IterStream, future::TryExtExt}, +}; +use futures::{FutureExt, StreamExt, future::join3, stream::FuturesUnordered}; +use ruma::{ + OwnedRoomId, OwnedServerName, RoomId, UserId, + api::{ + client::room::get_summary, + federation::space::{SpaceHierarchyParentSummary, get_hierarchy}, + }, + 
events::room::member::MembershipState, + space::SpaceRoomJoinRule::{self, *}, +}; +use service::Services; + +use crate::{Ruma, RumaResponse}; + +/// # `GET /_matrix/client/unstable/im.nheko.summary/rooms/{roomIdOrAlias}/summary` +/// +/// Returns a short description of the state of a room. +/// +/// This is the "wrong" endpoint that some implementations/clients may use +/// according to the MSC. Request and response bodies are the same as +/// `get_room_summary`. +/// +/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) +pub(crate) async fn get_room_summary_legacy( + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result> { + get_room_summary(State(services), InsecureClientIp(client), body) + .boxed() + .await + .map(RumaResponse) +} + +/// # `GET /_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}` +/// +/// Returns a short description of the state of a room. +/// +/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) +#[tracing::instrument(skip_all, fields(%client), name = "room_summary")] +pub(crate) async fn get_room_summary( + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result { + let (room_id, servers) = services + .rooms + .alias + .resolve_with_servers(&body.room_id_or_alias, Some(body.via.clone())) + .await?; + + if services.rooms.metadata.is_banned(&room_id).await { + return Err!(Request(Forbidden("This room is banned on this homeserver."))); + } + + room_summary_response(&services, &room_id, &servers, body.sender_user.as_deref()) + .boxed() + .await +} + +async fn room_summary_response( + services: &Services, + room_id: &RoomId, + servers: &[OwnedServerName], + sender_user: Option<&UserId>, +) -> Result { + if services.rooms.metadata.exists(room_id).await { + return local_room_summary_response(services, room_id, sender_user) + .boxed() + .await; + } + + let room = + remote_room_summary_hierarchy_response(services, room_id, servers, sender_user).await?; + + Ok(get_summary::msc3266::Response { + room_id: room_id.to_owned(), + canonical_alias: room.canonical_alias, + avatar_url: room.avatar_url, + guest_can_join: room.guest_can_join, + name: room.name, + num_joined_members: room.num_joined_members, + topic: room.topic, + world_readable: room.world_readable, + join_rule: room.join_rule, + room_type: room.room_type, + room_version: room.room_version, + membership: if sender_user.is_none() { + None + } else { + Some(MembershipState::Leave) + }, + encryption: room.encryption, + allowed_room_ids: room.allowed_room_ids, + }) +} + +async fn local_room_summary_response( + services: &Services, + room_id: &RoomId, + sender_user: Option<&UserId>, +) -> Result { + let join_rule = services.rooms.state_accessor.get_space_join_rule(room_id); + let world_readable = services.rooms.state_accessor.is_world_readable(room_id); + let guest_can_join = services.rooms.state_accessor.guest_can_join(room_id); + + let ((join_rule, allowed_room_ids), world_readable, guest_can_join) = + join3(join_rule, world_readable, guest_can_join).await; + + user_can_see_summary( + services, + room_id, + &join_rule, + guest_can_join, + world_readable, + &allowed_room_ids, + sender_user, + ) + .await?; + + let canonical_alias = services + .rooms + .state_accessor + .get_canonical_alias(room_id) + .ok(); + let name = services.rooms.state_accessor.get_name(room_id).ok(); + let topic = 
services.rooms.state_accessor.get_room_topic(room_id).ok(); + let room_type = services.rooms.state_accessor.get_room_type(room_id).ok(); + let avatar_url = services + .rooms + .state_accessor + .get_avatar(room_id) + .map(|res| res.into_option().unwrap_or_default().url); + let room_version = services.rooms.state.get_room_version(room_id).ok(); + let encryption = services + .rooms + .state_accessor + .get_room_encryption(room_id) + .ok(); + let num_joined_members = services + .rooms + .state_cache + .room_joined_count(room_id) + .unwrap_or(0); + + let ( + canonical_alias, + name, + num_joined_members, + topic, + avatar_url, + room_type, + room_version, + encryption, + ) = futures::join!( + canonical_alias, + name, + num_joined_members, + topic, + avatar_url, + room_type, + room_version, + encryption, + ); + + Ok(get_summary::msc3266::Response { + room_id: room_id.to_owned(), + canonical_alias, + avatar_url, + guest_can_join, + name, + num_joined_members: num_joined_members.try_into().unwrap_or_default(), + topic, + world_readable, + join_rule, + room_type, + room_version, + membership: if let Some(sender_user) = sender_user { + services + .rooms + .state_accessor + .get_member(room_id, sender_user) + .await + .map_or(Some(MembershipState::Leave), |content| Some(content.membership)) + } else { + None + }, + encryption, + allowed_room_ids, + }) +} + +/// used by MSC3266 to fetch a room's info if we do not know about it +async fn remote_room_summary_hierarchy_response( + services: &Services, + room_id: &RoomId, + servers: &[OwnedServerName], + sender_user: Option<&UserId>, +) -> Result { + if !services.config.allow_federation { + return Err!(Request(Forbidden("Federation is disabled."))); + } + + if services.rooms.metadata.is_disabled(room_id).await { + return Err!(Request(Forbidden( + "Federaton of room {room_id} is currently disabled on this server." 
+ ))); + } + + let request = get_hierarchy::v1::Request::new(room_id.to_owned()); + + let mut requests: FuturesUnordered<_> = servers + .iter() + .map(|server| { + services + .sending + .send_federation_request(server, request.clone()) + }) + .collect(); + + while let Some(Ok(response)) = requests.next().await { + let room = response.room.clone(); + if room.room_id != room_id { + debug_warn!( + "Room ID {} returned does not belong to the requested room ID {}", + room.room_id, + room_id + ); + continue; + } + + return user_can_see_summary( + services, + room_id, + &room.join_rule, + room.guest_can_join, + room.world_readable, + &room.allowed_room_ids, + sender_user, + ) + .await + .map(|()| room); + } + + Err!(Request(NotFound( + "Room is unknown to this server and was unable to fetch over federation with the \ + provided servers available" + ))) +} + +async fn user_can_see_summary( + services: &Services, + room_id: &RoomId, + join_rule: &SpaceRoomJoinRule, + guest_can_join: bool, + world_readable: bool, + allowed_room_ids: &[OwnedRoomId], + sender_user: Option<&UserId>, +) -> Result { + match sender_user { + | Some(sender_user) => { + let user_can_see_state_events = services + .rooms + .state_accessor + .user_can_see_state_events(sender_user, room_id); + let is_guest = services.users.is_deactivated(sender_user).unwrap_or(false); + let user_in_allowed_restricted_room = allowed_room_ids + .iter() + .stream() + .any(|room| services.rooms.state_cache.is_joined(sender_user, room)); + + let (user_can_see_state_events, is_guest, user_in_allowed_restricted_room) = + join3(user_can_see_state_events, is_guest, user_in_allowed_restricted_room) + .boxed() + .await; + + if user_can_see_state_events + || (is_guest && guest_can_join) + || matches!(&join_rule, &Public | &Knock | &KnockRestricted) + || user_in_allowed_restricted_room + { + return Ok(()); + } + + Err!(Request(Forbidden( + "Room is not world readable, not publicly accessible/joinable, restricted room \ + conditions not met, and guest access is forbidden. Not allowed to see details \ + of this room." 
+ ))) + }, + | None => { + if matches!(join_rule, Public | Knock | KnockRestricted) || world_readable { + return Ok(()); + } + + Err!(Request(Forbidden( + "Room is not world readable or publicly accessible/joinable, authentication is \ + required" + ))) + }, + } +} diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index 4ac341a9..3cfb3c28 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -103,7 +103,7 @@ pub(crate) async fn upgrade_room_route( // Use the m.room.tombstone event as the predecessor let predecessor = Some(ruma::events::room::create::PreviousRoom::new( body.room_id.clone(), - (*tombstone_event_id).to_owned(), + Some(tombstone_event_id), )); // Send a m.room.create event containing a predecessor field and the applicable diff --git a/src/api/client/unstable.rs b/src/api/client/unstable.rs index 45ad103e..e21eaf21 100644 --- a/src/api/client/unstable.rs +++ b/src/api/client/unstable.rs @@ -2,7 +2,7 @@ use std::collections::BTreeMap; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::Err; +use conduwuit::{Err, Error, Result}; use futures::StreamExt; use ruma::{ OwnedRoomId, @@ -14,16 +14,14 @@ use ruma::{ delete_profile_key, delete_timezone_key, get_profile_key, get_timezone_key, set_profile_key, set_timezone_key, }, - room::get_summary, }, federation, }, - events::room::member::MembershipState, presence::PresenceState, }; use super::{update_avatar_url, update_displayname}; -use crate::{Error, Result, Ruma, RumaResponse}; +use crate::Ruma; /// # `GET /_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms` /// @@ -38,13 +36,10 @@ pub(crate) async fn get_mutual_rooms_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); - if sender_user == &body.user_id { - return Err(Error::BadRequest( - ErrorKind::Unknown, - "You cannot request rooms in common with yourself.", - )); + if sender_user == body.user_id { + return Err!(Request(Unknown("You cannot request rooms in common with yourself."))); } if !services.users.exists(&body.user_id).await { @@ -65,129 +60,6 @@ pub(crate) async fn get_mutual_rooms_route( }) } -/// # `GET /_matrix/client/unstable/im.nheko.summary/rooms/{roomIdOrAlias}/summary` -/// -/// Returns a short description of the state of a room. -/// -/// This is the "wrong" endpoint that some implementations/clients may use -/// according to the MSC. Request and response bodies are the same as -/// `get_room_summary`. -/// -/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) -pub(crate) async fn get_room_summary_legacy( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result> { - get_room_summary(State(services), InsecureClientIp(client), body) - .await - .map(RumaResponse) -} - -/// # `GET /_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}` -/// -/// Returns a short description of the state of a room. 
-/// -/// TODO: support fetching remote room info if we don't know the room -/// -/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) -#[tracing::instrument(skip_all, fields(%client), name = "room_summary")] -pub(crate) async fn get_room_summary( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref(); - - let room_id = services.rooms.alias.resolve(&body.room_id_or_alias).await?; - - if !services.rooms.metadata.exists(&room_id).await { - return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server")); - } - - if sender_user.is_none() - && !services - .rooms - .state_accessor - .is_world_readable(&room_id) - .await - { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Room is not world readable, authentication is required", - )); - } - - Ok(get_summary::msc3266::Response { - room_id: room_id.clone(), - canonical_alias: services - .rooms - .state_accessor - .get_canonical_alias(&room_id) - .await - .ok(), - avatar_url: services - .rooms - .state_accessor - .get_avatar(&room_id) - .await - .into_option() - .unwrap_or_default() - .url, - guest_can_join: services.rooms.state_accessor.guest_can_join(&room_id).await, - name: services.rooms.state_accessor.get_name(&room_id).await.ok(), - num_joined_members: services - .rooms - .state_cache - .room_joined_count(&room_id) - .await - .unwrap_or(0) - .try_into()?, - topic: services - .rooms - .state_accessor - .get_room_topic(&room_id) - .await - .ok(), - world_readable: services - .rooms - .state_accessor - .is_world_readable(&room_id) - .await, - join_rule: services - .rooms - .state_accessor - .get_join_rule(&room_id) - .await - .unwrap_or_default() - .0, - room_type: services - .rooms - .state_accessor - .get_room_type(&room_id) - .await - .ok(), - room_version: services.rooms.state.get_room_version(&room_id).await.ok(), - membership: if let Some(sender_user) = sender_user { - services - .rooms - .state_accessor - .get_member(&room_id, sender_user) - .await - .map_or_else(|_| MembershipState::Leave, |content| content.membership) - .into() - } else { - None - }, - encryption: services - .rooms - .state_accessor - .get_room_encryption(&room_id) - .await - .ok(), - }) -} - /// # `DELETE /_matrix/client/unstable/uk.tcpip.msc4133/profile/:user_id/us.cloke.msc4175.tz` /// /// Deletes the `tz` (timezone) of a user, as per MSC4133 and MSC4175. 
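The access check introduced in the new summary.rs above is easy to lose inside the diff, so here is a minimal, self-contained sketch of its unauthenticated branch. The JoinRule enum and function name below are illustrative stand-ins (the real code matches on ruma's SpaceRoomJoinRule inside user_can_see_summary), so treat this as a sketch of the rule rather than the implementation:

    // Illustrative only: a simplified stand-in for ruma's SpaceRoomJoinRule.
    enum JoinRule {
        Public,
        Knock,
        KnockRestricted,
        Invite,
    }

    // Mirrors the unauthenticated arm of user_can_see_summary: with no sender,
    // the summary is only served for publicly joinable/knockable rooms or for
    // rooms whose history is world readable.
    fn anonymous_can_see_summary(join_rule: &JoinRule, world_readable: bool) -> bool {
        matches!(join_rule, JoinRule::Public | JoinRule::Knock | JoinRule::KnockRestricted)
            || world_readable
    }

    fn main() {
        assert!(anonymous_can_see_summary(&JoinRule::Public, false));
        assert!(anonymous_can_see_summary(&JoinRule::Invite, true));
        assert!(!anonymous_can_see_summary(&JoinRule::Invite, false));
    }

The authenticated branch in the patch additionally admits senders who can see the room's state events, "guest" senders when guest access is enabled, and senders joined to one of the allowed restricted rooms.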
diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index da52e095..f51a5e3a 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -27,7 +27,6 @@ use ruma::{ }, events::{ StateEventType, - room::join_rules::{JoinRule, RoomJoinRulesEventContent}, space::child::{HierarchySpaceChildEvent, SpaceChildEventContent}, }, serde::Raw, @@ -306,25 +305,18 @@ async fn get_room_summary( children_state: Vec>, identifier: &Identifier<'_>, ) -> Result { - let join_rule = self + let (join_rule, allowed_room_ids) = self .services .state_accessor - .room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") - .await - .map_or(JoinRule::Invite, |c: RoomJoinRulesEventContent| c.join_rule); + .get_space_join_rule(room_id) + .await; - let allowed_room_ids = self - .services - .state_accessor - .allowed_room_ids(join_rule.clone()); - - let join_rule = join_rule.clone().into(); let is_accessible_child = self .is_accessible_child(room_id, &join_rule, identifier, &allowed_room_ids) .await; if !is_accessible_child { - return Err!(Request(Forbidden("User is not allowed to see the room",))); + return Err!(Request(Forbidden("User is not allowed to see the room"))); } let name = self.services.state_accessor.get_name(room_id).ok(); @@ -355,6 +347,14 @@ async fn get_room_summary( .get_avatar(room_id) .map(|res| res.into_option().unwrap_or_default().url); + let room_version = self.services.state.get_room_version(room_id).ok(); + + let encryption = self + .services + .state_accessor + .get_room_encryption(room_id) + .ok(); + let ( canonical_alias, name, @@ -364,6 +364,8 @@ async fn get_room_summary( guest_can_join, avatar_url, room_type, + room_version, + encryption, ) = futures::join!( canonical_alias, name, @@ -372,7 +374,9 @@ async fn get_room_summary( world_readable, guest_can_join, avatar_url, - room_type + room_type, + room_version, + encryption, ); Ok(SpaceHierarchyParentSummary { @@ -387,9 +391,9 @@ async fn get_room_summary( allowed_room_ids, join_rule, room_id: room_id.to_owned(), - num_joined_members: num_joined_members - .try_into() - .expect("user count should not be that big"), + num_joined_members: num_joined_members.try_into().unwrap_or_default(), + encryption, + room_version, }) } @@ -487,6 +491,8 @@ async fn cache_insert( join_rule, room_type, allowed_room_ids, + encryption, + room_version, } = child; let summary = SpaceHierarchyParentSummary { @@ -506,6 +512,8 @@ async fn cache_insert( .map(PduEvent::into_stripped_spacechild_state_event) .collect() .await, + encryption, + room_version, }; cache.insert(current_room.to_owned(), Some(CachedSpaceHierarchySummary { summary })); @@ -527,7 +535,9 @@ impl From for SpaceHierarchyRoomsChunk { join_rule, room_type, children_state, - .. + allowed_room_ids, + encryption, + room_version, } = value.summary; Self { @@ -542,6 +552,9 @@ impl From for SpaceHierarchyRoomsChunk { join_rule, room_type, children_state, + encryption, + room_version, + allowed_room_ids, } } } @@ -562,7 +575,9 @@ pub fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyR join_rule, room_type, children_state, - .. 
+ allowed_room_ids, + encryption, + room_version, } = summary; SpaceHierarchyRoomsChunk { @@ -577,5 +592,8 @@ pub fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyR join_rule, room_type, children_state, + encryption, + room_version, + allowed_room_ids, } } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index b57465ce..7fff5935 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -129,22 +129,34 @@ impl Service { .map(|c: RoomTopicEventContent| c.topic) } - /// Returns the join rule (`SpaceRoomJoinRule`) for a given room - pub async fn get_join_rule( + /// Returns the space join rule (`SpaceRoomJoinRule`) for a given room and + /// any allowed room IDs if available. Will default to Invite and empty vec + /// if doesnt exist or invalid, + pub async fn get_space_join_rule( &self, room_id: &RoomId, - ) -> Result<(SpaceRoomJoinRule, Vec)> { + ) -> (SpaceRoomJoinRule, Vec) { self.room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") .await - .map(|c: RoomJoinRulesEventContent| { - (c.join_rule.clone().into(), self.allowed_room_ids(c.join_rule)) - }) - .or_else(|_| Ok((SpaceRoomJoinRule::Invite, vec![]))) + .map_or_else( + |_| (SpaceRoomJoinRule::Invite, vec![]), + |c: RoomJoinRulesEventContent| { + (c.join_rule.clone().into(), self.allowed_room_ids(c.join_rule)) + }, + ) + } + + /// Returns the join rules for a given room (`JoinRule` type). Will default + /// to Invite if doesnt exist or invalid + pub async fn get_join_rules(&self, room_id: &RoomId) -> JoinRule { + self.room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") + .await + .map_or_else(|_| JoinRule::Invite, |c: RoomJoinRulesEventContent| (c.join_rule)) } /// Returns an empty vec if not a restricted room pub fn allowed_room_ids(&self, join_rule: JoinRule) -> Vec { - let mut room_ids = Vec::with_capacity(1); + let mut room_ids = Vec::with_capacity(1); // restricted rooms generally only have 1 allowed room ID if let JoinRule::Restricted(r) | JoinRule::KnockRestricted(r) = join_rule { for rule in r.allow { if let AllowRule::RoomMembership(RoomMembership { room_id: membership }) = rule { From 24be5794774b7585b6ec1e3dbaa901967d241972 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 3 Apr 2025 12:20:10 -0400 Subject: [PATCH 298/328] add appservice MSC4190 support Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +++--- Cargo.toml | 2 +- src/api/client/account.rs | 12 ++-- src/api/client/appservice.rs | 8 ++- src/api/client/device.rs | 112 +++++++++++++++++++++++------- src/service/sending/appservice.rs | 18 +++-- src/service/users/mod.rs | 1 - 7 files changed, 125 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a53258bc..2bcfcee4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3531,7 +3531,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "assign", "js_int", @@ -3551,7 +3551,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "js_int", "ruma-common", @@ -3563,7 +3563,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "as_variant", "assign", @@ -3586,7 +3586,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "as_variant", "base64 0.22.1", @@ -3618,7 +3618,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "as_variant", "indexmap 2.8.0", @@ -3643,7 +3643,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "bytes", "headers", @@ -3665,7 +3665,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3674,7 +3674,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "js_int", "ruma-common", @@ -3684,7 +3684,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3699,7 +3699,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "js_int", "ruma-common", @@ -3711,7 
+3711,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index 940ece86..0abaa2f9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "ea1278657125e9414caada074e8c172bc252fb1c" +rev = "0701341a2fd5a6ea74beada18d5974cc401a4fc1" features = [ "compat", "rand", diff --git a/src/api/client/account.rs b/src/api/client/account.rs index efa8b142..e5894d47 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -318,14 +318,14 @@ pub(crate) async fn register_route( // Success! }, | _ => match body.json_body { - | Some(json) => { + | Some(ref json) => { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services.uiaa.create( &UserId::parse_with_server_name("", services.globals.server_name()) .unwrap(), "".into(), &uiaainfo, - &json, + json, ); return Err(Error::Uiaa(uiaainfo)); }, @@ -373,8 +373,12 @@ pub(crate) async fn register_route( ) .await?; - // Inhibit login does not work for guests - if !is_guest && body.inhibit_login { + if (!is_guest && body.inhibit_login) + || body + .appservice_info + .as_ref() + .is_some_and(|appservice| appservice.registration.device_management) + { return Ok(register::v3::Response { access_token: None, user_id, diff --git a/src/api/client/appservice.rs b/src/api/client/appservice.rs index 84955309..eb6b3312 100644 --- a/src/api/client/appservice.rs +++ b/src/api/client/appservice.rs @@ -22,7 +22,13 @@ pub(crate) async fn appservice_ping( ))); } - if appservice_info.registration.url.is_none() { + if appservice_info.registration.url.is_none() + || appservice_info + .registration + .url + .as_ref() + .is_some_and(|url| url.is_empty() || url == "null") + { return Err!(Request(UrlNotSet( "Appservice does not have a URL set, there is nothing to ping." 
))); diff --git a/src/api/client/device.rs b/src/api/client/device.rs index 6a845aed..7603c866 100644 --- a/src/api/client/device.rs +++ b/src/api/client/device.rs @@ -1,9 +1,9 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, err}; +use conduwuit::{Err, debug, err}; use futures::StreamExt; use ruma::{ - MilliSecondsSinceUnixEpoch, + MilliSecondsSinceUnixEpoch, OwnedDeviceId, api::client::{ device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, error::ErrorKind, @@ -12,7 +12,7 @@ use ruma::{ }; use super::SESSION_ID_LENGTH; -use crate::{Error, Result, Ruma, utils}; +use crate::{Error, Result, Ruma, client::DEVICE_ID_LENGTH, utils}; /// # `GET /_matrix/client/r0/devices` /// @@ -59,26 +59,58 @@ pub(crate) async fn update_device_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); + let appservice = body.appservice_info.as_ref(); - let mut device = services + match services .users .get_device_metadata(sender_user, &body.device_id) .await - .map_err(|_| err!(Request(NotFound("Device not found."))))?; + { + | Ok(mut device) => { + device.display_name.clone_from(&body.display_name); + device.last_seen_ip.clone_from(&Some(client.to_string())); + device + .last_seen_ts + .clone_from(&Some(MilliSecondsSinceUnixEpoch::now())); - device.display_name.clone_from(&body.display_name); - device.last_seen_ip.clone_from(&Some(client.to_string())); - device - .last_seen_ts - .clone_from(&Some(MilliSecondsSinceUnixEpoch::now())); + services + .users + .update_device_metadata(sender_user, &body.device_id, &device) + .await?; - services - .users - .update_device_metadata(sender_user, &body.device_id, &device) - .await?; + Ok(update_device::v3::Response {}) + }, + | Err(_) => { + let Some(appservice) = appservice else { + return Err!(Request(NotFound("Device not found."))); + }; + if !appservice.registration.device_management { + return Err!(Request(NotFound("Device not found."))); + } - Ok(update_device::v3::Response {}) + debug!( + "Creating new device for {sender_user} from appservice {} as MSC4190 is enabled \ + and device ID does not exist", + appservice.registration.id + ); + + let device_id = OwnedDeviceId::from(utils::random_string(DEVICE_ID_LENGTH)); + + services + .users + .create_device( + sender_user, + &device_id, + &appservice.registration.as_token, + None, + Some(client.to_string()), + ) + .await?; + + return Ok(update_device::v3::Response {}); + }, + } } /// # `DELETE /_matrix/client/r0/devices/{deviceId}` @@ -95,8 +127,21 @@ pub(crate) async fn delete_device_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let (sender_user, sender_device) = body.sender(); + let appservice = body.appservice_info.as_ref(); + + if appservice.is_some_and(|appservice| appservice.registration.device_management) { + debug!( + "Skipping UIAA for {sender_user} as this is from an appservice and MSC4190 is \ + enabled" + ); + services + .users + .remove_device(sender_user, &body.device_id) + .await; + + return Ok(delete_device::v3::Response {}); + } // UIAA let mut uiaainfo = UiaaInfo { @@ -120,11 +165,11 @@ pub(crate) async fn delete_device_route( // Success! 
}, | _ => match body.json_body { - | Some(json) => { + | Some(ref json) => { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + .create(sender_user, sender_device, &uiaainfo, json); return Err!(Uiaa(uiaainfo)); }, @@ -142,11 +187,12 @@ pub(crate) async fn delete_device_route( Ok(delete_device::v3::Response {}) } -/// # `PUT /_matrix/client/r0/devices/{deviceId}` +/// # `POST /_matrix/client/v3/delete_devices` /// -/// Deletes the given device. +/// Deletes the given list of devices. /// -/// - Requires UIAA to verify user password +/// - Requires UIAA to verify user password unless from an appservice with +/// MSC4190 enabled. /// /// For each device: /// - Invalidates access token @@ -158,8 +204,20 @@ pub(crate) async fn delete_devices_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let (sender_user, sender_device) = body.sender(); + let appservice = body.appservice_info.as_ref(); + + if appservice.is_some_and(|appservice| appservice.registration.device_management) { + debug!( + "Skipping UIAA for {sender_user} as this is from an appservice and MSC4190 is \ + enabled" + ); + for device_id in &body.devices { + services.users.remove_device(sender_user, device_id).await; + } + + return Ok(delete_devices::v3::Response {}); + } // UIAA let mut uiaainfo = UiaaInfo { @@ -183,11 +241,11 @@ pub(crate) async fn delete_devices_route( // Success! }, | _ => match body.json_body { - | Some(json) => { + | Some(ref json) => { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + .create(sender_user, sender_device, &uiaainfo, json); return Err(Error::Uiaa(uiaainfo)); }, diff --git a/src/service/sending/appservice.rs b/src/service/sending/appservice.rs index 7fa0be9a..c7fae11f 100644 --- a/src/service/sending/appservice.rs +++ b/src/service/sending/appservice.rs @@ -25,6 +25,10 @@ where return Ok(None); }; + if dest == *"null" || dest.is_empty() { + return Ok(None); + } + trace!("Appservice URL \"{dest}\", Appservice ID: {}", registration.id); let hs_token = registration.hs_token.as_str(); @@ -34,7 +38,11 @@ where SendAccessToken::IfRequired(hs_token), &VERSIONS, ) - .map_err(|e| err!(BadServerResponse(warn!("Failed to find destination {dest}: {e}"))))? + .map_err(|e| { + err!(BadServerResponse( + warn!(appservice = %registration.id, "Failed to find destination {dest}: {e:?}") + )) + })? 
.map(BytesMut::freeze); let mut parts = http_request.uri().clone().into_parts(); @@ -51,7 +59,7 @@ where let reqwest_request = reqwest::Request::try_from(http_request)?; let mut response = client.execute(reqwest_request).await.map_err(|e| { - warn!("Could not send request to appservice \"{}\" at {dest}: {e}", registration.id); + warn!("Could not send request to appservice \"{}\" at {dest}: {e:?}", registration.id); e })?; @@ -71,7 +79,7 @@ where if !status.is_success() { debug_error!("Appservice response bytes: {:?}", utils::string_from_bytes(&body)); - return Err!(BadServerResponse(error!( + return Err!(BadServerResponse(warn!( "Appservice \"{}\" returned unsuccessful HTTP response {status} at {dest}", registration.id ))); @@ -84,8 +92,8 @@ where ); response.map(Some).map_err(|e| { - err!(BadServerResponse(error!( - "Appservice \"{}\" returned invalid response bytes {dest}: {e}", + err!(BadServerResponse(warn!( + "Appservice \"{}\" returned invalid/malformed response bytes {dest}: {e}", registration.id ))) }) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 5265e64b..87a8b93b 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -350,7 +350,6 @@ impl Service { token: &str, ) -> Result<()> { let key = (user_id, device_id); - // should not be None, but we shouldn't assert either lol... if self.db.userdeviceid_metadata.qry(&key).await.is_err() { return Err!(Database(error!( ?user_id, From f14756fb767abda97dc966ad842c958d970d77b9 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 3 Apr 2025 12:20:53 -0400 Subject: [PATCH 299/328] leave room locally if room is banned, rescind knocks on deactivation too Signed-off-by: June Clementine Strawberry --- src/api/client/membership.rs | 87 +++++++++++++++++++++------- src/api/client/sync/v3.rs | 12 ++-- src/api/client/sync/v4.rs | 5 +- src/api/client/sync/v5.rs | 5 +- src/service/rooms/state_cache/mod.rs | 6 +- 5 files changed, 87 insertions(+), 28 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 315a363c..ef40e972 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -475,9 +475,9 @@ pub(crate) async fn leave_room_route( State(services): State, body: Ruma, ) -> Result { - leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone()).await?; - - Ok(leave_room::v3::Response::new()) + leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone()) + .await + .map(|()| leave_room::v3::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/invite` @@ -1763,8 +1763,8 @@ pub(crate) async fn invite_helper( Ok(()) } -// Make a user leave all their joined rooms, forgets all rooms, and ignores -// errors +// Make a user leave all their joined rooms, rescinds knocks, forgets all rooms, +// and ignores errors pub async fn leave_all_rooms(services: &Services, user_id: &UserId) { let rooms_joined = services .rooms @@ -1778,7 +1778,17 @@ pub async fn leave_all_rooms(services: &Services, user_id: &UserId) { .rooms_invited(user_id) .map(|(r, _)| r); - let all_rooms: Vec<_> = rooms_joined.chain(rooms_invited).collect().await; + let rooms_knocked = services + .rooms + .state_cache + .rooms_knocked(user_id) + .map(|(r, _)| r); + + let all_rooms: Vec<_> = rooms_joined + .chain(rooms_invited) + .chain(rooms_knocked) + .collect() + .await; for room_id in all_rooms { // ignore errors @@ -1795,7 +1805,40 @@ pub async fn leave_room( user_id: &UserId, room_id: &RoomId, reason: Option, -) -> Result<()> { +) -> 
Result { + let default_member_content = RoomMemberEventContent { + membership: MembershipState::Leave, + reason: reason.clone(), + join_authorized_via_users_server: None, + is_direct: None, + avatar_url: None, + displayname: None, + third_party_invite: None, + blurhash: None, + }; + + if services.rooms.metadata.is_banned(room_id).await + || services.rooms.metadata.is_disabled(room_id).await + { + // the room is banned/disabled, the room must be rejected locally since we + // cant/dont want to federate with this server + services + .rooms + .state_cache + .update_membership( + room_id, + user_id, + default_member_content, + user_id, + None, + None, + true, + ) + .await?; + + return Ok(()); + } + // Ask a remote server if we don't have this room and are not knocking on it if !services .rooms @@ -1828,7 +1871,7 @@ pub async fn leave_room( .update_membership( room_id, user_id, - RoomMemberEventContent::new(MembershipState::Leave), + default_member_content, user_id, last_state, None, @@ -1848,26 +1891,23 @@ pub async fn leave_room( ) .await else { - // Fix for broken rooms - warn!( + debug_warn!( "Trying to leave a room you are not a member of, marking room as left locally." ); - services + return services .rooms .state_cache .update_membership( room_id, user_id, - RoomMemberEventContent::new(MembershipState::Leave), + default_member_content, user_id, None, None, true, ) - .await?; - - return Ok(()); + .await; }; services @@ -1897,7 +1937,7 @@ async fn remote_leave_room( room_id: &RoomId, ) -> Result<()> { let mut make_leave_response_and_server = - Err!(BadServerResponse("No server available to assist in leaving.")); + Err!(BadServerResponse("No remote server available to assist in leaving {room_id}.")); let mut servers: HashSet = services .rooms @@ -1977,20 +2017,25 @@ async fn remote_leave_room( let (make_leave_response, remote_server) = make_leave_response_and_server?; let Some(room_version_id) = make_leave_response.room_version else { - return Err!(BadServerResponse("Remote room version is not supported by conduwuit")); + return Err!(BadServerResponse(warn!( + "No room version was returned by {remote_server} for {room_id}, room version is \ + likely not supported by conduwuit" + ))); }; if !services.server.supported_room_version(&room_version_id) { - return Err!(BadServerResponse( - "Remote room version {room_version_id} is not supported by conduwuit" - )); + return Err!(BadServerResponse(warn!( + "Remote room version {room_version_id} for {room_id} is not supported by conduwuit", + ))); } let mut leave_event_stub = serde_json::from_str::( make_leave_response.event.get(), ) .map_err(|e| { - err!(BadServerResponse("Invalid make_leave event json received from server: {e:?}")) + err!(BadServerResponse(warn!( + "Invalid make_leave event json received from {remote_server} for {room_id}: {e:?}" + ))) })?; // TODO: Is origin needed? 
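Before the sync changes below, a short stand-alone sketch of the branch order that the reworked leave_room establishes; RoomFlags, pick_leave_action and the other names here are hypothetical, simplified stand-ins rather than conduwuit APIs:

    // Illustrative only: models the decision order added to leave_room.
    struct RoomFlags {
        banned: bool,
        disabled: bool,
        joined_or_invited_locally: bool,
    }

    enum LeaveAction {
        RejectLocally, // write a local "leave" membership, never federate
        RemoteLeave,   // ask remote servers via make_leave/send_leave
        LocalLeave,    // build and append the leave event locally
    }

    fn pick_leave_action(room: &RoomFlags) -> LeaveAction {
        if room.banned || room.disabled {
            // Banned/disabled rooms are resolved purely locally, since we
            // cannot or do not want to federate with their servers.
            LeaveAction::RejectLocally
        } else if !room.joined_or_invited_locally {
            LeaveAction::RemoteLeave
        } else {
            LeaveAction::LocalLeave
        }
    }

    fn main() {
        let banned = RoomFlags { banned: true, disabled: false, joined_or_invited_locally: true };
        assert!(matches!(pick_leave_action(&banned), LeaveAction::RejectLocally));
    }

In the actual patch the remote branch is only taken when the server neither has the room nor is knocking on it, and a failed local leave likewise falls back to simply marking the membership as left.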
diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 530c1278..83ffa55a 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -15,6 +15,7 @@ use conduwuit::{ math::ruma_from_u64, stream::{BroadbandExt, Tools, TryExpect, WidebandExt}, }, + warn, }; use conduwuit_service::{ Services, @@ -428,9 +429,12 @@ async fn handle_left_room( return Ok(None); } - if !services.rooms.metadata.exists(room_id).await { + if !services.rooms.metadata.exists(room_id).await + || services.rooms.metadata.is_disabled(room_id).await + || services.rooms.metadata.is_banned(room_id).await + { // This is just a rejected invite, not a room we know - // Insert a leave event anyways + // Insert a leave event anyways for the client let event = PduEvent { event_id: EventId::new(services.globals.server_name()), sender: sender_user.to_owned(), @@ -489,7 +493,7 @@ async fn handle_left_room( .room_state_get_id(room_id, &StateEventType::RoomMember, sender_user.as_str()) .await else { - error!("Left room but no left state event"); + warn!("Left {room_id} but no left state event"); return Ok(None); }; @@ -499,7 +503,7 @@ async fn handle_left_room( .pdu_shortstatehash(&left_event_id) .await else { - error!(event_id = %left_event_id, "Leave event has no state"); + warn!(event_id = %left_event_id, "Leave event has no state in {room_id}"); return Ok(None); }; diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 7e902973..f7edb8c0 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -438,7 +438,10 @@ pub(crate) async fn sync_events_v4_route( let mut known_subscription_rooms = BTreeSet::new(); for (room_id, room) in &body.room_subscriptions { - if !services.rooms.metadata.exists(room_id).await { + if !services.rooms.metadata.exists(room_id).await + || services.rooms.metadata.is_disabled(room_id).await + || services.rooms.metadata.is_banned(room_id).await + { continue; } let todo_room = diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index 48b41b21..c4e71d88 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -214,7 +214,10 @@ async fn fetch_subscriptions( ) { let mut known_subscription_rooms = BTreeSet::new(); for (room_id, room) in &body.room_subscriptions { - if !services.rooms.metadata.exists(room_id).await { + if !services.rooms.metadata.exists(room_id).await + || services.rooms.metadata.is_disabled(room_id).await + || services.rooms.metadata.is_banned(room_id).await + { continue; } let todo_room = diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 23ba0520..d3dbc143 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -40,6 +40,7 @@ struct Services { account_data: Dep, config: Dep, globals: Dep, + metadata: Dep, state_accessor: Dep, users: Dep, } @@ -73,6 +74,7 @@ impl crate::Service for Service { account_data: args.depend::("account_data"), config: args.depend::("config"), globals: args.depend::("globals"), + metadata: args.depend::("rooms::metadata"), state_accessor: args .depend::("rooms::state_accessor"), users: args.depend::("users"), @@ -271,7 +273,9 @@ impl Service { self.mark_as_left(user_id, room_id); if self.services.globals.user_is_local(user_id) - && self.services.config.forget_forced_upon_leave + && (self.services.config.forget_forced_upon_leave + || self.services.metadata.is_banned(room_id).await + || self.services.metadata.is_disabled(room_id).await) { self.forget(room_id, user_id); } From 
5d1404e9dfff9bc0e5bed4bab6d75c9c94b38183 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 02:52:42 +0000 Subject: [PATCH 300/328] fix well-known using the hooked resolver Signed-off-by: Jason Volk --- src/service/client/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/client/mod.rs b/src/service/client/mod.rs index d51e5721..1aeeb492 100644 --- a/src/service/client/mod.rs +++ b/src/service/client/mod.rs @@ -56,7 +56,7 @@ impl crate::Service for Service { .build()?, well_known: base(config)? - .dns_resolver(resolver.resolver.hooked.clone()) + .dns_resolver(resolver.resolver.clone()) .connect_timeout(Duration::from_secs(config.well_known_conn_timeout)) .read_timeout(Duration::from_secs(config.well_known_timeout)) .timeout(Duration::from_secs(config.well_known_timeout)) From 58adb6fead27c863849c63184f145be209e40e1b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 04:05:42 +0000 Subject: [PATCH 301/328] upgrade hickory and hyper-util dependencies Signed-off-by: Jason Volk --- Cargo.lock | 195 +++++++++++++++++++++++++++++++-- Cargo.toml | 10 +- src/service/resolver/actual.rs | 39 ++++--- src/service/resolver/dns.rs | 24 ++-- 4 files changed, 229 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2bcfcee4..545f0f0d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -142,6 +142,17 @@ dependencies = [ "zstd-safe", ] +[[package]] +name = "async-recursion" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-stream" version = "0.3.6" @@ -927,7 +938,7 @@ dependencies = [ "const-str", "either", "futures", - "hickory-resolver", + "hickory-resolver 0.25.1", "http", "image", "ipaddress", @@ -1061,6 +1072,12 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "critical-section" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" + [[package]] name = "crokey" version = "1.1.1" @@ -1584,6 +1601,19 @@ dependencies = [ "slab", ] +[[package]] +name = "generator" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" +dependencies = [ + "cfg-if", + "libc", + "log", + "rustversion", + "windows 0.58.0", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -1769,6 +1799,34 @@ dependencies = [ "url", ] +[[package]] +name = "hickory-proto" +version = "0.25.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d844af74f7b799e41c78221be863bade11c430d46042c3b49ca8ae0c6d27287" +dependencies = [ + "async-recursion", + "async-trait", + "cfg-if", + "critical-section", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna", + "ipnet", + "once_cell", + "rand 0.9.0", + "ring", + "serde", + "thiserror 2.0.12", + "tinyvec", + "tokio", + "tracing", + "url", +] + [[package]] name = "hickory-resolver" version = "0.24.4" @@ -1777,7 +1835,7 @@ checksum = "cbb117a1ca520e111743ab2f6688eddee69db4e0ea242545a604dce8a66fd22e" dependencies = [ "cfg-if", "futures-util", - "hickory-proto", + "hickory-proto 0.24.4", "ipconfig", "lru-cache", "once_cell", @@ -1790,6 +1848,28 @@ dependencies = [ "tracing", ] +[[package]] +name = "hickory-resolver" +version = "0.25.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a128410b38d6f931fcc6ca5c107a3b02cabd6c05967841269a4ad65d23c44331" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto 0.25.1", + "ipconfig", + "moka", + "once_cell", + "parking_lot", + "rand 0.9.0", + "resolv-conf", + "serde", + "smallvec", + "thiserror 2.0.12", + "tokio", + "tracing", +] + [[package]] name = "hmac" version = "0.12.1" @@ -1816,7 +1896,7 @@ checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba" dependencies = [ "cfg-if", "libc", - "windows", + "windows 0.52.0", ] [[package]] @@ -1949,9 +2029,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.8" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" +checksum = "497bbc33a26fdd4af9ed9c70d63f61cf56a938375fbb32df34db9b1cd6d643f2" dependencies = [ "bytes", "futures-channel", @@ -1959,10 +2039,10 @@ dependencies = [ "http", "http-body", "hyper", + "libc", "pin-project-lite", "socket2", "tokio", - "tower 0.4.13", "tower-service", "tracing", ] @@ -2439,6 +2519,19 @@ dependencies = [ "futures-sink", ] +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber", +] + [[package]] name = "loop9" version = "0.1.5" @@ -2609,6 +2702,25 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "moka" +version = "0.12.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "loom", + "parking_lot", + "portable-atomic", + "rustc_version", + "smallvec", + "tagptr", + "thiserror 1.0.69", + "uuid", +] + [[package]] name = "new_debug_unreachable" version = "1.0.6" @@ -2773,6 +2885,10 @@ name = "once_cell" version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +dependencies = [ + "critical-section", + "portable-atomic", +] [[package]] name = "openssl-probe" @@ -3052,6 +3168,12 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "portable-atomic" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" + [[package]] name = "powerfmt" version = "0.2.0" @@ -3463,7 +3585,7 @@ dependencies = [ "futures-core", "futures-util", "h2", - "hickory-resolver", + "hickory-resolver 0.24.4", "http", "http-body", "http-body-util", @@ -3893,6 +4015,12 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + [[package]] name = "scopeguard" version = "1.2.0" @@ -4464,6 +4592,12 @@ dependencies = [ "version-compare", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "target-lexicon" version = "0.12.16" @@ -5367,7 +5501,17 @@ version = "0.52.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ - "windows-core", + "windows-core 0.52.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" +dependencies = [ + "windows-core 0.58.0", "windows-targets 0.52.6", ] @@ -5380,6 +5524,41 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-core" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-implement" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-interface" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "windows-registry" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index 0abaa2f9..6c5c291f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -284,8 +284,7 @@ features = [ ] [workspace.dependencies.hyper-util] -# hyper-util >=0.1.9 seems to have DNS issues -version = "=0.1.8" +version = "0.1.11" default-features = false features = [ "server-auto", @@ -306,8 +305,13 @@ default-features = false features = ["env", "toml"] [workspace.dependencies.hickory-resolver] -version = "0.24.2" +version = "0.25.1" default-features = false +features = [ + "serde", + "system-config", + "tokio", +] # Used for conduwuit::Error type [workspace.dependencies.thiserror] diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 1ad76f66..0151c4d7 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -5,7 +5,7 @@ use std::{ use conduwuit::{Err, Result, debug, debug_info, err, error, trace}; use futures::{FutureExt, TryFutureExt}; -use hickory_resolver::error::ResolveError; +use hickory_resolver::ResolveError; use ipaddress::IPAddress; use ruma::ServerName; @@ -334,25 +334,28 @@ impl super::Service { } fn handle_resolve_error(e: &ResolveError, host: &'_ str) -> Result<()> { - use hickory_resolver::error::ResolveErrorKind; + use hickory_resolver::{ResolveErrorKind::Proto, proto::ProtoErrorKind}; - match *e.kind() { - | ResolveErrorKind::NoRecordsFound { .. } => { - // Raise to debug_warn if we can find out the result wasn't from cache - debug!(%host, "No DNS records found: {e}"); - Ok(()) - }, - | ResolveErrorKind::Timeout => { - Err!(warn!(%host, "DNS {e}")) - }, - | ResolveErrorKind::NoConnections => { - error!( - "Your DNS server is overloaded and has ran out of connections. It is \ - strongly recommended you remediate this issue to ensure proper federation \ - connectivity." - ); + match e.kind() { + | Proto(e) => match e.kind() { + | ProtoErrorKind::NoRecordsFound { .. 
} => { + // Raise to debug_warn if we can find out the result wasn't from cache + debug!(%host, "No DNS records found: {e}"); + Ok(()) + }, + | ProtoErrorKind::Timeout => { + Err!(warn!(%host, "DNS {e}")) + }, + | ProtoErrorKind::NoConnections => { + error!( + "Your DNS server is overloaded and has ran out of connections. It is \ + strongly recommended you remediate this issue to ensure proper \ + federation connectivity." + ); - Err!(error!(%host, "DNS error: {e}")) + Err!(error!(%host, "DNS error: {e}")) + }, + | _ => Err!(error!(%host, "DNS error: {e}")), }, | _ => Err!(error!(%host, "DNS error: {e}")), } diff --git a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs index e4245a5b..3a0b2551 100644 --- a/src/service/resolver/dns.rs +++ b/src/service/resolver/dns.rs @@ -2,19 +2,19 @@ use std::{net::SocketAddr, sync::Arc, time::Duration}; use conduwuit::{Result, Server, err}; use futures::FutureExt; -use hickory_resolver::{TokioAsyncResolver, lookup_ip::LookupIp}; +use hickory_resolver::{TokioResolver, lookup_ip::LookupIp}; use reqwest::dns::{Addrs, Name, Resolve, Resolving}; use super::cache::{Cache, CachedOverride}; pub struct Resolver { - pub(crate) resolver: Arc, + pub(crate) resolver: Arc, pub(crate) hooked: Arc, server: Arc, } pub(crate) struct Hooked { - resolver: Arc, + resolver: Arc, cache: Arc, server: Arc, } @@ -42,7 +42,7 @@ impl Resolver { let mut ns = sys_conf.clone(); if config.query_over_tcp_only { - ns.protocol = hickory_resolver::config::Protocol::Tcp; + ns.protocol = hickory_resolver::proto::xfer::Protocol::Tcp; } ns.trust_negative_responses = !config.query_all_nameservers; @@ -51,6 +51,7 @@ impl Resolver { } opts.cache_size = config.dns_cache_entries as usize; + opts.preserve_intermediates = true; opts.negative_min_ttl = Some(Duration::from_secs(config.dns_min_ttl_nxdomain)); opts.negative_max_ttl = Some(Duration::from_secs(60 * 60 * 24 * 30)); opts.positive_min_ttl = Some(Duration::from_secs(config.dns_min_ttl)); @@ -60,8 +61,7 @@ impl Resolver { opts.try_tcp_on_error = config.dns_tcp_fallback; opts.num_concurrent_reqs = 1; opts.edns0 = true; - opts.shuffle_dns_servers = true; - opts.rotate = true; + opts.case_randomization = true; opts.ip_strategy = match config.ip_lookup_strategy { | 1 => hickory_resolver::config::LookupIpStrategy::Ipv4Only, | 2 => hickory_resolver::config::LookupIpStrategy::Ipv6Only, @@ -69,9 +69,13 @@ impl Resolver { | 4 => hickory_resolver::config::LookupIpStrategy::Ipv6thenIpv4, | _ => hickory_resolver::config::LookupIpStrategy::Ipv4thenIpv6, }; - opts.authentic_data = false; - let resolver = Arc::new(TokioAsyncResolver::tokio(conf, opts)); + let rt_prov = hickory_resolver::proto::runtime::TokioRuntimeProvider::new(); + let conn_prov = hickory_resolver::name_server::TokioConnectionProvider::new(rt_prov); + let mut builder = TokioResolver::builder_with_config(conf, conn_prov); + *builder.options_mut() = opts; + let resolver = Arc::new(builder.build()); + Ok(Arc::new(Self { resolver: resolver.clone(), hooked: Arc::new(Hooked { resolver, cache, server: server.clone() }), @@ -105,7 +109,7 @@ impl Resolve for Hooked { async fn hooked_resolve( cache: Arc, server: Arc, - resolver: Arc, + resolver: Arc, name: Name, ) -> Result> { match cache.get_override(name.as_str()).await { @@ -129,7 +133,7 @@ async fn hooked_resolve( async fn resolve_to_reqwest( server: Arc, - resolver: Arc, + resolver: Arc, name: Name, ) -> ResolvingResult { use std::{io, io::ErrorKind::Interrupted}; From 0b56204f89d37470346c1940e70354deebfd1a3a Mon Sep 17 00:00:00 2001 
From: Jason Volk Date: Thu, 3 Apr 2025 04:34:11 +0000 Subject: [PATCH 302/328] bump additional dependencies Signed-off-by: Jason Volk --- Cargo.lock | 264 ++++++++++++++++++++++++++++++----------------------- Cargo.toml | 20 ++-- 2 files changed, 161 insertions(+), 123 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 545f0f0d..da33af05 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -232,9 +232,9 @@ dependencies = [ [[package]] name = "aws-lc-rs" -version = "1.12.6" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dabb68eb3a7aa08b46fddfd59a3d55c978243557a90ab804769f7e20e67d2b01" +checksum = "19b756939cb2f8dc900aa6dcd505e6e2428e9cae7ff7b028c49e3946efa70878" dependencies = [ "aws-lc-sys", "zeroize", @@ -242,9 +242,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.27.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77926887776171ced7d662120a75998e444d3750c951abfe07f90da130514b1f" +checksum = "b9f7720b74ed28ca77f90769a71fd8c637a0137f6fae4ae947e1050229cff57f" dependencies = [ "bindgen 0.69.5", "cc", @@ -663,9 +663,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.34" +version = "4.5.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e958897981290da2a852763fe9cdb89cd36977a5d729023127095fa94d95e2ff" +checksum = "d8aa86934b44c19c50f87cc2790e19f54f7a67aedb64101c2e1a2e5ecfb73944" dependencies = [ "clap_builder", "clap_derive", @@ -673,9 +673,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.34" +version = "4.5.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83b0f35019843db2160b5bb19ae09b4e6411ac33fc6a712003c33e03090e2489" +checksum = "2414dbb2dd0695280da6ea9261e327479e9d37b0630f6b53ba2a11c60c679fd9" dependencies = [ "anstyle", "clap_lex", @@ -742,8 +742,8 @@ dependencies = [ "opentelemetry-jaeger", "opentelemetry_sdk", "sentry", - "sentry-tower 0.35.0", - "sentry-tracing 0.35.0", + "sentry-tower", + "sentry-tracing", "tokio", "tokio-metrics", "tracing", @@ -916,8 +916,8 @@ dependencies = [ "rustls", "sd-notify", "sentry", - "sentry-tower 0.35.0", - "sentry-tracing 0.35.0", + "sentry-tower", + "sentry-tracing", "serde_json", "tokio", "tower 0.5.2", @@ -1454,9 +1454,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11faaf5a5236997af9848be0bef4db95824b1d534ebc64d0f0c6cf3e67bd38dc" +checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" dependencies = [ "crc32fast", "miniz_oxide", @@ -2016,9 +2016,9 @@ dependencies = [ [[package]] name = "hyper-timeout" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ "hyper", "hyper-util", @@ -2336,10 +2336,11 @@ checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" dependencies = [ + "getrandom 0.3.2", "libc", ] @@ -3574,9 +3575,9 @@ checksum = 
"2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" -version = "0.12.9" +version = "0.12.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" +checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb" dependencies = [ "async-compression", "base64 0.22.1", @@ -3612,6 +3613,7 @@ dependencies = [ "tokio-rustls", "tokio-socks", "tokio-util", + "tower 0.5.2", "tower-service", "url", "wasm-bindgen", @@ -4067,21 +4069,21 @@ checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" [[package]] name = "sentry" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a7332159e544e34db06b251b1eda5e546bd90285c3f58d9c8ff8450b484e0da" +checksum = "255914a8e53822abd946e2ce8baa41d4cded6b8e938913b7f7b9da5b7ab44335" dependencies = [ "httpdate", "reqwest", "rustls", "sentry-backtrace", "sentry-contexts", - "sentry-core 0.36.0", + "sentry-core", "sentry-debug-images", "sentry-log", "sentry-panic", - "sentry-tower 0.36.0", - "sentry-tracing 0.36.0", + "sentry-tower", + "sentry-tracing", "tokio", "ureq", "webpki-roots", @@ -4089,107 +4091,83 @@ dependencies = [ [[package]] name = "sentry-backtrace" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "565ec31ad37bab8e6d9f289f34913ed8768347b133706192f10606dabd5c6bc4" +checksum = "00293cd332a859961f24fd69258f7e92af736feaeb91020cff84dac4188a4302" dependencies = [ "backtrace", "once_cell", "regex", - "sentry-core 0.36.0", + "sentry-core", ] [[package]] name = "sentry-contexts" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e860275f25f27e8c0c7726ce116c7d5c928c5bba2ee73306e52b20a752298ea6" +checksum = "961990f9caa76476c481de130ada05614cd7f5aa70fb57c2142f0e09ad3fb2aa" dependencies = [ "hostname", "libc", "os_info", "rustc_version", - "sentry-core 0.36.0", + "sentry-core", "uname", ] [[package]] name = "sentry-core" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9f8b6dcd4fbae1e3e22b447f32670360b27e31b62ab040f7fb04e0f80c04d92" +checksum = "1a6409d845707d82415c800290a5d63be5e3df3c2e417b0997c60531dfbd35ef" dependencies = [ "once_cell", "rand 0.8.5", - "sentry-types 0.35.0", - "serde", - "serde_json", -] - -[[package]] -name = "sentry-core" -version = "0.36.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "653942e6141f16651273159f4b8b1eaeedf37a7554c00cd798953e64b8a9bf72" -dependencies = [ - "once_cell", - "rand 0.8.5", - "sentry-types 0.36.0", + "sentry-types", "serde", "serde_json", ] [[package]] name = "sentry-debug-images" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60bc2154e6df59beed0ac13d58f8dfaf5ad20a88548a53e29e4d92e8e835c2" +checksum = "71ab5df4f3b64760508edfe0ba4290feab5acbbda7566a79d72673065888e5cc" dependencies = [ "findshlibs", "once_cell", - "sentry-core 0.36.0", + "sentry-core", ] [[package]] name = "sentry-log" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c96d796cba1b3a0793e7f53edc420c61f9419fba8fb34ad5519f5c7d01af6b2" +checksum = "693841da8dfb693af29105edfbea1d91348a13d23dd0a5d03761eedb9e450c46" dependencies = [ "log", - "sentry-core 
0.36.0", + "sentry-core", ] [[package]] name = "sentry-panic" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "105e3a956c8aa9dab1e4087b1657b03271bfc49d838c6ae9bfc7c58c802fd0ef" +checksum = "609b1a12340495ce17baeec9e08ff8ed423c337c1a84dffae36a178c783623f3" dependencies = [ "sentry-backtrace", - "sentry-core 0.36.0", + "sentry-core", ] [[package]] name = "sentry-tower" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcdaf9b1939589476bd57751d12a9653bbfe356610fc476d03d7683189183ab7" -dependencies = [ - "sentry-core 0.35.0", - "tower-layer", - "tower-service", -] - -[[package]] -name = "sentry-tower" -version = "0.36.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "082f781dfc504d984e16d99f8dbf94d6ee4762dd0fc28de25713d0f900a8164d" +checksum = "4b98005537e38ee3bc10e7d36e7febe9b8e573d03f2ddd85fcdf05d21f9abd6d" dependencies = [ "http", "pin-project", - "sentry-core 0.36.0", + "sentry-core", "tower-layer", "tower-service", "url", @@ -4197,49 +4175,21 @@ dependencies = [ [[package]] name = "sentry-tracing" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "263f73c757ed7915d3e1e34625eae18cad498a95b4261603d4ce3f87b159a6f0" -dependencies = [ - "sentry-core 0.35.0", - "tracing-core", - "tracing-subscriber", -] - -[[package]] -name = "sentry-tracing" -version = "0.36.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e75c831b4d8b34a5aec1f65f67c5d46a26c7c5d3c7abd8b5ef430796900cf8" +checksum = "49f4e86402d5c50239dc7d8fd3f6d5e048221d5fcb4e026d8d50ab57fe4644cb" dependencies = [ "sentry-backtrace", - "sentry-core 0.36.0", + "sentry-core", "tracing-core", "tracing-subscriber", ] [[package]] name = "sentry-types" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a71ed3a389948a6a6d92b98e997a2723ca22f09660c5a7b7388ecd509a70a527" -dependencies = [ - "debugid", - "hex", - "rand 0.8.5", - "serde", - "serde_json", - "thiserror 1.0.69", - "time", - "url", - "uuid", -] - -[[package]] -name = "sentry-types" -version = "0.36.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d4203359e60724aa05cf2385aaf5d4f147e837185d7dd2b9ccf1ee77f4420c8" +checksum = "3d3f117b8755dbede8260952de2aeb029e20f432e72634e8969af34324591631" dependencies = [ "debugid", "hex", @@ -5532,8 +5482,8 @@ checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" dependencies = [ "windows-implement", "windows-interface", - "windows-result", - "windows-strings", + "windows-result 0.2.0", + "windows-strings 0.1.0", "windows-targets 0.52.6", ] @@ -5560,14 +5510,20 @@ dependencies = [ ] [[package]] -name = "windows-registry" -version = "0.2.0" +name = "windows-link" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" + +[[package]] +name = "windows-registry" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" dependencies = [ - "windows-result", - "windows-strings", - "windows-targets 0.52.6", + "windows-result 0.3.2", + "windows-strings 0.3.1", + "windows-targets 0.53.0", ] 
[[package]] @@ -5579,16 +5535,34 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-result" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-strings" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" dependencies = [ - "windows-result", + "windows-result 0.2.0", "windows-targets 0.52.6", ] +[[package]] +name = "windows-strings" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-sys" version = "0.48.0" @@ -5640,13 +5614,29 @@ dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm", + "windows_i686_gnullvm 0.52.6", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] +[[package]] +name = "windows-targets" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" +dependencies = [ + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -5659,6 +5649,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + [[package]] name = "windows_aarch64_msvc" version = "0.48.5" @@ -5671,6 +5667,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + [[package]] name = "windows_i686_gnu" version = "0.48.5" @@ -5683,12 +5685,24 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + [[package]] name = "windows_i686_msvc" version = "0.48.5" @@ 
-5701,6 +5715,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + [[package]] name = "windows_x86_64_gnu" version = "0.48.5" @@ -5713,6 +5733,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" @@ -5725,6 +5751,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + [[package]] name = "windows_x86_64_msvc" version = "0.48.5" @@ -5737,6 +5769,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + [[package]] name = "winnow" version = "0.7.4" diff --git a/Cargo.toml b/Cargo.toml index 6c5c291f..3ffa9e44 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -141,12 +141,12 @@ features = [ ] [workspace.dependencies.rustls] -version = "0.23.19" +version = "0.23.25" default-features = false features = ["aws_lc_rs"] [workspace.dependencies.reqwest] -version = "0.12.9" +version = "0.12.15" default-features = false features = [ "rustls-tls-native-roots", @@ -204,7 +204,7 @@ features = [ # logging [workspace.dependencies.log] -version = "0.4.22" +version = "0.4.27" default-features = false [workspace.dependencies.tracing] version = "0.1.41" @@ -224,7 +224,7 @@ default-features = false # used for conduwuit's CLI and admin room command parsing [workspace.dependencies.clap] -version = "4.5.23" +version = "4.5.35" default-features = false features = [ "derive", @@ -320,7 +320,7 @@ default-features = false # Used when hashing the state [workspace.dependencies.ring] -version = "0.17.8" +version = "0.17.14" default-features = false # Used to make working with iterators easier, was already a transitive depdendency @@ -427,7 +427,7 @@ features = ["rt-tokio"] # optional sentry metrics for crash/panic reporting [workspace.dependencies.sentry] -version = "0.36.0" +version = "0.37.0" default-features = false features = [ "backtrace", @@ -443,9 +443,9 @@ features = [ ] [workspace.dependencies.sentry-tracing] -version = "0.35.0" +version = "0.37.0" [workspace.dependencies.sentry-tower] -version = "0.35.0" +version = "0.37.0" # jemalloc usage [workspace.dependencies.tikv-jemalloc-sys] @@ -479,7 +479,7 @@ default-features = false features = ["resource"] [workspace.dependencies.sd-notify] -version = "0.4.3" +version = "0.4.5" default-features = false [workspace.dependencies.hardened_malloc-rs] 
@@ -496,7 +496,7 @@ version = "0.4.3" default-features = false [workspace.dependencies.termimad] -version = "0.31.1" +version = "0.31.2" default-features = false [workspace.dependencies.checked_ops] From f9529937ce9a8dacf186fb4f60ef0c3315bb02a0 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 19:36:24 +0000 Subject: [PATCH 303/328] patch hyper-util due to conflicts with federation resolver hooks Signed-off-by: Jason Volk --- Cargo.lock | 3 +-- Cargo.toml | 6 ++++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index da33af05..8918a631 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2030,8 +2030,7 @@ dependencies = [ [[package]] name = "hyper-util" version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497bbc33a26fdd4af9ed9c70d63f61cf56a938375fbb32df34db9b1cd6d643f2" +source = "git+https://github.com/girlbossceo/hyper-util?rev=e4ae7628fe4fcdacef9788c4c8415317a4489941#e4ae7628fe4fcdacef9788c4c8415317a4489941" dependencies = [ "bytes", "futures-channel", diff --git a/Cargo.toml b/Cargo.toml index 3ffa9e44..bf7ec2bb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -570,10 +570,16 @@ rev = "fe4aebeeaae435af60087ddd56b573a2e0be671d" git = "https://github.com/girlbossceo/async-channel" rev = "92e5e74063bf2a3b10414bcc8a0d68b235644280" +# adds affinity masks for selecting more than one core at a time [patch.crates-io.core_affinity] git = "https://github.com/girlbossceo/core_affinity_rs" rev = "9c8e51510c35077df888ee72a36b4b05637147da" +# reverts hyperium#148 conflicting with our delicate federation resolver hooks +[patch.crates-io.hyper-util] +git = "https://github.com/girlbossceo/hyper-util" +rev = "e4ae7628fe4fcdacef9788c4c8415317a4489941" + # # Our crates # From 45fd3875c8932e56d1ab092004065b0800861201 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 00:59:23 +0000 Subject: [PATCH 304/328] move runtime shutdown out of main; gather final stats Signed-off-by: Jason Volk --- src/main/main.rs | 7 +++---- src/main/runtime.rs | 43 ++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 43 insertions(+), 7 deletions(-) diff --git a/src/main/main.rs b/src/main/main.rs index 52f40384..1a9d3fe4 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -16,15 +16,14 @@ use server::Server; rustc_flags_capture! 
{} -fn main() -> Result<(), Error> { +fn main() -> Result { let args = clap::parse(); let runtime = runtime::new(&args)?; let server = Server::new(&args, Some(runtime.handle()))?; + runtime.spawn(signal::signal(server.clone())); runtime.block_on(async_main(&server))?; - - // explicit drop here to trace thread and tls dtors - drop(runtime); + runtime::shutdown(&server, runtime); #[cfg(unix)] if server.server.restarting.load(Ordering::Acquire) { diff --git a/src/main/runtime.rs b/src/main/runtime.rs index 920476db..1c58ea81 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -1,7 +1,7 @@ use std::{ iter::once, sync::{ - OnceLock, + Arc, OnceLock, atomic::{AtomicUsize, Ordering}, }, thread, @@ -11,17 +11,18 @@ use std::{ #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] use conduwuit_core::result::LogDebugErr; use conduwuit_core::{ - Result, is_true, + Result, debug, is_true, utils::sys::compute::{nth_core_available, set_affinity}, }; use tokio::runtime::Builder; -use crate::clap::Args; +use crate::{clap::Args, server::Server}; const WORKER_NAME: &str = "conduwuit:worker"; const WORKER_MIN: usize = 2; const WORKER_KEEPALIVE: u64 = 36; const MAX_BLOCKING_THREADS: usize = 1024; +const SHUTDOWN_TIMEOUT: Duration = Duration::from_millis(10000); #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] const DISABLE_MUZZY_THRESHOLD: usize = 4; @@ -83,6 +84,42 @@ fn enable_histogram(builder: &mut Builder, args: &Args) { .metrics_poll_time_histogram_configuration(linear); } +#[cfg(tokio_unstable)] +#[tracing::instrument(name = "stop", level = "info", skip_all)] +pub(super) fn shutdown(server: &Arc, runtime: tokio::runtime::Runtime) { + use conduwuit_core::event; + use tracing::Level; + + // The final metrics output is promoted to INFO when tokio_unstable is active in + // a release/bench mode and DEBUG is likely optimized out + const LEVEL: Level = if cfg!(debug_assertions) { + Level::DEBUG + } else { + Level::INFO + }; + + debug!( + timeout = ?SHUTDOWN_TIMEOUT, + "Waiting for runtime..." + ); + + runtime.shutdown_timeout(SHUTDOWN_TIMEOUT); + let runtime_metrics = server.server.metrics.runtime_interval().unwrap_or_default(); + + event!(LEVEL, ?runtime_metrics, "Final runtime metrics"); +} + +#[cfg(not(tokio_unstable))] +#[tracing::instrument(name = "stop", level = "info", skip_all)] +pub(super) fn shutdown(_server: &Arc, runtime: tokio::runtime::Runtime) { + debug!( + timeout = ?SHUTDOWN_TIMEOUT, + "Waiting for runtime..." 
+ ); + + runtime.shutdown_timeout(SHUTDOWN_TIMEOUT); +} + #[tracing::instrument( name = "fork", level = "debug", From 29d55b80366e17737094d3ad9a8031fe20c6286e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 2 Apr 2025 04:12:24 +0000 Subject: [PATCH 305/328] move systemd stopping notification point Signed-off-by: Jason Volk --- src/core/server.rs | 19 ++++++++++--------- src/router/run.rs | 4 ++++ 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/src/core/server.rs b/src/core/server.rs index b67759d6..4b673f32 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -69,10 +69,6 @@ impl Server { return Err!("Reloading not enabled"); } - #[cfg(all(feature = "systemd", target_os = "linux"))] - sd_notify::notify(true, &[sd_notify::NotifyState::Reloading]) - .expect("failed to notify systemd of reloading state"); - if self.reloading.swap(true, Ordering::AcqRel) { return Err!("Reloading already in progress"); } @@ -98,10 +94,6 @@ impl Server { } pub fn shutdown(&self) -> Result { - #[cfg(all(feature = "systemd", target_os = "linux"))] - sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]) - .expect("failed to notify systemd of stopping state"); - if self.stopping.swap(true, Ordering::AcqRel) { return Err!("Shutdown already in progress"); } @@ -144,7 +136,16 @@ impl Server { } #[inline] - pub fn running(&self) -> bool { !self.stopping.load(Ordering::Acquire) } + pub fn running(&self) -> bool { !self.is_stopping() } + + #[inline] + pub fn is_stopping(&self) -> bool { self.stopping.load(Ordering::Relaxed) } + + #[inline] + pub fn is_reloading(&self) -> bool { self.reloading.load(Ordering::Relaxed) } + + #[inline] + pub fn is_restarting(&self) -> bool { self.restarting.load(Ordering::Relaxed) } #[inline] pub fn is_ours(&self, name: &str) -> bool { name == self.config.server_name } diff --git a/src/router/run.rs b/src/router/run.rs index 31789626..ff54594f 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -77,6 +77,10 @@ pub(crate) async fn start(server: Arc) -> Result> { pub(crate) async fn stop(services: Arc) -> Result<()> { debug!("Shutting down..."); + #[cfg(all(feature = "systemd", target_os = "linux"))] + sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]) + .expect("failed to notify systemd of stopping state"); + // Wait for all completions before dropping or we'll lose them to the module // unload and explode. 
services.stop().await; From 94b107b42b722aff9518f64ad603ce01665b25f3 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 3 Apr 2025 16:08:02 -0400 Subject: [PATCH 306/328] add some debug logging and misc cleanup to keys/signatures/upload Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +++++----- Cargo.toml | 2 +- src/api/client/keys.rs | 95 ++++++++++++++++++++++++++-------------- src/service/users/mod.rs | 18 +++++--- 4 files changed, 86 insertions(+), 51 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8918a631..0753f81d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3654,7 +3654,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "assign", "js_int", @@ -3674,7 +3674,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "js_int", "ruma-common", @@ -3686,7 +3686,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "as_variant", "assign", @@ -3709,7 +3709,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "as_variant", "base64 0.22.1", @@ -3741,7 +3741,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "as_variant", "indexmap 2.8.0", @@ -3766,7 +3766,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "bytes", "headers", @@ -3788,7 +3788,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3797,7 +3797,7 @@ 
dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "js_int", "ruma-common", @@ -3807,7 +3807,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3822,7 +3822,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "js_int", "ruma-common", @@ -3834,7 +3834,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index bf7ec2bb..a44fc0f0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -350,7 +350,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "0701341a2fd5a6ea74beada18d5974cc401a4fc1" +rev = "edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" features = [ "compat", "rand", diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index f50d7afa..f6224343 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -9,7 +9,8 @@ use ruma::{ client::{ error::ErrorKind, keys::{ - claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures, + claim_keys, get_key_changes, get_keys, upload_keys, + upload_signatures::{self, v3::Failure}, upload_signing_keys, }, uiaa::{AuthFlow, AuthType, UiaaInfo}, @@ -308,53 +309,81 @@ async fn check_for_new_keys( /// # `POST /_matrix/client/r0/keys/signatures/upload` /// /// Uploads end-to-end key signatures from the sender user. +/// +/// TODO: clean this timo-code up more. tried to improve it a bit to stop +/// exploding the entire request on bad sigs, but needs way more work. 
pub(crate) async fn upload_signatures_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + use upload_signatures::v3::FailureErrorCode::*; + + if body.signed_keys.is_empty() { + debug!("Empty signed_keys sent in key signature upload"); + return Ok(upload_signatures::v3::Response::new()); + } + + let sender_user = body.sender_user(); + let mut failures: BTreeMap> = BTreeMap::new(); + let mut failure_reasons: BTreeMap = BTreeMap::new(); + let failure = Failure { + errcode: InvalidSignature, + error: String::new(), + }; for (user_id, keys) in &body.signed_keys { for (key_id, key) in keys { - let key = serde_json::to_value(key) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON"))?; + let Ok(key) = serde_json::to_value(key) + .inspect_err(|e| debug_warn!(?key_id, "Invalid \"key\" JSON: {e}")) + else { + let mut failure = failure.clone(); + failure.error = String::from("Invalid \"key\" JSON"); + failure_reasons.insert(key_id.to_owned(), failure); + continue; + }; - for signature in key - .get("signatures") - .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Missing signatures field."))? - .get(sender_user.to_string()) - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid user in signatures field.", - ))? - .as_object() - .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Invalid signature."))? - .clone() - { - // Signature validation? - let signature = ( - signature.0, - signature - .1 - .as_str() - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid signature value.", - ))? - .to_owned(), - ); + let Some(signatures) = key.get("signatures") else { + let mut failure = failure.clone(); + failure.error = String::from("Missing \"signatures\" field"); + failure_reasons.insert(key_id.to_owned(), failure); + continue; + }; - services + let Some(sender_user_val) = signatures.get(sender_user.to_string()) else { + let mut failure = failure.clone(); + failure.error = String::from("Invalid user in signatures field"); + failure_reasons.insert(key_id.to_owned(), failure); + continue; + }; + + let Some(sender_user_object) = sender_user_val.as_object() else { + let mut failure = failure.clone(); + failure.error = String::from("signatures field is not a JSON object"); + failure_reasons.insert(key_id.to_owned(), failure); + continue; + }; + + for (signature, val) in sender_user_object.clone() { + let signature = (signature, val.to_string()); + + if let Err(e) = services .users .sign_key(user_id, key_id, signature, sender_user) - .await?; + .await + .inspect_err(|e| debug_warn!("{e}")) + { + let mut failure = failure.clone(); + failure.error = format!("Error signing key: {e}"); + failure_reasons.insert(key_id.to_owned(), failure); + continue; + } } } + + failures.insert(user_id.to_owned(), failure_reasons.clone()); } - Ok(upload_signatures::v3::Response { - failures: BTreeMap::new(), // TODO: integrate - }) + Ok(upload_signatures::v3::Response { failures }) } /// # `POST /_matrix/client/r0/keys/changes` diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 87a8b93b..1eb289fc 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -593,7 +593,7 @@ impl Service { key_id: &str, signature: (String, String), sender_id: &UserId, - ) -> Result<()> { + ) -> Result { let key = (target_id, key_id); let mut cross_signing_key: serde_json::Value = self @@ -601,21 +601,27 @@ impl Service { .keyid_key .qry(&key) .await - .map_err(|_| 
err!(Request(InvalidParam("Tried to sign nonexistent key."))))? + .map_err(|_| err!(Request(InvalidParam("Tried to sign nonexistent key"))))? .deserialized() - .map_err(|e| err!(Database("key in keyid_key is invalid. {e:?}")))?; + .map_err(|e| err!(Database(debug_warn!("key in keyid_key is invalid: {e:?}"))))?; let signatures = cross_signing_key .get_mut("signatures") - .ok_or_else(|| err!(Database("key in keyid_key has no signatures field.")))? + .ok_or_else(|| { + err!(Database(debug_warn!("key in keyid_key has no signatures field"))) + })? .as_object_mut() - .ok_or_else(|| err!(Database("key in keyid_key has invalid signatures field.")))? + .ok_or_else(|| { + err!(Database(debug_warn!("key in keyid_key has invalid signatures field."))) + })? .entry(sender_id.to_string()) .or_insert_with(|| serde_json::Map::new().into()); signatures .as_object_mut() - .ok_or_else(|| err!(Database("signatures in keyid_key for a user is invalid.")))? + .ok_or_else(|| { + err!(Database(debug_warn!("signatures in keyid_key for a user is invalid."))) + })? .insert(signature.0, signature.1.into()); let key = (target_id, key_id); From b7109131e29804ac6b4e30aaaa40f213d092a63a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 22:06:51 +0000 Subject: [PATCH 307/328] further simplify get_missing_events; various log calls Signed-off-by: Jason Volk --- src/api/server/get_missing_events.rs | 47 +++++++++---------- .../rooms/state_accessor/server_can.rs | 8 +--- src/service/rooms/state_accessor/user_can.rs | 14 ++---- 3 files changed, 29 insertions(+), 40 deletions(-) diff --git a/src/api/server/get_missing_events.rs b/src/api/server/get_missing_events.rs index d72918fa..04dc30ed 100644 --- a/src/api/server/get_missing_events.rs +++ b/src/api/server/get_missing_events.rs @@ -1,9 +1,5 @@ use axum::extract::State; -use conduwuit::{ - Result, debug, debug_info, debug_warn, - utils::{self}, - warn, -}; +use conduwuit::{Result, debug, debug_error, utils::to_canonical_object}; use ruma::api::federation::event::get_missing_events; use super::AccessCheck; @@ -43,19 +39,13 @@ pub(crate) async fn get_missing_events_route( let mut i: usize = 0; while i < queued_events.len() && events.len() < limit { let Ok(pdu) = services.rooms.timeline.get_pdu(&queued_events[i]).await else { - debug_info!(?body.origin, "Event {} does not exist locally, skipping", &queued_events[i]); - i = i.saturating_add(1); - continue; - }; - - if pdu.room_id != body.room_id { - warn!(?body.origin, - "Got an event for the wrong room in database. Found {:?} in {:?}, server requested events in {:?}. 
Skipping.", - pdu.event_id, pdu.room_id, body.room_id + debug!( + ?body.origin, + "Event {} does not exist locally, skipping", &queued_events[i] ); i = i.saturating_add(1); continue; - } + }; if body.earliest_events.contains(&queued_events[i]) { i = i.saturating_add(1); @@ -68,25 +58,32 @@ pub(crate) async fn get_missing_events_route( .server_can_see_event(body.origin(), &body.room_id, &queued_events[i]) .await { - debug!(?body.origin, "Server cannot see {:?} in {:?}, skipping", pdu.event_id, pdu.room_id); + debug!( + ?body.origin, + "Server cannot see {:?} in {:?}, skipping", pdu.event_id, pdu.room_id + ); i = i.saturating_add(1); continue; } - let Ok(pdu_json) = utils::to_canonical_object(&pdu) else { - debug_warn!(?body.origin, "Failed to convert PDU in database to canonical JSON: {pdu:?}"); + let Ok(event) = to_canonical_object(&pdu) else { + debug_error!( + ?body.origin, + "Failed to convert PDU in database to canonical JSON: {pdu:?}" + ); i = i.saturating_add(1); continue; }; - queued_events.extend(pdu.prev_events.iter().map(ToOwned::to_owned)); + let prev_events = pdu.prev_events.iter().map(ToOwned::to_owned); - events.push( - services - .sending - .convert_to_outgoing_federation_event(pdu_json) - .await, - ); + let event = services + .sending + .convert_to_outgoing_federation_event(event) + .await; + + queued_events.extend(prev_events); + events.push(event); } Ok(get_missing_events::v1::Response { events }) diff --git a/src/service/rooms/state_accessor/server_can.rs b/src/service/rooms/state_accessor/server_can.rs index c946fbfd..2befec22 100644 --- a/src/service/rooms/state_accessor/server_can.rs +++ b/src/service/rooms/state_accessor/server_can.rs @@ -1,4 +1,4 @@ -use conduwuit::{debug_info, implement, utils::stream::ReadyExt}; +use conduwuit::{implement, utils::stream::ReadyExt}; use futures::StreamExt; use ruma::{ EventId, RoomId, ServerName, @@ -36,7 +36,6 @@ pub async fn server_can_see_event( .ready_filter(|member| member.server_name() == origin); match history_visibility { - | HistoryVisibility::WorldReadable | HistoryVisibility::Shared => true, | HistoryVisibility::Invited => { // Allow if any member on requesting server was AT LEAST invited, else deny current_server_members @@ -49,9 +48,6 @@ pub async fn server_can_see_event( .any(|member| self.user_was_joined(shortstatehash, member)) .await }, - | _ => { - debug_info!(%room_id, "Unknown history visibility, defaulting to shared: {history_visibility:?}"); - true - }, + | HistoryVisibility::WorldReadable | HistoryVisibility::Shared | _ => true, } } diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs index aa54407b..67e0b52b 100644 --- a/src/service/rooms/state_accessor/user_can.rs +++ b/src/service/rooms/state_accessor/user_can.rs @@ -1,4 +1,4 @@ -use conduwuit::{Err, Error, Result, debug_info, implement, pdu::PduBuilder}; +use conduwuit::{Err, Result, implement, pdu::PduBuilder}; use ruma::{ EventId, RoomId, UserId, events::{ @@ -76,8 +76,8 @@ pub async fn user_can_redact( || redacting_event .as_ref() .is_ok_and(|redacting_event| redacting_event.sender == sender)), - | _ => Err(Error::bad_database( - "No m.room.power_levels or m.room.create events in database for room", + | _ => Err!(Database( + "No m.room.power_levels or m.room.create events in database for room" )), } }, @@ -108,8 +108,6 @@ pub async fn user_can_see_event( }); match history_visibility { - | HistoryVisibility::WorldReadable => true, - | HistoryVisibility::Shared => currently_member, | 
HistoryVisibility::Invited => { // Allow if any member on requesting server was AT LEAST invited, else deny self.user_was_invited(shortstatehash, user_id).await @@ -118,10 +116,8 @@ pub async fn user_can_see_event( // Allow if any member on requested server was joined, else deny self.user_was_joined(shortstatehash, user_id).await }, - | _ => { - debug_info!(%room_id, "Unknown history visibility, defaulting to shared: {history_visibility:?}"); - currently_member - }, + | HistoryVisibility::WorldReadable => true, + | HistoryVisibility::Shared | _ => currently_member, } } From 6a073b4fa4c728b15f94de88ac37d136c97982bf Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 2 Apr 2025 06:28:34 +0000 Subject: [PATCH 308/328] remove additional unnecessary Arc Signed-off-by: Jason Volk --- .../fetch_and_handle_outliers.rs | 6 ++--- src/service/rooms/event_handler/fetch_prev.rs | 7 ++---- .../rooms/event_handler/handle_outlier_pdu.rs | 22 ++++++------------- .../rooms/event_handler/handle_prev_pdu.rs | 6 +---- .../rooms/event_handler/state_at_incoming.rs | 5 ++--- .../event_handler/upgrade_outlier_pdu.rs | 2 +- 6 files changed, 15 insertions(+), 33 deletions(-) diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs index 80e91eff..b0a7d827 100644 --- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs +++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs @@ -1,6 +1,5 @@ use std::{ collections::{BTreeMap, HashSet, VecDeque, hash_map}, - sync::Arc, time::Instant, }; @@ -8,7 +7,6 @@ use conduwuit::{ PduEvent, debug, debug_error, debug_warn, implement, pdu, trace, utils::continue_exponential_backoff_secs, warn, }; -use futures::TryFutureExt; use ruma::{ CanonicalJsonValue, OwnedEventId, RoomId, ServerName, api::federation::event::get_event, }; @@ -31,7 +29,7 @@ pub(super) async fn fetch_and_handle_outliers<'a>( events: &'a [OwnedEventId], create_event: &'a PduEvent, room_id: &'a RoomId, -) -> Vec<(Arc, Option>)> { +) -> Vec<(PduEvent, Option>)> { let back_off = |id| match self .services .globals @@ -53,7 +51,7 @@ pub(super) async fn fetch_and_handle_outliers<'a>( // a. Look in the main timeline (pduid_pdu tree) // b. 
Look at outlier pdu tree // (get_pdu_json checks both) - if let Ok(local_pdu) = self.services.timeline.get_pdu(id).map_ok(Arc::new).await { + if let Ok(local_pdu) = self.services.timeline.get_pdu(id).await { trace!("Found {id} in db"); events_with_auth_events.push((id, Some(local_pdu), vec![])); continue; diff --git a/src/service/rooms/event_handler/fetch_prev.rs b/src/service/rooms/event_handler/fetch_prev.rs index e817430b..0f92d6e6 100644 --- a/src/service/rooms/event_handler/fetch_prev.rs +++ b/src/service/rooms/event_handler/fetch_prev.rs @@ -1,7 +1,4 @@ -use std::{ - collections::{BTreeMap, HashMap, HashSet, VecDeque}, - sync::Arc, -}; +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; use conduwuit::{ PduEvent, Result, debug_warn, err, implement, @@ -31,7 +28,7 @@ pub(super) async fn fetch_prev( initial_set: Vec, ) -> Result<( Vec, - HashMap, BTreeMap)>, + HashMap)>, )> { let mut graph: HashMap = HashMap::with_capacity(initial_set.len()); let mut eventid_info = HashMap::new(); diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index 99e90a50..5339249d 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -1,12 +1,9 @@ -use std::{ - collections::{BTreeMap, HashMap, hash_map}, - sync::Arc, -}; +use std::collections::{BTreeMap, HashMap, hash_map}; use conduwuit::{ Err, Error, PduEvent, Result, debug, debug_info, err, implement, state_res, trace, warn, }; -use futures::{TryFutureExt, future::ready}; +use futures::future::ready; use ruma::{ CanonicalJsonObject, CanonicalJsonValue, EventId, RoomId, ServerName, api::client::error::ErrorKind, events::StateEventType, @@ -24,7 +21,7 @@ pub(super) async fn handle_outlier_pdu<'a>( room_id: &'a RoomId, mut value: CanonicalJsonObject, auth_events_known: bool, -) -> Result<(Arc, BTreeMap)> { +) -> Result<(PduEvent, BTreeMap)> { // 1. 
Remove unsigned field value.remove("unsigned"); @@ -95,7 +92,7 @@ pub(super) async fn handle_outlier_pdu<'a>( // Build map of auth events let mut auth_events = HashMap::with_capacity(incoming_pdu.auth_events.len()); for id in &incoming_pdu.auth_events { - let Ok(auth_event) = self.services.timeline.get_pdu(id).map_ok(Arc::new).await else { + let Ok(auth_event) = self.services.timeline.get_pdu(id).await else { warn!("Could not find auth event {id}"); continue; }; @@ -123,15 +120,10 @@ pub(super) async fn handle_outlier_pdu<'a>( // The original create event must be in the auth events if !matches!( - auth_events - .get(&(StateEventType::RoomCreate, String::new().into())) - .map(AsRef::as_ref), + auth_events.get(&(StateEventType::RoomCreate, String::new().into())), Some(_) | None ) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Incoming event refers to wrong create event.", - )); + return Err!(Request(InvalidParam("Incoming event refers to wrong create event."))); } let state_fetch = |ty: &StateEventType, sk: &str| { @@ -161,5 +153,5 @@ pub(super) async fn handle_outlier_pdu<'a>( trace!("Added pdu as outlier."); - Ok((Arc::new(incoming_pdu), val)) + Ok((incoming_pdu, val)) } diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index cf69a515..85e0a6b9 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -1,6 +1,5 @@ use std::{ collections::{BTreeMap, HashMap}, - sync::Arc, time::Instant, }; @@ -24,10 +23,7 @@ pub(super) async fn handle_prev_pdu<'a>( origin: &'a ServerName, event_id: &'a EventId, room_id: &'a RoomId, - eventid_info: &mut HashMap< - OwnedEventId, - (Arc, BTreeMap), - >, + eventid_info: &mut HashMap)>, create_event: &PduEvent, first_ts_in_room: UInt, prev_id: &EventId, diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 8326f9da..0402ff14 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -2,7 +2,6 @@ use std::{ borrow::Borrow, collections::{HashMap, HashSet}, iter::Iterator, - sync::Arc, }; use conduwuit::{ @@ -20,7 +19,7 @@ use crate::rooms::short::ShortStateHash; #[tracing::instrument(name = "state", level = "debug", skip_all)] pub(super) async fn state_at_incoming_degree_one( &self, - incoming_pdu: &Arc, + incoming_pdu: &PduEvent, ) -> Result>> { let prev_event = &incoming_pdu.prev_events[0]; let Ok(prev_event_sstatehash) = self @@ -67,7 +66,7 @@ pub(super) async fn state_at_incoming_degree_one( #[tracing::instrument(name = "state", level = "debug", skip_all)] pub(super) async fn state_at_incoming_resolved( &self, - incoming_pdu: &Arc, + incoming_pdu: &PduEvent, room_id: &RoomId, room_version_id: &RoomVersionId, ) -> Result>> { diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index c1a1c3eb..086dc6bd 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -18,7 +18,7 @@ use crate::rooms::{ #[implement(super::Service)] pub(super) async fn upgrade_outlier_to_timeline_pdu( &self, - incoming_pdu: Arc, + incoming_pdu: PduEvent, val: BTreeMap, create_event: &PduEvent, origin: &ServerName, From d036394ec79cf94aee484e6bea41421396dcd749 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 2 Apr 2025 09:53:42 +0000 Subject: [PATCH 
309/328] refactor incoming prev events loop; mitigate large future Signed-off-by: Jason Volk --- .../event_handler/handle_incoming_pdu.rs | 102 ++++++++++-------- .../rooms/event_handler/handle_prev_pdu.rs | 65 +++++------ 2 files changed, 88 insertions(+), 79 deletions(-) diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index b437bf2e..77cae41d 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -3,9 +3,12 @@ use std::{ time::Instant, }; -use conduwuit::{Err, Result, debug, debug::INFO_SPAN_LEVEL, err, implement, warn}; +use conduwuit::{ + Err, Result, debug, debug::INFO_SPAN_LEVEL, defer, err, implement, utils::stream::IterStream, + warn, +}; use futures::{ - FutureExt, + FutureExt, TryFutureExt, TryStreamExt, future::{OptionFuture, try_join5}, }; use ruma::{CanonicalJsonValue, EventId, RoomId, ServerName, UserId, events::StateEventType}; @@ -86,7 +89,7 @@ pub async fn handle_incoming_pdu<'a>( .state_accessor .room_state_get(room_id, &StateEventType::RoomCreate, ""); - let (meta_exists, is_disabled, (), (), create_event) = try_join5( + let (meta_exists, is_disabled, (), (), ref create_event) = try_join5( meta_exists, is_disabled, origin_acl_check, @@ -104,7 +107,7 @@ pub async fn handle_incoming_pdu<'a>( } let (incoming_pdu, val) = self - .handle_outlier_pdu(origin, &create_event, event_id, room_id, value, false) + .handle_outlier_pdu(origin, create_event, event_id, room_id, value, false) .await?; // 8. if not timeline event: stop @@ -129,66 +132,71 @@ pub async fn handle_incoming_pdu<'a>( let (sorted_prev_events, mut eventid_info) = self .fetch_prev( origin, - &create_event, + create_event, room_id, first_ts_in_room, incoming_pdu.prev_events.clone(), ) .await?; - debug!(events = ?sorted_prev_events, "Got previous events"); - for prev_id in sorted_prev_events { - self.services.server.check_running()?; - if let Err(e) = self - .handle_prev_pdu( + debug!( + events = ?sorted_prev_events, + "Handling previous events" + ); + + sorted_prev_events + .iter() + .try_stream() + .map_ok(AsRef::as_ref) + .try_for_each(|prev_id| { + self.handle_prev_pdu( origin, event_id, room_id, - &mut eventid_info, - &create_event, + eventid_info.remove(prev_id), + create_event, first_ts_in_room, - &prev_id, + prev_id, ) - .await - { - use hash_map::Entry; - - let now = Instant::now(); - warn!("Prev event {prev_id} failed: {e}"); - - match self - .services - .globals - .bad_event_ratelimiter - .write() - .expect("locked") - .entry(prev_id) - { - | Entry::Vacant(e) => { - e.insert((now, 1)); - }, - | Entry::Occupied(mut e) => { - *e.get_mut() = (now, e.get().1.saturating_add(1)); - }, - } - } - } + .inspect_err(move |e| { + warn!("Prev {prev_id} failed: {e}"); + match self + .services + .globals + .bad_event_ratelimiter + .write() + .expect("locked") + .entry(prev_id.into()) + { + | hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + }, + | hash_map::Entry::Occupied(mut e) => { + let tries = e.get().1.saturating_add(1); + *e.get_mut() = (Instant::now(), tries); + }, + } + }) + .map(|_| self.services.server.check_running()) + }) + .boxed() + .await?; // Done with prev events, now handling the incoming event let start_time = Instant::now(); self.federation_handletime .write() .expect("locked") - .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); + .insert(room_id.into(), (event_id.to_owned(), start_time)); - let r = self - 
.upgrade_outlier_to_timeline_pdu(incoming_pdu, val, &create_event, origin, room_id) - .await; + defer! {{ + self.federation_handletime + .write() + .expect("locked") + .remove(room_id); + }}; - self.federation_handletime - .write() - .expect("locked") - .remove(&room_id.to_owned()); - - r + self.upgrade_outlier_to_timeline_pdu(incoming_pdu, val, create_event, origin, room_id) + .boxed() + .await } diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index 85e0a6b9..d612b2bf 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -1,13 +1,10 @@ -use std::{ - collections::{BTreeMap, HashMap}, - time::Instant, -}; +use std::{collections::BTreeMap, time::Instant}; use conduwuit::{ - Err, PduEvent, Result, debug, debug::INFO_SPAN_LEVEL, implement, + Err, PduEvent, Result, debug, debug::INFO_SPAN_LEVEL, defer, implement, utils::continue_exponential_backoff_secs, }; -use ruma::{CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName, UInt}; +use ruma::{CanonicalJsonValue, EventId, RoomId, ServerName, UInt}; #[implement(super::Service)] #[allow(clippy::type_complexity)] @@ -23,10 +20,10 @@ pub(super) async fn handle_prev_pdu<'a>( origin: &'a ServerName, event_id: &'a EventId, room_id: &'a RoomId, - eventid_info: &mut HashMap)>, - create_event: &PduEvent, + eventid_info: Option<(PduEvent, BTreeMap)>, + create_event: &'a PduEvent, first_ts_in_room: UInt, - prev_id: &EventId, + prev_id: &'a EventId, ) -> Result { // Check for disabled again because it might have changed if self.services.metadata.is_disabled(room_id).await { @@ -57,31 +54,35 @@ pub(super) async fn handle_prev_pdu<'a>( } } - if let Some((pdu, json)) = eventid_info.remove(prev_id) { - // Skip old events - if pdu.origin_server_ts < first_ts_in_room { - return Ok(()); - } + let Some((pdu, json)) = eventid_info else { + return Ok(()); + }; - let start_time = Instant::now(); - self.federation_handletime - .write() - .expect("locked") - .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); - - self.upgrade_outlier_to_timeline_pdu(pdu, json, create_event, origin, room_id) - .await?; - - self.federation_handletime - .write() - .expect("locked") - .remove(&room_id.to_owned()); - - debug!( - elapsed = ?start_time.elapsed(), - "Handled prev_event", - ); + // Skip old events + if pdu.origin_server_ts < first_ts_in_room { + return Ok(()); } + let start_time = Instant::now(); + self.federation_handletime + .write() + .expect("locked") + .insert(room_id.into(), ((*prev_id).to_owned(), start_time)); + + defer! 
{{ + self.federation_handletime + .write() + .expect("locked") + .remove(room_id); + }}; + + self.upgrade_outlier_to_timeline_pdu(pdu, json, create_event, origin, room_id) + .await?; + + debug!( + elapsed = ?start_time.elapsed(), + "Handled prev_event", + ); + Ok(()) } From 00f7745ec4ebcea5f892376c5de5db1299f71696 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 4 Apr 2025 02:56:54 +0000 Subject: [PATCH 310/328] remove the db pool queue full warning Signed-off-by: Jason Volk --- src/database/pool.rs | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/src/database/pool.rs b/src/database/pool.rs index 47e61c30..0fa742d1 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -12,7 +12,7 @@ use std::{ use async_channel::{QueueStrategy, Receiver, RecvError, Sender}; use conduwuit::{ - Error, Result, Server, debug, debug_warn, err, error, implement, + Error, Result, Server, debug, err, error, implement, result::DebugInspect, smallvec::SmallVec, trace, @@ -245,13 +245,6 @@ async fn execute(&self, queue: &Sender, cmd: Cmd) -> Result { self.queued_max.fetch_max(queue.len(), Ordering::Relaxed); } - if queue.is_full() { - debug_warn!( - capacity = ?queue.capacity(), - "pool queue is full" - ); - } - queue .send(cmd) .await From 4e5b87d0cd16f3d015f4b61285b369d027bb909d Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Fri, 4 Apr 2025 11:34:31 -0400 Subject: [PATCH 311/328] add missing condition for signatures upload failures Signed-off-by: June Clementine Strawberry --- src/api/client/keys.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index f6224343..2fdfc0bc 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -380,7 +380,9 @@ pub(crate) async fn upload_signatures_route( } } - failures.insert(user_id.to_owned(), failure_reasons.clone()); + if !failure_reasons.is_empty() { + failures.insert(user_id.to_owned(), failure_reasons.clone()); + } } Ok(upload_signatures::v3::Response { failures }) From 532dfd004dbc020baa74a4d4413d9ad8139f851e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 4 Apr 2025 03:30:13 +0000 Subject: [PATCH 312/328] move core::pdu and core::state_res into core::matrix:: Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 4 +- src/admin/user/commands.rs | 3 +- src/api/client/account.rs | 6 ++- src/api/client/account_data.rs | 5 +- src/api/client/alias.rs | 2 +- src/api/client/backup.rs | 4 +- src/api/client/context.rs | 6 ++- src/api/client/device.rs | 4 +- src/api/client/directory.rs | 2 +- src/api/client/filter.rs | 4 +- src/api/client/keys.rs | 6 +-- src/api/client/membership.rs | 27 ++++++----- src/api/client/message.rs | 22 +++++---- src/api/client/openid.rs | 4 +- src/api/client/profile.rs | 4 +- src/api/client/push.rs | 6 +-- src/api/client/read_marker.rs | 4 +- src/api/client/redact.rs | 3 +- src/api/client/relations.rs | 5 +- src/api/client/report.rs | 8 ++-- src/api/client/room/create.rs | 6 ++- src/api/client/room/upgrade.rs | 5 +- src/api/client/search.rs | 5 +- src/api/client/send.rs | 4 +- src/api/client/session.rs | 9 ++-- src/api/client/space.rs | 10 ++-- src/api/client/state.rs | 8 +++- src/api/client/sync/mod.rs | 5 +- src/api/client/sync/v3.rs | 9 ++-- src/api/client/sync/v5.rs | 9 +++- src/api/client/tag.rs | 3 +- src/api/client/thirdparty.rs | 3 +- src/api/client/threads.rs | 7 ++- src/api/client/to_device.rs | 2 +- src/api/client/typing.rs | 4 +- src/api/client/unversioned.rs | 3 +- src/api/client/user_directory.rs | 
4 +- src/api/client/voip.rs | 4 +- src/api/client/well_known.rs | 3 +- src/api/mod.rs | 2 - src/api/server/hierarchy.rs | 4 +- src/api/server/invite.rs | 5 +- src/api/server/make_join.rs | 10 ++-- src/api/server/make_knock.rs | 5 +- src/api/server/make_leave.rs | 4 +- src/api/server/openid.rs | 3 +- src/api/server/publicrooms.rs | 3 +- src/api/server/send.rs | 16 +++---- src/api/server/send_join.rs | 2 +- src/api/server/send_knock.rs | 6 ++- src/api/server/send_leave.rs | 8 ++-- src/api/server/version.rs | 3 +- src/api/server/well_known.rs | 3 +- .../state_event.rs => matrix/event.rs} | 0 src/core/matrix/mod.rs | 9 ++++ src/core/{pdu/mod.rs => matrix/pdu.rs} | 47 +++++++++++++++---- src/core/{ => matrix}/pdu/builder.rs | 0 src/core/{ => matrix}/pdu/content.rs | 0 src/core/{ => matrix}/pdu/count.rs | 0 src/core/{ => matrix}/pdu/event_id.rs | 0 src/core/{ => matrix}/pdu/filter.rs | 0 src/core/{ => matrix}/pdu/id.rs | 0 src/core/{ => matrix}/pdu/raw_id.rs | 0 src/core/{ => matrix}/pdu/redact.rs | 0 src/core/{ => matrix}/pdu/relation.rs | 0 src/core/{ => matrix}/pdu/state_key.rs | 0 src/core/{ => matrix}/pdu/strip.rs | 0 src/core/{ => matrix}/pdu/tests.rs | 0 src/core/{ => matrix}/pdu/unsigned.rs | 0 src/core/{ => matrix}/state_res/LICENSE | 0 src/core/{ => matrix}/state_res/benches.rs | 0 src/core/{ => matrix}/state_res/error.rs | 0 src/core/{ => matrix}/state_res/event_auth.rs | 0 src/core/{ => matrix}/state_res/mod.rs | 8 ++-- src/core/{ => matrix}/state_res/outcomes.txt | 0 .../{ => matrix}/state_res/power_levels.rs | 2 +- .../{ => matrix}/state_res/room_version.rs | 0 src/core/{ => matrix}/state_res/test_utils.rs | 5 +- src/core/mod.rs | 6 +-- src/core/pdu/event.rs | 35 -------------- src/service/admin/grant.rs | 4 +- src/service/mod.rs | 1 - .../rooms/event_handler/state_at_incoming.rs | 4 +- .../event_handler/upgrade_outlier_pdu.rs | 3 +- src/service/rooms/outlier/mod.rs | 6 +-- src/service/rooms/read_receipt/mod.rs | 6 ++- src/service/rooms/short/mod.rs | 4 +- .../rooms/state_accessor/room_state.rs | 5 +- src/service/rooms/state_accessor/state.rs | 6 ++- src/service/rooms/threads/mod.rs | 5 +- src/service/rooms/timeline/mod.rs | 9 ++-- 91 files changed, 266 insertions(+), 205 deletions(-) rename src/core/{state_res/state_event.rs => matrix/event.rs} (100%) create mode 100644 src/core/matrix/mod.rs rename src/core/{pdu/mod.rs => matrix/pdu.rs} (72%) rename src/core/{ => matrix}/pdu/builder.rs (100%) rename src/core/{ => matrix}/pdu/content.rs (100%) rename src/core/{ => matrix}/pdu/count.rs (100%) rename src/core/{ => matrix}/pdu/event_id.rs (100%) rename src/core/{ => matrix}/pdu/filter.rs (100%) rename src/core/{ => matrix}/pdu/id.rs (100%) rename src/core/{ => matrix}/pdu/raw_id.rs (100%) rename src/core/{ => matrix}/pdu/redact.rs (100%) rename src/core/{ => matrix}/pdu/relation.rs (100%) rename src/core/{ => matrix}/pdu/state_key.rs (100%) rename src/core/{ => matrix}/pdu/strip.rs (100%) rename src/core/{ => matrix}/pdu/tests.rs (100%) rename src/core/{ => matrix}/pdu/unsigned.rs (100%) rename src/core/{ => matrix}/state_res/LICENSE (100%) rename src/core/{ => matrix}/state_res/benches.rs (100%) rename src/core/{ => matrix}/state_res/error.rs (100%) rename src/core/{ => matrix}/state_res/event_auth.rs (100%) rename src/core/{ => matrix}/state_res/mod.rs (99%) rename src/core/{ => matrix}/state_res/outcomes.txt (100%) rename src/core/{ => matrix}/state_res/power_levels.rs (99%) rename src/core/{ => matrix}/state_res/room_version.rs (100%) rename src/core/{ => 
matrix}/state_res/test_utils.rs (99%) delete mode 100644 src/core/pdu/event.rs diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index c6f6a170..87ca03a0 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -6,7 +6,9 @@ use std::{ }; use conduwuit::{ - Error, PduEvent, PduId, RawPduId, Result, debug_error, err, info, trace, utils, + Error, Result, debug_error, err, info, + matrix::pdu::{PduEvent, PduId, RawPduId}, + trace, utils, utils::{ stream::{IterStream, ReadyExt}, string::EMPTY, diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 35067304..45e550be 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -2,7 +2,8 @@ use std::{collections::BTreeMap, fmt::Write as _}; use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room}; use conduwuit::{ - PduBuilder, Result, debug, debug_warn, error, info, is_equal_to, + Result, debug, debug_warn, error, info, is_equal_to, + matrix::pdu::PduBuilder, utils::{self, ReadyExt}, warn, }; diff --git a/src/api/client/account.rs b/src/api/client/account.rs index e5894d47..32f2530c 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -3,10 +3,13 @@ use std::fmt::Write; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - Err, Error, PduBuilder, Result, debug_info, err, error, info, is_equal_to, utils, + Err, Error, Result, debug_info, err, error, info, is_equal_to, + matrix::pdu::PduBuilder, + utils, utils::{ReadyExt, stream::BroadbandExt}, warn, }; +use conduwuit_service::Services; use futures::{FutureExt, StreamExt}; use register::RegistrationKind; use ruma::{ @@ -30,7 +33,6 @@ use ruma::{ }, push, }; -use service::Services; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH, join_room_by_id_helper}; use crate::Ruma; diff --git a/src/api/client/account_data.rs b/src/api/client/account_data.rs index 60c18b37..e44ce4e7 100644 --- a/src/api/client/account_data.rs +++ b/src/api/client/account_data.rs @@ -1,5 +1,6 @@ use axum::extract::State; -use conduwuit::{Err, err}; +use conduwuit::{Err, Result, err}; +use conduwuit_service::Services; use ruma::{ RoomId, UserId, api::client::config::{ @@ -15,7 +16,7 @@ use ruma::{ use serde::Deserialize; use serde_json::{json, value::RawValue as RawJsonValue}; -use crate::{Result, Ruma, service::Services}; +use crate::Ruma; /// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}` /// diff --git a/src/api/client/alias.rs b/src/api/client/alias.rs index 319e5141..9f1b05f8 100644 --- a/src/api/client/alias.rs +++ b/src/api/client/alias.rs @@ -1,12 +1,12 @@ use axum::extract::State; use conduwuit::{Err, Result, debug}; +use conduwuit_service::Services; use futures::StreamExt; use rand::seq::SliceRandom; use ruma::{ OwnedServerName, RoomAliasId, RoomId, api::client::alias::{create_alias, delete_alias, get_alias}, }; -use service::Services; use crate::Ruma; diff --git a/src/api/client/backup.rs b/src/api/client/backup.rs index 83955fea..2ad37cf3 100644 --- a/src/api/client/backup.rs +++ b/src/api/client/backup.rs @@ -1,7 +1,7 @@ use std::cmp::Ordering; use axum::extract::State; -use conduwuit::{Err, err}; +use conduwuit::{Err, Result, err}; use ruma::{ UInt, api::client::backup::{ @@ -13,7 +13,7 @@ use ruma::{ }, }; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `POST /_matrix/client/r0/room_keys/version` /// diff --git a/src/api/client/context.rs b/src/api/client/context.rs index 1dda7b53..dbc2a22f 100644 --- a/src/api/client/context.rs +++ 
b/src/api/client/context.rs @@ -1,18 +1,20 @@ use axum::extract::State; use conduwuit::{ - Err, PduEvent, Result, at, debug_warn, err, ref_at, + Err, Result, at, debug_warn, err, + matrix::pdu::PduEvent, + ref_at, utils::{ IterStream, future::TryExtExt, stream::{BroadbandExt, ReadyExt, TryIgnore, WidebandExt}, }, }; +use conduwuit_service::rooms::{lazy_loading, lazy_loading::Options, short::ShortStateKey}; use futures::{ FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::{OptionFuture, join, join3, try_join3}, }; use ruma::{OwnedEventId, UserId, api::client::context::get_context, events::StateEventType}; -use service::rooms::{lazy_loading, lazy_loading::Options, short::ShortStateKey}; use crate::{ Ruma, diff --git a/src/api/client/device.rs b/src/api/client/device.rs index 7603c866..5519a1a5 100644 --- a/src/api/client/device.rs +++ b/src/api/client/device.rs @@ -1,6 +1,6 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, debug, err}; +use conduwuit::{Err, Error, Result, debug, err, utils}; use futures::StreamExt; use ruma::{ MilliSecondsSinceUnixEpoch, OwnedDeviceId, @@ -12,7 +12,7 @@ use ruma::{ }; use super::SESSION_ID_LENGTH; -use crate::{Error, Result, Ruma, client::DEVICE_ID_LENGTH, utils}; +use crate::{Ruma, client::DEVICE_ID_LENGTH}; /// # `GET /_matrix/client/r0/devices` /// diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index f2f668c8..9ca35537 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -9,6 +9,7 @@ use conduwuit::{ stream::{ReadyExt, WidebandExt}, }, }; +use conduwuit_service::Services; use futures::{ FutureExt, StreamExt, TryFutureExt, future::{join, join4, join5}, @@ -35,7 +36,6 @@ use ruma::{ }, uint, }; -use service::Services; use crate::Ruma; diff --git a/src/api/client/filter.rs b/src/api/client/filter.rs index 84086452..97044ffc 100644 --- a/src/api/client/filter.rs +++ b/src/api/client/filter.rs @@ -1,8 +1,8 @@ use axum::extract::State; -use conduwuit::err; +use conduwuit::{Result, err}; use ruma::api::client::filter::{create_filter, get_filter}; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}` /// diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 2fdfc0bc..6865c2a4 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -2,6 +2,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use axum::extract::State; use conduwuit::{Err, Error, Result, debug, debug_warn, err, info, result::NotFound, utils}; +use conduwuit_service::{Services, users::parse_master_key}; use futures::{StreamExt, stream::FuturesUnordered}; use ruma::{ OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, @@ -23,10 +24,7 @@ use ruma::{ use serde_json::json; use super::SESSION_ID_LENGTH; -use crate::{ - Ruma, - service::{Services, users::parse_master_key}, -}; +use crate::Ruma; /// # `POST /_matrix/client/r0/keys/upload` /// diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index ef40e972..d0345c8e 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -9,13 +9,25 @@ use std::{ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - Err, PduEvent, Result, StateKey, at, debug, debug_info, debug_warn, err, error, info, - pdu::{PduBuilder, gen_event_id_canonical_json}, + Err, Result, at, debug, debug_info, debug_warn, err, error, info, + matrix::{ + StateKey, + pdu::{PduBuilder, PduEvent, gen_event_id, gen_event_id_canonical_json}, + 
state_res, + }, result::{FlatOk, NotFound}, - state_res, trace, + trace, utils::{self, IterStream, ReadyExt, shuffle}, warn, }; +use conduwuit_service::{ + Services, + appservice::RegistrationInfo, + rooms::{ + state::RoomMutexGuard, + state_compressor::{CompressedState, HashSetCompressStateEvent}, + }, +}; use futures::{FutureExt, StreamExt, TryFutureExt, future::join4, join}; use ruma::{ CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, @@ -44,15 +56,6 @@ use ruma::{ }, }, }; -use service::{ - Services, - appservice::RegistrationInfo, - pdu::gen_event_id, - rooms::{ - state::RoomMutexGuard, - state_compressor::{CompressedState, HashSetCompressStateEvent}, - }, -}; use crate::{Ruma, client::full_user_deactivate}; diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 03c7335a..3e784a4a 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,12 +1,24 @@ use axum::extract::State; use conduwuit::{ - Err, Event, PduCount, PduEvent, Result, at, + Err, Result, at, + matrix::{ + Event, + pdu::{PduCount, PduEvent}, + }, utils::{ IterStream, ReadyExt, result::{FlatOk, LogErr}, stream::{BroadbandExt, TryIgnore, WidebandExt}, }, }; +use conduwuit_service::{ + Services, + rooms::{ + lazy_loading, + lazy_loading::{Options, Witness}, + timeline::PdusIterItem, + }, +}; use futures::{FutureExt, StreamExt, TryFutureExt, future::OptionFuture, pin_mut}; use ruma::{ RoomId, UserId, @@ -17,14 +29,6 @@ use ruma::{ events::{AnyStateEvent, StateEventType, TimelineEventType, TimelineEventType::*}, serde::Raw, }; -use service::{ - Services, - rooms::{ - lazy_loading, - lazy_loading::{Options, Witness}, - timeline::PdusIterItem, - }, -}; use crate::Ruma; diff --git a/src/api/client/openid.rs b/src/api/client/openid.rs index 671d0c6d..8d2de68d 100644 --- a/src/api/client/openid.rs +++ b/src/api/client/openid.rs @@ -1,14 +1,14 @@ use std::time::Duration; use axum::extract::State; -use conduwuit::utils; +use conduwuit::{Error, Result, utils}; use ruma::{ api::client::{account, error::ErrorKind}, authentication::TokenType, }; use super::TOKEN_LENGTH; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `POST /_matrix/client/v3/user/{userId}/openid/request_token` /// diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index 5abe5b23..3699b590 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -3,10 +3,11 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ Err, Error, Result, - pdu::PduBuilder, + matrix::pdu::PduBuilder, utils::{IterStream, stream::TryIgnore}, warn, }; +use conduwuit_service::Services; use futures::{StreamExt, TryStreamExt, future::join3}; use ruma::{ OwnedMxcUri, OwnedRoomId, UserId, @@ -22,7 +23,6 @@ use ruma::{ events::room::member::{MembershipState, RoomMemberEventContent}, presence::PresenceState, }; -use service::Services; use crate::Ruma; diff --git a/src/api/client/push.rs b/src/api/client/push.rs index cc1d3be2..81020ffa 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -1,5 +1,6 @@ use axum::extract::State; -use conduwuit::{Err, err}; +use conduwuit::{Err, Error, Result, err}; +use conduwuit_service::Services; use ruma::{ CanonicalJsonObject, CanonicalJsonValue, api::client::{ @@ -19,9 +20,8 @@ use ruma::{ RemovePushRuleError, Ruleset, }, }; -use service::Services; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/r0/pushrules/` /// diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs 
index b334e356..fbfc8fea 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -1,7 +1,7 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduwuit::{Err, PduCount, err}; +use conduwuit::{Err, PduCount, Result, err}; use ruma::{ MilliSecondsSinceUnixEpoch, api::client::{read_marker::set_read_marker, receipt::create_receipt}, @@ -11,7 +11,7 @@ use ruma::{ }, }; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers` /// diff --git a/src/api/client/redact.rs b/src/api/client/redact.rs index 7b512d06..8dbe47a6 100644 --- a/src/api/client/redact.rs +++ b/src/api/client/redact.rs @@ -1,9 +1,10 @@ use axum::extract::State; +use conduwuit::{Result, matrix::pdu::PduBuilder}; use ruma::{ api::client::redact::redact_event, events::room::redaction::RoomRedactionEventContent, }; -use crate::{Result, Ruma, service::pdu::PduBuilder}; +use crate::Ruma; /// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}` /// diff --git a/src/api/client/relations.rs b/src/api/client/relations.rs index 7ed40f14..b8c2dd4d 100644 --- a/src/api/client/relations.rs +++ b/src/api/client/relations.rs @@ -1,8 +1,10 @@ use axum::extract::State; use conduwuit::{ - PduCount, Result, at, + Result, at, + matrix::pdu::PduCount, utils::{IterStream, ReadyExt, result::FlatOk, stream::WidebandExt}, }; +use conduwuit_service::{Services, rooms::timeline::PdusIterItem}; use futures::StreamExt; use ruma::{ EventId, RoomId, UInt, UserId, @@ -15,7 +17,6 @@ use ruma::{ }, events::{TimelineEventType, relation::RelationType}, }; -use service::{Services, rooms::timeline::PdusIterItem}; use crate::Ruma; diff --git a/src/api/client/report.rs b/src/api/client/report.rs index 7922caca..4ee8ebe5 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -2,7 +2,8 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, info, utils::ReadyExt}; +use conduwuit::{Err, Error, Result, debug_info, info, matrix::pdu::PduEvent, utils::ReadyExt}; +use conduwuit_service::Services; use rand::Rng; use ruma::{ EventId, RoomId, UserId, @@ -15,10 +16,7 @@ use ruma::{ }; use tokio::time::sleep; -use crate::{ - Error, Result, Ruma, debug_info, - service::{Services, pdu::PduEvent}, -}; +use crate::Ruma; /// # `POST /_matrix/client/v3/rooms/{roomId}/report` /// diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index bdc5d5a5..4ce53f15 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -2,8 +2,11 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ - Err, Error, Result, StateKey, debug_info, debug_warn, err, error, info, pdu::PduBuilder, warn, + Err, Error, Result, debug_info, debug_warn, err, error, info, + matrix::{StateKey, pdu::PduBuilder}, + warn, }; +use conduwuit_service::{Services, appservice::RegistrationInfo}; use futures::FutureExt; use ruma::{ CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId, @@ -29,7 +32,6 @@ use ruma::{ serde::{JsonObject, Raw}, }; use serde_json::{json, value::to_raw_value}; -use service::{Services, appservice::RegistrationInfo}; use crate::{Ruma, client::invite_helper}; diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index 3cfb3c28..9ec0b3bb 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -1,7 +1,10 @@ use std::cmp::max; use axum::extract::State; -use conduwuit::{Error, Result, 
StateKey, err, info, pdu::PduBuilder}; +use conduwuit::{ + Error, Result, err, info, + matrix::{StateKey, pdu::PduBuilder}, +}; use futures::StreamExt; use ruma::{ CanonicalJsonObject, RoomId, RoomVersionId, diff --git a/src/api/client/search.rs b/src/api/client/search.rs index d66df881..d4dcde57 100644 --- a/src/api/client/search.rs +++ b/src/api/client/search.rs @@ -2,10 +2,12 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ - Err, PduEvent, Result, at, is_true, + Err, Result, at, is_true, + matrix::pdu::PduEvent, result::FlatOk, utils::{IterStream, stream::ReadyExt}, }; +use conduwuit_service::{Services, rooms::search::RoomQuery}; use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::OptionFuture}; use ruma::{ OwnedRoomId, RoomId, UInt, UserId, @@ -17,7 +19,6 @@ use ruma::{ serde::Raw, }; use search_events::v3::{Request, Response}; -use service::{Services, rooms::search::RoomQuery}; use crate::Ruma; diff --git a/src/api/client/send.rs b/src/api/client/send.rs index 1af74f57..f753fa65 100644 --- a/src/api/client/send.rs +++ b/src/api/client/send.rs @@ -1,11 +1,11 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduwuit::{Err, err}; +use conduwuit::{Err, Result, err, matrix::pdu::PduBuilder, utils}; use ruma::{api::client::message::send_message_event, events::MessageLikeEventType}; use serde_json::from_str; -use crate::{Result, Ruma, service::pdu::PduBuilder, utils}; +use crate::Ruma; /// # `PUT /_matrix/client/v3/rooms/{roomId}/send/{eventType}/{txnId}` /// diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 3de625e4..2499a43d 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -2,7 +2,11 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, debug, err, info, utils::ReadyExt}; +use conduwuit::{ + Err, Error, Result, debug, err, info, utils, + utils::{ReadyExt, hash}, +}; +use conduwuit_service::uiaa::SESSION_ID_LENGTH; use futures::StreamExt; use ruma::{ UserId, @@ -22,10 +26,9 @@ use ruma::{ uiaa, }, }; -use service::uiaa::SESSION_ID_LENGTH; use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; -use crate::{Error, Result, Ruma, utils, utils::hash}; +use crate::Ruma; /// # `GET /_matrix/client/v3/login` /// diff --git a/src/api/client/space.rs b/src/api/client/space.rs index 567ac62f..4eee9d76 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -8,16 +8,16 @@ use conduwuit::{ Err, Result, utils::{future::TryExtExt, stream::IterStream}, }; -use futures::{StreamExt, TryFutureExt, future::OptionFuture}; -use ruma::{ - OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, api::client::space::get_hierarchy, -}; -use service::{ +use conduwuit_service::{ Services, rooms::spaces::{ PaginationToken, SummaryAccessibility, get_parent_children_via, summary_to_chunk, }, }; +use futures::{StreamExt, TryFutureExt, future::OptionFuture}; +use ruma::{ + OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, api::client::space::get_hierarchy, +}; use crate::Ruma; diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 23583356..5c5c71f2 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -1,5 +1,10 @@ use axum::extract::State; -use conduwuit::{Err, PduEvent, Result, err, pdu::PduBuilder, utils::BoolExt}; +use conduwuit::{ + Err, Result, err, + matrix::pdu::{PduBuilder, PduEvent}, + utils::BoolExt, +}; +use conduwuit_service::Services; use futures::TryStreamExt; use ruma::{ OwnedEventId, RoomId, UserId, @@ 
-16,7 +21,6 @@ use ruma::{ }, serde::Raw, }; -use service::Services; use crate::{Ruma, RumaResponse}; diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index 3eab76cc..14459acf 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -3,12 +3,14 @@ mod v4; mod v5; use conduwuit::{ - PduCount, + Error, PduCount, Result, + matrix::pdu::PduEvent, utils::{ IterStream, stream::{BroadbandExt, ReadyExt, TryIgnore}, }, }; +use conduwuit_service::Services; use futures::{StreamExt, pin_mut}; use ruma::{ RoomId, UserId, @@ -21,7 +23,6 @@ use ruma::{ pub(crate) use self::{ v3::sync_events_route, v4::sync_events_v4_route, v5::sync_events_v5_route, }; -use crate::{Error, PduEvent, Result, service::Services}; pub(crate) const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] = &[CallInvite, PollStart, Beacon, RoomEncrypted, RoomMessage, Sticker]; diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 83ffa55a..12731ff6 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -6,9 +6,12 @@ use std::{ use axum::extract::State; use conduwuit::{ - PduCount, PduEvent, Result, at, err, error, extract_variant, is_equal_to, pair_of, - pdu::{Event, EventHash}, - ref_at, + Result, at, err, error, extract_variant, is_equal_to, + matrix::{ + Event, + pdu::{EventHash, PduCount, PduEvent}, + }, + pair_of, ref_at, result::FlatOk, utils::{ self, BoolExt, IterStream, ReadyExt, TryFutureExtExt, diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index c4e71d88..684752ec 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -6,13 +6,19 @@ use std::{ use axum::extract::State; use conduwuit::{ - Error, PduEvent, Result, TypeStateKey, debug, error, extract_variant, trace, + Error, Result, debug, error, extract_variant, + matrix::{ + TypeStateKey, + pdu::{PduCount, PduEvent}, + }, + trace, utils::{ BoolExt, IterStream, ReadyExt, TryFutureExtExt, math::{ruma_from_usize, usize_from_ruma}, }, warn, }; +use conduwuit_service::rooms::read_receipt::pack_receipts; use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, @@ -27,7 +33,6 @@ use ruma::{ serde::Raw, uint, }; -use service::{PduCount, rooms::read_receipt::pack_receipts}; use super::{filter_rooms, share_encrypted_room}; use crate::{ diff --git a/src/api/client/tag.rs b/src/api/client/tag.rs index 3b3b40d4..caafe10d 100644 --- a/src/api/client/tag.rs +++ b/src/api/client/tag.rs @@ -1,6 +1,7 @@ use std::collections::BTreeMap; use axum::extract::State; +use conduwuit::Result; use ruma::{ api::client::tag::{create_tag, delete_tag, get_tags}, events::{ @@ -9,7 +10,7 @@ use ruma::{ }, }; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` /// diff --git a/src/api/client/thirdparty.rs b/src/api/client/thirdparty.rs index 790b27d3..0713a882 100644 --- a/src/api/client/thirdparty.rs +++ b/src/api/client/thirdparty.rs @@ -1,8 +1,9 @@ use std::collections::BTreeMap; +use conduwuit::Result; use ruma::api::client::thirdparty::get_protocols; -use crate::{Result, Ruma, RumaResponse}; +use crate::{Ruma, RumaResponse}; /// # `GET /_matrix/client/r0/thirdparty/protocols` /// diff --git a/src/api/client/threads.rs b/src/api/client/threads.rs index 00bfe553..5b838bef 100644 --- a/src/api/client/threads.rs +++ b/src/api/client/threads.rs @@ -1,9 +1,12 @@ use axum::extract::State; -use conduwuit::{PduCount, PduEvent, at}; +use conduwuit::{ + Result, at, + 
matrix::pdu::{PduCount, PduEvent}, +}; use futures::StreamExt; use ruma::{api::client::threads::get_threads, uint}; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/r0/rooms/{roomId}/threads` pub(crate) async fn get_threads_route( diff --git a/src/api/client/to_device.rs b/src/api/client/to_device.rs index 1b942fba..8ad9dc99 100644 --- a/src/api/client/to_device.rs +++ b/src/api/client/to_device.rs @@ -2,6 +2,7 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{Error, Result}; +use conduwuit_service::sending::EduBuf; use futures::StreamExt; use ruma::{ api::{ @@ -10,7 +11,6 @@ use ruma::{ }, to_device::DeviceIdOrAllDevices, }; -use service::sending::EduBuf; use crate::Ruma; diff --git a/src/api/client/typing.rs b/src/api/client/typing.rs index b02cc473..1d8d02fd 100644 --- a/src/api/client/typing.rs +++ b/src/api/client/typing.rs @@ -1,8 +1,8 @@ use axum::extract::State; -use conduwuit::{Err, utils::math::Tried}; +use conduwuit::{Err, Result, utils, utils::math::Tried}; use ruma::api::client::typing::create_typing_event; -use crate::{Result, Ruma, utils}; +use crate::Ruma; /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` /// diff --git a/src/api/client/unversioned.rs b/src/api/client/unversioned.rs index 4e2b7d9d..232d5b28 100644 --- a/src/api/client/unversioned.rs +++ b/src/api/client/unversioned.rs @@ -1,10 +1,11 @@ use std::collections::BTreeMap; use axum::{Json, extract::State, response::IntoResponse}; +use conduwuit::Result; use futures::StreamExt; use ruma::api::client::discovery::get_supported_versions; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/versions` /// diff --git a/src/api/client/user_directory.rs b/src/api/client/user_directory.rs index c5d79a56..8f564eed 100644 --- a/src/api/client/user_directory.rs +++ b/src/api/client/user_directory.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduwuit::utils::TryFutureExtExt; +use conduwuit::{Result, utils::TryFutureExtExt}; use futures::{StreamExt, pin_mut}; use ruma::{ api::client::user_directory::search_users, @@ -9,7 +9,7 @@ use ruma::{ }, }; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `POST /_matrix/client/r0/user_directory/search` /// diff --git a/src/api/client/voip.rs b/src/api/client/voip.rs index 37e67984..91991d24 100644 --- a/src/api/client/voip.rs +++ b/src/api/client/voip.rs @@ -2,12 +2,12 @@ use std::time::{Duration, SystemTime}; use axum::extract::State; use base64::{Engine as _, engine::general_purpose}; -use conduwuit::{Err, utils}; +use conduwuit::{Err, Result, utils}; use hmac::{Hmac, Mac}; use ruma::{SecondsSinceUnixEpoch, UserId, api::client::voip::get_turn_server_info}; use sha1::Sha1; -use crate::{Result, Ruma}; +use crate::Ruma; const RANDOM_USER_ID_LENGTH: usize = 10; diff --git a/src/api/client/well_known.rs b/src/api/client/well_known.rs index abda61b0..eedab981 100644 --- a/src/api/client/well_known.rs +++ b/src/api/client/well_known.rs @@ -1,4 +1,5 @@ use axum::{Json, extract::State, response::IntoResponse}; +use conduwuit::{Error, Result}; use ruma::api::client::{ discovery::{ discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo}, @@ -7,7 +8,7 @@ use ruma::api::client::{ error::ErrorKind, }; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `GET /.well-known/matrix/client` /// diff --git a/src/api/mod.rs b/src/api/mod.rs index 090cf897..9ca24e72 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -8,8 +8,6 @@ pub mod server; extern crate conduwuit_core as conduwuit; extern crate 
conduwuit_service as service; -pub(crate) use conduwuit::{Error, Result, debug_info, pdu::PduEvent, utils}; - pub(crate) use self::router::{Ruma, RumaResponse, State}; conduwuit::mod_ctor! {} diff --git a/src/api/server/hierarchy.rs b/src/api/server/hierarchy.rs index c759c8ea..42c348f9 100644 --- a/src/api/server/hierarchy.rs +++ b/src/api/server/hierarchy.rs @@ -3,9 +3,11 @@ use conduwuit::{ Err, Result, utils::stream::{BroadbandExt, IterStream}, }; +use conduwuit_service::rooms::spaces::{ + Identifier, SummaryAccessibility, get_parent_children_via, +}; use futures::{FutureExt, StreamExt}; use ruma::api::federation::space::get_hierarchy; -use service::rooms::spaces::{Identifier, SummaryAccessibility, get_parent_children_via}; use crate::Ruma; diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index f4cc6eb2..cda34fb5 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -1,14 +1,15 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use base64::{Engine as _, engine::general_purpose}; -use conduwuit::{Err, Error, PduEvent, Result, err, utils, utils::hash::sha256, warn}; +use conduwuit::{ + Err, Error, PduEvent, Result, err, pdu::gen_event_id, utils, utils::hash::sha256, warn, +}; use ruma::{ CanonicalJsonValue, OwnedUserId, UserId, api::{client::error::ErrorKind, federation::membership::create_invite}, events::room::member::{MembershipState, RoomMemberEventContent}, serde::JsonObject, }; -use service::pdu::gen_event_id; use crate::Ruma; diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index f18d1304..4664b904 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -1,5 +1,8 @@ use axum::extract::State; -use conduwuit::{Err, debug_info, utils::IterStream, warn}; +use conduwuit::{ + Err, Error, Result, debug_info, matrix::pdu::PduBuilder, utils::IterStream, warn, +}; +use conduwuit_service::Services; use futures::StreamExt; use ruma::{ CanonicalJsonObject, OwnedUserId, RoomId, RoomVersionId, UserId, @@ -14,10 +17,7 @@ use ruma::{ }; use serde_json::value::to_raw_value; -use crate::{ - Error, Result, Ruma, - service::{Services, pdu::PduBuilder}, -}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` /// diff --git a/src/api/server/make_knock.rs b/src/api/server/make_knock.rs index 71536439..6d71ab2a 100644 --- a/src/api/server/make_knock.rs +++ b/src/api/server/make_knock.rs @@ -1,15 +1,14 @@ use RoomVersionId::*; use axum::extract::State; -use conduwuit::{Err, debug_warn}; +use conduwuit::{Err, Error, Result, debug_warn, matrix::pdu::PduBuilder, warn}; use ruma::{ RoomVersionId, api::{client::error::ErrorKind, federation::knock::create_knock_event_template}, events::room::member::{MembershipState, RoomMemberEventContent}, }; use serde_json::value::to_raw_value; -use tracing::warn; -use crate::{Error, Result, Ruma, service::pdu::PduBuilder}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/make_knock/{roomId}/{userId}` /// diff --git a/src/api/server/make_leave.rs b/src/api/server/make_leave.rs index 1ed02785..cb6bd2fa 100644 --- a/src/api/server/make_leave.rs +++ b/src/api/server/make_leave.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduwuit::{Err, Result}; +use conduwuit::{Err, Result, matrix::pdu::PduBuilder}; use ruma::{ api::federation::membership::prepare_leave_event, events::room::member::{MembershipState, RoomMemberEventContent}, @@ -7,7 +7,7 @@ use ruma::{ use serde_json::value::to_raw_value; use super::make_join::maybe_strip_event_id; -use 
crate::{Ruma, service::pdu::PduBuilder}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/make_leave/{roomId}/{eventId}` /// diff --git a/src/api/server/openid.rs b/src/api/server/openid.rs index 4833fbe1..a09cd7ad 100644 --- a/src/api/server/openid.rs +++ b/src/api/server/openid.rs @@ -1,7 +1,8 @@ use axum::extract::State; +use conduwuit::Result; use ruma::api::federation::openid::get_openid_userinfo; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/openid/userinfo` /// diff --git a/src/api/server/publicrooms.rs b/src/api/server/publicrooms.rs index ff74574a..cf66ea71 100644 --- a/src/api/server/publicrooms.rs +++ b/src/api/server/publicrooms.rs @@ -1,5 +1,6 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; +use conduwuit::{Error, Result}; use ruma::{ api::{ client::error::ErrorKind, @@ -8,7 +9,7 @@ use ruma::{ directory::Filter, }; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `POST /_matrix/federation/v1/publicRooms` /// diff --git a/src/api/server/send.rs b/src/api/server/send.rs index 1f467dac..9c5bfd2b 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -9,11 +9,15 @@ use conduwuit::{ result::LogErr, trace, utils::{ - IterStream, ReadyExt, + IterStream, ReadyExt, millis_since_unix_epoch, stream::{BroadbandExt, TryBroadbandExt, automatic_width}, }, warn, }; +use conduwuit_service::{ + Services, + sending::{EDU_LIMIT, PDU_LIMIT}, +}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use itertools::Itertools; use ruma::{ @@ -33,16 +37,8 @@ use ruma::{ serde::Raw, to_device::DeviceIdOrAllDevices, }; -use service::{ - Services, - sending::{EDU_LIMIT, PDU_LIMIT}, -}; -use utils::millis_since_unix_epoch; -use crate::{ - Ruma, - utils::{self}, -}; +use crate::Ruma; type ResolvedMap = BTreeMap; type Pdu = (OwnedRoomId, OwnedEventId, CanonicalJsonObject); diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index c1749835..2e2e89ee 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -9,6 +9,7 @@ use conduwuit::{ utils::stream::{IterStream, TryBroadbandExt}, warn, }; +use conduwuit_service::Services; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, @@ -20,7 +21,6 @@ use ruma::{ }, }; use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; -use service::Services; use crate::Ruma; diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index f7bb0735..c5ab0306 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -1,5 +1,9 @@ use axum::extract::State; -use conduwuit::{Err, PduEvent, Result, err, pdu::gen_event_id_canonical_json, warn}; +use conduwuit::{ + Err, Result, err, + matrix::pdu::{PduEvent, gen_event_id_canonical_json}, + warn, +}; use futures::FutureExt; use ruma::{ OwnedServerName, OwnedUserId, diff --git a/src/api/server/send_leave.rs b/src/api/server/send_leave.rs index 71516553..d3dc994c 100644 --- a/src/api/server/send_leave.rs +++ b/src/api/server/send_leave.rs @@ -1,7 +1,8 @@ #![allow(deprecated)] use axum::extract::State; -use conduwuit::{Err, Result, err}; +use conduwuit::{Err, Result, err, matrix::pdu::gen_event_id_canonical_json}; +use conduwuit_service::Services; use futures::FutureExt; use ruma::{ OwnedRoomId, OwnedUserId, RoomId, ServerName, @@ -13,10 +14,7 @@ use ruma::{ }; use serde_json::value::RawValue as RawJsonValue; -use crate::{ - Ruma, - service::{Services, 
pdu::gen_event_id_canonical_json}, -}; +use crate::Ruma; /// # `PUT /_matrix/federation/v1/send_leave/{roomId}/{eventId}` /// diff --git a/src/api/server/version.rs b/src/api/server/version.rs index 036b61f7..b08ff77a 100644 --- a/src/api/server/version.rs +++ b/src/api/server/version.rs @@ -1,6 +1,7 @@ +use conduwuit::Result; use ruma::api::federation::discovery::get_server_version; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/version` /// diff --git a/src/api/server/well_known.rs b/src/api/server/well_known.rs index 48caa7d6..75c7cf5d 100644 --- a/src/api/server/well_known.rs +++ b/src/api/server/well_known.rs @@ -1,7 +1,8 @@ use axum::extract::State; +use conduwuit::{Error, Result}; use ruma::api::{client::error::ErrorKind, federation::discovery::discover_homeserver}; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `GET /.well-known/matrix/server` /// diff --git a/src/core/state_res/state_event.rs b/src/core/matrix/event.rs similarity index 100% rename from src/core/state_res/state_event.rs rename to src/core/matrix/event.rs diff --git a/src/core/matrix/mod.rs b/src/core/matrix/mod.rs new file mode 100644 index 00000000..8c978173 --- /dev/null +++ b/src/core/matrix/mod.rs @@ -0,0 +1,9 @@ +//! Core Matrix Library + +pub mod event; +pub mod pdu; +pub mod state_res; + +pub use event::Event; +pub use pdu::{PduBuilder, PduCount, PduEvent, PduId, RawPduId, StateKey}; +pub use state_res::{EventTypeExt, RoomVersion, StateMap, TypeStateKey}; diff --git a/src/core/pdu/mod.rs b/src/core/matrix/pdu.rs similarity index 72% rename from src/core/pdu/mod.rs rename to src/core/matrix/pdu.rs index 9fb2a3da..7e1ecfa8 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/matrix/pdu.rs @@ -1,7 +1,6 @@ mod builder; mod content; mod count; -mod event; mod event_id; mod filter; mod id; @@ -17,8 +16,8 @@ mod unsigned; use std::cmp::Ordering; use ruma::{ - CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, - OwnedUserId, UInt, events::TimelineEventType, + CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, + OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, UInt, UserId, events::TimelineEventType, }; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue as RawJsonValue; @@ -27,12 +26,12 @@ pub use self::{ Count as PduCount, Id as PduId, Pdu as PduEvent, RawId as RawPduId, builder::{Builder, Builder as PduBuilder}, count::Count, - event::Event, event_id::*, id::*, raw_id::*, state_key::{ShortStateKey, StateKey}, }; +use super::Event; use crate::Result; /// Persistent Data Unit (Event) @@ -79,6 +78,36 @@ impl Pdu { } } +impl Event for Pdu { + type Id = OwnedEventId; + + fn event_id(&self) -> &Self::Id { &self.event_id } + + fn room_id(&self) -> &RoomId { &self.room_id } + + fn sender(&self) -> &UserId { &self.sender } + + fn event_type(&self) -> &TimelineEventType { &self.kind } + + fn content(&self) -> &RawJsonValue { &self.content } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { + MilliSecondsSinceUnixEpoch(self.origin_server_ts) + } + + fn state_key(&self) -> Option<&str> { self.state_key.as_deref() } + + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { + self.prev_events.iter() + } + + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { + self.auth_events.iter() + } + + fn redacts(&self) -> Option<&Self::Id> { self.redacts.as_ref() } +} + /// Prevent derived equality which wouldn't limit itself to event_id impl Eq for Pdu {} @@ 
-87,12 +116,12 @@ impl PartialEq for Pdu { fn eq(&self, other: &Self) -> bool { self.event_id == other.event_id } } -/// Ordering determined by the Pdu's ID, not the memory representations. -impl PartialOrd for Pdu { - fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } -} - /// Ordering determined by the Pdu's ID, not the memory representations. impl Ord for Pdu { fn cmp(&self, other: &Self) -> Ordering { self.event_id.cmp(&other.event_id) } } + +/// Ordering determined by the Pdu's ID, not the memory representations. +impl PartialOrd for Pdu { + fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } +} diff --git a/src/core/pdu/builder.rs b/src/core/matrix/pdu/builder.rs similarity index 100% rename from src/core/pdu/builder.rs rename to src/core/matrix/pdu/builder.rs diff --git a/src/core/pdu/content.rs b/src/core/matrix/pdu/content.rs similarity index 100% rename from src/core/pdu/content.rs rename to src/core/matrix/pdu/content.rs diff --git a/src/core/pdu/count.rs b/src/core/matrix/pdu/count.rs similarity index 100% rename from src/core/pdu/count.rs rename to src/core/matrix/pdu/count.rs diff --git a/src/core/pdu/event_id.rs b/src/core/matrix/pdu/event_id.rs similarity index 100% rename from src/core/pdu/event_id.rs rename to src/core/matrix/pdu/event_id.rs diff --git a/src/core/pdu/filter.rs b/src/core/matrix/pdu/filter.rs similarity index 100% rename from src/core/pdu/filter.rs rename to src/core/matrix/pdu/filter.rs diff --git a/src/core/pdu/id.rs b/src/core/matrix/pdu/id.rs similarity index 100% rename from src/core/pdu/id.rs rename to src/core/matrix/pdu/id.rs diff --git a/src/core/pdu/raw_id.rs b/src/core/matrix/pdu/raw_id.rs similarity index 100% rename from src/core/pdu/raw_id.rs rename to src/core/matrix/pdu/raw_id.rs diff --git a/src/core/pdu/redact.rs b/src/core/matrix/pdu/redact.rs similarity index 100% rename from src/core/pdu/redact.rs rename to src/core/matrix/pdu/redact.rs diff --git a/src/core/pdu/relation.rs b/src/core/matrix/pdu/relation.rs similarity index 100% rename from src/core/pdu/relation.rs rename to src/core/matrix/pdu/relation.rs diff --git a/src/core/pdu/state_key.rs b/src/core/matrix/pdu/state_key.rs similarity index 100% rename from src/core/pdu/state_key.rs rename to src/core/matrix/pdu/state_key.rs diff --git a/src/core/pdu/strip.rs b/src/core/matrix/pdu/strip.rs similarity index 100% rename from src/core/pdu/strip.rs rename to src/core/matrix/pdu/strip.rs diff --git a/src/core/pdu/tests.rs b/src/core/matrix/pdu/tests.rs similarity index 100% rename from src/core/pdu/tests.rs rename to src/core/matrix/pdu/tests.rs diff --git a/src/core/pdu/unsigned.rs b/src/core/matrix/pdu/unsigned.rs similarity index 100% rename from src/core/pdu/unsigned.rs rename to src/core/matrix/pdu/unsigned.rs diff --git a/src/core/state_res/LICENSE b/src/core/matrix/state_res/LICENSE similarity index 100% rename from src/core/state_res/LICENSE rename to src/core/matrix/state_res/LICENSE diff --git a/src/core/state_res/benches.rs b/src/core/matrix/state_res/benches.rs similarity index 100% rename from src/core/state_res/benches.rs rename to src/core/matrix/state_res/benches.rs diff --git a/src/core/state_res/error.rs b/src/core/matrix/state_res/error.rs similarity index 100% rename from src/core/state_res/error.rs rename to src/core/matrix/state_res/error.rs diff --git a/src/core/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs similarity index 100% rename from src/core/state_res/event_auth.rs rename to 
src/core/matrix/state_res/event_auth.rs diff --git a/src/core/state_res/mod.rs b/src/core/matrix/state_res/mod.rs similarity index 99% rename from src/core/state_res/mod.rs rename to src/core/matrix/state_res/mod.rs index 1db92e59..93c00d15 100644 --- a/src/core/state_res/mod.rs +++ b/src/core/matrix/state_res/mod.rs @@ -4,7 +4,6 @@ pub(crate) mod error; pub mod event_auth; mod power_levels; mod room_version; -mod state_event; #[cfg(test)] mod test_utils; @@ -36,9 +35,12 @@ use self::power_levels::PowerLevelsContentFields; pub use self::{ event_auth::{auth_check, auth_types_for_event}, room_version::RoomVersion, - state_event::Event, }; -use crate::{debug, pdu::StateKey, trace, warn}; +use crate::{ + debug, + matrix::{event::Event, pdu::StateKey}, + trace, warn, +}; /// A mapping of event type and state_key to some value `T`, usually an /// `EventId`. diff --git a/src/core/state_res/outcomes.txt b/src/core/matrix/state_res/outcomes.txt similarity index 100% rename from src/core/state_res/outcomes.txt rename to src/core/matrix/state_res/outcomes.txt diff --git a/src/core/state_res/power_levels.rs b/src/core/matrix/state_res/power_levels.rs similarity index 99% rename from src/core/state_res/power_levels.rs rename to src/core/matrix/state_res/power_levels.rs index 045b1666..19ba8fb9 100644 --- a/src/core/state_res/power_levels.rs +++ b/src/core/matrix/state_res/power_levels.rs @@ -11,9 +11,9 @@ use ruma::{ }; use serde::Deserialize; use serde_json::{Error, from_str as from_json_str}; -use tracing::error; use super::{Result, RoomVersion}; +use crate::error; #[derive(Deserialize)] struct IntRoomPowerLevelsEventContent { diff --git a/src/core/state_res/room_version.rs b/src/core/matrix/state_res/room_version.rs similarity index 100% rename from src/core/state_res/room_version.rs rename to src/core/matrix/state_res/room_version.rs diff --git a/src/core/state_res/test_utils.rs b/src/core/matrix/state_res/test_utils.rs similarity index 99% rename from src/core/state_res/test_utils.rs rename to src/core/matrix/state_res/test_utils.rs index d96ee927..f2ee4238 100644 --- a/src/core/state_res/test_utils.rs +++ b/src/core/matrix/state_res/test_utils.rs @@ -28,7 +28,10 @@ use serde_json::{ pub(crate) use self::event::PduEvent; use super::auth_types_for_event; -use crate::{Event, EventTypeExt, Result, StateMap, info}; +use crate::{ + Result, info, + matrix::{Event, EventTypeExt, StateMap}, +}; static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); diff --git a/src/core/mod.rs b/src/core/mod.rs index 80ebbdcb..b91cdf0b 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -6,11 +6,10 @@ pub mod debug; pub mod error; pub mod info; pub mod log; +pub mod matrix; pub mod metrics; pub mod mods; -pub mod pdu; pub mod server; -pub mod state_res; pub mod utils; pub use ::arrayvec; @@ -23,9 +22,8 @@ pub use ::tracing; pub use config::Config; pub use error::Error; pub use info::{rustc_flags_capture, version, version::version}; -pub use pdu::{Event, PduBuilder, PduCount, PduEvent, PduId, RawPduId, StateKey}; +pub use matrix::{Event, EventTypeExt, PduCount, PduEvent, PduId, RoomVersion, pdu, state_res}; pub use server::Server; -pub use state_res::{EventTypeExt, RoomVersion, StateMap, TypeStateKey}; pub use utils::{ctor, dtor, implement, result, result::Result}; pub use crate as conduwuit_core; diff --git a/src/core/pdu/event.rs b/src/core/pdu/event.rs deleted file mode 100644 index 09ad1666..00000000 --- a/src/core/pdu/event.rs +++ /dev/null @@ -1,35 +0,0 @@ -use ruma::{MilliSecondsSinceUnixEpoch, OwnedEventId, 
RoomId, UserId, events::TimelineEventType}; -use serde_json::value::RawValue as RawJsonValue; - -use super::Pdu; -pub use crate::state_res::Event; - -impl Event for Pdu { - type Id = OwnedEventId; - - fn event_id(&self) -> &Self::Id { &self.event_id } - - fn room_id(&self) -> &RoomId { &self.room_id } - - fn sender(&self) -> &UserId { &self.sender } - - fn event_type(&self) -> &TimelineEventType { &self.kind } - - fn content(&self) -> &RawJsonValue { &self.content } - - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { - MilliSecondsSinceUnixEpoch(self.origin_server_ts) - } - - fn state_key(&self) -> Option<&str> { self.state_key.as_deref() } - - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { - self.prev_events.iter() - } - - fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { - self.auth_events.iter() - } - - fn redacts(&self) -> Option<&Self::Id> { self.redacts.as_ref() } -} diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs index 5173987a..6780b7ae 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -1,6 +1,6 @@ use std::collections::BTreeMap; -use conduwuit::{Err, Result, debug_info, debug_warn, error, implement}; +use conduwuit::{Err, Result, debug_info, debug_warn, error, implement, matrix::pdu::PduBuilder}; use ruma::{ RoomId, UserId, events::{ @@ -14,8 +14,6 @@ use ruma::{ }, }; -use crate::pdu::PduBuilder; - /// Invite the user to the conduwuit admin room. /// /// This is equivalent to granting server admin privileges. diff --git a/src/service/mod.rs b/src/service/mod.rs index 8f4a84b0..63a51213 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -31,7 +31,6 @@ pub mod users; extern crate conduwuit_core as conduwuit; extern crate conduwuit_database as database; -pub use conduwuit::{PduBuilder, PduCount, PduEvent, pdu}; pub(crate) use service::{Args, Dep, Service}; pub use crate::services::Services; diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 0402ff14..eb38c2c3 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -5,7 +5,9 @@ use std::{ }; use conduwuit::{ - PduEvent, Result, StateMap, debug, err, implement, trace, + Result, debug, err, implement, + matrix::{PduEvent, StateMap}, + trace, utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryWidebandExt}, }; use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::try_join}; diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index 086dc6bd..97d3df97 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -1,7 +1,8 @@ use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::Instant}; use conduwuit::{ - Err, EventTypeExt, PduEvent, Result, StateKey, debug, debug_info, err, implement, state_res, + Err, Result, debug, debug_info, err, implement, + matrix::{EventTypeExt, PduEvent, StateKey, state_res}, trace, utils::stream::{BroadbandExt, ReadyExt}, warn, diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index a1b0263a..12b56935 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,11 +1,9 @@ use std::sync::Arc; -use conduwuit::{Result, implement}; -use database::{Deserialized, Json, Map}; +use conduwuit::{Result, implement, 
matrix::pdu::PduEvent}; +use conduwuit_database::{Deserialized, Json, Map}; use ruma::{CanonicalJsonObject, EventId}; -use crate::PduEvent; - pub struct Service { db: Data, } diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index d6239aee..69e859c4 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -2,7 +2,11 @@ mod data; use std::{collections::BTreeMap, sync::Arc}; -use conduwuit::{PduCount, PduId, RawPduId, Result, debug, err, warn}; +use conduwuit::{ + Result, debug, err, + matrix::pdu::{PduCount, PduId, RawPduId}, + warn, +}; use futures::{Stream, TryFutureExt, try_join}; use ruma::{ OwnedEventId, OwnedUserId, RoomId, UserId, diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 3980617e..06ff6493 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -1,7 +1,7 @@ use std::{borrow::Borrow, fmt::Debug, mem::size_of_val, sync::Arc}; -pub use conduwuit::pdu::{ShortEventId, ShortId, ShortRoomId, ShortStateKey}; -use conduwuit::{Result, StateKey, err, implement, utils, utils::IterStream}; +pub use conduwuit::matrix::pdu::{ShortEventId, ShortId, ShortRoomId, ShortStateKey}; +use conduwuit::{Result, err, implement, matrix::StateKey, utils, utils::IterStream}; use database::{Deserialized, Get, Map, Qry}; use futures::{Stream, StreamExt}; use ruma::{EventId, RoomId, events::StateEventType}; diff --git a/src/service/rooms/state_accessor/room_state.rs b/src/service/rooms/state_accessor/room_state.rs index 642cd5d2..89fa2a83 100644 --- a/src/service/rooms/state_accessor/room_state.rs +++ b/src/service/rooms/state_accessor/room_state.rs @@ -1,6 +1,9 @@ use std::borrow::Borrow; -use conduwuit::{PduEvent, Result, StateKey, err, implement}; +use conduwuit::{ + Result, err, implement, + matrix::{PduEvent, StateKey}, +}; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{EventId, RoomId, events::StateEventType}; use serde::Deserialize; diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs index 8f2dd76f..169e69e9 100644 --- a/src/service/rooms/state_accessor/state.rs +++ b/src/service/rooms/state_accessor/state.rs @@ -1,13 +1,15 @@ use std::{borrow::Borrow, ops::Deref, sync::Arc}; use conduwuit::{ - PduEvent, Result, StateKey, at, err, implement, pair_of, + Result, at, err, implement, + matrix::{PduEvent, StateKey}, + pair_of, utils::{ result::FlatOk, stream::{BroadbandExt, IterStream, ReadyExt, TryIgnore}, }, }; -use database::Deserialized; +use conduwuit_database::Deserialized; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, future::try_join, pin_mut}; use ruma::{ EventId, OwnedEventId, UserId, diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs index 7f9a7515..a680df55 100644 --- a/src/service/rooms/threads/mod.rs +++ b/src/service/rooms/threads/mod.rs @@ -1,13 +1,14 @@ use std::{collections::BTreeMap, sync::Arc}; use conduwuit::{ - PduCount, PduEvent, PduId, RawPduId, Result, err, + Result, err, + matrix::pdu::{PduCount, PduEvent, PduId, RawPduId}, utils::{ ReadyExt, stream::{TryIgnore, WidebandExt}, }, }; -use database::{Deserialized, Map}; +use conduwuit_database::{Deserialized, Map}; use futures::{Stream, StreamExt}; use ruma::{ CanonicalJsonValue, EventId, OwnedUserId, RoomId, UserId, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index dc359d22..947e1c38 100644 --- a/src/service/rooms/timeline/mod.rs +++ 
b/src/service/rooms/timeline/mod.rs @@ -10,16 +10,19 @@ use std::{ }; use async_trait::async_trait; +pub use conduwuit::matrix::pdu::{PduId, RawPduId}; use conduwuit::{ Err, Error, Result, Server, at, debug, debug_warn, err, error, implement, info, - pdu::{EventHash, PduBuilder, PduCount, PduEvent, gen_event_id}, - state_res::{self, Event, RoomVersion}, + matrix::{ + Event, + pdu::{EventHash, PduBuilder, PduCount, PduEvent, gen_event_id}, + state_res::{self, RoomVersion}, + }, utils::{ self, IterStream, MutexMap, MutexMapGuard, ReadyExt, future::TryExtExt, stream::TryIgnore, }, validated, warn, }; -pub use conduwuit::{PduId, RawPduId}; use futures::{ Future, FutureExt, Stream, StreamExt, TryStreamExt, future, future::ready, pin_mut, }; From bb8320a691eda03c202bc428e75a616b0021fe03 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 4 Apr 2025 02:39:40 +0000 Subject: [PATCH 313/328] abstract and encapsulate the awkward OptionFuture into Stream pattern Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 45 +++----------------------- src/core/utils/future/mod.rs | 2 ++ src/core/utils/future/option_ext.rs | 3 ++ src/core/utils/future/option_stream.rs | 25 ++++++++++++++ 4 files changed, 35 insertions(+), 40 deletions(-) create mode 100644 src/core/utils/future/option_stream.rs diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 12731ff6..24930941 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -15,6 +15,7 @@ use conduwuit::{ result::FlatOk, utils::{ self, BoolExt, IterStream, ReadyExt, TryFutureExtExt, + future::OptionStream, math::ruma_from_u64, stream::{BroadbandExt, Tools, TryExpect, WidebandExt}, }, @@ -1036,7 +1037,7 @@ async fn calculate_state_incremental<'a>( }) .into(); - let state_diff: OptionFuture<_> = (!full_state && state_changed) + let state_diff_ids: OptionFuture<_> = (!full_state && state_changed) .then(|| { StreamExt::into_future( services @@ -1061,45 +1062,9 @@ async fn calculate_state_incremental<'a>( }) .into(); - let lazy_state_ids = lazy_state_ids - .map(|opt| { - opt.map(|(curr, next)| { - let opt = curr; - let iter = Option::into_iter(opt); - IterStream::stream(iter).chain(next) - }) - }) - .map(Option::into_iter) - .map(IterStream::stream) - .flatten_stream() - .flatten(); - - let state_diff_ids = state_diff - .map(|opt| { - opt.map(|(curr, next)| { - let opt = curr; - let iter = Option::into_iter(opt); - IterStream::stream(iter).chain(next) - }) - }) - .map(Option::into_iter) - .map(IterStream::stream) - .flatten_stream() - .flatten(); - let state_events = current_state_ids - .map(|opt| { - opt.map(|(curr, next)| { - let opt = curr; - let iter = Option::into_iter(opt); - IterStream::stream(iter).chain(next) - }) - }) - .map(Option::into_iter) - .map(IterStream::stream) - .flatten_stream() - .flatten() - .chain(state_diff_ids) + .stream() + .chain(state_diff_ids.stream()) .broad_filter_map(|(shortstatekey, shorteventid)| async move { if witness.is_none() || encrypted_room { return Some(shorteventid); @@ -1107,7 +1072,7 @@ async fn calculate_state_incremental<'a>( lazy_filter(services, sender_user, shortstatekey, shorteventid).await }) - .chain(lazy_state_ids) + .chain(lazy_state_ids.stream()) .broad_filter_map(|shorteventid| { services .rooms diff --git a/src/core/utils/future/mod.rs b/src/core/utils/future/mod.rs index e1d96941..4edd0102 100644 --- a/src/core/utils/future/mod.rs +++ b/src/core/utils/future/mod.rs @@ -1,9 +1,11 @@ mod bool_ext; mod ext_ext; mod option_ext; +mod option_stream; mod try_ext_ext; pub 
use bool_ext::{BoolExt, and, or}; pub use ext_ext::ExtExt; pub use option_ext::OptionExt; +pub use option_stream::OptionStream; pub use try_ext_ext::TryExtExt; diff --git a/src/core/utils/future/option_ext.rs b/src/core/utils/future/option_ext.rs index d553e5dc..920dd044 100644 --- a/src/core/utils/future/option_ext.rs +++ b/src/core/utils/future/option_ext.rs @@ -11,11 +11,14 @@ pub trait OptionExt { impl OptionExt for OptionFuture where Fut: Future + Send, + T: Send, { + #[inline] fn is_none_or(self, f: impl FnOnce(&T) -> bool + Send) -> impl Future + Send { self.map(|o| o.as_ref().is_none_or(f)) } + #[inline] fn is_some_and(self, f: impl FnOnce(&T) -> bool + Send) -> impl Future + Send { self.map(|o| o.as_ref().is_some_and(f)) } diff --git a/src/core/utils/future/option_stream.rs b/src/core/utils/future/option_stream.rs new file mode 100644 index 00000000..81130c87 --- /dev/null +++ b/src/core/utils/future/option_stream.rs @@ -0,0 +1,25 @@ +use futures::{Future, FutureExt, Stream, StreamExt, future::OptionFuture}; + +use super::super::IterStream; + +pub trait OptionStream { + fn stream(self) -> impl Stream + Send; +} + +impl OptionStream for OptionFuture +where + Fut: Future + Send, + S: Stream + Send, + O: IntoIterator + Send, + ::IntoIter: Send, + T: Send, +{ + #[inline] + fn stream(self) -> impl Stream + Send { + self.map(|opt| opt.map(|(curr, next)| curr.into_iter().stream().chain(next))) + .map(Option::into_iter) + .map(IterStream::stream) + .flatten_stream() + .flatten() + } +} From 58b8c7516a755c0300be1fe0d36b819ebda36ffb Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 09:02:12 +0000 Subject: [PATCH 314/328] extend extract_variant to multiple variants Signed-off-by: Jason Volk --- src/core/utils/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 7593990c..117fb739 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -49,10 +49,10 @@ pub fn exchange(state: &mut T, source: T) -> T { std::mem::replace(state, sou #[macro_export] macro_rules! extract_variant { - ($e:expr_2021, $variant:path) => { + ( $e:expr_2021, $( $variant:path )|* ) => { match $e { - | $variant(value) => Some(value), - | _ => None, + $( $variant(value) => Some(value), )* + _ => None, } }; } From a212bf7cfca7a6547681f46a438ecc278a905aab Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 5 Apr 2025 14:00:40 -0400 Subject: [PATCH 315/328] update default room version to v11 Signed-off-by: June Clementine Strawberry --- conduwuit-example.toml | 4 ++-- src/core/config/mod.rs | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 75ecddab..46459547 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -527,9 +527,9 @@ # Default room version conduwuit will create rooms with. # -# Per spec, room version 10 is the default. +# Per spec, room version 11 is the default. # -#default_room_version = 10 +#default_room_version = 11 # This item is undocumented. Please contribute documentation for it. # diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 7be140a5..bb509a0d 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -640,9 +640,9 @@ pub struct Config { /// Default room version conduwuit will create rooms with. /// - /// Per spec, room version 10 is the default. + /// Per spec, room version 11 is the default. 
/// - /// default: 10 + /// default: 11 #[serde(default = "default_default_room_version")] pub default_room_version: RoomVersionId, @@ -2170,7 +2170,7 @@ fn default_rocksdb_stats_level() -> u8 { 1 } // I know, it's a great name #[must_use] #[inline] -pub fn default_default_room_version() -> RoomVersionId { RoomVersionId::V10 } +pub fn default_default_room_version() -> RoomVersionId { RoomVersionId::V11 } fn default_ip_range_denylist() -> Vec { vec![ From c7246662f4b2c892667b253aff1560523d8e2cff Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 5 Apr 2025 14:07:37 -0400 Subject: [PATCH 316/328] try partially reverting 94b107b42b722aff9518f64ad603ce01665b25f3 Signed-off-by: June Clementine Strawberry --- src/api/client/keys.rs | 43 ++++++++++-------------------------------- 1 file changed, 10 insertions(+), 33 deletions(-) diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 6865c2a4..adbdd715 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -11,7 +11,7 @@ use ruma::{ error::ErrorKind, keys::{ claim_keys, get_key_changes, get_keys, upload_keys, - upload_signatures::{self, v3::Failure}, + upload_signatures::{self}, upload_signing_keys, }, uiaa::{AuthFlow, AuthType, UiaaInfo}, @@ -308,82 +308,59 @@ async fn check_for_new_keys( /// /// Uploads end-to-end key signatures from the sender user. /// -/// TODO: clean this timo-code up more. tried to improve it a bit to stop -/// exploding the entire request on bad sigs, but needs way more work. +/// TODO: clean this timo-code up more and integrate failures. tried to improve +/// it a bit to stop exploding the entire request on bad sigs, but needs way +/// more work. pub(crate) async fn upload_signatures_route( State(services): State, body: Ruma, ) -> Result { - use upload_signatures::v3::FailureErrorCode::*; - if body.signed_keys.is_empty() { debug!("Empty signed_keys sent in key signature upload"); return Ok(upload_signatures::v3::Response::new()); } let sender_user = body.sender_user(); - let mut failures: BTreeMap> = BTreeMap::new(); - let mut failure_reasons: BTreeMap = BTreeMap::new(); - let failure = Failure { - errcode: InvalidSignature, - error: String::new(), - }; for (user_id, keys) in &body.signed_keys { for (key_id, key) in keys { let Ok(key) = serde_json::to_value(key) .inspect_err(|e| debug_warn!(?key_id, "Invalid \"key\" JSON: {e}")) else { - let mut failure = failure.clone(); - failure.error = String::from("Invalid \"key\" JSON"); - failure_reasons.insert(key_id.to_owned(), failure); continue; }; let Some(signatures) = key.get("signatures") else { - let mut failure = failure.clone(); - failure.error = String::from("Missing \"signatures\" field"); - failure_reasons.insert(key_id.to_owned(), failure); continue; }; let Some(sender_user_val) = signatures.get(sender_user.to_string()) else { - let mut failure = failure.clone(); - failure.error = String::from("Invalid user in signatures field"); - failure_reasons.insert(key_id.to_owned(), failure); continue; }; let Some(sender_user_object) = sender_user_val.as_object() else { - let mut failure = failure.clone(); - failure.error = String::from("signatures field is not a JSON object"); - failure_reasons.insert(key_id.to_owned(), failure); continue; }; for (signature, val) in sender_user_object.clone() { - let signature = (signature, val.to_string()); + let Some(val) = val.as_str().map(ToOwned::to_owned) else { + continue; + }; + let signature = (signature, val); - if let Err(e) = services + if let Err(_e) = services .users .sign_key(user_id, 
key_id, signature, sender_user) .await .inspect_err(|e| debug_warn!("{e}")) { - let mut failure = failure.clone(); - failure.error = format!("Error signing key: {e}"); - failure_reasons.insert(key_id.to_owned(), failure); continue; } } } - - if !failure_reasons.is_empty() { - failures.insert(user_id.to_owned(), failure_reasons.clone()); - } } - Ok(upload_signatures::v3::Response { failures }) + Ok(upload_signatures::v3::Response { failures: BTreeMap::new() }) } /// # `POST /_matrix/client/r0/keys/changes` From e28ae8fb4d442cba0eb52728a129372289c85ccd Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 5 Apr 2025 14:26:00 -0400 Subject: [PATCH 317/328] downgrade `deranged` crate Signed-off-by: June Clementine Strawberry --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0753f81d..86833adb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1279,9 +1279,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.4.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cfac68e08048ae1883171632c2aef3ebc555621ae56fbccce1cbf22dd7f058" +checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" dependencies = [ "powerfmt", ] From d6cc447add272f9eff0b2c77fb751dcf055d3208 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 21:26:53 +0000 Subject: [PATCH 318/328] simplify acl brick-check conditions Signed-off-by: Jason Volk --- src/api/client/state.rs | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 5c5c71f2..2ddc8f14 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -211,7 +211,7 @@ async fn allowed_to_send_state_event( // irreversible mistakes match json.deserialize_as::() { | Ok(acl_content) => { - if acl_content.allow.is_empty() { + if acl_content.allow_is_empty() { return Err!(Request(BadJson(debug_warn!( ?room_id, "Sending an ACL event with an empty allow key will permanently \ @@ -220,9 +220,7 @@ async fn allowed_to_send_state_event( )))); } - if acl_content.deny.contains(&String::from("*")) - && acl_content.allow.contains(&String::from("*")) - { + if acl_content.deny_contains("*") && acl_content.allow_contains("*") { return Err!(Request(BadJson(debug_warn!( ?room_id, "Sending an ACL event with a deny and allow key value of \"*\" will \ @@ -231,11 +229,9 @@ async fn allowed_to_send_state_event( )))); } - if acl_content.deny.contains(&String::from("*")) + if acl_content.deny_contains("*") && !acl_content.is_allowed(services.globals.server_name()) - && !acl_content - .allow - .contains(&services.globals.server_name().to_string()) + && !acl_content.allow_contains(services.globals.server_name().as_str()) { return Err!(Request(BadJson(debug_warn!( ?room_id, @@ -245,11 +241,9 @@ async fn allowed_to_send_state_event( )))); } - if !acl_content.allow.contains(&String::from("*")) + if !acl_content.allow_contains("*") && !acl_content.is_allowed(services.globals.server_name()) - && !acl_content - .allow - .contains(&services.globals.server_name().to_string()) + && !acl_content.allow_contains(services.globals.server_name().as_str()) { return Err!(Request(BadJson(debug_warn!( ?room_id, From 500faa8d7fcefab2f5bee867bf268f87fc0643fa Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 4 Apr 2025 01:05:43 +0000 Subject: [PATCH 319/328] simplify space join rules related Signed-off-by: Jason Volk --- Cargo.lock | 22 ++--- Cargo.toml | 2 +- 
src/api/client/room/summary.rs | 70 +++++++++------ src/service/rooms/spaces/mod.rs | 110 ++++++++++++------------ src/service/rooms/state_accessor/mod.rs | 37 +------- 5 files changed, 113 insertions(+), 128 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 86833adb..c2c5182f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3654,7 +3654,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "assign", "js_int", @@ -3674,7 +3674,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "js_int", "ruma-common", @@ -3686,7 +3686,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "as_variant", "assign", @@ -3709,7 +3709,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "as_variant", "base64 0.22.1", @@ -3741,7 +3741,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "as_variant", "indexmap 2.8.0", @@ -3766,7 +3766,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "bytes", "headers", @@ -3788,7 +3788,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3797,7 +3797,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "js_int", "ruma-common", @@ -3807,7 +3807,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3822,7 +3822,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "js_int", "ruma-common", @@ -3834,7 +3834,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index a44fc0f0..b1c5acb5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -350,7 +350,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +rev = "920148dca1076454ca0ca5d43b5ce1aa708381d4" features = [ "compat", "rand", diff --git a/src/api/client/room/summary.rs b/src/api/client/room/summary.rs index 34820e83..2fa81bd2 100644 --- a/src/api/client/room/summary.rs +++ b/src/api/client/room/summary.rs @@ -4,9 +4,13 @@ use conduwuit::{ Err, Result, debug_warn, utils::{IterStream, future::TryExtExt}, }; -use futures::{FutureExt, StreamExt, future::join3, stream::FuturesUnordered}; +use futures::{ + FutureExt, StreamExt, + future::{OptionFuture, join3}, + stream::FuturesUnordered, +}; use ruma::{ - OwnedRoomId, OwnedServerName, RoomId, UserId, + OwnedServerName, RoomId, UserId, api::{ client::room::get_summary, federation::space::{SpaceHierarchyParentSummary, get_hierarchy}, @@ -91,13 +95,9 @@ async fn room_summary_response( join_rule: room.join_rule, room_type: room.room_type, room_version: room.room_version, - membership: if sender_user.is_none() { - None - } else { - Some(MembershipState::Leave) - }, encryption: room.encryption, allowed_room_ids: room.allowed_room_ids, + membership: sender_user.is_some().then_some(MembershipState::Leave), }) } @@ -106,20 +106,22 @@ async fn local_room_summary_response( room_id: &RoomId, sender_user: Option<&UserId>, ) -> Result { - let join_rule = services.rooms.state_accessor.get_space_join_rule(room_id); + let join_rule = services.rooms.state_accessor.get_join_rules(room_id); + let world_readable = services.rooms.state_accessor.is_world_readable(room_id); + let guest_can_join = services.rooms.state_accessor.guest_can_join(room_id); - let ((join_rule, allowed_room_ids), world_readable, guest_can_join) = + let (join_rule, world_readable, guest_can_join) = join3(join_rule, world_readable, guest_can_join).await; user_can_see_summary( services, room_id, - &join_rule, + &join_rule.clone().into(), 
guest_can_join, world_readable, - &allowed_room_ids, + join_rule.allowed_rooms(), sender_user, ) .await?; @@ -129,26 +131,43 @@ async fn local_room_summary_response( .state_accessor .get_canonical_alias(room_id) .ok(); + let name = services.rooms.state_accessor.get_name(room_id).ok(); + let topic = services.rooms.state_accessor.get_room_topic(room_id).ok(); + let room_type = services.rooms.state_accessor.get_room_type(room_id).ok(); + let avatar_url = services .rooms .state_accessor .get_avatar(room_id) .map(|res| res.into_option().unwrap_or_default().url); + let room_version = services.rooms.state.get_room_version(room_id).ok(); + let encryption = services .rooms .state_accessor .get_room_encryption(room_id) .ok(); + let num_joined_members = services .rooms .state_cache .room_joined_count(room_id) .unwrap_or(0); + let membership: OptionFuture<_> = sender_user + .map(|sender_user| { + services + .rooms + .state_accessor + .get_member(room_id, sender_user) + .map_ok_or(MembershipState::Leave, |content| content.membership) + }) + .into(); + let ( canonical_alias, name, @@ -158,6 +177,7 @@ async fn local_room_summary_response( room_type, room_version, encryption, + membership, ) = futures::join!( canonical_alias, name, @@ -167,6 +187,7 @@ async fn local_room_summary_response( room_type, room_version, encryption, + membership, ); Ok(get_summary::msc3266::Response { @@ -178,21 +199,12 @@ async fn local_room_summary_response( num_joined_members: num_joined_members.try_into().unwrap_or_default(), topic, world_readable, - join_rule, room_type, room_version, - membership: if let Some(sender_user) = sender_user { - services - .rooms - .state_accessor - .get_member(room_id, sender_user) - .await - .map_or(Some(MembershipState::Leave), |content| Some(content.membership)) - } else { - None - }, encryption, - allowed_room_ids, + membership, + allowed_room_ids: join_rule.allowed_rooms().map(Into::into).collect(), + join_rule: join_rule.into(), }) } @@ -241,7 +253,7 @@ async fn remote_room_summary_hierarchy_response( &room.join_rule, room.guest_can_join, room.world_readable, - &room.allowed_room_ids, + room.allowed_room_ids.iter().map(AsRef::as_ref), sender_user, ) .await @@ -254,15 +266,18 @@ async fn remote_room_summary_hierarchy_response( ))) } -async fn user_can_see_summary( +async fn user_can_see_summary<'a, I>( services: &Services, room_id: &RoomId, join_rule: &SpaceRoomJoinRule, guest_can_join: bool, world_readable: bool, - allowed_room_ids: &[OwnedRoomId], + allowed_room_ids: I, sender_user: Option<&UserId>, -) -> Result { +) -> Result +where + I: Iterator + Send, +{ match sender_user { | Some(sender_user) => { let user_can_see_state_events = services @@ -271,7 +286,6 @@ async fn user_can_see_summary( .user_can_see_state_events(sender_user, room_id); let is_guest = services.users.is_deactivated(sender_user).unwrap_or(false); let user_in_allowed_restricted_room = allowed_room_ids - .iter() .stream() .any(|room| services.rooms.state_cache.is_joined(sender_user, room)); diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index f51a5e3a..ea9756ba 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -121,21 +121,22 @@ pub async fn get_summary_and_children_local( | None => (), // cache miss | Some(None) => return Ok(None), | Some(Some(cached)) => { - return Ok(Some( - if self - .is_accessible_child( - current_room, - &cached.summary.join_rule, - identifier, - &cached.summary.allowed_room_ids, - ) - .await - { - 
SummaryAccessibility::Accessible(cached.summary.clone()) - } else { - SummaryAccessibility::Inaccessible - }, - )); + let allowed_rooms = cached.summary.allowed_room_ids.iter().map(AsRef::as_ref); + + let is_accessible_child = self.is_accessible_child( + current_room, + &cached.summary.join_rule, + identifier, + allowed_rooms, + ); + + let accessibility = if is_accessible_child.await { + SummaryAccessibility::Accessible(cached.summary.clone()) + } else { + SummaryAccessibility::Inaccessible + }; + + return Ok(Some(accessibility)); }, } @@ -145,12 +146,11 @@ pub async fn get_summary_and_children_local( .collect() .await; - let summary = self + let Ok(summary) = self .get_room_summary(current_room, children_pdus, identifier) .boxed() - .await; - - let Ok(summary) = summary else { + .await + else { return Ok(None); }; @@ -217,20 +217,19 @@ async fn get_summary_and_children_federation( .await; let identifier = Identifier::UserId(user_id); + let allowed_room_ids = summary.allowed_room_ids.iter().map(AsRef::as_ref); + let is_accessible_child = self - .is_accessible_child( - current_room, - &summary.join_rule, - &identifier, - &summary.allowed_room_ids, - ) + .is_accessible_child(current_room, &summary.join_rule, &identifier, allowed_room_ids) .await; - if is_accessible_child { - return Ok(Some(SummaryAccessibility::Accessible(summary))); - } + let accessibility = if is_accessible_child { + SummaryAccessibility::Accessible(summary) + } else { + SummaryAccessibility::Inaccessible + }; - Ok(Some(SummaryAccessibility::Inaccessible)) + Ok(Some(accessibility)) } /// Simply returns the stripped m.space.child events of a room @@ -305,14 +304,15 @@ async fn get_room_summary( children_state: Vec>, identifier: &Identifier<'_>, ) -> Result { - let (join_rule, allowed_room_ids) = self - .services - .state_accessor - .get_space_join_rule(room_id) - .await; + let join_rule = self.services.state_accessor.get_join_rules(room_id).await; let is_accessible_child = self - .is_accessible_child(room_id, &join_rule, identifier, &allowed_room_ids) + .is_accessible_child( + room_id, + &join_rule.clone().into(), + identifier, + join_rule.allowed_rooms(), + ) .await; if !is_accessible_child { @@ -379,7 +379,7 @@ async fn get_room_summary( encryption, ); - Ok(SpaceHierarchyParentSummary { + let summary = SpaceHierarchyParentSummary { canonical_alias, name, topic, @@ -388,24 +388,29 @@ async fn get_room_summary( avatar_url, room_type, children_state, - allowed_room_ids, - join_rule, - room_id: room_id.to_owned(), - num_joined_members: num_joined_members.try_into().unwrap_or_default(), encryption, room_version, - }) + room_id: room_id.to_owned(), + num_joined_members: num_joined_members.try_into().unwrap_or_default(), + allowed_room_ids: join_rule.allowed_rooms().map(Into::into).collect(), + join_rule: join_rule.clone().into(), + }; + + Ok(summary) } /// With the given identifier, checks if a room is accessable #[implement(Service)] -async fn is_accessible_child( +async fn is_accessible_child<'a, I>( &self, current_room: &RoomId, join_rule: &SpaceRoomJoinRule, identifier: &Identifier<'_>, - allowed_room_ids: &[OwnedRoomId], -) -> bool { + allowed_rooms: I, +) -> bool +where + I: Iterator + Send, +{ if let Identifier::ServerName(server_name) = identifier { // Checks if ACLs allow for the server to participate if self @@ -430,21 +435,18 @@ async fn is_accessible_child( } } - match join_rule { + match *join_rule { | SpaceRoomJoinRule::Public | SpaceRoomJoinRule::Knock | SpaceRoomJoinRule::KnockRestricted => true, | 
SpaceRoomJoinRule::Restricted => - allowed_room_ids - .iter() + allowed_rooms .stream() - .any(|room| async { - match identifier { - | Identifier::UserId(user) => - self.services.state_cache.is_joined(user, room).await, - | Identifier::ServerName(server) => - self.services.state_cache.server_in_room(server, room).await, - } + .any(async |room| match identifier { + | Identifier::UserId(user) => + self.services.state_cache.is_joined(user, room).await, + | Identifier::ServerName(server) => + self.services.state_cache.server_in_room(server, room).await, }) .await, diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 7fff5935..f719fc7b 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -9,7 +9,7 @@ use async_trait::async_trait; use conduwuit::{Result, err}; use database::Map; use ruma::{ - EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, OwnedRoomId, RoomId, UserId, + EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, RoomId, UserId, events::{ StateEventType, room::{ @@ -19,14 +19,13 @@ use ruma::{ encryption::RoomEncryptionEventContent, guest_access::{GuestAccess, RoomGuestAccessEventContent}, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent, RoomMembership}, + join_rules::{JoinRule, RoomJoinRulesEventContent}, member::RoomMemberEventContent, name::RoomNameEventContent, topic::RoomTopicEventContent, }, }, room::RoomType, - space::SpaceRoomJoinRule, }; use crate::{Dep, rooms}; @@ -129,42 +128,12 @@ impl Service { .map(|c: RoomTopicEventContent| c.topic) } - /// Returns the space join rule (`SpaceRoomJoinRule`) for a given room and - /// any allowed room IDs if available. Will default to Invite and empty vec - /// if doesnt exist or invalid, - pub async fn get_space_join_rule( - &self, - room_id: &RoomId, - ) -> (SpaceRoomJoinRule, Vec) { - self.room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") - .await - .map_or_else( - |_| (SpaceRoomJoinRule::Invite, vec![]), - |c: RoomJoinRulesEventContent| { - (c.join_rule.clone().into(), self.allowed_room_ids(c.join_rule)) - }, - ) - } - /// Returns the join rules for a given room (`JoinRule` type). 
Will default /// to Invite if doesnt exist or invalid pub async fn get_join_rules(&self, room_id: &RoomId) -> JoinRule { self.room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") .await - .map_or_else(|_| JoinRule::Invite, |c: RoomJoinRulesEventContent| (c.join_rule)) - } - - /// Returns an empty vec if not a restricted room - pub fn allowed_room_ids(&self, join_rule: JoinRule) -> Vec { - let mut room_ids = Vec::with_capacity(1); // restricted rooms generally only have 1 allowed room ID - if let JoinRule::Restricted(r) | JoinRule::KnockRestricted(r) = join_rule { - for rule in r.allow { - if let AllowRule::RoomMembership(RoomMembership { room_id: membership }) = rule { - room_ids.push(membership.clone()); - } - } - } - room_ids + .map_or(JoinRule::Invite, |c: RoomJoinRulesEventContent| c.join_rule) } pub async fn get_room_type(&self, room_id: &RoomId) -> Result { From 9678948daf76b64368a6865d359ab162de1c5855 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 5 Apr 2025 18:31:14 -0400 Subject: [PATCH 320/328] use patch of resolv-conf crate to allow no-aaaa resolv.conf option Signed-off-by: June Clementine Strawberry --- Cargo.lock | 3 +-- Cargo.toml | 9 ++++++++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c2c5182f..8817af1a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3625,8 +3625,7 @@ dependencies = [ [[package]] name = "resolv-conf" version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48375394603e3dd4b2d64371f7148fd8c7baa2680e28741f2cb8d23b59e3d4c4" +source = "git+https://github.com/girlbossceo/resolv-conf?rev=200e958941d522a70c5877e3d846f55b5586c68d#200e958941d522a70c5877e3d846f55b5586c68d" dependencies = [ "hostname", ] diff --git a/Cargo.toml b/Cargo.toml index b1c5acb5..62350dee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,7 @@ license = "Apache-2.0" # See also `rust-toolchain.toml` readme = "README.md" repository = "https://github.com/girlbossceo/conduwuit" -rust-version = "1.85.0" +rust-version = "1.86.0" version = "0.5.0" [workspace.metadata.crane] @@ -580,6 +580,13 @@ rev = "9c8e51510c35077df888ee72a36b4b05637147da" git = "https://github.com/girlbossceo/hyper-util" rev = "e4ae7628fe4fcdacef9788c4c8415317a4489941" +# allows no-aaaa option in resolv.conf +# bumps rust edition and toolchain to 1.86.0 and 2024 +# use sat_add on line number errors +[patch.crates-io.resolv-conf] +git = "https://github.com/girlbossceo/resolv-conf" +rev = "200e958941d522a70c5877e3d846f55b5586c68d" + # # Our crates # From 3cc92b32ec97667bbabfb44edc305a972a7d3437 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 5 Apr 2025 18:37:13 -0400 Subject: [PATCH 321/328] bump rust toolchain to 1.86.0 Signed-off-by: June Clementine Strawberry --- flake.nix | 2 +- rust-toolchain.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/flake.nix b/flake.nix index 9db2e90a..49e860ed 100644 --- a/flake.nix +++ b/flake.nix @@ -26,7 +26,7 @@ file = ./rust-toolchain.toml; # See also `rust-toolchain.toml` - sha256 = "sha256-AJ6LX/Q/Er9kS15bn9iflkUwcgYqRQxiOIL2ToVAXaU="; + sha256 = "sha256-X/4ZBHO3iW0fOenQ3foEvscgAPJYl2abspaBThDOukI="; }; mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: { diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 97b4a789..aadc8f99 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -9,7 +9,7 @@ # If you're having trouble making the relevant changes, bug a maintainer. 
[toolchain] -channel = "1.85.0" +channel = "1.86.0" profile = "minimal" components = [ # For rust-analyzer From 6578b83bce71e9a232ff8531e80ab7d6d12a731c Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 5 Apr 2025 20:09:22 -0400 Subject: [PATCH 322/328] parallelise IO of user searching, improve perf, raise max limit to 500 Signed-off-by: June Clementine Strawberry --- src/api/client/user_directory.rs | 121 ++++++++++++++----------------- 1 file changed, 55 insertions(+), 66 deletions(-) diff --git a/src/api/client/user_directory.rs b/src/api/client/user_directory.rs index 8f564eed..99b3bb67 100644 --- a/src/api/client/user_directory.rs +++ b/src/api/client/user_directory.rs @@ -1,16 +1,20 @@ use axum::extract::State; -use conduwuit::{Result, utils::TryFutureExtExt}; -use futures::{StreamExt, pin_mut}; +use conduwuit::{ + Result, + utils::{future::BoolExt, stream::BroadbandExt}, +}; +use futures::{FutureExt, StreamExt, pin_mut}; use ruma::{ - api::client::user_directory::search_users, - events::{ - StateEventType, - room::join_rules::{JoinRule, RoomJoinRulesEventContent}, - }, + api::client::user_directory::search_users::{self}, + events::room::join_rules::JoinRule, }; use crate::Ruma; +// conduwuit can handle a lot more results than synapse +const LIMIT_MAX: usize = 500; +const LIMIT_DEFAULT: usize = 10; + /// # `POST /_matrix/client/r0/user_directory/search` /// /// Searches all known users for a match. @@ -21,78 +25,63 @@ pub(crate) async fn search_users_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let limit = usize::try_from(body.limit).map_or(10, usize::from).min(100); // default limit is 10 + let sender_user = body.sender_user(); + let limit = usize::try_from(body.limit) + .map_or(LIMIT_DEFAULT, usize::from) + .min(LIMIT_MAX); - let users = services.users.stream().filter_map(|user_id| async { - // Filter out buggy users (they should not exist, but you never know...) - let user = search_users::v3::User { - user_id: user_id.to_owned(), - display_name: services.users.displayname(user_id).await.ok(), - avatar_url: services.users.avatar_url(user_id).await.ok(), - }; + let mut users = services + .users + .stream() + .map(ToOwned::to_owned) + .broad_filter_map(async |user_id| { + let user = search_users::v3::User { + user_id: user_id.clone(), + display_name: services.users.displayname(&user_id).await.ok(), + avatar_url: services.users.avatar_url(&user_id).await.ok(), + }; - let user_id_matches = user - .user_id - .to_string() - .to_lowercase() - .contains(&body.search_term.to_lowercase()); + let user_id_matches = user + .user_id + .as_str() + .to_lowercase() + .contains(&body.search_term.to_lowercase()); - let user_displayname_matches = user - .display_name - .as_ref() - .filter(|name| { + let user_displayname_matches = user.display_name.as_ref().is_some_and(|name| { name.to_lowercase() .contains(&body.search_term.to_lowercase()) - }) - .is_some(); + }); - if !user_id_matches && !user_displayname_matches { - return None; - } + if !user_id_matches && !user_displayname_matches { + return None; + } - // It's a matching user, but is the sender allowed to see them? 
- let mut user_visible = false; - - let user_is_in_public_rooms = services - .rooms - .state_cache - .rooms_joined(&user.user_id) - .any(|room| { - services - .rooms - .state_accessor - .room_state_get_content::( - room, - &StateEventType::RoomJoinRules, - "", - ) - .map_ok_or(false, |content| content.join_rule == JoinRule::Public) - }) - .await; - - if user_is_in_public_rooms { - user_visible = true; - } else { - let user_is_in_shared_rooms = services + let user_in_public_room = services .rooms .state_cache - .user_sees_user(sender_user, &user.user_id) - .await; + .rooms_joined(&user_id) + .map(ToOwned::to_owned) + .any(|room| async move { + services + .rooms + .state_accessor + .get_join_rules(&room) + .map(|rule| matches!(rule, JoinRule::Public)) + .await + }); - if user_is_in_shared_rooms { - user_visible = true; - } - } + let user_sees_user = services + .rooms + .state_cache + .user_sees_user(sender_user, &user_id); - user_visible.then_some(user) - }); + pin_mut!(user_in_public_room, user_sees_user); - pin_mut!(users); + user_in_public_room.or(user_sees_user).await.then_some(user) + }); - let limited = users.by_ref().next().await.is_some(); - - let results = users.take(limit).collect().await; + let results = users.by_ref().take(limit).collect().await; + let limited = users.next().await.is_some(); Ok(search_users::v3::Response { results, limited }) } From 5f8c68ab842d66ecda70726e2f9726824d51b815 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 6 Apr 2025 13:17:13 -0400 Subject: [PATCH 323/328] add trace logging for room summaries, use server_in_room instead of exists Signed-off-by: June Clementine Strawberry --- src/api/client/room/summary.rs | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/src/api/client/room/summary.rs b/src/api/client/room/summary.rs index 2fa81bd2..67d2e2ad 100644 --- a/src/api/client/room/summary.rs +++ b/src/api/client/room/summary.rs @@ -1,7 +1,7 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - Err, Result, debug_warn, + Err, Result, debug_warn, trace, utils::{IterStream, future::TryExtExt}, }; use futures::{ @@ -74,7 +74,12 @@ async fn room_summary_response( servers: &[OwnedServerName], sender_user: Option<&UserId>, ) -> Result { - if services.rooms.metadata.exists(room_id).await { + if services + .rooms + .state_cache + .server_in_room(services.globals.server_name(), room_id) + .await + { return local_room_summary_response(services, room_id, sender_user) .boxed() .await; @@ -106,14 +111,14 @@ async fn local_room_summary_response( room_id: &RoomId, sender_user: Option<&UserId>, ) -> Result { + trace!(?sender_user, "Sending local room summary response for {room_id:?}"); let join_rule = services.rooms.state_accessor.get_join_rules(room_id); - let world_readable = services.rooms.state_accessor.is_world_readable(room_id); - let guest_can_join = services.rooms.state_accessor.guest_can_join(room_id); let (join_rule, world_readable, guest_can_join) = join3(join_rule, world_readable, guest_can_join).await; + trace!("{join_rule:?}, {world_readable:?}, {guest_can_join:?}"); user_can_see_summary( services, @@ -215,6 +220,7 @@ async fn remote_room_summary_hierarchy_response( servers: &[OwnedServerName], sender_user: Option<&UserId>, ) -> Result { + trace!(?sender_user, ?servers, "Sending remote room summary response for {room_id:?}"); if !services.config.allow_federation { return Err!(Request(Forbidden("Federation is disabled."))); } @@ -237,6 +243,7 @@ async fn 
remote_room_summary_hierarchy_response( .collect(); while let Some(Ok(response)) = requests.next().await { + trace!("{response:?}"); let room = response.room.clone(); if room.room_id != room_id { debug_warn!( @@ -278,6 +285,7 @@ async fn user_can_see_summary<'a, I>( where I: Iterator + Send, { + let is_public_room = matches!(join_rule, Public | Knock | KnockRestricted); match sender_user { | Some(sender_user) => { let user_can_see_state_events = services @@ -296,7 +304,7 @@ where if user_can_see_state_events || (is_guest && guest_can_join) - || matches!(&join_rule, &Public | &Knock | &KnockRestricted) + || is_public_room || user_in_allowed_restricted_room { return Ok(()); @@ -309,7 +317,7 @@ where ))) }, | None => { - if matches!(join_rule, Public | Knock | KnockRestricted) || world_readable { + if is_public_room || world_readable { return Ok(()); } From ff276a42a36cfe565ff541ce064db25bbb1946c8 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 6 Apr 2025 13:19:09 -0400 Subject: [PATCH 324/328] drop unnecessary info log to debug Signed-off-by: June Clementine Strawberry --- src/api/client/keys.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index adbdd715..650c573f 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -1,7 +1,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use axum::extract::State; -use conduwuit::{Err, Error, Result, debug, debug_warn, err, info, result::NotFound, utils}; +use conduwuit::{Err, Error, Result, debug, debug_warn, err, result::NotFound, utils}; use conduwuit_service::{Services, users::parse_master_key}; use futures::{StreamExt, stream::FuturesUnordered}; use ruma::{ @@ -177,7 +177,7 @@ pub(crate) async fn upload_signing_keys_route( body.master_key.as_ref(), ) .await - .inspect_err(|e| info!(?e)) + .inspect_err(|e| debug!(?e)) { | Ok(exists) => { if let Some(result) = exists { From d5ad973464168c567c3f9615380ced9e0067da4f Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 6 Apr 2025 15:25:11 -0400 Subject: [PATCH 325/328] change forbidden_server_names and etc to allow regex patterns for wildcards Signed-off-by: June Clementine Strawberry --- conduwuit-example.toml | 27 ++++++++++------ src/api/client/directory.rs | 14 ++++++--- src/api/client/membership.rs | 6 ++-- src/api/client/message.rs | 3 +- src/api/router/auth.rs | 3 +- src/api/server/invite.rs | 6 ++-- src/api/server/make_join.rs | 6 ++-- src/api/server/make_knock.rs | 6 ++-- src/api/server/send_join.rs | 12 +++----- src/api/server/send_knock.rs | 6 ++-- src/core/config/mod.rs | 51 +++++++++++++++++-------------- src/service/federation/execute.rs | 2 +- src/service/media/remote.rs | 8 ++++- 13 files changed, 79 insertions(+), 71 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 46459547..118bc57d 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -594,7 +594,7 @@ # Currently, conduwuit doesn't support inbound batched key requests, so # this list should only contain other Synapse servers. # -# example: ["matrix.org", "envs.net", "tchncs.de"] +# example: ["matrix.org", "tchncs.de"] # #trusted_servers = ["matrix.org"] @@ -1186,13 +1186,16 @@ # #prune_missing_media = false -# Vector list of servers that conduwuit will refuse to download remote -# media from. +# Vector list of regex patterns of server names that conduwuit will refuse +# to download remote media from. 
+# +# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] # #prevent_media_downloads_from = [] -# List of forbidden server names that we will block incoming AND outgoing -# federation with, and block client room joins / remote user invites. +# List of forbidden server names via regex patterns that we will block +# incoming AND outgoing federation with, and block client room joins / +# remote user invites. # # This check is applied on the room ID, room alias, sender server name, # sender user's server name, inbound federation X-Matrix origin, and @@ -1200,11 +1203,15 @@ # # Basically "global" ACLs. # +# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] +# #forbidden_remote_server_names = [] -# List of forbidden server names that we will block all outgoing federated -# room directory requests for. Useful for preventing our users from -# wandering into bad servers or spaces. +# List of forbidden server names via regex patterns that we will block all +# outgoing federated room directory requests for. Useful for preventing +# our users from wandering into bad servers or spaces. +# +# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] # #forbidden_remote_room_directory_server_names = [] @@ -1315,7 +1322,7 @@ # used, and startup as warnings if any room aliases in your database have # a forbidden room alias/ID. # -# example: ["19dollarfortnitecards", "b[4a]droom"] +# example: ["19dollarfortnitecards", "b[4a]droom", "badphrase"] # #forbidden_alias_names = [] @@ -1328,7 +1335,7 @@ # startup as warnings if any local users in your database have a forbidden # username. # -# example: ["administrator", "b[a4]dusernam[3e]"] +# example: ["administrator", "b[a4]dusernam[3e]", "badphrase"] # #forbidden_usernames = [] diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 9ca35537..b44b9f64 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -52,10 +52,13 @@ pub(crate) async fn get_public_rooms_filtered_route( ) -> Result { if let Some(server) = &body.server { if services - .server .config .forbidden_remote_room_directory_server_names - .contains(server) + .is_match(server.host()) + || services + .config + .forbidden_remote_server_names + .is_match(server.host()) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } @@ -90,10 +93,13 @@ pub(crate) async fn get_public_rooms_route( ) -> Result { if let Some(server) = &body.server { if services - .server .config .forbidden_remote_room_directory_server_names - .contains(server) + .is_match(server.host()) + || services + .config + .forbidden_remote_server_names + .is_match(server.host()) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index d0345c8e..1eeacf83 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -79,10 +79,9 @@ async fn banned_room_check( if let Some(room_id) = room_id { if services.rooms.metadata.is_banned(room_id).await || services - .server .config .forbidden_remote_server_names - .contains(&room_id.server_name().unwrap().to_owned()) + .is_match(room_id.server_name().unwrap().host()) { warn!( "User {user_id} who is not an admin attempted to send an invite for or \ @@ -120,10 +119,9 @@ async fn banned_room_check( } } else if let Some(server_name) = server_name { if services - .server .config .forbidden_remote_server_names - .contains(&server_name.to_owned()) + .is_match(server_name.host()) { warn!( "User 
{user_id} who is not an admin tried joining a room which has the server \ diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 3e784a4a..db11ef4a 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -261,10 +261,9 @@ pub(crate) async fn is_ignored_pdu( let ignored_type = IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok(); let ignored_server = services - .server .config .forbidden_remote_server_names - .contains(pdu.sender().server_name()); + .is_match(pdu.sender().server_name().host()); if ignored_type && (ignored_server || services.users.user_is_ignored(&pdu.sender, user_id).await) diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 5cd7b831..0eb61ca6 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -317,10 +317,9 @@ fn auth_server_checks(services: &Services, x_matrix: &XMatrix) -> Result<()> { let origin = &x_matrix.origin; if services - .server .config .forbidden_remote_server_names - .contains(origin) + .is_match(origin.host()) { return Err!(Request(Forbidden(debug_warn!( "Federation requests from {origin} denied." diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index cda34fb5..edd6ac16 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -38,20 +38,18 @@ pub(crate) async fn create_invite_route( if let Some(server) = body.room_id.server_name() { if services - .server .config .forbidden_remote_server_names - .contains(&server.to_owned()) + .is_match(server.host()) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } } if services - .server .config .forbidden_remote_server_names - .contains(body.origin()) + .is_match(body.origin().host()) { warn!( "Received federated/remote invite from banned server {} for room ID {}. Rejecting.", diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index 4664b904..ac2c5485 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -42,10 +42,9 @@ pub(crate) async fn create_join_event_template_route( .await?; if services - .server .config .forbidden_remote_server_names - .contains(body.origin()) + .is_match(body.origin().host()) { warn!( "Server {} for remote user {} tried joining room ID {} which has a server name that \ @@ -59,10 +58,9 @@ pub(crate) async fn create_join_event_template_route( if let Some(server) = body.room_id.server_name() { if services - .server .config .forbidden_remote_server_names - .contains(&server.to_owned()) + .is_match(server.host()) { return Err!(Request(Forbidden(warn!( "Room ID server name {server} is banned on this homeserver." 
diff --git a/src/api/server/make_knock.rs b/src/api/server/make_knock.rs index 6d71ab2a..511c13b2 100644 --- a/src/api/server/make_knock.rs +++ b/src/api/server/make_knock.rs @@ -33,10 +33,9 @@ pub(crate) async fn create_knock_event_template_route( .await?; if services - .server .config .forbidden_remote_server_names - .contains(body.origin()) + .is_match(body.origin().host()) { warn!( "Server {} for remote user {} tried knocking room ID {} which has a server name \ @@ -50,10 +49,9 @@ pub(crate) async fn create_knock_event_template_route( if let Some(server) = body.room_id.server_name() { if services - .server .config .forbidden_remote_server_names - .contains(&server.to_owned()) + .is_match(server.host()) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index 2e2e89ee..a66d8890 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -268,10 +268,9 @@ pub(crate) async fn create_join_event_v1_route( body: Ruma, ) -> Result { if services - .server .config .forbidden_remote_server_names - .contains(body.origin()) + .is_match(body.origin().host()) { warn!( "Server {} tried joining room ID {} through us who has a server name that is \ @@ -284,10 +283,9 @@ pub(crate) async fn create_join_event_v1_route( if let Some(server) = body.room_id.server_name() { if services - .server .config .forbidden_remote_server_names - .contains(&server.to_owned()) + .is_match(server.host()) { warn!( "Server {} tried joining room ID {} through us which has a server name that is \ @@ -316,20 +314,18 @@ pub(crate) async fn create_join_event_v2_route( body: Ruma, ) -> Result { if services - .server .config .forbidden_remote_server_names - .contains(body.origin()) + .is_match(body.origin().host()) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } if let Some(server) = body.room_id.server_name() { if services - .server .config .forbidden_remote_server_names - .contains(&server.to_owned()) + .is_match(server.host()) { warn!( "Server {} tried joining room ID {} through us which has a server name that is \ diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index c5ab0306..ee7b6cba 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -26,10 +26,9 @@ pub(crate) async fn create_knock_event_v1_route( body: Ruma, ) -> Result { if services - .server .config .forbidden_remote_server_names - .contains(body.origin()) + .is_match(body.origin().host()) { warn!( "Server {} tried knocking room ID {} who has a server name that is globally \ @@ -42,10 +41,9 @@ pub(crate) async fn create_knock_event_v1_route( if let Some(server) = body.room_id.server_name() { if services - .server .config .forbidden_remote_server_names - .contains(&server.to_owned()) + .is_match(server.host()) { warn!( "Server {} tried knocking room ID {} which has a server name that is globally \ diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index bb509a0d..0ca6bbaf 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -3,7 +3,7 @@ pub mod manager; pub mod proxy; use std::{ - collections::{BTreeMap, BTreeSet, HashSet}, + collections::{BTreeMap, BTreeSet}, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, path::{Path, PathBuf}, }; @@ -715,7 +715,7 @@ pub struct Config { /// Currently, conduwuit doesn't support inbound batched key requests, so /// this list should only contain other Synapse servers. 
/// - /// example: ["matrix.org", "envs.net", "tchncs.de"] + /// example: ["matrix.org", "tchncs.de"] /// /// default: ["matrix.org"] #[serde(default = "default_trusted_servers")] @@ -1361,15 +1361,18 @@ pub struct Config { #[serde(default)] pub prune_missing_media: bool, - /// Vector list of servers that conduwuit will refuse to download remote - /// media from. + /// Vector list of regex patterns of server names that conduwuit will refuse + /// to download remote media from. + /// + /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] /// /// default: [] - #[serde(default)] - pub prevent_media_downloads_from: HashSet, + #[serde(default, with = "serde_regex")] + pub prevent_media_downloads_from: RegexSet, - /// List of forbidden server names that we will block incoming AND outgoing - /// federation with, and block client room joins / remote user invites. + /// List of forbidden server names via regex patterns that we will block + /// incoming AND outgoing federation with, and block client room joins / + /// remote user invites. /// /// This check is applied on the room ID, room alias, sender server name, /// sender user's server name, inbound federation X-Matrix origin, and @@ -1377,17 +1380,21 @@ pub struct Config { /// /// Basically "global" ACLs. /// - /// default: [] - #[serde(default)] - pub forbidden_remote_server_names: HashSet, - - /// List of forbidden server names that we will block all outgoing federated - /// room directory requests for. Useful for preventing our users from - /// wandering into bad servers or spaces. + /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] /// /// default: [] - #[serde(default = "HashSet::new")] - pub forbidden_remote_room_directory_server_names: HashSet, + #[serde(default, with = "serde_regex")] + pub forbidden_remote_server_names: RegexSet, + + /// List of forbidden server names via regex patterns that we will block all + /// outgoing federated room directory requests for. Useful for preventing + /// our users from wandering into bad servers or spaces. + /// + /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] + /// + /// default: [] + #[serde(default, with = "serde_regex")] + pub forbidden_remote_room_directory_server_names: RegexSet, /// Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you /// do not want conduwuit to send outbound requests to. Defaults to @@ -1508,11 +1515,10 @@ pub struct Config { /// used, and startup as warnings if any room aliases in your database have /// a forbidden room alias/ID. /// - /// example: ["19dollarfortnitecards", "b[4a]droom"] + /// example: ["19dollarfortnitecards", "b[4a]droom", "badphrase"] /// /// default: [] - #[serde(default)] - #[serde(with = "serde_regex")] + #[serde(default, with = "serde_regex")] pub forbidden_alias_names: RegexSet, /// List of forbidden username patterns/strings. @@ -1524,11 +1530,10 @@ pub struct Config { /// startup as warnings if any local users in your database have a forbidden /// username. 
/// - /// example: ["administrator", "b[a4]dusernam[3e]"] + /// example: ["administrator", "b[a4]dusernam[3e]", "badphrase"] /// /// default: [] - #[serde(default)] - #[serde(with = "serde_regex")] + #[serde(default, with = "serde_regex")] pub forbidden_usernames: RegexSet, /// Retry failed and incomplete messages to remote servers immediately upon diff --git a/src/service/federation/execute.rs b/src/service/federation/execute.rs index 63f2ccfb..97314ffb 100644 --- a/src/service/federation/execute.rs +++ b/src/service/federation/execute.rs @@ -69,7 +69,7 @@ where .server .config .forbidden_remote_server_names - .contains(dest) + .is_match(dest.host()) { return Err!(Request(Forbidden(debug_warn!("Federation with {dest} is not allowed.")))); } diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs index b6c853d2..cdcb429e 100644 --- a/src/service/media/remote.rs +++ b/src/service/media/remote.rs @@ -426,7 +426,13 @@ fn check_fetch_authorized(&self, mxc: &Mxc<'_>) -> Result<()> { .server .config .prevent_media_downloads_from - .contains(mxc.server_name) + .is_match(mxc.server_name.host()) + || self + .services + .server + .config + .forbidden_remote_server_names + .is_match(mxc.server_name.host()) { // we'll lie to the client and say the blocked server's media was not found and // log. the client has no way of telling anyways so this is a security bonus. From 99868b166173d7bd510a7f2dd3a1b1e415a99682 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 6 Apr 2025 15:30:01 -0400 Subject: [PATCH 326/328] update new complement flakes Signed-off-by: June Clementine Strawberry --- .../complement/test_results.jsonl | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index c0e28750..97c2e1b1 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -491,7 +491,7 @@ {"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself"} {"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel"} {"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Joining_room_twice_is_idempotent"} -{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.create_to_myself"} +{"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.create_to_myself"} {"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.member_to_myself"} {"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_room_topic_reports_m.room.topic_to_myself"} {"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_state_twice_is_idempotent"} @@ -527,17 +527,17 @@ {"Action":"pass","Test":"TestRoomMessagesLazyLoadingLocalUser"} {"Action":"pass","Test":"TestRoomReadMarkers"} {"Action":"pass","Test":"TestRoomReceipts"} -{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin"} -{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_mxid"} -{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_profile_display_name"} -{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_mxid"} -{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin"} 
+{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_mxid"} +{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_mxid"} +{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_profile_display_name"} {"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Eve_cannot_find_Alice_by_room-specific_name_that_Eve_is_not_privy_to"} -{"Action":"fail","Test":"TestRoomSpecificUsernameChange"} -{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_mxid"} -{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_profile_display_name"} -{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_mxid"} -{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_mxid"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_mxid"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_profile_display_name"} {"Action":"pass","Test":"TestRoomSpecificUsernameChange/Eve_cannot_find_Alice_by_room-specific_name_that_Eve_is_not_privy_to"} {"Action":"fail","Test":"TestRoomState"} {"Action":"fail","Test":"TestRoomState/Parallel"} @@ -589,7 +589,7 @@ {"Action":"fail","Test":"TestSync/parallel/Newly_joined_room_has_correct_timeline_in_incremental_sync"} {"Action":"fail","Test":"TestSync/parallel/Newly_joined_room_includes_presence_in_incremental_sync"} {"Action":"pass","Test":"TestSync/parallel/Newly_joined_room_is_included_in_an_incremental_sync"} -{"Action":"fail","Test":"TestSync/parallel/sync_should_succeed_even_if_the_sync_token_points_to_a_redaction_of_an_unknown_event"} +{"Action":"pass","Test":"TestSync/parallel/sync_should_succeed_even_if_the_sync_token_points_to_a_redaction_of_an_unknown_event"} {"Action":"pass","Test":"TestSyncFilter"} {"Action":"pass","Test":"TestSyncFilter/Can_create_filter"} {"Action":"pass","Test":"TestSyncFilter/Can_download_filter"} From 47f83454570a1d4338137708b4b042e8c49b7cb7 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 8 Apr 2025 09:05:49 -0400 Subject: [PATCH 327/328] bump tokio because of RUSTSEC-2025-0023 Signed-off-by: June Clementine Strawberry --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8817af1a..c724e31e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4758,9 +4758,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.44.1" +version = "1.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f382da615b842244d4b8738c82ed1275e6c5dd90c459a30941cd07080b06c91a" +checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" dependencies = [ "backtrace", "bytes", diff --git a/Cargo.toml b/Cargo.toml index 62350dee..f5ee3f0f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -242,7 +242,7 @@ default-features = false features = ["std", "async-await"] [workspace.dependencies.tokio] -version = "1.44.1" +version = "1.44.2" default-features = false features = [ "fs", From d8311a5ff672fdc4729d956af5e3af8646b0670d 
Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 8 Apr 2025 23:38:54 -0400 Subject: [PATCH 328/328] bump crossbeam-channel bc yanked crate with potential double free Signed-off-by: June Clementine Strawberry --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c724e31e..d81fdbc0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1119,9 +1119,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.14" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" dependencies = [ "crossbeam-utils", ]
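
The blocklist options changed above are deserialized through serde_regex, so each configured string is compiled into a single RegexSet and matched against the remote server's host rather than compared for exact equality. Below is a minimal standalone sketch of that pattern, assuming the regex, serde, serde_regex, and toml crates; the struct and field names are illustrative and not conduwuit's actual Config type.

use regex::RegexSet;
use serde::Deserialize;

#[derive(Deserialize)]
struct BlocklistConfig {
    // The serde default is an empty RegexSet, which matches nothing.
    #[serde(default, with = "serde_regex")]
    forbidden_remote_server_names: RegexSet,
}

fn main() {
    // Patterns are unanchored unless written with ^ or $, as in the examples above.
    let raw = r#"forbidden_remote_server_names = ["badserver\\.tld$", "badphrase"]"#;
    let config: BlocklistConfig = toml::from_str(raw).expect("config should parse");

    // is_match() returns true if any pattern in the set matches the given host.
    assert!(config.forbidden_remote_server_names.is_match("media.badserver.tld"));
    assert!(!config.forbidden_remote_server_names.is_match("matrix.org"));
}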
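
For remote media, the gate touched in src/service/media/remote.rs consults both prevent_media_downloads_from and forbidden_remote_server_names, and a match is reported to the client as missing media rather than as a policy error, so the blocklist cannot be probed. A hedged sketch of that flow follows; the function signature and the simplified error type are hypothetical stand-ins for conduwuit's real ones.

use regex::RegexSet;

/// Simplified stand-in for the server's error type.
#[derive(Debug)]
enum FetchError {
    NotFound,
}

/// Returns Ok(()) only when the origin host matches neither blocklist.
fn check_fetch_authorized(
    prevent_media_downloads_from: &RegexSet,
    forbidden_remote_server_names: &RegexSet,
    origin_host: &str,
) -> Result<(), FetchError> {
    if prevent_media_downloads_from.is_match(origin_host)
        || forbidden_remote_server_names.is_match(origin_host)
    {
        // Deliberately indistinguishable from genuinely missing media;
        // the real code also logs the block server-side.
        return Err(FetchError::NotFound);
    }
    Ok(())
}

fn main() {
    let prevent = RegexSet::new(["badserver\\.tld$"]).expect("valid patterns");
    let forbidden = RegexSet::new(["19dollarfortnitecards"]).expect("valid patterns");

    assert!(check_fetch_authorized(&prevent, &forbidden, "matrix.org").is_ok());
    assert!(check_fetch_authorized(&prevent, &forbidden, "cdn.badserver.tld").is_err());
}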