Compare commits

71 commits: renovate/o... vs. main

Commits (SHA1):
d8311a5ff6
47f8345457
99868b1661
d5ad973464
ff276a42a3
5f8c68ab84
6578b83bce
3cc92b32ec
9678948daf
500faa8d7f
d6cc447add
e28ae8fb4d
c7246662f4
a212bf7cfc
58b8c7516a
bb8320a691
532dfd004d
4e5b87d0cd
00f7745ec4
d036394ec7
6a073b4fa4
b7109131e2
94b107b42b
29d55b8036
45fd3875c8
f9529937ce
0b56204f89
58adb6fead
5d1404e9df
f14756fb76
24be579477
0e0b8cc403
1036f8dfa8
74012c5289
ea246d91d9
1b71b99c51
0f81c1e1cc
bee1f89624
5768ca8442
3f0f89cddb
d3b65af616
d60920c728
db99d3a001
bee4c6255a
dc6e9e74d9
5bf5afaec8
095734a8e7
a93cb34dd6
b03c493bf9
d0132706cd
0e2009dbf5
3e57b7d35d
75b6daa67f
6365f1a887
b2bf35cfab
7f448d88a4
c99f5770a0
dfe058a244
07ba00f74e
9d0ce3965e
d1b82ea225
23e3f6526f
8010505853
9ce95a7030
d8ea8b378c
17003ba773
a57336ec13
7294368015
aa4d2e2363
07ec9d6d85
33c5afe050
179 changed files with 3549 additions and 2751 deletions
24 .github/workflows/ci.yml (vendored)

@@ -21,16 +21,6 @@ concurrency:
cancel-in-progress: true

env:
# sccache only on main repo
SCCACHE_GHA_ENABLED: "${{ !startsWith(github.ref, 'refs/tags/') && (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'true' || 'false' }}"
RUSTC_WRAPPER: "${{ !startsWith(github.ref, 'refs/tags/') && (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'sccache' || '' }}"
SCCACHE_BUCKET: "${{ (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'sccache' || '' }}"
SCCACHE_S3_USE_SSL: ${{ vars.SCCACHE_S3_USE_SSL }}
SCCACHE_REGION: ${{ vars.SCCACHE_REGION }}
SCCACHE_ENDPOINT: ${{ vars.SCCACHE_ENDPOINT }}
SCCACHE_CACHE_MULTIARCH: ${{ vars.SCCACHE_CACHE_MULTIARCH }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
# Required to make some things output color
TERM: ansi
# Publishing to my nix binary cache

@@ -123,13 +113,6 @@ jobs:
bin/nix-build-and-cache just '.#devShells.x86_64-linux.all-features'
bin/nix-build-and-cache just '.#devShells.x86_64-linux.dynamic'

# use sccache for Rust
- name: Run sccache-cache
# we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting
# releases and tags
#if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }}
uses: mozilla-actions/sccache-action@main

# use rust-cache
- uses: Swatinem/rust-cache@v2
# we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting

@@ -247,13 +230,6 @@ jobs:
direnv allow
nix develop .#all-features --command true --impure

# use sccache for Rust
- name: Run sccache-cache
# we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting
# releases and tags
#if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }}
uses: mozilla-actions/sccache-action@main

# use rust-cache
- uses: Swatinem/rust-cache@v2
# we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting
1001 Cargo.lock (generated)
File diff suppressed because it is too large.
100 Cargo.toml

@@ -20,18 +20,18 @@ license = "Apache-2.0"
# See also `rust-toolchain.toml`
readme = "README.md"
repository = "https://github.com/girlbossceo/conduwuit"
rust-version = "1.85.0"
rust-version = "1.86.0"
version = "0.5.0"

[workspace.metadata.crane]
name = "conduwuit"

[workspace.dependencies.arrayvec]
version = "0.7.4"
version = "0.7.6"
features = ["serde"]

[workspace.dependencies.smallvec]
version = "1.13.2"
version = "1.14.0"
features = [
"const_generics",
"const_new",

@@ -45,7 +45,7 @@ version = "0.3"
features = ["ffi", "std", "union"]

[workspace.dependencies.const-str]
version = "0.5.7"
version = "0.6.2"

[workspace.dependencies.ctor]
version = "0.2.9"

@@ -81,13 +81,13 @@ version = "0.8.5"

# Used for the http request / response body type for Ruma endpoints used with reqwest
[workspace.dependencies.bytes]
version = "1.9.0"
version = "1.10.1"

[workspace.dependencies.http-body-util]
version = "0.1.2"
version = "0.1.3"

[workspace.dependencies.http]
version = "1.2.0"
version = "1.3.1"

[workspace.dependencies.regex]
version = "1.11.1"

@@ -111,7 +111,7 @@ default-features = false
features = ["typed-header", "tracing"]

[workspace.dependencies.axum-server]
version = "0.7.1"
version = "0.7.2"
default-features = false

# to listen on both HTTP and HTTPS if listening on TLS dierctly from conduwuit for complement or sytest

@@ -122,7 +122,7 @@ version = "0.7"
version = "0.6.1"

[workspace.dependencies.tower]
version = "0.5.1"
version = "0.5.2"
default-features = false
features = ["util"]

@@ -141,12 +141,12 @@ features = [
]

[workspace.dependencies.rustls]
version = "0.23.19"
version = "0.23.25"
default-features = false
features = ["aws_lc_rs"]

[workspace.dependencies.reqwest]
version = "0.12.9"
version = "0.12.15"
default-features = false
features = [
"rustls-tls-native-roots",

@@ -156,12 +156,12 @@ features = [
]

[workspace.dependencies.serde]
version = "1.0.216"
version = "1.0.219"
default-features = false
features = ["rc"]

[workspace.dependencies.serde_json]
version = "1.0.133"
version = "1.0.140"
default-features = false
features = ["raw_value"]

@@ -204,7 +204,7 @@ features = [

# logging
[workspace.dependencies.log]
version = "0.4.22"
version = "0.4.27"
default-features = false
[workspace.dependencies.tracing]
version = "0.1.41"

@@ -224,7 +224,7 @@ default-features = false

# used for conduwuit's CLI and admin room command parsing
[workspace.dependencies.clap]
version = "4.5.23"
version = "4.5.35"
default-features = false
features = [
"derive",

@@ -237,12 +237,12 @@ features = [
]

[workspace.dependencies.futures]
version = "0.3.30"
version = "0.3.31"
default-features = false
features = ["std", "async-await"]

[workspace.dependencies.tokio]
version = "1.42.0"
version = "1.44.2"
default-features = false
features = [
"fs",

@@ -275,7 +275,7 @@ features = ["alloc", "std"]
default-features = false

[workspace.dependencies.hyper]
version = "1.5.1"
version = "1.6.0"
default-features = false
features = [
"server",

@@ -284,8 +284,7 @@ features = [
]

[workspace.dependencies.hyper-util]
# hyper-util >=0.1.9 seems to have DNS issues
version = "=0.1.8"
version = "0.1.11"
default-features = false
features = [
"server-auto",

@@ -295,7 +294,7 @@ features = [

# to support multiple variations of setting a config option
[workspace.dependencies.either]
version = "1.13.0"
version = "1.15.0"
default-features = false
features = ["serde"]

@@ -306,22 +305,27 @@ default-features = false
features = ["env", "toml"]

[workspace.dependencies.hickory-resolver]
version = "0.24.2"
version = "0.25.1"
default-features = false
features = [
"serde",
"system-config",
"tokio",
]

# Used for conduwuit::Error type
[workspace.dependencies.thiserror]
version = "2.0.7"
version = "2.0.12"
default-features = false

# Used when hashing the state
[workspace.dependencies.ring]
version = "0.17.8"
version = "0.17.14"
default-features = false

# Used to make working with iterators easier, was already a transitive depdendency
[workspace.dependencies.itertools]
version = "0.13.0"
version = "0.14.0"

# to parse user-friendly time durations in admin commands
#TODO: overlaps chrono?

@@ -337,7 +341,7 @@ version = "0.4.0"
version = "2.3.1"

[workspace.dependencies.async-trait]
version = "0.1.83"
version = "0.1.88"

[workspace.dependencies.lru-cache]
version = "0.1.2"

@@ -346,7 +350,7 @@ version = "0.1.2"
[workspace.dependencies.ruma]
git = "https://github.com/girlbossceo/ruwuma"
#branch = "conduwuit-changes"
rev = "d197318a2507d38ffe6ee524d0d52728ca72538a"
rev = "920148dca1076454ca0ca5d43b5ce1aa708381d4"
features = [
"compat",
"rand",

@@ -405,7 +409,7 @@ default-features = false

# optional opentelemetry, performance measurements, flamegraphs, etc for performance measurements and monitoring
[workspace.dependencies.opentelemetry]
version = "0.29.0"
version = "0.21.0"

[workspace.dependencies.tracing-flame]
version = "0.2.0"

@@ -414,16 +418,16 @@ version = "0.2.0"
version = "0.22.0"

[workspace.dependencies.opentelemetry_sdk]
version = "0.29.0"
version = "0.21.2"
features = ["rt-tokio"]

[workspace.dependencies.opentelemetry-jaeger]
version = "0.22.0"
version = "0.20.0"
features = ["rt-tokio"]

# optional sentry metrics for crash/panic reporting
[workspace.dependencies.sentry]
version = "0.35.0"
version = "0.37.0"
default-features = false
features = [
"backtrace",

@@ -439,9 +443,9 @@ features = [
]

[workspace.dependencies.sentry-tracing]
version = "0.35.0"
version = "0.37.0"
[workspace.dependencies.sentry-tower]
version = "0.35.0"
version = "0.37.0"

# jemalloc usage
[workspace.dependencies.tikv-jemalloc-sys]

@@ -475,7 +479,7 @@ default-features = false
features = ["resource"]

[workspace.dependencies.sd-notify]
version = "0.4.3"
version = "0.4.5"
default-features = false

[workspace.dependencies.hardened_malloc-rs]

@@ -492,25 +496,25 @@ version = "0.4.3"
default-features = false

[workspace.dependencies.termimad]
version = "0.31.1"
version = "0.31.2"
default-features = false

[workspace.dependencies.checked_ops]
version = "0.1"

[workspace.dependencies.syn]
version = "2.0.90"
version = "2.0"
default-features = false
features = ["full", "extra-traits"]

[workspace.dependencies.quote]
version = "1.0.37"
version = "1.0"

[workspace.dependencies.proc-macro2]
version = "1.0.89"
version = "1.0"

[workspace.dependencies.bytesize]
version = "1.3.2"
version = "2.0"

[workspace.dependencies.core_affinity]
version = "0.8.1"

@@ -522,11 +526,11 @@ version = "0.2"
version = "0.2"

[workspace.dependencies.minicbor]
version = "0.25.1"
version = "0.26.3"
features = ["std"]

[workspace.dependencies.minicbor-serde]
version = "0.3.2"
version = "0.4.1"
features = ["std"]

[workspace.dependencies.maplit]

@@ -566,10 +570,23 @@ rev = "fe4aebeeaae435af60087ddd56b573a2e0be671d"
git = "https://github.com/girlbossceo/async-channel"
rev = "92e5e74063bf2a3b10414bcc8a0d68b235644280"

# adds affinity masks for selecting more than one core at a time
[patch.crates-io.core_affinity]
git = "https://github.com/girlbossceo/core_affinity_rs"
rev = "9c8e51510c35077df888ee72a36b4b05637147da"

# reverts hyperium#148 conflicting with our delicate federation resolver hooks
[patch.crates-io.hyper-util]
git = "https://github.com/girlbossceo/hyper-util"
rev = "e4ae7628fe4fcdacef9788c4c8415317a4489941"

# allows no-aaaa option in resolv.conf
# bumps rust edition and toolchain to 1.86.0 and 2024
# use sat_add on line number errors
[patch.crates-io.resolv-conf]
git = "https://github.com/girlbossceo/resolv-conf"
rev = "200e958941d522a70c5877e3d846f55b5586c68d"

#
# Our crates
#

@@ -892,6 +909,7 @@ needless_continue = { level = "allow", priority = 1 }
no_effect_underscore_binding = { level = "allow", priority = 1 }
similar_names = { level = "allow", priority = 1 }
single_match_else = { level = "allow", priority = 1 }
struct_excessive_bools = { level = "allow", priority = 1 }
struct_field_names = { level = "allow", priority = 1 }
unnecessary_wraps = { level = "allow", priority = 1 }
unused_async = { level = "allow", priority = 1 }
@@ -195,14 +195,6 @@
#
#servernameevent_data_cache_capacity = varies by system

# This item is undocumented. Please contribute documentation for it.
#
#server_visibility_cache_capacity = varies by system

# This item is undocumented. Please contribute documentation for it.
#
#user_visibility_cache_capacity = varies by system

# This item is undocumented. Please contribute documentation for it.
#
#stateinfo_cache_capacity = varies by system

@@ -535,9 +527,9 @@

# Default room version conduwuit will create rooms with.
#
# Per spec, room version 10 is the default.
# Per spec, room version 11 is the default.
#
#default_room_version = 10
#default_room_version = 11

# This item is undocumented. Please contribute documentation for it.
#

@@ -602,7 +594,7 @@
# Currently, conduwuit doesn't support inbound batched key requests, so
# this list should only contain other Synapse servers.
#
# example: ["matrix.org", "envs.net", "tchncs.de"]
# example: ["matrix.org", "tchncs.de"]
#
#trusted_servers = ["matrix.org"]

@@ -1194,13 +1186,16 @@
#
#prune_missing_media = false

# Vector list of servers that conduwuit will refuse to download remote
# media from.
# Vector list of regex patterns of server names that conduwuit will refuse
# to download remote media from.
#
# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"]
#
#prevent_media_downloads_from = []

# List of forbidden server names that we will block incoming AND outgoing
# federation with, and block client room joins / remote user invites.
# List of forbidden server names via regex patterns that we will block
# incoming AND outgoing federation with, and block client room joins /
# remote user invites.
#
# This check is applied on the room ID, room alias, sender server name,
# sender user's server name, inbound federation X-Matrix origin, and

@@ -1208,11 +1203,15 @@
#
# Basically "global" ACLs.
#
# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"]
#
#forbidden_remote_server_names = []

# List of forbidden server names that we will block all outgoing federated
# room directory requests for. Useful for preventing our users from
# wandering into bad servers or spaces.
# List of forbidden server names via regex patterns that we will block all
# outgoing federated room directory requests for. Useful for preventing
# our users from wandering into bad servers or spaces.
#
# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"]
#
#forbidden_remote_room_directory_server_names = []

@@ -1323,7 +1322,7 @@
# used, and startup as warnings if any room aliases in your database have
# a forbidden room alias/ID.
#
# example: ["19dollarfortnitecards", "b[4a]droom"]
# example: ["19dollarfortnitecards", "b[4a]droom", "badphrase"]
#
#forbidden_alias_names = []

@@ -1336,7 +1335,7 @@
# startup as warnings if any local users in your database have a forbidden
# username.
#
# example: ["administrator", "b[a4]dusernam[3e]"]
# example: ["administrator", "b[a4]dusernam[3e]", "badphrase"]
#
#forbidden_usernames = []
@@ -26,7 +26,7 @@
file = ./rust-toolchain.toml;

# See also `rust-toolchain.toml`
sha256 = "sha256-AJ6LX/Q/Er9kS15bn9iflkUwcgYqRQxiOIL2ToVAXaU=";
sha256 = "sha256-X/4ZBHO3iW0fOenQ3foEvscgAPJYl2abspaBThDOukI=";
};

mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: {
@@ -9,7 +9,7 @@
# If you're having trouble making the relevant changes, bug a maintainer.

[toolchain]
channel = "1.85.0"
channel = "1.86.0"
profile = "minimal"
components = [
# For rust-analyzer
@@ -6,7 +6,9 @@ use std::{
};

use conduwuit::{
Error, PduEvent, PduId, RawPduId, Result, debug_error, err, info, trace, utils,
Error, Result, debug_error, err, info,
matrix::pdu::{PduEvent, PduId, RawPduId},
trace, utils,
utils::{
stream::{IterStream, ReadyExt},
string::EMPTY,
@@ -1,15 +1,16 @@
use std::{borrow::Cow, collections::BTreeMap, ops::Deref};
use std::{borrow::Cow, collections::BTreeMap, ops::Deref, sync::Arc};

use clap::Subcommand;
use conduwuit::{
Err, Result, apply, at, is_zero,
utils::{
IterStream,
stream::{ReadyExt, TryIgnore, TryParallelExt},
stream::{IterStream, ReadyExt, TryIgnore, TryParallelExt},
string::EMPTY,
},
};
use futures::{FutureExt, StreamExt, TryStreamExt};
use conduwuit_database::Map;
use conduwuit_service::Services;
use futures::{FutureExt, Stream, StreamExt, TryStreamExt};
use ruma::events::room::message::RoomMessageEventContent;
use tokio::time::Instant;

@@ -172,22 +173,18 @@ pub(super) async fn compact(
) -> Result<RoomMessageEventContent> {
use conduwuit_database::compact::Options;

let default_all_maps = map
.is_none()
.then(|| {
let default_all_maps: Option<_> = map.is_none().then(|| {
self.services
.db
.keys()
.map(Deref::deref)
.map(ToOwned::to_owned)
})
.into_iter()
.flatten();
});

let maps: Vec<_> = map
.unwrap_or_default()
.into_iter()
.chain(default_all_maps)
.chain(default_all_maps.into_iter().flatten())
.map(|map| self.services.db.get(&map))
.filter_map(Result::ok)
.cloned()

@@ -237,25 +234,8 @@ pub(super) async fn raw_count(
) -> Result<RoomMessageEventContent> {
let prefix = prefix.as_deref().unwrap_or(EMPTY);

let default_all_maps = map
.is_none()
.then(|| self.services.db.keys().map(Deref::deref))
.into_iter()
.flatten();

let maps: Vec<_> = map
.iter()
.map(String::as_str)
.chain(default_all_maps)
.map(|map| self.services.db.get(map))
.filter_map(Result::ok)
.cloned()
.collect();

let timer = Instant::now();
let count = maps
.iter()
.stream()
let count = with_maps_or(map.as_deref(), self.services)
.then(|map| map.raw_count_prefix(&prefix))
.ready_fold(0_usize, usize::saturating_add)
.await;

@@ -300,25 +280,8 @@ pub(super) async fn raw_keys_sizes(
) -> Result<RoomMessageEventContent> {
let prefix = prefix.as_deref().unwrap_or(EMPTY);

let default_all_maps = map
.is_none()
.then(|| self.services.db.keys().map(Deref::deref))
.into_iter()
.flatten();

let maps: Vec<_> = map
.iter()
.map(String::as_str)
.chain(default_all_maps)
.map(|map| self.services.db.get(map))
.filter_map(Result::ok)
.cloned()
.collect();

let timer = Instant::now();
let result = maps
.iter()
.stream()
let result = with_maps_or(map.as_deref(), self.services)
.map(|map| map.raw_keys_prefix(&prefix))
.flatten()
.ignore_err()

@@ -345,25 +308,8 @@ pub(super) async fn raw_keys_total(
) -> Result<RoomMessageEventContent> {
let prefix = prefix.as_deref().unwrap_or(EMPTY);

let default_all_maps = map
.is_none()
.then(|| self.services.db.keys().map(Deref::deref))
.into_iter()
.flatten();

let maps: Vec<_> = map
.iter()
.map(String::as_str)
.chain(default_all_maps)
.map(|map| self.services.db.get(map))
.filter_map(Result::ok)
.cloned()
.collect();

let timer = Instant::now();
let result = maps
.iter()
.stream()
let result = with_maps_or(map.as_deref(), self.services)
.map(|map| map.raw_keys_prefix(&prefix))
.flatten()
.ignore_err()

@@ -387,25 +333,8 @@ pub(super) async fn raw_vals_sizes(
) -> Result<RoomMessageEventContent> {
let prefix = prefix.as_deref().unwrap_or(EMPTY);

let default_all_maps = map
.is_none()
.then(|| self.services.db.keys().map(Deref::deref))
.into_iter()
.flatten();

let maps: Vec<_> = map
.iter()
.map(String::as_str)
.chain(default_all_maps)
.map(|map| self.services.db.get(map))
.filter_map(Result::ok)
.cloned()
.collect();

let timer = Instant::now();
let result = maps
.iter()
.stream()
let result = with_maps_or(map.as_deref(), self.services)
.map(|map| map.raw_stream_prefix(&prefix))
.flatten()
.ignore_err()

@@ -433,25 +362,8 @@ pub(super) async fn raw_vals_total(
) -> Result<RoomMessageEventContent> {
let prefix = prefix.as_deref().unwrap_or(EMPTY);

let default_all_maps = map
.is_none()
.then(|| self.services.db.keys().map(Deref::deref))
.into_iter()
.flatten();

let maps: Vec<_> = map
.iter()
.map(String::as_str)
.chain(default_all_maps)
.map(|map| self.services.db.get(map))
.filter_map(Result::ok)
.cloned()
.collect();

let timer = Instant::now();
let result = maps
.iter()
.stream()
let result = with_maps_or(map.as_deref(), self.services)
.map(|map| map.raw_stream_prefix(&prefix))
.flatten()
.ignore_err()

@@ -573,3 +485,20 @@ pub(super) async fn raw_maps(&self) -> Result<RoomMessageEventContent> {

Ok(RoomMessageEventContent::notice_markdown(format!("{list:#?}")))
}

fn with_maps_or<'a>(
map: Option<&'a str>,
services: &'a Services,
) -> impl Stream<Item = &'a Arc<Map>> + Send + 'a {
let default_all_maps = map
.is_none()
.then(|| services.db.keys().map(Deref::deref))
.into_iter()
.flatten();

map.into_iter()
.chain(default_all_maps)
.map(|map| services.db.get(map))
.filter_map(Result::ok)
.stream()
}
@@ -2,7 +2,8 @@ use std::{collections::BTreeMap, fmt::Write as _};

use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room};
use conduwuit::{
PduBuilder, Result, debug, debug_warn, error, info, is_equal_to,
Result, debug, debug_warn, error, info, is_equal_to,
matrix::pdu::PduBuilder,
utils::{self, ReadyExt},
warn,
};
@@ -35,6 +35,7 @@ brotli_compression = [
]

[dependencies]
async-trait.workspace = true
axum-client-ip.workspace = true
axum-extra.workspace = true
axum.workspace = true
@@ -3,9 +3,13 @@ use std::fmt::Write;
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{
Err, Error, PduBuilder, Result, debug_info, err, error, info, is_equal_to, utils,
utils::ReadyExt, warn,
Err, Error, Result, debug_info, err, error, info, is_equal_to,
matrix::pdu::PduBuilder,
utils,
utils::{ReadyExt, stream::BroadbandExt},
warn,
};
use conduwuit_service::Services;
use futures::{FutureExt, StreamExt};
use register::RegistrationKind;
use ruma::{

@@ -29,7 +33,6 @@ use ruma::{
},
push,
};
use service::Services;

use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH, join_room_by_id_helper};
use crate::Ruma;

@@ -145,7 +148,7 @@ pub(crate) async fn register_route(
let is_guest = body.kind == RegistrationKind::Guest;
let emergency_mode_enabled = services.config.emergency_password.is_some();

if !services.globals.allow_registration() && body.appservice_info.is_none() {
if !services.config.allow_registration && body.appservice_info.is_none() {
match (body.username.as_ref(), body.initial_device_display_name.as_ref()) {
| (Some(username), Some(device_display_name)) => {
info!(%is_guest, user = %username, device_name = %device_display_name, "Rejecting registration attempt as registration is disabled");

@@ -165,8 +168,8 @@ pub(crate) async fn register_route(
}

if is_guest
&& (!services.globals.allow_guest_registration()
|| (services.globals.allow_registration()
&& (!services.config.allow_guest_registration
|| (services.config.allow_registration
&& services.globals.registration_token.is_some()))
{
info!(

@@ -317,14 +320,14 @@ pub(crate) async fn register_route(
// Success!
},
| _ => match body.json_body {
| Some(json) => {
| Some(ref json) => {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
services.uiaa.create(
&UserId::parse_with_server_name("", services.globals.server_name())
.unwrap(),
"".into(),
&uiaainfo,
&json,
json,
);
return Err(Error::Uiaa(uiaainfo));
},

@@ -372,8 +375,12 @@ pub(crate) async fn register_route(
)
.await?;

// Inhibit login does not work for guests
if !is_guest && body.inhibit_login {
if (!is_guest && body.inhibit_login)
|| body
.appservice_info
.as_ref()
.is_some_and(|appservice| appservice.registration.device_management)
{
return Ok(register::v3::Response {
access_token: None,
user_id,

@@ -440,7 +447,7 @@ pub(crate) async fn register_route(
}

// log in conduit admin channel if a guest registered
if body.appservice_info.is_none() && is_guest && services.globals.log_guest_registrations() {
if body.appservice_info.is_none() && is_guest && services.config.log_guest_registrations {
debug_info!("New guest user \"{user_id}\" registered on this server.");

if !device_display_name.is_empty() {

@@ -489,7 +496,7 @@ pub(crate) async fn register_route(

if body.appservice_info.is_none()
&& !services.server.config.auto_join_rooms.is_empty()
&& (services.globals.allow_guests_auto_join_rooms() || !is_guest)
&& (services.config.allow_guests_auto_join_rooms || !is_guest)
{
for room in &services.server.config.auto_join_rooms {
let Ok(room_id) = services.rooms.alias.resolve(room).await else {

@@ -627,6 +634,26 @@ pub(crate) async fn change_password_route(
.ready_filter(|id| *id != sender_device)
.for_each(|id| services.users.remove_device(sender_user, id))
.await;

// Remove all pushers except the ones associated with this session
services
.pusher
.get_pushkeys(sender_user)
.map(ToOwned::to_owned)
.broad_filter_map(|pushkey| async move {
services
.pusher
.get_pusher_device(&pushkey)
.await
.ok()
.filter(|pusher_device| pusher_device != sender_device)
.is_some()
.then_some(pushkey)
})
.for_each(|pushkey| async move {
services.pusher.delete_pusher(sender_user, &pushkey).await;
})
.await;
}

info!("User {sender_user} changed their password.");
@@ -1,5 +1,6 @@
use axum::extract::State;
use conduwuit::{Err, err};
use conduwuit::{Err, Result, err};
use conduwuit_service::Services;
use ruma::{
RoomId, UserId,
api::client::config::{

@@ -15,7 +16,7 @@ use ruma::{
use serde::Deserialize;
use serde_json::{json, value::RawValue as RawJsonValue};

use crate::{Result, Ruma, service::Services};
use crate::Ruma;

/// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}`
///
@@ -1,12 +1,12 @@
use axum::extract::State;
use conduwuit::{Err, Result, debug};
use conduwuit_service::Services;
use futures::StreamExt;
use rand::seq::SliceRandom;
use ruma::{
OwnedServerName, RoomAliasId, RoomId,
api::client::alias::{create_alias, delete_alias, get_alias},
};
use service::Services;

use crate::Ruma;
@@ -22,7 +22,13 @@ pub(crate) async fn appservice_ping(
)));
}

if appservice_info.registration.url.is_none() {
if appservice_info.registration.url.is_none()
|| appservice_info
.registration
.url
.as_ref()
.is_some_and(|url| url.is_empty() || url == "null")
{
return Err!(Request(UrlNotSet(
"Appservice does not have a URL set, there is nothing to ping."
)));
@@ -1,7 +1,7 @@
use std::cmp::Ordering;

use axum::extract::State;
use conduwuit::{Err, err};
use conduwuit::{Err, Result, err};
use ruma::{
UInt,
api::client::backup::{

@@ -13,7 +13,7 @@ use ruma::{
},
};

use crate::{Result, Ruma};
use crate::Ruma;

/// # `POST /_matrix/client/r0/room_keys/version`
///
@@ -1,18 +1,20 @@
use axum::extract::State;
use conduwuit::{
Err, PduEvent, Result, at, debug_warn, err, ref_at,
Err, Result, at, debug_warn, err,
matrix::pdu::PduEvent,
ref_at,
utils::{
IterStream,
future::TryExtExt,
stream::{BroadbandExt, ReadyExt, TryIgnore, WidebandExt},
},
};
use conduwuit_service::rooms::{lazy_loading, lazy_loading::Options, short::ShortStateKey};
use futures::{
FutureExt, StreamExt, TryFutureExt, TryStreamExt,
future::{OptionFuture, join, join3, try_join3},
};
use ruma::{OwnedEventId, UserId, api::client::context::get_context, events::StateEventType};
use service::rooms::{lazy_loading, lazy_loading::Options, short::ShortStateKey};

use crate::{
Ruma,

@@ -105,7 +107,7 @@ pub(crate) async fn get_context_route(
.collect();

let (base_event, events_before, events_after): (_, Vec<_>, Vec<_>) =
join3(base_event, events_before, events_after).await;
join3(base_event, events_before, events_after).boxed().await;

let lazy_loading_context = lazy_loading::Context {
user_id: sender_user,

@@ -182,7 +184,7 @@ pub(crate) async fn get_context_route(
.await;

Ok(get_context::v3::Response {
event: base_event.map(at!(1)).as_ref().map(PduEvent::to_room_event),
event: base_event.map(at!(1)).map(PduEvent::into_room_event),

start: events_before
.last()

@@ -201,13 +203,13 @@ pub(crate) async fn get_context_route(
events_before: events_before
.into_iter()
.map(at!(1))
.map(|pdu| pdu.to_room_event())
.map(PduEvent::into_room_event)
.collect(),

events_after: events_after
.into_iter()
.map(at!(1))
.map(|pdu| pdu.to_room_event())
.map(PduEvent::into_room_event)
.collect(),

state,
@@ -1,9 +1,9 @@
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{Err, err};
use conduwuit::{Err, Error, Result, debug, err, utils};
use futures::StreamExt;
use ruma::{
MilliSecondsSinceUnixEpoch,
MilliSecondsSinceUnixEpoch, OwnedDeviceId,
api::client::{
device::{self, delete_device, delete_devices, get_device, get_devices, update_device},
error::ErrorKind,

@@ -12,7 +12,7 @@ use ruma::{
};

use super::SESSION_ID_LENGTH;
use crate::{Error, Result, Ruma, utils};
use crate::{Ruma, client::DEVICE_ID_LENGTH};

/// # `GET /_matrix/client/r0/devices`
///

@@ -59,14 +59,15 @@ pub(crate) async fn update_device_route(
InsecureClientIp(client): InsecureClientIp,
body: Ruma<update_device::v3::Request>,
) -> Result<update_device::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_user = body.sender_user();
let appservice = body.appservice_info.as_ref();

let mut device = services
match services
.users
.get_device_metadata(sender_user, &body.device_id)
.await
.map_err(|_| err!(Request(NotFound("Device not found."))))?;

{
| Ok(mut device) => {
device.display_name.clone_from(&body.display_name);
device.last_seen_ip.clone_from(&Some(client.to_string()));
device

@@ -79,6 +80,37 @@ pub(crate) async fn update_device_route(
.await?;

Ok(update_device::v3::Response {})
},
| Err(_) => {
let Some(appservice) = appservice else {
return Err!(Request(NotFound("Device not found.")));
};
if !appservice.registration.device_management {
return Err!(Request(NotFound("Device not found.")));
}

debug!(
"Creating new device for {sender_user} from appservice {} as MSC4190 is enabled \
and device ID does not exist",
appservice.registration.id
);

let device_id = OwnedDeviceId::from(utils::random_string(DEVICE_ID_LENGTH));

services
.users
.create_device(
sender_user,
&device_id,
&appservice.registration.as_token,
None,
Some(client.to_string()),
)
.await?;

return Ok(update_device::v3::Response {});
},
}
}

/// # `DELETE /_matrix/client/r0/devices/{deviceId}`

@@ -95,8 +127,21 @@ pub(crate) async fn delete_device_route(
State(services): State<crate::State>,
body: Ruma<delete_device::v3::Request>,
) -> Result<delete_device::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
let (sender_user, sender_device) = body.sender();
let appservice = body.appservice_info.as_ref();

if appservice.is_some_and(|appservice| appservice.registration.device_management) {
debug!(
"Skipping UIAA for {sender_user} as this is from an appservice and MSC4190 is \
enabled"
);
services
.users
.remove_device(sender_user, &body.device_id)
.await;

return Ok(delete_device::v3::Response {});
}

// UIAA
let mut uiaainfo = UiaaInfo {

@@ -120,11 +165,11 @@ pub(crate) async fn delete_device_route(
// Success!
},
| _ => match body.json_body {
| Some(json) => {
| Some(ref json) => {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
services
.uiaa
.create(sender_user, sender_device, &uiaainfo, &json);
.create(sender_user, sender_device, &uiaainfo, json);

return Err!(Uiaa(uiaainfo));
},

@@ -142,11 +187,12 @@ pub(crate) async fn delete_device_route(
Ok(delete_device::v3::Response {})
}

/// # `PUT /_matrix/client/r0/devices/{deviceId}`
/// # `POST /_matrix/client/v3/delete_devices`
///
/// Deletes the given device.
/// Deletes the given list of devices.
///
/// - Requires UIAA to verify user password
/// - Requires UIAA to verify user password unless from an appservice with
/// MSC4190 enabled.
///
/// For each device:
/// - Invalidates access token

@@ -158,8 +204,20 @@ pub(crate) async fn delete_devices_route(
State(services): State<crate::State>,
body: Ruma<delete_devices::v3::Request>,
) -> Result<delete_devices::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
let (sender_user, sender_device) = body.sender();
let appservice = body.appservice_info.as_ref();

if appservice.is_some_and(|appservice| appservice.registration.device_management) {
debug!(
"Skipping UIAA for {sender_user} as this is from an appservice and MSC4190 is \
enabled"
);
for device_id in &body.devices {
services.users.remove_device(sender_user, device_id).await;
}

return Ok(delete_devices::v3::Response {});
}

// UIAA
let mut uiaainfo = UiaaInfo {

@@ -183,11 +241,11 @@ pub(crate) async fn delete_devices_route(
// Success!
},
| _ => match body.json_body {
| Some(json) => {
| Some(ref json) => {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
services
.uiaa
.create(sender_user, sender_device, &uiaainfo, &json);
.create(sender_user, sender_device, &uiaainfo, json);

return Err(Error::Uiaa(uiaainfo));
},
@@ -1,7 +1,19 @@
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{Err, Error, Result, info, warn};
use futures::{StreamExt, TryFutureExt};
use conduwuit::{
Err, Result, err, info,
utils::{
TryFutureExtExt,
math::Expected,
result::FlatOk,
stream::{ReadyExt, WidebandExt},
},
};
use conduwuit_service::Services;
use futures::{
FutureExt, StreamExt, TryFutureExt,
future::{join, join4, join5},
};
use ruma::{
OwnedRoomId, RoomId, ServerName, UInt, UserId,
api::{

@@ -10,7 +22,6 @@ use ruma::{
get_public_rooms, get_public_rooms_filtered, get_room_visibility,
set_room_visibility,
},
error::ErrorKind,
room,
},
federation,

@@ -25,7 +36,6 @@ use ruma::{
},
uint,
};
use service::Services;

use crate::Ruma;

@@ -42,10 +52,13 @@ pub(crate) async fn get_public_rooms_filtered_route(
) -> Result<get_public_rooms_filtered::v3::Response> {
if let Some(server) = &body.server {
if services
.server
.config
.forbidden_remote_room_directory_server_names
.contains(server)
.is_match(server.host())
|| services
.config
.forbidden_remote_server_names
.is_match(server.host())
{
return Err!(Request(Forbidden("Server is banned on this homeserver.")));
}

@@ -61,11 +74,7 @@ pub(crate) async fn get_public_rooms_filtered_route(
)
.await
.map_err(|e| {
warn!(?body.server, "Failed to return /publicRooms: {e}");
Error::BadRequest(
ErrorKind::Unknown,
"Failed to return the requested server's public room list.",
)
err!(Request(Unknown(warn!(?body.server, "Failed to return /publicRooms: {e}"))))
})?;

Ok(response)

@@ -84,10 +93,13 @@ pub(crate) async fn get_public_rooms_route(
) -> Result<get_public_rooms::v3::Response> {
if let Some(server) = &body.server {
if services
.server
.config
.forbidden_remote_room_directory_server_names
.contains(server)
.is_match(server.host())
|| services
.config
.forbidden_remote_server_names
.is_match(server.host())
{
return Err!(Request(Forbidden("Server is banned on this homeserver.")));
}

@@ -103,11 +115,7 @@ pub(crate) async fn get_public_rooms_route(
)
.await
.map_err(|e| {
warn!(?body.server, "Failed to return /publicRooms: {e}");
Error::BadRequest(
ErrorKind::Unknown,
"Failed to return the requested server's public room list.",
)
err!(Request(Unknown(warn!(?body.server, "Failed to return /publicRooms: {e}"))))
})?;

Ok(get_public_rooms::v3::Response {

@@ -127,7 +135,7 @@ pub(crate) async fn set_room_visibility_route(
InsecureClientIp(client): InsecureClientIp,
body: Ruma<set_room_visibility::v3::Request>,
) -> Result<set_room_visibility::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_user = body.sender_user();

if !services.rooms.metadata.exists(&body.room_id).await {
// Return 404 if the room doesn't exist

@@ -171,10 +179,9 @@ pub(crate) async fn set_room_visibility_route(
.await;
}

return Err(Error::BadRequest(
ErrorKind::forbidden(),
return Err!(Request(Forbidden(
"Publishing rooms to the room directory is not allowed",
));
)));
}

services.rooms.directory.set_public(&body.room_id);

@@ -192,10 +199,7 @@ pub(crate) async fn set_room_visibility_route(
},
| room::Visibility::Private => services.rooms.directory.set_not_public(&body.room_id),
| _ => {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Room visibility type is not supported.",
));
return Err!(Request(InvalidParam("Room visibility type is not supported.",)));
},
}

@@ -211,7 +215,7 @@ pub(crate) async fn get_room_visibility_route(
) -> Result<get_room_visibility::v3::Response> {
if !services.rooms.metadata.exists(&body.room_id).await {
// Return 404 if the room doesn't exist
return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found"));
return Err!(Request(NotFound("Room not found")));
}

Ok(get_room_visibility::v3::Response {

@@ -259,8 +263,8 @@ pub(crate) async fn get_public_rooms_filtered_helper(
}

// Use limit or else 10, with maximum 100
let limit = limit.map_or(10, u64::from);
let mut num_since: u64 = 0;
let limit: usize = limit.map_or(10_u64, u64::from).try_into()?;
let mut num_since: usize = 0;

if let Some(s) = &since {
let mut characters = s.chars();

@@ -268,14 +272,14 @@ pub(crate) async fn get_public_rooms_filtered_helper(
| Some('n') => false,
| Some('p') => true,
| _ => {
return Err(Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token"));
return Err!(Request(InvalidParam("Invalid `since` token")));
},
};

num_since = characters
.collect::<String>()
.parse()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token."))?;
.map_err(|_| err!(Request(InvalidParam("Invalid `since` token."))))?;

if backwards {
num_since = num_since.saturating_sub(limit);

@@ -287,11 +291,12 @@ pub(crate) async fn get_public_rooms_filtered_helper(
.directory
.public_rooms()
.map(ToOwned::to_owned)
.then(|room_id| public_rooms_chunk(services, room_id))
.filter_map(|chunk| async move {
.wide_then(|room_id| public_rooms_chunk(services, room_id))
.ready_filter_map(|chunk| {
if !filter.room_types.is_empty() && !filter.room_types.contains(&RoomTypeFilter::from(chunk.room_type.clone())) {
return None;
}

if let Some(query) = filter.generic_search_term.as_ref().map(|q| q.to_lowercase()) {
if let Some(name) = &chunk.name {
if name.as_str().to_lowercase().contains(&query) {

@@ -323,40 +328,24 @@ pub(crate) async fn get_public_rooms_filtered_helper(

all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members));

let total_room_count_estimate = UInt::try_from(all_rooms.len()).unwrap_or_else(|_| uint!(0));
let total_room_count_estimate = UInt::try_from(all_rooms.len())
.unwrap_or_else(|_| uint!(0))
.into();

let chunk: Vec<_> = all_rooms
.into_iter()
.skip(
num_since
.try_into()
.expect("num_since should not be this high"),
)
.take(limit.try_into().expect("limit should not be this high"))
.collect();
let chunk: Vec<_> = all_rooms.into_iter().skip(num_since).take(limit).collect();

let prev_batch = if num_since == 0 {
None
} else {
Some(format!("p{num_since}"))
};
let prev_batch = num_since.ne(&0).then_some(format!("p{num_since}"));

let next_batch = if chunk.len() < limit.try_into().unwrap() {
None
} else {
Some(format!(
"n{}",
num_since
.checked_add(limit)
.expect("num_since and limit should not be that large")
))
};
let next_batch = chunk
.len()
.ge(&limit)
.then_some(format!("n{}", num_since.expected_add(limit)));

Ok(get_public_rooms_filtered::v3::Response {
chunk,
prev_batch,
next_batch,
total_room_count_estimate: Some(total_room_count_estimate),
total_room_count_estimate,
})
}

@@ -374,7 +363,7 @@ async fn user_can_publish_room(
.await
{
| Ok(event) => serde_json::from_str(event.content.get())
.map_err(|_| Error::bad_database("Invalid event content for m.room.power_levels"))
.map_err(|_| err!(Database("Invalid event content for m.room.power_levels")))
.map(|content: RoomPowerLevelsEventContent| {
RoomPowerLevels::from(content)
.user_can_send_state(user_id, StateEventType::RoomHistoryVisibility)

@@ -394,43 +383,23 @@ async fn user_can_publish_room(
}

async fn public_rooms_chunk(services: &Services, room_id: OwnedRoomId) -> PublicRoomsChunk {
PublicRoomsChunk {
canonical_alias: services
let name = services.rooms.state_accessor.get_name(&room_id).ok();

let room_type = services.rooms.state_accessor.get_room_type(&room_id).ok();

let canonical_alias = services
.rooms
.state_accessor
.get_canonical_alias(&room_id)
.await
.ok(),
name: services.rooms.state_accessor.get_name(&room_id).await.ok(),
num_joined_members: services
.rooms
.state_cache
.room_joined_count(&room_id)
.await
.unwrap_or(0)
.try_into()
.expect("joined count overflows ruma UInt"),
topic: services
.rooms
.state_accessor
.get_room_topic(&room_id)
.await
.ok(),
world_readable: services
.rooms
.state_accessor
.is_world_readable(&room_id)
.await,
guest_can_join: services.rooms.state_accessor.guest_can_join(&room_id).await,
avatar_url: services
.rooms
.state_accessor
.get_avatar(&room_id)
.await
.into_option()
.unwrap_or_default()
.url,
join_rule: services
.ok();

let avatar_url = services.rooms.state_accessor.get_avatar(&room_id);

let topic = services.rooms.state_accessor.get_room_topic(&room_id).ok();

let world_readable = services.rooms.state_accessor.is_world_readable(&room_id);

let join_rule = services
.rooms
.state_accessor
.room_state_get_content(&room_id, &StateEventType::RoomJoinRules, "")

@@ -439,15 +408,36 @@ async fn public_rooms_chunk(services: &Services, room_id: OwnedRoomId) -> Public
| JoinRule::Knock => "knock".into(),
| JoinRule::KnockRestricted(_) => "knock_restricted".into(),
| _ => "invite".into(),
})
.await
.unwrap_or_default(),
room_type: services
.rooms
.state_accessor
.get_room_type(&room_id)
.await
.ok(),
});

let guest_can_join = services.rooms.state_accessor.guest_can_join(&room_id);

let num_joined_members = services.rooms.state_cache.room_joined_count(&room_id);

let (
(avatar_url, canonical_alias, guest_can_join, join_rule, name),
(num_joined_members, room_type, topic, world_readable),
) = join(
join5(avatar_url, canonical_alias, guest_can_join, join_rule, name),
join4(num_joined_members, room_type, topic, world_readable),
)
.boxed()
.await;

PublicRoomsChunk {
avatar_url: avatar_url.into_option().unwrap_or_default().url,
canonical_alias,
guest_can_join,
join_rule: join_rule.unwrap_or_default(),
name,
num_joined_members: num_joined_members
.map(TryInto::try_into)
.map(Result::ok)
.flat_ok()
.unwrap_or_else(|| uint!(0)),
room_id,
room_type,
topic,
world_readable,
}
}
@@ -1,8 +1,8 @@
use axum::extract::State;
use conduwuit::err;
use conduwuit::{Result, err};
use ruma::api::client::filter::{create_filter, get_filter};

use crate::{Result, Ruma};
use crate::Ruma;

/// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}`
///
@ -1,7 +1,8 @@
|
|||
use std::collections::{BTreeMap, HashMap, HashSet};
|
||||
|
||||
use axum::extract::State;
|
||||
use conduwuit::{Err, Error, Result, debug, debug_warn, err, info, result::NotFound, utils};
use conduwuit::{Err, Error, Result, debug, debug_warn, err, result::NotFound, utils};
use conduwuit_service::{Services, users::parse_master_key};
use futures::{StreamExt, stream::FuturesUnordered};
use ruma::{
OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId,

@@ -9,7 +10,8 @@ use ruma::{
client::{
error::ErrorKind,
keys::{
claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures,
claim_keys, get_key_changes, get_keys, upload_keys,
upload_signatures::{self},
upload_signing_keys,
},
uiaa::{AuthFlow, AuthType, UiaaInfo},

@@ -22,10 +24,7 @@ use ruma::{
use serde_json::json;

use super::SESSION_ID_LENGTH;
use crate::{
Ruma,
service::{Services, users::parse_master_key},
};
use crate::Ruma;

/// # `POST /_matrix/client/r0/keys/upload`
///

@@ -178,7 +177,7 @@ pub(crate) async fn upload_signing_keys_route(
body.master_key.as_ref(),
)
.await
.inspect_err(|e| info!(?e))
.inspect_err(|e| debug!(?e))
{
| Ok(exists) => {
if let Some(result) = exists {

@@ -308,53 +307,60 @@ async fn check_for_new_keys(
/// # `POST /_matrix/client/r0/keys/signatures/upload`
///
/// Uploads end-to-end key signatures from the sender user.
///
/// TODO: clean this timo-code up more and integrate failures. tried to improve
/// it a bit to stop exploding the entire request on bad sigs, but needs way
/// more work.
pub(crate) async fn upload_signatures_route(
State(services): State<crate::State>,
body: Ruma<upload_signatures::v3::Request>,
) -> Result<upload_signatures::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if body.signed_keys.is_empty() {
debug!("Empty signed_keys sent in key signature upload");
return Ok(upload_signatures::v3::Response::new());
}

let sender_user = body.sender_user();

for (user_id, keys) in &body.signed_keys {
for (key_id, key) in keys {
let key = serde_json::to_value(key)
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON"))?;
let Ok(key) = serde_json::to_value(key)
.inspect_err(|e| debug_warn!(?key_id, "Invalid \"key\" JSON: {e}"))
else {
continue;
};

for signature in key
.get("signatures")
.ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Missing signatures field."))?
.get(sender_user.to_string())
.ok_or(Error::BadRequest(
ErrorKind::InvalidParam,
"Invalid user in signatures field.",
))?
.as_object()
.ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Invalid signature."))?
.clone()
{
// Signature validation?
let signature = (
signature.0,
signature
.1
.as_str()
.ok_or(Error::BadRequest(
ErrorKind::InvalidParam,
"Invalid signature value.",
))?
.to_owned(),
);
let Some(signatures) = key.get("signatures") else {
continue;
};

services
let Some(sender_user_val) = signatures.get(sender_user.to_string()) else {
continue;
};

let Some(sender_user_object) = sender_user_val.as_object() else {
continue;
};

for (signature, val) in sender_user_object.clone() {
let Some(val) = val.as_str().map(ToOwned::to_owned) else {
continue;
};
let signature = (signature, val);

if let Err(_e) = services
.users
.sign_key(user_id, key_id, signature, sender_user)
.await?;
.await
.inspect_err(|e| debug_warn!("{e}"))
{
continue;
}
}
}
}

Ok(upload_signatures::v3::Response {
failures: BTreeMap::new(), // TODO: integrate
})
Ok(upload_signatures::v3::Response { failures: BTreeMap::new() })
}

/// # `POST /_matrix/client/r0/keys/changes`
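The rewritten signature-upload loop above replaces hard request failures with per-entry skips via `let .. else { continue; }`. A minimal standalone sketch of that pattern (plain serde_json, hypothetical helper name, not code from the diff):

    use serde_json::Value;

    // Collect (key_id, signature) pairs for one uploaded key, skipping any
    // malformed entry instead of failing the whole upload.
    fn collect_signatures(key: &Value, sender_user: &str) -> Vec<(String, String)> {
        let mut out = Vec::new();
        let Some(by_sender) = key
            .get("signatures")
            .and_then(|s| s.get(sender_user))
            .and_then(Value::as_object)
        else {
            return out; // missing or invalid "signatures": skip this key entirely
        };
        for (key_id, sig) in by_sender {
            let Some(sig) = sig.as_str() else {
                continue; // non-string signature value: skip just this entry
            };
            out.push((key_id.clone(), sig.to_owned()));
        }
        out
    }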

@@ -9,13 +9,25 @@ use std::{
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{
Err, PduEvent, Result, StateKey, at, debug, debug_info, debug_warn, err, error, info,
pdu::{PduBuilder, gen_event_id_canonical_json},
Err, Result, at, debug, debug_info, debug_warn, err, error, info,
matrix::{
StateKey,
pdu::{PduBuilder, PduEvent, gen_event_id, gen_event_id_canonical_json},
state_res,
},
result::{FlatOk, NotFound},
state_res, trace,
trace,
utils::{self, IterStream, ReadyExt, shuffle},
warn,
};
use conduwuit_service::{
Services,
appservice::RegistrationInfo,
rooms::{
state::RoomMutexGuard,
state_compressor::{CompressedState, HashSetCompressStateEvent},
},
};
use futures::{FutureExt, StreamExt, TryFutureExt, future::join4, join};
use ruma::{
CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName,

@@ -44,15 +56,6 @@ use ruma::{
},
},
};
use service::{
Services,
appservice::RegistrationInfo,
pdu::gen_event_id,
rooms::{
state::RoomMutexGuard,
state_compressor::{CompressedState, HashSetCompressStateEvent},
},
};

use crate::{Ruma, client::full_user_deactivate};

@@ -76,10 +79,9 @@ async fn banned_room_check(
if let Some(room_id) = room_id {
if services.rooms.metadata.is_banned(room_id).await
|| services
.server
.config
.forbidden_remote_server_names
.contains(&room_id.server_name().unwrap().to_owned())
.is_match(room_id.server_name().unwrap().host())
{
warn!(
"User {user_id} who is not an admin attempted to send an invite for or \

@@ -117,10 +119,9 @@ async fn banned_room_check(
}
} else if let Some(server_name) = server_name {
if services
.server
.config
.forbidden_remote_server_names
.contains(&server_name.to_owned())
.is_match(server_name.host())
{
warn!(
"User {user_id} who is not an admin tried joining a room which has the server \

@@ -475,9 +476,9 @@ pub(crate) async fn leave_room_route(
State(services): State<crate::State>,
body: Ruma<leave_room::v3::Request>,
) -> Result<leave_room::v3::Response> {
leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone()).await?;

Ok(leave_room::v3::Response::new())
leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone())
.await
.map(|()| leave_room::v3::Response::new())
}

/// # `POST /_matrix/client/r0/rooms/{roomId}/invite`

@@ -491,7 +492,7 @@ pub(crate) async fn invite_user_route(
) -> Result<invite_user::v3::Response> {
let sender_user = body.sender_user();

if !services.users.is_admin(sender_user).await && services.globals.block_non_admin_invites() {
if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites {
info!(
"User {sender_user} is not an admin and attempted to send an invite to room {}",
&body.room_id

@@ -1628,7 +1629,7 @@ pub(crate) async fn invite_helper(
reason: Option<String>,
is_direct: bool,
) -> Result {
if !services.users.is_admin(sender_user).await && services.globals.block_non_admin_invites() {
if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites {
info!(
"User {sender_user} is not an admin and attempted to send an invite to room \
{room_id}"

@@ -1763,8 +1764,8 @@ pub(crate) async fn invite_helper(
Ok(())
}

// Make a user leave all their joined rooms, forgets all rooms, and ignores
// errors
// Make a user leave all their joined rooms, rescinds knocks, forgets all rooms,
// and ignores errors
pub async fn leave_all_rooms(services: &Services, user_id: &UserId) {
let rooms_joined = services
.rooms

@@ -1778,7 +1779,17 @@ pub async fn leave_all_rooms(services: &Services, user_id: &UserId) {
.rooms_invited(user_id)
.map(|(r, _)| r);

let all_rooms: Vec<_> = rooms_joined.chain(rooms_invited).collect().await;
let rooms_knocked = services
.rooms
.state_cache
.rooms_knocked(user_id)
.map(|(r, _)| r);

let all_rooms: Vec<_> = rooms_joined
.chain(rooms_invited)
.chain(rooms_knocked)
.collect()
.await;

for room_id in all_rooms {
// ignore errors

@@ -1795,7 +1806,40 @@ pub async fn leave_room(
user_id: &UserId,
room_id: &RoomId,
reason: Option<String>,
) -> Result<()> {
) -> Result {
let default_member_content = RoomMemberEventContent {
membership: MembershipState::Leave,
reason: reason.clone(),
join_authorized_via_users_server: None,
is_direct: None,
avatar_url: None,
displayname: None,
third_party_invite: None,
blurhash: None,
};

if services.rooms.metadata.is_banned(room_id).await
|| services.rooms.metadata.is_disabled(room_id).await
{
// the room is banned/disabled, the room must be rejected locally since we
// cant/dont want to federate with this server
services
.rooms
.state_cache
.update_membership(
room_id,
user_id,
default_member_content,
user_id,
None,
None,
true,
)
.await?;

return Ok(());
}

// Ask a remote server if we don't have this room and are not knocking on it
if !services
.rooms

@@ -1828,7 +1872,7 @@ pub async fn leave_room(
.update_membership(
room_id,
user_id,
RoomMemberEventContent::new(MembershipState::Leave),
default_member_content,
user_id,
last_state,
None,

@@ -1848,26 +1892,23 @@ pub async fn leave_room(
)
.await
else {
// Fix for broken rooms
warn!(
debug_warn!(
"Trying to leave a room you are not a member of, marking room as left locally."
);

services
return services
.rooms
.state_cache
.update_membership(
room_id,
user_id,
RoomMemberEventContent::new(MembershipState::Leave),
default_member_content,
user_id,
None,
None,
true,
)
.await?;

return Ok(());
.await;
};

services

@@ -1897,7 +1938,7 @@ async fn remote_leave_room(
room_id: &RoomId,
) -> Result<()> {
let mut make_leave_response_and_server =
Err!(BadServerResponse("No server available to assist in leaving."));
Err!(BadServerResponse("No remote server available to assist in leaving {room_id}."));

let mut servers: HashSet<OwnedServerName> = services
.rooms

@@ -1977,20 +2018,25 @@ async fn remote_leave_room(
let (make_leave_response, remote_server) = make_leave_response_and_server?;

let Some(room_version_id) = make_leave_response.room_version else {
return Err!(BadServerResponse("Remote room version is not supported by conduwuit"));
return Err!(BadServerResponse(warn!(
"No room version was returned by {remote_server} for {room_id}, room version is \
likely not supported by conduwuit"
)));
};

if !services.server.supported_room_version(&room_version_id) {
return Err!(BadServerResponse(
"Remote room version {room_version_id} is not supported by conduwuit"
));
return Err!(BadServerResponse(warn!(
"Remote room version {room_version_id} for {room_id} is not supported by conduwuit",
)));
}

let mut leave_event_stub = serde_json::from_str::<CanonicalJsonObject>(
make_leave_response.event.get(),
)
.map_err(|e| {
err!(BadServerResponse("Invalid make_leave event json received from server: {e:?}"))
err!(BadServerResponse(warn!(
"Invalid make_leave event json received from {remote_server} for {room_id}: {e:?}"
)))
})?;

// TODO: Is origin needed?
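The banned_room_check hunks above change forbidden_remote_server_names from an exact-match list (`.contains(..)`) to a pattern match against the server's host (`.is_match(..host())`). A rough standalone illustration of host-based matching with the regex crate (patterns and helper name are hypothetical; conduwuit's real config type may wrap this differently):

    use regex::RegexSet;

    // True when the server's host matches any forbidden pattern.
    fn is_forbidden_server(forbidden: &RegexSet, server_host: &str) -> bool {
        forbidden.is_match(server_host)
    }

    fn main() {
        let forbidden = RegexSet::new([r"^bad\.example$", r"\.blocked\.example$"])
            .expect("valid regex patterns");
        assert!(is_forbidden_server(&forbidden, "matrix.blocked.example"));
        assert!(!is_forbidden_server(&forbidden, "matrix.example.org"));
    }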

@@ -1,12 +1,24 @@
use axum::extract::State;
use conduwuit::{
Err, Event, PduCount, PduEvent, Result, at,
Err, Result, at,
matrix::{
Event,
pdu::{PduCount, PduEvent},
},
utils::{
IterStream, ReadyExt,
result::{FlatOk, LogErr},
stream::{BroadbandExt, TryIgnore, WidebandExt},
},
};
use conduwuit_service::{
Services,
rooms::{
lazy_loading,
lazy_loading::{Options, Witness},
timeline::PdusIterItem,
},
};
use futures::{FutureExt, StreamExt, TryFutureExt, future::OptionFuture, pin_mut};
use ruma::{
RoomId, UserId,

@@ -17,14 +29,6 @@ use ruma::{
events::{AnyStateEvent, StateEventType, TimelineEventType, TimelineEventType::*},
serde::Raw,
};
use service::{
Services,
rooms::{
lazy_loading,
lazy_loading::{Options, Witness},
timeline::PdusIterItem,
},
};

use crate::Ruma;

@@ -157,7 +161,7 @@ pub(crate) async fn get_message_events_route(
let chunk = events
.into_iter()
.map(at!(1))
.map(|pdu| pdu.to_room_event())
.map(PduEvent::into_room_event)
.collect();

Ok(get_message_events::v3::Response {

@@ -257,10 +261,9 @@ pub(crate) async fn is_ignored_pdu(
let ignored_type = IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok();

let ignored_server = services
.server
.config
.forbidden_remote_server_names
.contains(pdu.sender().server_name());
.is_match(pdu.sender().server_name().host());

if ignored_type
&& (ignored_server || services.users.user_is_ignored(&pdu.sender, user_id).await)

@@ -1,14 +1,14 @@
use std::time::Duration;

use axum::extract::State;
use conduwuit::utils;
use conduwuit::{Error, Result, utils};
use ruma::{
api::client::{account, error::ErrorKind},
authentication::TokenType,
};

use super::TOKEN_LENGTH;
use crate::{Error, Result, Ruma};
use crate::Ruma;

/// # `POST /_matrix/client/v3/user/{userId}/openid/request_token`
///

@@ -1,12 +1,10 @@
use std::time::Duration;

use axum::extract::State;
use ruma::api::client::{
error::ErrorKind,
presence::{get_presence, set_presence},
};
use conduwuit::{Err, Result};
use ruma::api::client::presence::{get_presence, set_presence};

use crate::{Error, Result, Ruma};
use crate::Ruma;

/// # `PUT /_matrix/client/r0/presence/{userId}/status`
///

@@ -15,24 +13,17 @@ pub(crate) async fn set_presence_route(
State(services): State<crate::State>,
body: Ruma<set_presence::v3::Request>,
) -> Result<set_presence::v3::Response> {
if !services.globals.allow_local_presence() {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"Presence is disabled on this server",
));
if !services.config.allow_local_presence {
return Err!(Request(Forbidden("Presence is disabled on this server")));
}

let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if sender_user != &body.user_id && body.appservice_info.is_none() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Not allowed to set presence of other users",
));
if body.sender_user() != body.user_id && body.appservice_info.is_none() {
return Err!(Request(InvalidParam("Not allowed to set presence of other users")));
}

services
.presence
.set_presence(sender_user, &body.presence, None, None, body.status_msg.clone())
.set_presence(body.sender_user(), &body.presence, None, None, body.status_msg.clone())
.await?;

Ok(set_presence::v3::Response {})

@@ -47,21 +38,15 @@ pub(crate) async fn get_presence_route(
State(services): State<crate::State>,
body: Ruma<get_presence::v3::Request>,
) -> Result<get_presence::v3::Response> {
if !services.globals.allow_local_presence() {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"Presence is disabled on this server",
));
if !services.config.allow_local_presence {
return Err!(Request(Forbidden("Presence is disabled on this server",)));
}

let sender_user = body.sender_user.as_ref().expect("user is authenticated");

let mut presence_event = None;

let has_shared_rooms = services
.rooms
.state_cache
.user_sees_user(sender_user, &body.user_id)
.user_sees_user(body.sender_user(), &body.user_id)
.await;

if has_shared_rooms {

@@ -99,9 +84,6 @@ pub(crate) async fn get_presence_route(
presence: presence.content.presence,
})
},
| _ => Err(Error::BadRequest(
ErrorKind::NotFound,
"Presence state for this user was not found",
)),
| _ => Err!(Request(NotFound("Presence state for this user was not found"))),
}
}

@@ -3,10 +3,11 @@ use std::collections::BTreeMap;
use axum::extract::State;
use conduwuit::{
Err, Error, Result,
pdu::PduBuilder,
matrix::pdu::PduBuilder,
utils::{IterStream, stream::TryIgnore},
warn,
};
use conduwuit_service::Services;
use futures::{StreamExt, TryStreamExt, future::join3};
use ruma::{
OwnedMxcUri, OwnedRoomId, UserId,

@@ -22,7 +23,6 @@ use ruma::{
events::room::member::{MembershipState, RoomMemberEventContent},
presence::PresenceState,
};
use service::Services;

use crate::Ruma;

@@ -52,7 +52,7 @@ pub(crate) async fn set_displayname_route(
update_displayname(&services, &body.user_id, body.displayname.clone(), &all_joined_rooms)
.await;

if services.globals.allow_local_presence() {
if services.config.allow_local_presence {
// Presence update
services
.presence

@@ -147,7 +147,7 @@ pub(crate) async fn set_avatar_url_route(
)
.await;

if services.globals.allow_local_presence() {
if services.config.allow_local_presence {
// Presence update
services
.presence

@@ -1,5 +1,6 @@
use axum::extract::State;
use conduwuit::{Err, err};
use conduwuit::{Err, Error, Result, err};
use conduwuit_service::Services;
use ruma::{
CanonicalJsonObject, CanonicalJsonValue,
api::client::{

@@ -19,9 +20,8 @@ use ruma::{
RemovePushRuleError, Ruleset,
},
};
use service::Services;

use crate::{Error, Result, Ruma};
use crate::Ruma;

/// # `GET /_matrix/client/r0/pushrules/`
///

@@ -503,7 +503,7 @@ pub(crate) async fn set_pushers_route(

services
.pusher
.set_pusher(sender_user, &body.action)
.set_pusher(sender_user, body.sender_device(), &body.action)
.await?;

Ok(set_pusher::v3::Response::new())

@@ -1,7 +1,7 @@
use std::collections::BTreeMap;

use axum::extract::State;
use conduwuit::{Err, PduCount, err};
use conduwuit::{Err, PduCount, Result, err};
use ruma::{
MilliSecondsSinceUnixEpoch,
api::client::{read_marker::set_read_marker, receipt::create_receipt},

@@ -11,7 +11,7 @@ use ruma::{
},
};

use crate::{Result, Ruma};
use crate::Ruma;

/// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers`
///

@@ -50,7 +50,7 @@ pub(crate) async fn set_read_marker_route(
}

// ping presence
if services.globals.allow_local_presence() {
if services.config.allow_local_presence {
services
.presence
.ping_presence(sender_user, &ruma::presence::PresenceState::Online)

@@ -126,7 +126,7 @@ pub(crate) async fn create_receipt_route(
}

// ping presence
if services.globals.allow_local_presence() {
if services.config.allow_local_presence {
services
.presence
.ping_presence(sender_user, &ruma::presence::PresenceState::Online)

@@ -1,9 +1,10 @@
use axum::extract::State;
use conduwuit::{Result, matrix::pdu::PduBuilder};
use ruma::{
api::client::redact::redact_event, events::room::redaction::RoomRedactionEventContent,
};

use crate::{Result, Ruma, service::pdu::PduBuilder};
use crate::Ruma;

/// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}`
///

@@ -1,8 +1,10 @@
use axum::extract::State;
use conduwuit::{
PduCount, Result, at,
Result, at,
matrix::pdu::PduCount,
utils::{IterStream, ReadyExt, result::FlatOk, stream::WidebandExt},
};
use conduwuit_service::{Services, rooms::timeline::PdusIterItem};
use futures::StreamExt;
use ruma::{
EventId, RoomId, UInt, UserId,

@@ -15,7 +17,6 @@ use ruma::{
},
events::{TimelineEventType, relation::RelationType},
};
use service::{Services, rooms::timeline::PdusIterItem};

use crate::Ruma;

@@ -2,7 +2,8 @@ use std::time::Duration;

use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{Err, info, utils::ReadyExt};
use conduwuit::{Err, Error, Result, debug_info, info, matrix::pdu::PduEvent, utils::ReadyExt};
use conduwuit_service::Services;
use rand::Rng;
use ruma::{
EventId, RoomId, UserId,

@@ -15,10 +16,7 @@ use ruma::{
};
use tokio::time::sleep;

use crate::{
Error, Result, Ruma, debug_info,
service::{Services, pdu::PduEvent},
};
use crate::Ruma;

/// # `POST /_matrix/client/v3/rooms/{roomId}/report`
///

@@ -2,8 +2,11 @@ use std::collections::BTreeMap;

use axum::extract::State;
use conduwuit::{
Err, Error, Result, StateKey, debug_info, debug_warn, err, error, info, pdu::PduBuilder, warn,
Err, Error, Result, debug_info, debug_warn, err, error, info,
matrix::{StateKey, pdu::PduBuilder},
warn,
};
use conduwuit_service::{Services, appservice::RegistrationInfo};
use futures::FutureExt;
use ruma::{
CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId,

@@ -29,7 +32,6 @@ use ruma::{
serde::{JsonObject, Raw},
};
use serde_json::{json, value::to_raw_value};
use service::{Services, appservice::RegistrationInfo};

use crate::{Ruma, client::invite_helper};

@@ -372,7 +374,7 @@ pub(crate) async fn create_room_route(

// Silently skip encryption events if they are not allowed
if pdu_builder.event_type == TimelineEventType::RoomEncryption
&& !services.globals.allow_encryption()
&& !services.config.allow_encryption
{
continue;
}

@@ -40,5 +40,5 @@ pub(crate) async fn get_room_event_route(

event.add_age().ok();

Ok(get_room_event::v3::Response { event: event.to_room_event() })
Ok(get_room_event::v3::Response { event: event.into_room_event() })
}

@@ -55,7 +55,7 @@ pub(crate) async fn room_initial_sync_route(
chunk: events
.into_iter()
.map(at!(1))
.map(|pdu| pdu.to_room_event())
.map(PduEvent::into_room_event)
.collect(),
};

@@ -2,9 +2,14 @@ mod aliases;
mod create;
mod event;
mod initial_sync;
mod summary;
mod upgrade;

pub(crate) use self::{
aliases::get_room_aliases_route, create::create_room_route, event::get_room_event_route,
initial_sync::room_initial_sync_route, upgrade::upgrade_room_route,
aliases::get_room_aliases_route,
create::create_room_route,
event::get_room_event_route,
initial_sync::room_initial_sync_route,
summary::{get_room_summary, get_room_summary_legacy},
upgrade::upgrade_room_route,
};

330 src/api/client/room/summary.rs Normal file
|
@ -0,0 +1,330 @@
|
|||
use axum::extract::State;
|
||||
use axum_client_ip::InsecureClientIp;
|
||||
use conduwuit::{
|
||||
Err, Result, debug_warn, trace,
|
||||
utils::{IterStream, future::TryExtExt},
|
||||
};
|
||||
use futures::{
|
||||
FutureExt, StreamExt,
|
||||
future::{OptionFuture, join3},
|
||||
stream::FuturesUnordered,
|
||||
};
|
||||
use ruma::{
|
||||
OwnedServerName, RoomId, UserId,
|
||||
api::{
|
||||
client::room::get_summary,
|
||||
federation::space::{SpaceHierarchyParentSummary, get_hierarchy},
|
||||
},
|
||||
events::room::member::MembershipState,
|
||||
space::SpaceRoomJoinRule::{self, *},
|
||||
};
|
||||
use service::Services;
|
||||
|
||||
use crate::{Ruma, RumaResponse};
|
||||
|
||||
/// # `GET /_matrix/client/unstable/im.nheko.summary/rooms/{roomIdOrAlias}/summary`
|
||||
///
|
||||
/// Returns a short description of the state of a room.
|
||||
///
|
||||
/// This is the "wrong" endpoint that some implementations/clients may use
|
||||
/// according to the MSC. Request and response bodies are the same as
|
||||
/// `get_room_summary`.
|
||||
///
|
||||
/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266)
|
||||
pub(crate) async fn get_room_summary_legacy(
|
||||
State(services): State<crate::State>,
|
||||
InsecureClientIp(client): InsecureClientIp,
|
||||
body: Ruma<get_summary::msc3266::Request>,
|
||||
) -> Result<RumaResponse<get_summary::msc3266::Response>> {
|
||||
get_room_summary(State(services), InsecureClientIp(client), body)
|
||||
.boxed()
|
||||
.await
|
||||
.map(RumaResponse)
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}`
|
||||
///
|
||||
/// Returns a short description of the state of a room.
|
||||
///
|
||||
/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266)
|
||||
#[tracing::instrument(skip_all, fields(%client), name = "room_summary")]
|
||||
pub(crate) async fn get_room_summary(
|
||||
State(services): State<crate::State>,
|
||||
InsecureClientIp(client): InsecureClientIp,
|
||||
body: Ruma<get_summary::msc3266::Request>,
|
||||
) -> Result<get_summary::msc3266::Response> {
|
||||
let (room_id, servers) = services
|
||||
.rooms
|
||||
.alias
|
||||
.resolve_with_servers(&body.room_id_or_alias, Some(body.via.clone()))
|
||||
.await?;
|
||||
|
||||
if services.rooms.metadata.is_banned(&room_id).await {
|
||||
return Err!(Request(Forbidden("This room is banned on this homeserver.")));
|
||||
}
|
||||
|
||||
room_summary_response(&services, &room_id, &servers, body.sender_user.as_deref())
|
||||
.boxed()
|
||||
.await
|
||||
}
|
||||
|
||||
async fn room_summary_response(
|
||||
services: &Services,
|
||||
room_id: &RoomId,
|
||||
servers: &[OwnedServerName],
|
||||
sender_user: Option<&UserId>,
|
||||
) -> Result<get_summary::msc3266::Response> {
|
||||
if services
|
||||
.rooms
|
||||
.state_cache
|
||||
.server_in_room(services.globals.server_name(), room_id)
|
||||
.await
|
||||
{
|
||||
return local_room_summary_response(services, room_id, sender_user)
|
||||
.boxed()
|
||||
.await;
|
||||
}
|
||||
|
||||
let room =
|
||||
remote_room_summary_hierarchy_response(services, room_id, servers, sender_user).await?;
|
||||
|
||||
Ok(get_summary::msc3266::Response {
|
||||
room_id: room_id.to_owned(),
|
||||
canonical_alias: room.canonical_alias,
|
||||
avatar_url: room.avatar_url,
|
||||
guest_can_join: room.guest_can_join,
|
||||
name: room.name,
|
||||
num_joined_members: room.num_joined_members,
|
||||
topic: room.topic,
|
||||
world_readable: room.world_readable,
|
||||
join_rule: room.join_rule,
|
||||
room_type: room.room_type,
|
||||
room_version: room.room_version,
|
||||
encryption: room.encryption,
|
||||
allowed_room_ids: room.allowed_room_ids,
|
||||
membership: sender_user.is_some().then_some(MembershipState::Leave),
|
||||
})
|
||||
}
|
||||
|
||||
async fn local_room_summary_response(
|
||||
services: &Services,
|
||||
room_id: &RoomId,
|
||||
sender_user: Option<&UserId>,
|
||||
) -> Result<get_summary::msc3266::Response> {
|
||||
trace!(?sender_user, "Sending local room summary response for {room_id:?}");
|
||||
let join_rule = services.rooms.state_accessor.get_join_rules(room_id);
|
||||
let world_readable = services.rooms.state_accessor.is_world_readable(room_id);
|
||||
let guest_can_join = services.rooms.state_accessor.guest_can_join(room_id);
|
||||
|
||||
let (join_rule, world_readable, guest_can_join) =
|
||||
join3(join_rule, world_readable, guest_can_join).await;
|
||||
trace!("{join_rule:?}, {world_readable:?}, {guest_can_join:?}");
|
||||
|
||||
user_can_see_summary(
|
||||
services,
|
||||
room_id,
|
||||
&join_rule.clone().into(),
|
||||
guest_can_join,
|
||||
world_readable,
|
||||
join_rule.allowed_rooms(),
|
||||
sender_user,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let canonical_alias = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_canonical_alias(room_id)
|
||||
.ok();
|
||||
|
||||
let name = services.rooms.state_accessor.get_name(room_id).ok();
|
||||
|
||||
let topic = services.rooms.state_accessor.get_room_topic(room_id).ok();
|
||||
|
||||
let room_type = services.rooms.state_accessor.get_room_type(room_id).ok();
|
||||
|
||||
let avatar_url = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_avatar(room_id)
|
||||
.map(|res| res.into_option().unwrap_or_default().url);
|
||||
|
||||
let room_version = services.rooms.state.get_room_version(room_id).ok();
|
||||
|
||||
let encryption = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_room_encryption(room_id)
|
||||
.ok();
|
||||
|
||||
let num_joined_members = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_joined_count(room_id)
|
||||
.unwrap_or(0);
|
||||
|
||||
let membership: OptionFuture<_> = sender_user
|
||||
.map(|sender_user| {
|
||||
services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_member(room_id, sender_user)
|
||||
.map_ok_or(MembershipState::Leave, |content| content.membership)
|
||||
})
|
||||
.into();
|
||||
|
||||
let (
|
||||
canonical_alias,
|
||||
name,
|
||||
num_joined_members,
|
||||
topic,
|
||||
avatar_url,
|
||||
room_type,
|
||||
room_version,
|
||||
encryption,
|
||||
membership,
|
||||
) = futures::join!(
|
||||
canonical_alias,
|
||||
name,
|
||||
num_joined_members,
|
||||
topic,
|
||||
avatar_url,
|
||||
room_type,
|
||||
room_version,
|
||||
encryption,
|
||||
membership,
|
||||
);
|
||||
|
||||
Ok(get_summary::msc3266::Response {
|
||||
room_id: room_id.to_owned(),
|
||||
canonical_alias,
|
||||
avatar_url,
|
||||
guest_can_join,
|
||||
name,
|
||||
num_joined_members: num_joined_members.try_into().unwrap_or_default(),
|
||||
topic,
|
||||
world_readable,
|
||||
room_type,
|
||||
room_version,
|
||||
encryption,
|
||||
membership,
|
||||
allowed_room_ids: join_rule.allowed_rooms().map(Into::into).collect(),
|
||||
join_rule: join_rule.into(),
|
||||
})
|
||||
}
|
||||
|
||||
/// used by MSC3266 to fetch a room's info if we do not know about it
|
||||
async fn remote_room_summary_hierarchy_response(
|
||||
services: &Services,
|
||||
room_id: &RoomId,
|
||||
servers: &[OwnedServerName],
|
||||
sender_user: Option<&UserId>,
|
||||
) -> Result<SpaceHierarchyParentSummary> {
|
||||
trace!(?sender_user, ?servers, "Sending remote room summary response for {room_id:?}");
|
||||
if !services.config.allow_federation {
|
||||
return Err!(Request(Forbidden("Federation is disabled.")));
|
||||
}
|
||||
|
||||
if services.rooms.metadata.is_disabled(room_id).await {
|
||||
return Err!(Request(Forbidden(
|
||||
"Federaton of room {room_id} is currently disabled on this server."
|
||||
)));
|
||||
}
|
||||
|
||||
let request = get_hierarchy::v1::Request::new(room_id.to_owned());
|
||||
|
||||
let mut requests: FuturesUnordered<_> = servers
|
||||
.iter()
|
||||
.map(|server| {
|
||||
services
|
||||
.sending
|
||||
.send_federation_request(server, request.clone())
|
||||
})
|
||||
.collect();
|
||||
|
||||
while let Some(Ok(response)) = requests.next().await {
|
||||
trace!("{response:?}");
|
||||
let room = response.room.clone();
|
||||
if room.room_id != room_id {
|
||||
debug_warn!(
|
||||
"Room ID {} returned does not belong to the requested room ID {}",
|
||||
room.room_id,
|
||||
room_id
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
return user_can_see_summary(
|
||||
services,
|
||||
room_id,
|
||||
&room.join_rule,
|
||||
room.guest_can_join,
|
||||
room.world_readable,
|
||||
room.allowed_room_ids.iter().map(AsRef::as_ref),
|
||||
sender_user,
|
||||
)
|
||||
.await
|
||||
.map(|()| room);
|
||||
}
|
||||
|
||||
Err!(Request(NotFound(
|
||||
"Room is unknown to this server and was unable to fetch over federation with the \
|
||||
provided servers available"
|
||||
)))
|
||||
}
|
||||
|
||||
async fn user_can_see_summary<'a, I>(
|
||||
services: &Services,
|
||||
room_id: &RoomId,
|
||||
join_rule: &SpaceRoomJoinRule,
|
||||
guest_can_join: bool,
|
||||
world_readable: bool,
|
||||
allowed_room_ids: I,
|
||||
sender_user: Option<&UserId>,
|
||||
) -> Result
|
||||
where
|
||||
I: Iterator<Item = &'a RoomId> + Send,
|
||||
{
|
||||
let is_public_room = matches!(join_rule, Public | Knock | KnockRestricted);
|
||||
match sender_user {
|
||||
| Some(sender_user) => {
|
||||
let user_can_see_state_events = services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.user_can_see_state_events(sender_user, room_id);
|
||||
let is_guest = services.users.is_deactivated(sender_user).unwrap_or(false);
|
||||
let user_in_allowed_restricted_room = allowed_room_ids
|
||||
.stream()
|
||||
.any(|room| services.rooms.state_cache.is_joined(sender_user, room));
|
||||
|
||||
let (user_can_see_state_events, is_guest, user_in_allowed_restricted_room) =
|
||||
join3(user_can_see_state_events, is_guest, user_in_allowed_restricted_room)
|
||||
.boxed()
|
||||
.await;
|
||||
|
||||
if user_can_see_state_events
|
||||
|| (is_guest && guest_can_join)
|
||||
|| is_public_room
|
||||
|| user_in_allowed_restricted_room
|
||||
{
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
Err!(Request(Forbidden(
|
||||
"Room is not world readable, not publicly accessible/joinable, restricted room \
|
||||
conditions not met, and guest access is forbidden. Not allowed to see details \
|
||||
of this room."
|
||||
)))
|
||||
},
|
||||
| None => {
|
||||
if is_public_room || world_readable {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
Err!(Request(Forbidden(
|
||||
"Room is not world readable or publicly accessible/joinable, authentication is \
|
||||
required"
|
||||
)))
|
||||
},
|
||||
}
|
||||
}
|
|
@ -1,7 +1,10 @@
|
|||
use std::cmp::max;
|
||||
|
||||
use axum::extract::State;
|
||||
use conduwuit::{Error, Result, StateKey, err, info, pdu::PduBuilder};
|
||||
use conduwuit::{
|
||||
Error, Result, err, info,
|
||||
matrix::{StateKey, pdu::PduBuilder},
|
||||
};
|
||||
use futures::StreamExt;
|
||||
use ruma::{
|
||||
CanonicalJsonObject, RoomId, RoomVersionId,
|
||||
|
@ -103,7 +106,7 @@ pub(crate) async fn upgrade_room_route(
|
|||
// Use the m.room.tombstone event as the predecessor
|
||||
let predecessor = Some(ruma::events::room::create::PreviousRoom::new(
|
||||
body.room_id.clone(),
|
||||
(*tombstone_event_id).to_owned(),
|
||||
Some(tombstone_event_id),
|
||||
));
|
||||
|
||||
// Send a m.room.create event containing a predecessor field and the applicable
|
||||
|
|
|
@ -2,10 +2,12 @@ use std::collections::BTreeMap;
|
|||
|
||||
use axum::extract::State;
|
||||
use conduwuit::{
|
||||
Err, PduEvent, Result, at, is_true,
|
||||
Err, Result, at, is_true,
|
||||
matrix::pdu::PduEvent,
|
||||
result::FlatOk,
|
||||
utils::{IterStream, stream::ReadyExt},
|
||||
};
|
||||
use conduwuit_service::{Services, rooms::search::RoomQuery};
|
||||
use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::OptionFuture};
|
||||
use ruma::{
|
||||
OwnedRoomId, RoomId, UInt, UserId,
|
||||
|
@ -17,7 +19,6 @@ use ruma::{
|
|||
serde::Raw,
|
||||
};
|
||||
use search_events::v3::{Request, Response};
|
||||
use service::{Services, rooms::search::RoomQuery};
|
||||
|
||||
use crate::Ruma;
|
||||
|
||||
|
@ -143,7 +144,7 @@ async fn category_room_events(
|
|||
.map(at!(2))
|
||||
.flatten()
|
||||
.stream()
|
||||
.map(|pdu| pdu.to_room_event())
|
||||
.map(PduEvent::into_room_event)
|
||||
.map(|result| SearchResult {
|
||||
rank: None,
|
||||
result: Some(result),
|
||||
|
|
|
@ -1,11 +1,11 @@
|
|||
use std::collections::BTreeMap;
|
||||
|
||||
use axum::extract::State;
|
||||
use conduwuit::{Err, err};
|
||||
use conduwuit::{Err, Result, err, matrix::pdu::PduBuilder, utils};
|
||||
use ruma::{api::client::message::send_message_event, events::MessageLikeEventType};
|
||||
use serde_json::from_str;
|
||||
|
||||
use crate::{Result, Ruma, service::pdu::PduBuilder, utils};
|
||||
use crate::Ruma;
|
||||
|
||||
/// # `PUT /_matrix/client/v3/rooms/{roomId}/send/{eventType}/{txnId}`
|
||||
///
|
||||
|
@ -25,8 +25,7 @@ pub(crate) async fn send_message_event_route(
|
|||
let appservice_info = body.appservice_info.as_ref();
|
||||
|
||||
// Forbid m.room.encrypted if encryption is disabled
|
||||
if MessageLikeEventType::RoomEncrypted == body.event_type
|
||||
&& !services.globals.allow_encryption()
|
||||
if MessageLikeEventType::RoomEncrypted == body.event_type && !services.config.allow_encryption
|
||||
{
|
||||
return Err!(Request(Forbidden("Encryption has been disabled")));
|
||||
}
|
||||
|
|
|
@ -2,7 +2,11 @@ use std::time::Duration;
|
|||
|
||||
use axum::extract::State;
|
||||
use axum_client_ip::InsecureClientIp;
|
||||
use conduwuit::{Err, debug, err, info, utils::ReadyExt};
|
||||
use conduwuit::{
|
||||
Err, Error, Result, debug, err, info, utils,
|
||||
utils::{ReadyExt, hash},
|
||||
};
|
||||
use conduwuit_service::uiaa::SESSION_ID_LENGTH;
|
||||
use futures::StreamExt;
|
||||
use ruma::{
|
||||
UserId,
|
||||
|
@ -22,10 +26,9 @@ use ruma::{
|
|||
uiaa,
|
||||
},
|
||||
};
|
||||
use service::uiaa::SESSION_ID_LENGTH;
|
||||
|
||||
use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH};
|
||||
use crate::{Error, Result, Ruma, utils, utils::hash};
|
||||
use crate::Ruma;
|
||||
|
||||
/// # `GET /_matrix/client/v3/login`
|
||||
///
|
||||
|
|
|
@ -8,16 +8,16 @@ use conduwuit::{
|
|||
Err, Result,
|
||||
utils::{future::TryExtExt, stream::IterStream},
|
||||
};
|
||||
use futures::{StreamExt, TryFutureExt, future::OptionFuture};
|
||||
use ruma::{
|
||||
OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, api::client::space::get_hierarchy,
|
||||
};
|
||||
use service::{
|
||||
use conduwuit_service::{
|
||||
Services,
|
||||
rooms::spaces::{
|
||||
PaginationToken, SummaryAccessibility, get_parent_children_via, summary_to_chunk,
|
||||
},
|
||||
};
|
||||
use futures::{StreamExt, TryFutureExt, future::OptionFuture};
|
||||
use ruma::{
|
||||
OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, api::client::space::get_hierarchy,
|
||||
};
|
||||
|
||||
use crate::Ruma;
|
||||
|
||||
|
@ -155,11 +155,7 @@ where
|
|||
break;
|
||||
}
|
||||
|
||||
if children.is_empty() {
|
||||
break;
|
||||
}
|
||||
|
||||
if parents.len() >= max_depth {
|
||||
if parents.len() > max_depth {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
|
|
@ -1,5 +1,10 @@
|
|||
use axum::extract::State;
|
||||
use conduwuit::{Err, PduEvent, Result, err, pdu::PduBuilder, utils::BoolExt};
|
||||
use conduwuit::{
|
||||
Err, Result, err,
|
||||
matrix::pdu::{PduBuilder, PduEvent},
|
||||
utils::BoolExt,
|
||||
};
|
||||
use conduwuit_service::Services;
|
||||
use futures::TryStreamExt;
|
||||
use ruma::{
|
||||
OwnedEventId, RoomId, UserId,
|
||||
|
@ -16,7 +21,6 @@ use ruma::{
|
|||
},
|
||||
serde::Raw,
|
||||
};
|
||||
use service::Services;
|
||||
|
||||
use crate::{Ruma, RumaResponse};
|
||||
|
||||
|
@ -207,7 +211,7 @@ async fn allowed_to_send_state_event(
|
|||
// irreversible mistakes
|
||||
match json.deserialize_as::<RoomServerAclEventContent>() {
|
||||
| Ok(acl_content) => {
|
||||
if acl_content.allow.is_empty() {
|
||||
if acl_content.allow_is_empty() {
|
||||
return Err!(Request(BadJson(debug_warn!(
|
||||
?room_id,
|
||||
"Sending an ACL event with an empty allow key will permanently \
|
||||
|
@ -216,9 +220,7 @@ async fn allowed_to_send_state_event(
|
|||
))));
|
||||
}
|
||||
|
||||
if acl_content.deny.contains(&String::from("*"))
|
||||
&& acl_content.allow.contains(&String::from("*"))
|
||||
{
|
||||
if acl_content.deny_contains("*") && acl_content.allow_contains("*") {
|
||||
return Err!(Request(BadJson(debug_warn!(
|
||||
?room_id,
|
||||
"Sending an ACL event with a deny and allow key value of \"*\" will \
|
||||
|
@ -227,8 +229,9 @@ async fn allowed_to_send_state_event(
|
|||
))));
|
||||
}
|
||||
|
||||
if acl_content.deny.contains(&String::from("*"))
|
||||
if acl_content.deny_contains("*")
|
||||
&& !acl_content.is_allowed(services.globals.server_name())
|
||||
&& !acl_content.allow_contains(services.globals.server_name().as_str())
|
||||
{
|
||||
return Err!(Request(BadJson(debug_warn!(
|
||||
?room_id,
|
||||
|
@ -238,8 +241,9 @@ async fn allowed_to_send_state_event(
|
|||
))));
|
||||
}
|
||||
|
||||
if !acl_content.allow.contains(&String::from("*"))
|
||||
if !acl_content.allow_contains("*")
|
||||
&& !acl_content.is_allowed(services.globals.server_name())
|
||||
&& !acl_content.allow_contains(services.globals.server_name().as_str())
|
||||
{
|
||||
return Err!(Request(BadJson(debug_warn!(
|
||||
?room_id,
|
||||
|
|
|
@ -3,12 +3,14 @@ mod v4;
|
|||
mod v5;
|
||||
|
||||
use conduwuit::{
|
||||
PduCount,
|
||||
Error, PduCount, Result,
|
||||
matrix::pdu::PduEvent,
|
||||
utils::{
|
||||
IterStream,
|
||||
stream::{BroadbandExt, ReadyExt, TryIgnore},
|
||||
},
|
||||
};
|
||||
use conduwuit_service::Services;
|
||||
use futures::{StreamExt, pin_mut};
|
||||
use ruma::{
|
||||
RoomId, UserId,
|
||||
|
@ -21,7 +23,6 @@ use ruma::{
|
|||
pub(crate) use self::{
|
||||
v3::sync_events_route, v4::sync_events_v4_route, v5::sync_events_v5_route,
|
||||
};
|
||||
use crate::{Error, PduEvent, Result, service::Services};
|
||||
|
||||
pub(crate) const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] =
|
||||
&[CallInvite, PollStart, Beacon, RoomEncrypted, RoomMessage, Sticker];
|
||||
|
|
|
@ -6,15 +6,20 @@ use std::{
|
|||
|
||||
use axum::extract::State;
|
||||
use conduwuit::{
|
||||
PduCount, PduEvent, Result, at, err, error, extract_variant, is_equal_to, pair_of,
|
||||
pdu::{Event, EventHash},
|
||||
ref_at,
|
||||
Result, at, err, error, extract_variant, is_equal_to,
|
||||
matrix::{
|
||||
Event,
|
||||
pdu::{EventHash, PduCount, PduEvent},
|
||||
},
|
||||
pair_of, ref_at,
|
||||
result::FlatOk,
|
||||
utils::{
|
||||
self, BoolExt, IterStream, ReadyExt, TryFutureExtExt,
|
||||
future::OptionStream,
|
||||
math::ruma_from_u64,
|
||||
stream::{BroadbandExt, Tools, TryExpect, WidebandExt},
|
||||
},
|
||||
warn,
|
||||
};
|
||||
use conduwuit_service::{
|
||||
Services,
|
||||
|
@ -118,7 +123,7 @@ pub(crate) async fn sync_events_route(
|
|||
let (sender_user, sender_device) = body.sender();
|
||||
|
||||
// Presence update
|
||||
if services.globals.allow_local_presence() {
|
||||
if services.config.allow_local_presence {
|
||||
services
|
||||
.presence
|
||||
.ping_presence(sender_user, &body.body.set_presence)
|
||||
|
@ -279,8 +284,8 @@ pub(crate) async fn build_sync_events(
|
|||
});
|
||||
|
||||
let presence_updates: OptionFuture<_> = services
|
||||
.globals
|
||||
.allow_local_presence()
|
||||
.config
|
||||
.allow_local_presence
|
||||
.then(|| process_presence_updates(services, since, sender_user))
|
||||
.into();
|
||||
|
||||
|
@ -428,9 +433,12 @@ async fn handle_left_room(
|
|||
return Ok(None);
|
||||
}
|
||||
|
||||
if !services.rooms.metadata.exists(room_id).await {
|
||||
if !services.rooms.metadata.exists(room_id).await
|
||||
|| services.rooms.metadata.is_disabled(room_id).await
|
||||
|| services.rooms.metadata.is_banned(room_id).await
|
||||
{
|
||||
// This is just a rejected invite, not a room we know
|
||||
// Insert a leave event anyways
|
||||
// Insert a leave event anyways for the client
|
||||
let event = PduEvent {
|
||||
event_id: EventId::new(services.globals.server_name()),
|
||||
sender: sender_user.to_owned(),
|
||||
|
@ -461,7 +469,7 @@ async fn handle_left_room(
|
|||
events: Vec::new(),
|
||||
},
|
||||
state: RoomState {
|
||||
events: vec![event.to_sync_state_event()],
|
||||
events: vec![event.into_sync_state_event()],
|
||||
},
|
||||
}));
|
||||
}
|
||||
|
@ -489,7 +497,7 @@ async fn handle_left_room(
|
|||
.room_state_get_id(room_id, &StateEventType::RoomMember, sender_user.as_str())
|
||||
.await
|
||||
else {
|
||||
error!("Left room but no left state event");
|
||||
warn!("Left {room_id} but no left state event");
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
|
@ -499,7 +507,7 @@ async fn handle_left_room(
|
|||
.pdu_shortstatehash(&left_event_id)
|
||||
.await
|
||||
else {
|
||||
error!(event_id = %left_event_id, "Leave event has no state");
|
||||
warn!(event_id = %left_event_id, "Leave event has no state in {room_id}");
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
|
@ -546,7 +554,7 @@ async fn handle_left_room(
|
|||
continue;
|
||||
}
|
||||
|
||||
left_state_events.push(pdu.to_sync_state_event());
|
||||
left_state_events.push(pdu.into_sync_state_event());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -865,8 +873,8 @@ async fn load_joined_room(
|
|||
},
|
||||
state: RoomState {
|
||||
events: state_events
|
||||
.iter()
|
||||
.map(PduEvent::to_sync_state_event)
|
||||
.into_iter()
|
||||
.map(PduEvent::into_sync_state_event)
|
||||
.collect(),
|
||||
},
|
||||
ephemeral: Ephemeral { events: edus },
|
||||
|
@ -1029,7 +1037,7 @@ async fn calculate_state_incremental<'a>(
|
|||
})
|
||||
.into();
|
||||
|
||||
let state_diff: OptionFuture<_> = (!full_state && state_changed)
|
||||
let state_diff_ids: OptionFuture<_> = (!full_state && state_changed)
|
||||
.then(|| {
|
||||
StreamExt::into_future(
|
||||
services
|
||||
|
@ -1054,45 +1062,9 @@ async fn calculate_state_incremental<'a>(
|
|||
})
|
||||
.into();
|
||||
|
||||
let lazy_state_ids = lazy_state_ids
|
||||
.map(|opt| {
|
||||
opt.map(|(curr, next)| {
|
||||
let opt = curr;
|
||||
let iter = Option::into_iter(opt);
|
||||
IterStream::stream(iter).chain(next)
|
||||
})
|
||||
})
|
||||
.map(Option::into_iter)
|
||||
.map(IterStream::stream)
|
||||
.flatten_stream()
|
||||
.flatten();
|
||||
|
||||
let state_diff_ids = state_diff
|
||||
.map(|opt| {
|
||||
opt.map(|(curr, next)| {
|
||||
let opt = curr;
|
||||
let iter = Option::into_iter(opt);
|
||||
IterStream::stream(iter).chain(next)
|
||||
})
|
||||
})
|
||||
.map(Option::into_iter)
|
||||
.map(IterStream::stream)
|
||||
.flatten_stream()
|
||||
.flatten();
|
||||
|
||||
let state_events = current_state_ids
|
||||
.map(|opt| {
|
||||
opt.map(|(curr, next)| {
|
||||
let opt = curr;
|
||||
let iter = Option::into_iter(opt);
|
||||
IterStream::stream(iter).chain(next)
|
||||
})
|
||||
})
|
||||
.map(Option::into_iter)
|
||||
.map(IterStream::stream)
|
||||
.flatten_stream()
|
||||
.flatten()
|
||||
.chain(state_diff_ids)
|
||||
.stream()
|
||||
.chain(state_diff_ids.stream())
|
||||
.broad_filter_map(|(shortstatekey, shorteventid)| async move {
|
||||
if witness.is_none() || encrypted_room {
|
||||
return Some(shorteventid);
|
||||
|
@ -1100,7 +1072,7 @@ async fn calculate_state_incremental<'a>(
|
|||
|
||||
lazy_filter(services, sender_user, shortstatekey, shorteventid).await
|
||||
})
|
||||
.chain(lazy_state_ids)
|
||||
.chain(lazy_state_ids.stream())
|
||||
.broad_filter_map(|shorteventid| {
|
||||
services
|
||||
.rooms
|
||||
|
|
|
@ -6,7 +6,7 @@ use std::{
|
|||
|
||||
use axum::extract::State;
|
||||
use conduwuit::{
|
||||
Error, PduCount, Result, debug, error, extract_variant,
|
||||
Error, PduCount, PduEvent, Result, debug, error, extract_variant,
|
||||
utils::{
|
||||
BoolExt, IterStream, ReadyExt, TryFutureExtExt,
|
||||
math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated},
|
||||
|
@ -438,7 +438,10 @@ pub(crate) async fn sync_events_v4_route(
|
|||
|
||||
let mut known_subscription_rooms = BTreeSet::new();
|
||||
for (room_id, room) in &body.room_subscriptions {
|
||||
if !services.rooms.metadata.exists(room_id).await {
|
||||
if !services.rooms.metadata.exists(room_id).await
|
||||
|| services.rooms.metadata.is_disabled(room_id).await
|
||||
|| services.rooms.metadata.is_banned(room_id).await
|
||||
{
|
||||
continue;
|
||||
}
|
||||
let todo_room =
|
||||
|
@ -634,7 +637,7 @@ pub(crate) async fn sync_events_v4_route(
|
|||
.state_accessor
|
||||
.room_state_get(room_id, &state.0, &state.1)
|
||||
.await
|
||||
.map(|s| s.to_sync_state_event())
|
||||
.map(PduEvent::into_sync_state_event)
|
||||
.ok()
|
||||
})
|
||||
.collect()
|
||||
|
|
|
@ -6,13 +6,19 @@ use std::{
|
|||
|
||||
use axum::extract::State;
|
||||
use conduwuit::{
|
||||
Error, Result, TypeStateKey, debug, error, extract_variant, trace,
|
||||
Error, Result, debug, error, extract_variant,
|
||||
matrix::{
|
||||
TypeStateKey,
|
||||
pdu::{PduCount, PduEvent},
|
||||
},
|
||||
trace,
|
||||
utils::{
|
||||
BoolExt, IterStream, ReadyExt, TryFutureExtExt,
|
||||
math::{ruma_from_usize, usize_from_ruma},
|
||||
},
|
||||
warn,
|
||||
};
|
||||
use conduwuit_service::rooms::read_receipt::pack_receipts;
|
||||
use futures::{FutureExt, StreamExt, TryFutureExt};
|
||||
use ruma::{
|
||||
DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId,
|
||||
|
@ -27,7 +33,6 @@ use ruma::{
|
|||
serde::Raw,
|
||||
uint,
|
||||
};
|
||||
use service::{PduCount, rooms::read_receipt::pack_receipts};
|
||||
|
||||
use super::{filter_rooms, share_encrypted_room};
|
||||
use crate::{
|
||||
|
@ -214,7 +219,10 @@ async fn fetch_subscriptions(
|
|||
) {
|
||||
let mut known_subscription_rooms = BTreeSet::new();
|
||||
for (room_id, room) in &body.room_subscriptions {
|
||||
if !services.rooms.metadata.exists(room_id).await {
|
||||
if !services.rooms.metadata.exists(room_id).await
|
||||
|| services.rooms.metadata.is_disabled(room_id).await
|
||||
|| services.rooms.metadata.is_banned(room_id).await
|
||||
{
|
||||
continue;
|
||||
}
|
||||
let todo_room =
|
||||
|
@ -507,7 +515,7 @@ async fn process_rooms(
|
|||
.state_accessor
|
||||
.room_state_get(room_id, &state.0, &state.1)
|
||||
.await
|
||||
.map(|s| s.to_sync_state_event())
|
||||
.map(PduEvent::into_sync_state_event)
|
||||
.ok()
|
||||
})
|
||||
.collect()
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
use std::collections::BTreeMap;
|
||||
|
||||
use axum::extract::State;
|
||||
use conduwuit::Result;
|
||||
use ruma::{
|
||||
api::client::tag::{create_tag, delete_tag, get_tags},
|
||||
events::{
|
||||
|
@ -9,7 +10,7 @@ use ruma::{
|
|||
},
|
||||
};
|
||||
|
||||
use crate::{Result, Ruma};
|
||||
use crate::Ruma;
|
||||
|
||||
/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
|
||||
///
|
||||
|
|
|
@ -1,8 +1,9 @@
|
|||
use std::collections::BTreeMap;
|
||||
|
||||
use conduwuit::Result;
|
||||
use ruma::api::client::thirdparty::get_protocols;
|
||||
|
||||
use crate::{Result, Ruma, RumaResponse};
|
||||
use crate::{Ruma, RumaResponse};
|
||||
|
||||
/// # `GET /_matrix/client/r0/thirdparty/protocols`
|
||||
///
|
||||
|
|
|
@ -1,9 +1,12 @@
|
|||
use axum::extract::State;
|
||||
use conduwuit::{PduCount, PduEvent, at};
|
||||
use conduwuit::{
|
||||
Result, at,
|
||||
matrix::pdu::{PduCount, PduEvent},
|
||||
};
|
||||
use futures::StreamExt;
|
||||
use ruma::{api::client::threads::get_threads, uint};
|
||||
|
||||
use crate::{Result, Ruma};
|
||||
use crate::Ruma;
|
||||
|
||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/threads`
|
||||
pub(crate) async fn get_threads_route(
|
||||
|
@ -53,7 +56,7 @@ pub(crate) async fn get_threads_route(
|
|||
chunk: threads
|
||||
.into_iter()
|
||||
.map(at!(1))
|
||||
.map(|pdu| pdu.to_room_event())
|
||||
.map(PduEvent::into_room_event)
|
||||
.collect(),
|
||||
})
|
||||
}
|
||||
|
|
|
@ -2,6 +2,7 @@ use std::collections::BTreeMap;
|
|||
|
||||
use axum::extract::State;
|
||||
use conduwuit::{Error, Result};
|
||||
use conduwuit_service::sending::EduBuf;
|
||||
use futures::StreamExt;
|
||||
use ruma::{
|
||||
api::{
|
||||
|
@ -10,7 +11,6 @@ use ruma::{
|
|||
},
|
||||
to_device::DeviceIdOrAllDevices,
|
||||
};
|
||||
use service::sending::EduBuf;
|
||||
|
||||
use crate::Ruma;
|
||||
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
use axum::extract::State;
|
||||
use conduwuit::{Err, utils::math::Tried};
|
||||
use conduwuit::{Err, Result, utils, utils::math::Tried};
|
||||
use ruma::api::client::typing::create_typing_event;
|
||||
|
||||
use crate::{Result, Ruma, utils};
|
||||
use crate::Ruma;
|
||||
|
||||
/// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}`
|
||||
///
|
||||
|
@ -64,7 +64,7 @@ pub(crate) async fn create_typing_event_route(
|
|||
}
|
||||
|
||||
// ping presence
|
||||
if services.globals.allow_local_presence() {
|
||||
if services.config.allow_local_presence {
|
||||
services
|
||||
.presence
|
||||
.ping_presence(&body.user_id, &ruma::presence::PresenceState::Online)
|
||||
|
|
|
@ -2,7 +2,7 @@ use std::collections::BTreeMap;
|
|||
|
||||
use axum::extract::State;
|
||||
use axum_client_ip::InsecureClientIp;
|
||||
use conduwuit::Err;
|
||||
use conduwuit::{Err, Error, Result};
|
||||
use futures::StreamExt;
|
||||
use ruma::{
|
||||
OwnedRoomId,
|
||||
|
@ -14,16 +14,14 @@ use ruma::{
|
|||
delete_profile_key, delete_timezone_key, get_profile_key, get_timezone_key,
|
||||
set_profile_key, set_timezone_key,
|
||||
},
|
||||
room::get_summary,
|
||||
},
|
||||
federation,
|
||||
},
|
||||
events::room::member::MembershipState,
|
||||
presence::PresenceState,
|
||||
};
|
||||
|
||||
use super::{update_avatar_url, update_displayname};
|
||||
use crate::{Error, Result, Ruma, RumaResponse};
|
||||
use crate::Ruma;
|
||||
|
||||
/// # `GET /_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`
|
||||
///
|
||||
|
@ -38,13 +36,10 @@ pub(crate) async fn get_mutual_rooms_route(
|
|||
InsecureClientIp(client): InsecureClientIp,
|
||||
body: Ruma<mutual_rooms::unstable::Request>,
|
||||
) -> Result<mutual_rooms::unstable::Response> {
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
let sender_user = body.sender_user();
|
||||
|
||||
if sender_user == &body.user_id {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Unknown,
|
||||
"You cannot request rooms in common with yourself.",
|
||||
));
|
||||
if sender_user == body.user_id {
|
||||
return Err!(Request(Unknown("You cannot request rooms in common with yourself.")));
|
||||
}
|
||||
|
||||
if !services.users.exists(&body.user_id).await {
|
||||
|
@ -65,129 +60,6 @@ pub(crate) async fn get_mutual_rooms_route(
|
|||
})
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/unstable/im.nheko.summary/rooms/{roomIdOrAlias}/summary`
|
||||
///
|
||||
/// Returns a short description of the state of a room.
|
||||
///
|
||||
/// This is the "wrong" endpoint that some implementations/clients may use
|
||||
/// according to the MSC. Request and response bodies are the same as
|
||||
/// `get_room_summary`.
|
||||
///
|
||||
/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266)
|
||||
pub(crate) async fn get_room_summary_legacy(
|
||||
State(services): State<crate::State>,
|
||||
InsecureClientIp(client): InsecureClientIp,
|
||||
body: Ruma<get_summary::msc3266::Request>,
|
||||
) -> Result<RumaResponse<get_summary::msc3266::Response>> {
|
||||
get_room_summary(State(services), InsecureClientIp(client), body)
|
||||
.await
|
||||
.map(RumaResponse)
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}`
|
||||
///
|
||||
/// Returns a short description of the state of a room.
|
||||
///
|
||||
/// TODO: support fetching remote room info if we don't know the room
|
||||
///
|
||||
/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266)
|
||||
#[tracing::instrument(skip_all, fields(%client), name = "room_summary")]
|
||||
pub(crate) async fn get_room_summary(
|
||||
State(services): State<crate::State>,
|
||||
InsecureClientIp(client): InsecureClientIp,
|
||||
body: Ruma<get_summary::msc3266::Request>,
|
||||
) -> Result<get_summary::msc3266::Response> {
|
||||
let sender_user = body.sender_user.as_ref();
|
||||
|
||||
let room_id = services.rooms.alias.resolve(&body.room_id_or_alias).await?;
|
||||
|
||||
if !services.rooms.metadata.exists(&room_id).await {
|
||||
return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server"));
|
||||
}
|
||||
|
||||
if sender_user.is_none()
|
||||
&& !services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.is_world_readable(&room_id)
|
||||
.await
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::forbidden(),
|
||||
"Room is not world readable, authentication is required",
|
||||
));
|
||||
}
|
||||
|
||||
Ok(get_summary::msc3266::Response {
|
||||
room_id: room_id.clone(),
|
||||
canonical_alias: services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_canonical_alias(&room_id)
|
||||
.await
|
||||
.ok(),
|
||||
avatar_url: services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_avatar(&room_id)
|
||||
.await
|
||||
.into_option()
|
||||
.unwrap_or_default()
|
||||
.url,
|
||||
guest_can_join: services.rooms.state_accessor.guest_can_join(&room_id).await,
|
||||
name: services.rooms.state_accessor.get_name(&room_id).await.ok(),
|
||||
num_joined_members: services
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_joined_count(&room_id)
|
||||
.await
|
||||
.unwrap_or(0)
|
||||
.try_into()?,
|
||||
topic: services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_room_topic(&room_id)
|
||||
.await
|
||||
.ok(),
|
||||
world_readable: services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.is_world_readable(&room_id)
|
||||
.await,
|
||||
join_rule: services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_join_rule(&room_id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
.0,
|
||||
room_type: services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_room_type(&room_id)
|
||||
.await
|
||||
.ok(),
|
||||
room_version: services.rooms.state.get_room_version(&room_id).await.ok(),
|
||||
membership: if let Some(sender_user) = sender_user {
|
||||
services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_member(&room_id, sender_user)
|
||||
.await
|
||||
.map_or_else(|_| MembershipState::Leave, |content| content.membership)
|
||||
.into()
|
||||
} else {
|
||||
None
|
||||
},
|
||||
encryption: services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_room_encryption(&room_id)
|
||||
.await
|
||||
.ok(),
|
||||
})
|
||||
}
|
||||
|
||||
/// # `DELETE /_matrix/client/unstable/uk.tcpip.msc4133/profile/:user_id/us.cloke.msc4175.tz`
///
/// Deletes the `tz` (timezone) of a user, as per MSC4133 and MSC4175.
@@ -205,7 +77,7 @@ pub(crate) async fn delete_timezone_key_route(

    services.users.set_timezone(&body.user_id, None);

    if services.globals.allow_local_presence() {
    if services.config.allow_local_presence {
        // Presence update
        services
            .presence

@@ -233,7 +105,7 @@ pub(crate) async fn set_timezone_key_route(

    services.users.set_timezone(&body.user_id, body.tz.clone());

    if services.globals.allow_local_presence() {
    if services.config.allow_local_presence {
        // Presence update
        services
            .presence

@@ -326,7 +198,7 @@ pub(crate) async fn set_profile_key_route(
        );
    }

    if services.globals.allow_local_presence() {
    if services.config.allow_local_presence {
        // Presence update
        services
            .presence

@@ -385,7 +257,7 @@ pub(crate) async fn delete_profile_key_route(
        .set_profile_key(&body.user_id, &body.key_name, None);
    }

    if services.globals.allow_local_presence() {
    if services.config.allow_local_presence {
        // Presence update
        services
            .presence
@@ -1,10 +1,11 @@
use std::collections::BTreeMap;

use axum::{Json, extract::State, response::IntoResponse};
use conduwuit::Result;
use futures::StreamExt;
use ruma::api::client::discovery::get_supported_versions;

use crate::{Result, Ruma};
use crate::Ruma;

/// # `GET /_matrix/client/versions`
///
@@ -1,15 +1,19 @@
use axum::extract::State;
use conduwuit::utils::TryFutureExtExt;
use futures::{StreamExt, pin_mut};
use conduwuit::{
    Result,
    utils::{future::BoolExt, stream::BroadbandExt},
};
use futures::{FutureExt, StreamExt, pin_mut};
use ruma::{
    api::client::user_directory::search_users,
    events::{
        StateEventType,
        room::join_rules::{JoinRule, RoomJoinRulesEventContent},
    },
    api::client::user_directory::search_users::{self},
    events::room::join_rules::JoinRule,
};

use crate::{Result, Ruma};
use crate::Ruma;

// conduwuit can handle a lot more results than synapse
const LIMIT_MAX: usize = 500;
const LIMIT_DEFAULT: usize = 10;

/// # `POST /_matrix/client/r0/user_directory/search`
///

@@ -21,78 +25,63 @@ pub(crate) async fn search_users_route(
    State(services): State<crate::State>,
    body: Ruma<search_users::v3::Request>,
) -> Result<search_users::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
    let limit = usize::try_from(body.limit).map_or(10, usize::from).min(100); // default limit is 10
    let sender_user = body.sender_user();
    let limit = usize::try_from(body.limit)
        .map_or(LIMIT_DEFAULT, usize::from)
        .min(LIMIT_MAX);

    let users = services.users.stream().filter_map(|user_id| async {
        // Filter out buggy users (they should not exist, but you never know...)
    let mut users = services
        .users
        .stream()
        .map(ToOwned::to_owned)
        .broad_filter_map(async |user_id| {
            let user = search_users::v3::User {
                user_id: user_id.to_owned(),
                display_name: services.users.displayname(user_id).await.ok(),
                avatar_url: services.users.avatar_url(user_id).await.ok(),
                user_id: user_id.clone(),
                display_name: services.users.displayname(&user_id).await.ok(),
                avatar_url: services.users.avatar_url(&user_id).await.ok(),
            };

            let user_id_matches = user
                .user_id
                .to_string()
                .as_str()
                .to_lowercase()
                .contains(&body.search_term.to_lowercase());

            let user_displayname_matches = user
                .display_name
                .as_ref()
                .filter(|name| {
            let user_displayname_matches = user.display_name.as_ref().is_some_and(|name| {
                name.to_lowercase()
                    .contains(&body.search_term.to_lowercase())
                })
                .is_some();
            });

            if !user_id_matches && !user_displayname_matches {
                return None;
            }

            // It's a matching user, but is the sender allowed to see them?
            let mut user_visible = false;

            let user_is_in_public_rooms = services
            let user_in_public_room = services
                .rooms
                .state_cache
                .rooms_joined(&user.user_id)
                .any(|room| {
                .rooms_joined(&user_id)
                .map(ToOwned::to_owned)
                .any(|room| async move {
                    services
                        .rooms
                        .state_accessor
                        .room_state_get_content::<RoomJoinRulesEventContent>(
                            room,
                            &StateEventType::RoomJoinRules,
                            "",
                        )
                        .map_ok_or(false, |content| content.join_rule == JoinRule::Public)
                })
                .await;

            if user_is_in_public_rooms {
                user_visible = true;
            } else {
                let user_is_in_shared_rooms = services
                    .rooms
                    .state_cache
                    .user_sees_user(sender_user, &user.user_id)
                    .await;

                if user_is_in_shared_rooms {
                    user_visible = true;
                }
            }

            user_visible.then_some(user)
                        .get_join_rules(&room)
                        .map(|rule| matches!(rule, JoinRule::Public))
                        .await
                });

    pin_mut!(users);
            let user_sees_user = services
                .rooms
                .state_cache
                .user_sees_user(sender_user, &user_id);

    let limited = users.by_ref().next().await.is_some();
            pin_mut!(user_in_public_room, user_sees_user);

    let results = users.take(limit).collect().await;
            user_in_public_room.or(user_sees_user).await.then_some(user)
        });

    let results = users.by_ref().take(limit).collect().await;
    let limited = users.next().await.is_some();

    Ok(search_users::v3::Response { results, limited })
}
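The `take(limit)` / `next()` sequence at the end is the whole pagination story: collect up to `limit` matches, then poll once more to learn whether the result set was truncated. A standalone sketch of the same pattern on a plain numeric stream (tokio-free, using `futures::executor`; the numbers are made up):

```rust
use futures::{StreamExt, executor::block_on, pin_mut, stream};

fn main() {
    block_on(async {
        const LIMIT: usize = 3;
        let users = stream::iter(1..=10); // stand-in for the user-directory stream
        pin_mut!(users);

        // Take up to LIMIT results, then probe for one more element to decide
        // whether the response should be flagged as limited.
        let results: Vec<_> = users.by_ref().take(LIMIT).collect().await;
        let limited = users.next().await.is_some();

        assert_eq!(results, vec![1, 2, 3]);
        assert!(limited); // more matches existed beyond the limit
    });
}
```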
@@ -2,12 +2,12 @@ use std::time::{Duration, SystemTime};

use axum::extract::State;
use base64::{Engine as _, engine::general_purpose};
use conduwuit::{Err, utils};
use conduwuit::{Err, Result, utils};
use hmac::{Hmac, Mac};
use ruma::{SecondsSinceUnixEpoch, UserId, api::client::voip::get_turn_server_info};
use sha1::Sha1;

use crate::{Result, Ruma};
use crate::Ruma;

const RANDOM_USER_ID_LENGTH: usize = 10;
@@ -1,4 +1,5 @@
use axum::{Json, extract::State, response::IntoResponse};
use conduwuit::{Error, Result};
use ruma::api::client::{
    discovery::{
        discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo},

@@ -7,7 +8,7 @@ use ruma::api::client::{
    error::ErrorKind,
};

use crate::{Error, Result, Ruma};
use crate::Ruma;

/// # `GET /.well-known/matrix/client`
///
@@ -1,3 +1,4 @@
#![type_length_limit = "16384"] //TODO: reduce me
#![allow(clippy::toplevel_ref_arg)]

pub mod client;

@@ -7,8 +8,6 @@ pub mod server;
extern crate conduwuit_core as conduwuit;
extern crate conduwuit_service as service;

pub(crate) use conduwuit::{Error, Result, debug_info, pdu::PduEvent, utils};

pub(crate) use self::router::{Ruma, RumaResponse, State};

conduwuit::mod_ctor! {}
@@ -1,6 +1,7 @@
use std::{mem, ops::Deref};

use axum::{async_trait, body::Body, extract::FromRequest};
use async_trait::async_trait;
use axum::{body::Body, extract::FromRequest};
use bytes::{BufMut, Bytes, BytesMut};
use conduwuit::{Error, Result, debug, debug_warn, err, trace, utils::string::EMPTY};
use ruma::{
@@ -317,10 +317,9 @@ fn auth_server_checks(services: &Services, x_matrix: &XMatrix) -> Result<()> {

    let origin = &x_matrix.origin;
    if services
        .server
        .config
        .forbidden_remote_server_names
        .contains(origin)
        .is_match(origin.host())
    {
        return Err!(Request(Forbidden(debug_warn!(
            "Federation requests from {origin} denied."
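`forbidden_remote_server_names` is no longer an exact-match `HashSet<OwnedServerName>` but a `RegexSet`, and the check now runs against the origin's host. A small sketch of what that buys, using the illustrative patterns from the config documentation rather than anything shipped by default:

```rust
use regex::RegexSet;

fn main() {
    let forbidden = RegexSet::new([r"badserver\.tld$", "badphrase"]).unwrap();

    // The X-Matrix origin's host is what gets matched, so subdomains and
    // substring hits are caught too, unlike the old exact HashSet lookup.
    assert!(forbidden.is_match("badserver.tld"));
    assert!(forbidden.is_match("federation.badserver.tld"));
    assert!(forbidden.is_match("join-badphrase-here.example"));
    assert!(!forbidden.is_match("matrix.org"));
}
```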
@@ -6,11 +6,17 @@ use conduwuit::{
    utils::{IterStream, ReadyExt, stream::TryTools},
};
use futures::{FutureExt, StreamExt, TryStreamExt};
use ruma::{MilliSecondsSinceUnixEpoch, api::federation::backfill::get_backfill, uint};
use ruma::{MilliSecondsSinceUnixEpoch, api::federation::backfill::get_backfill};

use super::AccessCheck;
use crate::Ruma;

/// arbitrary number but synapse's is 100 and we can handle lots of these
/// anyways
const LIMIT_MAX: usize = 150;
/// no spec defined number but we can handle a lot of these
const LIMIT_DEFAULT: usize = 50;

/// # `GET /_matrix/federation/v1/backfill/<room_id>`
///
/// Retrieves events from before the sender joined the room, if the room's

@@ -30,9 +36,9 @@ pub(crate) async fn get_backfill_route(

    let limit = body
        .limit
        .min(uint!(100))
        .try_into()
        .expect("UInt could not be converted to usize");
        .unwrap_or(LIMIT_DEFAULT)
        .min(LIMIT_MAX);

    let from = body
        .v
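The old code clamped the requested limit to 100 and then `expect`ed the `UInt`-to-`usize` conversion; the new code lets a failed conversion fall back to the default instead of panicking. The same chain, pulled out as a sketch with constants mirroring the ones above:

```rust
use ruma::UInt;

const LIMIT_MAX: usize = 150;
const LIMIT_DEFAULT: usize = 50;

// Falls back to LIMIT_DEFAULT if the requested limit does not fit in usize,
// then caps it at LIMIT_MAX; there is no panic path either way.
fn clamp_limit(limit: UInt) -> usize {
    limit.try_into().unwrap_or(LIMIT_DEFAULT).min(LIMIT_MAX)
}
```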
@@ -1,13 +1,15 @@
use axum::extract::State;
use conduwuit::{Error, Result};
use ruma::{
    CanonicalJsonValue, EventId, RoomId,
    api::{client::error::ErrorKind, federation::event::get_missing_events},
};
use conduwuit::{Result, debug, debug_error, utils::to_canonical_object};
use ruma::api::federation::event::get_missing_events;

use super::AccessCheck;
use crate::Ruma;

/// arbitrary number but synapse's is 20 and we can handle lots of these anyways
const LIMIT_MAX: usize = 50;
/// spec says default is 10
const LIMIT_DEFAULT: usize = 10;

/// # `POST /_matrix/federation/v1/get_missing_events/{roomId}`
///
/// Retrieves events that the sender is missing.

@@ -24,7 +26,11 @@ pub(crate) async fn get_missing_events_route(
        .check()
        .await?;

    let limit = body.limit.try_into()?;
    let limit = body
        .limit
        .try_into()
        .unwrap_or(LIMIT_DEFAULT)
        .min(LIMIT_MAX);

    let mut queued_events = body.latest_events.clone();
    // the vec will never have more entries than the limit

@@ -32,23 +38,14 @@ pub(crate) async fn get_missing_events_route(

    let mut i: usize = 0;
    while i < queued_events.len() && events.len() < limit {
        if let Ok(pdu) = services
            .rooms
            .timeline
            .get_pdu_json(&queued_events[i])
            .await
        {
            let room_id_str = pdu
                .get("room_id")
                .and_then(|val| val.as_str())
                .ok_or_else(|| Error::bad_database("Invalid event in database."))?;

            let event_room_id = <&RoomId>::try_from(room_id_str)
                .map_err(|_| Error::bad_database("Invalid room_id in event in database."))?;

            if event_room_id != body.room_id {
                return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event from wrong room."));
            }
        let Ok(pdu) = services.rooms.timeline.get_pdu(&queued_events[i]).await else {
            debug!(
                ?body.origin,
                "Event {} does not exist locally, skipping", &queued_events[i]
            );
            i = i.saturating_add(1);
            continue;
        };

        if body.earliest_events.contains(&queued_events[i]) {
            i = i.saturating_add(1);

@@ -61,31 +58,32 @@ pub(crate) async fn get_missing_events_route(
            .server_can_see_event(body.origin(), &body.room_id, &queued_events[i])
            .await
        {
            debug!(
                ?body.origin,
                "Server cannot see {:?} in {:?}, skipping", pdu.event_id, pdu.room_id
            );
            i = i.saturating_add(1);
            continue;
        }

        let prev_events = pdu
            .get("prev_events")
            .and_then(CanonicalJsonValue::as_array)
            .unwrap_or_default();

        queued_events.extend(
            prev_events
                .iter()
                .map(<&EventId>::try_from)
                .filter_map(Result::ok)
                .map(ToOwned::to_owned),
        let Ok(event) = to_canonical_object(&pdu) else {
            debug_error!(
                ?body.origin,
                "Failed to convert PDU in database to canonical JSON: {pdu:?}"
            );

            events.push(
                services
                    .sending
                    .convert_to_outgoing_federation_event(pdu)
                    .await,
            );
        }
            i = i.saturating_add(1);
            continue;
        };

        let prev_events = pdu.prev_events.iter().map(ToOwned::to_owned);

        let event = services
            .sending
            .convert_to_outgoing_federation_event(event)
            .await;

        queued_events.extend(prev_events);
        events.push(event);
    }

    Ok(get_missing_events::v1::Response { events })
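Stripped of the service plumbing and the visibility checks, the handler above is a bounded breadth-first walk backwards through `prev_events`. A toy model of that walk, where the store and event type are stand-ins rather than conduwuit's own:

```rust
use std::collections::HashMap;

// Stand-in event: only the backwards edges matter for the walk.
struct Event {
    prev_events: Vec<String>,
}

// Starting from `latest`, repeatedly fetch an event, emit it, and queue its
// prev_events, skipping events the requester already has (`earliest`) or that
// we do not know, and stopping once `limit` events have been collected.
fn walk_missing(
    store: &HashMap<String, Event>,
    latest: &[String],
    earliest: &[String],
    limit: usize,
) -> Vec<String> {
    let mut queued: Vec<String> = latest.to_vec();
    let mut events = Vec::new();
    let mut i = 0;

    while i < queued.len() && events.len() < limit {
        let id = queued[i].clone();
        i += 1;

        // Unknown locally or already known to the requester: skip.
        let Some(event) = store.get(&id) else { continue };
        if earliest.contains(&id) {
            continue;
        }

        queued.extend(event.prev_events.iter().cloned());
        events.push(id);
    }

    events
}
```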
@@ -3,9 +3,11 @@ use conduwuit::{
    Err, Result,
    utils::stream::{BroadbandExt, IterStream},
};
use conduwuit_service::rooms::spaces::{
    Identifier, SummaryAccessibility, get_parent_children_via,
};
use futures::{FutureExt, StreamExt};
use ruma::api::federation::space::get_hierarchy;
use service::rooms::spaces::{Identifier, SummaryAccessibility, get_parent_children_via};

use crate::Ruma;
@ -1,14 +1,15 @@
|
|||
use axum::extract::State;
|
||||
use axum_client_ip::InsecureClientIp;
|
||||
use base64::{Engine as _, engine::general_purpose};
|
||||
use conduwuit::{Err, Error, PduEvent, Result, err, utils, utils::hash::sha256, warn};
|
||||
use conduwuit::{
|
||||
Err, Error, PduEvent, Result, err, pdu::gen_event_id, utils, utils::hash::sha256, warn,
|
||||
};
|
||||
use ruma::{
|
||||
CanonicalJsonValue, OwnedUserId, UserId,
|
||||
api::{client::error::ErrorKind, federation::membership::create_invite},
|
||||
events::room::member::{MembershipState, RoomMemberEventContent},
|
||||
serde::JsonObject,
|
||||
};
|
||||
use service::pdu::gen_event_id;
|
||||
|
||||
use crate::Ruma;
|
||||
|
||||
|
@ -37,20 +38,18 @@ pub(crate) async fn create_invite_route(
|
|||
|
||||
if let Some(server) = body.room_id.server_name() {
|
||||
if services
|
||||
.server
|
||||
.config
|
||||
.forbidden_remote_server_names
|
||||
.contains(&server.to_owned())
|
||||
.is_match(server.host())
|
||||
{
|
||||
return Err!(Request(Forbidden("Server is banned on this homeserver.")));
|
||||
}
|
||||
}
|
||||
|
||||
if services
|
||||
.server
|
||||
.config
|
||||
.forbidden_remote_server_names
|
||||
.contains(body.origin())
|
||||
.is_match(body.origin().host())
|
||||
{
|
||||
warn!(
|
||||
"Received federated/remote invite from banned server {} for room ID {}. Rejecting.",
|
||||
|
@ -103,8 +102,7 @@ pub(crate) async fn create_invite_route(
|
|||
return Err!(Request(Forbidden("This room is banned on this homeserver.")));
|
||||
}
|
||||
|
||||
if services.globals.block_non_admin_invites() && !services.users.is_admin(&invited_user).await
|
||||
{
|
||||
if services.config.block_non_admin_invites && !services.users.is_admin(&invited_user).await {
|
||||
return Err!(Request(Forbidden("This server does not allow room invites.")));
|
||||
}
|
||||
|
||||
|
|
|
@ -1,5 +1,8 @@
|
|||
use axum::extract::State;
|
||||
use conduwuit::{Err, debug_info, utils::IterStream, warn};
|
||||
use conduwuit::{
|
||||
Err, Error, Result, debug_info, matrix::pdu::PduBuilder, utils::IterStream, warn,
|
||||
};
|
||||
use conduwuit_service::Services;
|
||||
use futures::StreamExt;
|
||||
use ruma::{
|
||||
CanonicalJsonObject, OwnedUserId, RoomId, RoomVersionId, UserId,
|
||||
|
@ -14,10 +17,7 @@ use ruma::{
|
|||
};
|
||||
use serde_json::value::to_raw_value;
|
||||
|
||||
use crate::{
|
||||
Error, Result, Ruma,
|
||||
service::{Services, pdu::PduBuilder},
|
||||
};
|
||||
use crate::Ruma;
|
||||
|
||||
/// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}`
|
||||
///
|
||||
|
@ -42,10 +42,9 @@ pub(crate) async fn create_join_event_template_route(
|
|||
.await?;
|
||||
|
||||
if services
|
||||
.server
|
||||
.config
|
||||
.forbidden_remote_server_names
|
||||
.contains(body.origin())
|
||||
.is_match(body.origin().host())
|
||||
{
|
||||
warn!(
|
||||
"Server {} for remote user {} tried joining room ID {} which has a server name that \
|
||||
|
@ -59,10 +58,9 @@ pub(crate) async fn create_join_event_template_route(
|
|||
|
||||
if let Some(server) = body.room_id.server_name() {
|
||||
if services
|
||||
.server
|
||||
.config
|
||||
.forbidden_remote_server_names
|
||||
.contains(&server.to_owned())
|
||||
.is_match(server.host())
|
||||
{
|
||||
return Err!(Request(Forbidden(warn!(
|
||||
"Room ID server name {server} is banned on this homeserver."
|
||||
|
|
|
@ -1,15 +1,14 @@
|
|||
use RoomVersionId::*;
|
||||
use axum::extract::State;
|
||||
use conduwuit::{Err, debug_warn};
|
||||
use conduwuit::{Err, Error, Result, debug_warn, matrix::pdu::PduBuilder, warn};
|
||||
use ruma::{
|
||||
RoomVersionId,
|
||||
api::{client::error::ErrorKind, federation::knock::create_knock_event_template},
|
||||
events::room::member::{MembershipState, RoomMemberEventContent},
|
||||
};
|
||||
use serde_json::value::to_raw_value;
|
||||
use tracing::warn;
|
||||
|
||||
use crate::{Error, Result, Ruma, service::pdu::PduBuilder};
|
||||
use crate::Ruma;
|
||||
|
||||
/// # `GET /_matrix/federation/v1/make_knock/{roomId}/{userId}`
|
||||
///
|
||||
|
@ -34,10 +33,9 @@ pub(crate) async fn create_knock_event_template_route(
|
|||
.await?;
|
||||
|
||||
if services
|
||||
.server
|
||||
.config
|
||||
.forbidden_remote_server_names
|
||||
.contains(body.origin())
|
||||
.is_match(body.origin().host())
|
||||
{
|
||||
warn!(
|
||||
"Server {} for remote user {} tried knocking room ID {} which has a server name \
|
||||
|
@ -51,10 +49,9 @@ pub(crate) async fn create_knock_event_template_route(
|
|||
|
||||
if let Some(server) = body.room_id.server_name() {
|
||||
if services
|
||||
.server
|
||||
.config
|
||||
.forbidden_remote_server_names
|
||||
.contains(&server.to_owned())
|
||||
.is_match(server.host())
|
||||
{
|
||||
return Err!(Request(Forbidden("Server is banned on this homeserver.")));
|
||||
}
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
use axum::extract::State;
|
||||
use conduwuit::{Err, Result};
|
||||
use conduwuit::{Err, Result, matrix::pdu::PduBuilder};
|
||||
use ruma::{
|
||||
api::federation::membership::prepare_leave_event,
|
||||
events::room::member::{MembershipState, RoomMemberEventContent},
|
||||
|
@ -7,7 +7,7 @@ use ruma::{
|
|||
use serde_json::value::to_raw_value;
|
||||
|
||||
use super::make_join::maybe_strip_event_id;
|
||||
use crate::{Ruma, service::pdu::PduBuilder};
|
||||
use crate::Ruma;
|
||||
|
||||
/// # `GET /_matrix/federation/v1/make_leave/{roomId}/{eventId}`
|
||||
///
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
use axum::extract::State;
|
||||
use conduwuit::Result;
|
||||
use ruma::api::federation::openid::get_openid_userinfo;
|
||||
|
||||
use crate::{Result, Ruma};
|
||||
use crate::Ruma;
|
||||
|
||||
/// # `GET /_matrix/federation/v1/openid/userinfo`
|
||||
///
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
use axum::extract::State;
|
||||
use axum_client_ip::InsecureClientIp;
|
||||
use conduwuit::{Error, Result};
|
||||
use ruma::{
|
||||
api::{
|
||||
client::error::ErrorKind,
|
||||
|
@ -8,7 +9,7 @@ use ruma::{
|
|||
directory::Filter,
|
||||
};
|
||||
|
||||
use crate::{Error, Result, Ruma};
|
||||
use crate::Ruma;
|
||||
|
||||
/// # `POST /_matrix/federation/v1/publicRooms`
|
||||
///
|
||||
|
|
|
@ -9,11 +9,15 @@ use conduwuit::{
|
|||
result::LogErr,
|
||||
trace,
|
||||
utils::{
|
||||
IterStream, ReadyExt,
|
||||
IterStream, ReadyExt, millis_since_unix_epoch,
|
||||
stream::{BroadbandExt, TryBroadbandExt, automatic_width},
|
||||
},
|
||||
warn,
|
||||
};
|
||||
use conduwuit_service::{
|
||||
Services,
|
||||
sending::{EDU_LIMIT, PDU_LIMIT},
|
||||
};
|
||||
use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt};
|
||||
use itertools::Itertools;
|
||||
use ruma::{
|
||||
|
@ -33,16 +37,8 @@ use ruma::{
|
|||
serde::Raw,
|
||||
to_device::DeviceIdOrAllDevices,
|
||||
};
|
||||
use service::{
|
||||
Services,
|
||||
sending::{EDU_LIMIT, PDU_LIMIT},
|
||||
};
|
||||
use utils::millis_since_unix_epoch;
|
||||
|
||||
use crate::{
|
||||
Ruma,
|
||||
utils::{self},
|
||||
};
|
||||
use crate::Ruma;
|
||||
|
||||
type ResolvedMap = BTreeMap<OwnedEventId, Result>;
|
||||
type Pdu = (OwnedRoomId, OwnedEventId, CanonicalJsonObject);
|
||||
|
|
|
@ -9,6 +9,7 @@ use conduwuit::{
|
|||
utils::stream::{IterStream, TryBroadbandExt},
|
||||
warn,
|
||||
};
|
||||
use conduwuit_service::Services;
|
||||
use futures::{FutureExt, StreamExt, TryStreamExt};
|
||||
use ruma::{
|
||||
CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId,
|
||||
|
@ -20,7 +21,6 @@ use ruma::{
|
|||
},
|
||||
};
|
||||
use serde_json::value::{RawValue as RawJsonValue, to_raw_value};
|
||||
use service::Services;
|
||||
|
||||
use crate::Ruma;
|
||||
|
||||
|
@ -268,10 +268,9 @@ pub(crate) async fn create_join_event_v1_route(
|
|||
body: Ruma<create_join_event::v1::Request>,
|
||||
) -> Result<create_join_event::v1::Response> {
|
||||
if services
|
||||
.server
|
||||
.config
|
||||
.forbidden_remote_server_names
|
||||
.contains(body.origin())
|
||||
.is_match(body.origin().host())
|
||||
{
|
||||
warn!(
|
||||
"Server {} tried joining room ID {} through us who has a server name that is \
|
||||
|
@ -284,10 +283,9 @@ pub(crate) async fn create_join_event_v1_route(
|
|||
|
||||
if let Some(server) = body.room_id.server_name() {
|
||||
if services
|
||||
.server
|
||||
.config
|
||||
.forbidden_remote_server_names
|
||||
.contains(&server.to_owned())
|
||||
.is_match(server.host())
|
||||
{
|
||||
warn!(
|
||||
"Server {} tried joining room ID {} through us which has a server name that is \
|
||||
|
@ -316,20 +314,18 @@ pub(crate) async fn create_join_event_v2_route(
|
|||
body: Ruma<create_join_event::v2::Request>,
|
||||
) -> Result<create_join_event::v2::Response> {
|
||||
if services
|
||||
.server
|
||||
.config
|
||||
.forbidden_remote_server_names
|
||||
.contains(body.origin())
|
||||
.is_match(body.origin().host())
|
||||
{
|
||||
return Err!(Request(Forbidden("Server is banned on this homeserver.")));
|
||||
}
|
||||
|
||||
if let Some(server) = body.room_id.server_name() {
|
||||
if services
|
||||
.server
|
||||
.config
|
||||
.forbidden_remote_server_names
|
||||
.contains(&server.to_owned())
|
||||
.is_match(server.host())
|
||||
{
|
||||
warn!(
|
||||
"Server {} tried joining room ID {} through us which has a server name that is \
|
||||
|
|
|
@ -1,5 +1,9 @@
|
|||
use axum::extract::State;
|
||||
use conduwuit::{Err, PduEvent, Result, err, pdu::gen_event_id_canonical_json, warn};
|
||||
use conduwuit::{
|
||||
Err, Result, err,
|
||||
matrix::pdu::{PduEvent, gen_event_id_canonical_json},
|
||||
warn,
|
||||
};
|
||||
use futures::FutureExt;
|
||||
use ruma::{
|
||||
OwnedServerName, OwnedUserId,
|
||||
|
@ -22,10 +26,9 @@ pub(crate) async fn create_knock_event_v1_route(
|
|||
body: Ruma<send_knock::v1::Request>,
|
||||
) -> Result<send_knock::v1::Response> {
|
||||
if services
|
||||
.server
|
||||
.config
|
||||
.forbidden_remote_server_names
|
||||
.contains(body.origin())
|
||||
.is_match(body.origin().host())
|
||||
{
|
||||
warn!(
|
||||
"Server {} tried knocking room ID {} who has a server name that is globally \
|
||||
|
@ -38,10 +41,9 @@ pub(crate) async fn create_knock_event_v1_route(
|
|||
|
||||
if let Some(server) = body.room_id.server_name() {
|
||||
if services
|
||||
.server
|
||||
.config
|
||||
.forbidden_remote_server_names
|
||||
.contains(&server.to_owned())
|
||||
.is_match(server.host())
|
||||
{
|
||||
warn!(
|
||||
"Server {} tried knocking room ID {} which has a server name that is globally \
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
#![allow(deprecated)]
|
||||
|
||||
use axum::extract::State;
|
||||
use conduwuit::{Err, Result, err};
|
||||
use conduwuit::{Err, Result, err, matrix::pdu::gen_event_id_canonical_json};
|
||||
use conduwuit_service::Services;
|
||||
use futures::FutureExt;
|
||||
use ruma::{
|
||||
OwnedRoomId, OwnedUserId, RoomId, ServerName,
|
||||
|
@ -13,10 +14,7 @@ use ruma::{
|
|||
};
|
||||
use serde_json::value::RawValue as RawJsonValue;
|
||||
|
||||
use crate::{
|
||||
Ruma,
|
||||
service::{Services, pdu::gen_event_id_canonical_json},
|
||||
};
|
||||
use crate::Ruma;
|
||||
|
||||
/// # `PUT /_matrix/federation/v1/send_leave/{roomId}/{eventId}`
|
||||
///
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
use conduwuit::Result;
|
||||
use ruma::api::federation::discovery::get_server_version;
|
||||
|
||||
use crate::{Result, Ruma};
|
||||
use crate::Ruma;
|
||||
|
||||
/// # `GET /_matrix/federation/v1/version`
|
||||
///
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
use axum::extract::State;
|
||||
use conduwuit::{Error, Result};
|
||||
use ruma::api::{client::error::ErrorKind, federation::discovery::discover_homeserver};
|
||||
|
||||
use crate::{Error, Result, Ruma};
|
||||
use crate::Ruma;
|
||||
|
||||
/// # `GET /.well-known/matrix/server`
|
||||
///
|
||||
|
|
|
@ -59,6 +59,7 @@ conduwuit_mods = [
|
|||
argon2.workspace = true
|
||||
arrayvec.workspace = true
|
||||
axum.workspace = true
|
||||
axum-extra.workspace = true
|
||||
bytes.workspace = true
|
||||
bytesize.workspace = true
|
||||
cargo_toml.workspace = true
|
||||
|
|
|
@ -8,7 +8,6 @@ use std::{
|
|||
};
|
||||
|
||||
use arrayvec::ArrayVec;
|
||||
use const_str::concat_bytes;
|
||||
use tikv_jemalloc_ctl as mallctl;
|
||||
use tikv_jemalloc_sys as ffi;
|
||||
use tikv_jemallocator as jemalloc;
|
||||
|
@ -20,7 +19,7 @@ use crate::{
|
|||
|
||||
#[cfg(feature = "jemalloc_conf")]
|
||||
#[unsafe(no_mangle)]
|
||||
pub static malloc_conf: &[u8] = concat_bytes!(
|
||||
pub static malloc_conf: &[u8] = const_str::concat_bytes!(
|
||||
"lg_extent_max_active_fit:4",
|
||||
",oversize_threshold:16777216",
|
||||
",tcache_max:2097152",
|
||||
|
@ -336,6 +335,12 @@ where
|
|||
Ok(res)
|
||||
}
|
||||
|
||||
#[tracing::instrument(
|
||||
name = "get",
|
||||
level = "trace"
|
||||
skip_all,
|
||||
fields(?key)
|
||||
)]
|
||||
fn get<T>(key: &Key) -> Result<T>
|
||||
where
|
||||
T: Copy + Debug,
|
||||
|
@ -347,6 +352,12 @@ where
|
|||
unsafe { mallctl::raw::read_mib(key.as_slice()) }.map_err(map_err)
|
||||
}
|
||||
|
||||
#[tracing::instrument(
|
||||
name = "xchg",
|
||||
level = "trace"
|
||||
skip_all,
|
||||
fields(?key, ?val)
|
||||
)]
|
||||
fn xchg<T>(key: &Key, val: T) -> Result<T>
|
||||
where
|
||||
T: Copy + Debug,
|
||||
|
|
|
@@ -3,7 +3,7 @@ pub mod manager;
pub mod proxy;

use std::{
    collections::{BTreeMap, BTreeSet, HashSet},
    collections::{BTreeMap, BTreeSet},
    net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr},
    path::{Path, PathBuf},
};

@@ -252,14 +252,6 @@ pub struct Config {
    #[serde(default = "default_servernameevent_data_cache_capacity")]
    pub servernameevent_data_cache_capacity: u32,

    /// default: varies by system
    #[serde(default = "default_server_visibility_cache_capacity")]
    pub server_visibility_cache_capacity: u32,

    /// default: varies by system
    #[serde(default = "default_user_visibility_cache_capacity")]
    pub user_visibility_cache_capacity: u32,

    /// default: varies by system
    #[serde(default = "default_stateinfo_cache_capacity")]
    pub stateinfo_cache_capacity: u32,

@@ -648,9 +640,9 @@ pub struct Config {

    /// Default room version conduwuit will create rooms with.
    ///
    /// Per spec, room version 10 is the default.
    /// Per spec, room version 11 is the default.
    ///
    /// default: 10
    /// default: 11
    #[serde(default = "default_default_room_version")]
    pub default_room_version: RoomVersionId,

@@ -723,7 +715,7 @@ pub struct Config {
    /// Currently, conduwuit doesn't support inbound batched key requests, so
    /// this list should only contain other Synapse servers.
    ///
    /// example: ["matrix.org", "envs.net", "tchncs.de"]
    /// example: ["matrix.org", "tchncs.de"]
    ///
    /// default: ["matrix.org"]
    #[serde(default = "default_trusted_servers")]

@@ -1369,15 +1361,18 @@ pub struct Config {
    #[serde(default)]
    pub prune_missing_media: bool,

    /// Vector list of servers that conduwuit will refuse to download remote
    /// media from.
    /// Vector list of regex patterns of server names that conduwuit will refuse
    /// to download remote media from.
    ///
    /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"]
    ///
    /// default: []
    #[serde(default)]
    pub prevent_media_downloads_from: HashSet<OwnedServerName>,
    #[serde(default, with = "serde_regex")]
    pub prevent_media_downloads_from: RegexSet,

    /// List of forbidden server names that we will block incoming AND outgoing
    /// federation with, and block client room joins / remote user invites.
    /// List of forbidden server names via regex patterns that we will block
    /// incoming AND outgoing federation with, and block client room joins /
    /// remote user invites.
    ///
    /// This check is applied on the room ID, room alias, sender server name,
    /// sender user's server name, inbound federation X-Matrix origin, and

@@ -1385,17 +1380,21 @@ pub struct Config {
    ///
    /// Basically "global" ACLs.
    ///
    /// default: []
    #[serde(default)]
    pub forbidden_remote_server_names: HashSet<OwnedServerName>,

    /// List of forbidden server names that we will block all outgoing federated
    /// room directory requests for. Useful for preventing our users from
    /// wandering into bad servers or spaces.
    /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"]
    ///
    /// default: []
    #[serde(default = "HashSet::new")]
    pub forbidden_remote_room_directory_server_names: HashSet<OwnedServerName>,
    #[serde(default, with = "serde_regex")]
    pub forbidden_remote_server_names: RegexSet,

    /// List of forbidden server names via regex patterns that we will block all
    /// outgoing federated room directory requests for. Useful for preventing
    /// our users from wandering into bad servers or spaces.
    ///
    /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"]
    ///
    /// default: []
    #[serde(default, with = "serde_regex")]
    pub forbidden_remote_room_directory_server_names: RegexSet,

    /// Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you
    /// do not want conduwuit to send outbound requests to. Defaults to

@@ -1516,11 +1515,10 @@ pub struct Config {
    /// used, and startup as warnings if any room aliases in your database have
    /// a forbidden room alias/ID.
    ///
    /// example: ["19dollarfortnitecards", "b[4a]droom"]
    /// example: ["19dollarfortnitecards", "b[4a]droom", "badphrase"]
    ///
    /// default: []
    #[serde(default)]
    #[serde(with = "serde_regex")]
    #[serde(default, with = "serde_regex")]
    pub forbidden_alias_names: RegexSet,

    /// List of forbidden username patterns/strings.

@@ -1532,11 +1530,10 @@ pub struct Config {
    /// startup as warnings if any local users in your database have a forbidden
    /// username.
    ///
    /// example: ["administrator", "b[a4]dusernam[3e]"]
    /// example: ["administrator", "b[a4]dusernam[3e]", "badphrase"]
    ///
    /// default: []
    #[serde(default)]
    #[serde(with = "serde_regex")]
    #[serde(default, with = "serde_regex")]
    pub forbidden_usernames: RegexSet,

    /// Retry failed and incomplete messages to remote servers immediately upon

@@ -2035,10 +2032,6 @@ fn default_servernameevent_data_cache_capacity() -> u32 {
    parallelism_scaled_u32(100_000).saturating_add(500_000)
}

fn default_server_visibility_cache_capacity() -> u32 { parallelism_scaled_u32(500) }

fn default_user_visibility_cache_capacity() -> u32 { parallelism_scaled_u32(1000) }

fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(100) }

fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(1000) }
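These options now deserialize through `serde_regex` into a `RegexSet`, which is why the examples in the doc comments are regular expressions rather than plain server names. A sketch of consuming such an option, assuming the `toml` crate for parsing and reusing the documented example patterns (the struct is illustrative, not conduwuit's full `Config`):

```rust
use regex::RegexSet;
use serde::Deserialize;

#[derive(Deserialize)]
struct Acl {
    #[serde(with = "serde_regex")]
    forbidden_remote_server_names: RegexSet,
}

fn main() {
    // TOML literal strings (single quotes) keep the backslash in
    // `badserver\.tld$` intact.
    let acl: Acl = toml::from_str(
        "forbidden_remote_server_names = ['badserver\\.tld$', 'badphrase', '19dollarfortnitecards']",
    )
    .unwrap();

    assert!(acl.forbidden_remote_server_names.is_match("badserver.tld"));
    assert!(!acl.forbidden_remote_server_names.is_match("matrix.org"));
}
```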
@@ -2158,7 +2151,12 @@ fn default_rocksdb_max_log_file_size() -> usize {

fn default_rocksdb_parallelism_threads() -> usize { 0 }

fn default_rocksdb_compression_algo() -> String { "zstd".to_owned() }
fn default_rocksdb_compression_algo() -> String {
    cfg!(feature = "zstd_compression")
        .then_some("zstd")
        .unwrap_or("none")
        .to_owned()
}

/// Default RocksDB compression level is 32767, which is internally read by
/// RocksDB as the default magic number and translated to the library's default

@@ -2177,7 +2175,7 @@ fn default_rocksdb_stats_level() -> u8 { 1 }
// I know, it's a great name
#[must_use]
#[inline]
pub fn default_default_room_version() -> RoomVersionId { RoomVersionId::V10 }
pub fn default_default_room_version() -> RoomVersionId { RoomVersionId::V11 }

fn default_ip_range_denylist() -> Vec<String> {
    vec![
@ -136,6 +136,7 @@ macro_rules! err_log {
|
|||
}
|
||||
|
||||
#[macro_export]
|
||||
#[collapse_debuginfo(yes)]
|
||||
macro_rules! err_lev {
|
||||
(debug_warn) => {
|
||||
if $crate::debug::logging() {
|
||||
|
|
|
@ -81,6 +81,8 @@ pub enum Error {
|
|||
#[error("Tracing reload error: {0}")]
|
||||
TracingReload(#[from] tracing_subscriber::reload::Error),
|
||||
#[error(transparent)]
|
||||
TypedHeader(#[from] axum_extra::typed_header::TypedHeaderRejection),
|
||||
#[error(transparent)]
|
||||
Yaml(#[from] serde_yaml::Error),
|
||||
|
||||
// ruma/conduwuit
|
||||
|
|
|
@ -86,7 +86,7 @@ pub(super) fn bad_request_code(kind: &ErrorKind) -> StatusCode {
|
|||
|
||||
pub(super) fn ruma_error_message(error: &ruma::api::client::error::Error) -> String {
|
||||
if let ErrorBody::Standard { message, .. } = &error.body {
|
||||
return message.to_string();
|
||||
return message.clone();
|
||||
}
|
||||
|
||||
format!("{error}")
|
||||
|
|
9
src/core/matrix/mod.rs
Normal file
9
src/core/matrix/mod.rs
Normal file
|
@ -0,0 +1,9 @@
|
|||
//! Core Matrix Library
|
||||
|
||||
pub mod event;
|
||||
pub mod pdu;
|
||||
pub mod state_res;
|
||||
|
||||
pub use event::Event;
|
||||
pub use pdu::{PduBuilder, PduCount, PduEvent, PduId, RawPduId, StateKey};
|
||||
pub use state_res::{EventTypeExt, RoomVersion, StateMap, TypeStateKey};
|
|
@ -1,7 +1,6 @@
|
|||
mod builder;
|
||||
mod content;
|
||||
mod count;
|
||||
mod event;
|
||||
mod event_id;
|
||||
mod filter;
|
||||
mod id;
|
||||
|
@ -17,8 +16,8 @@ mod unsigned;
|
|||
use std::cmp::Ordering;
|
||||
|
||||
use ruma::{
|
||||
CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName,
|
||||
OwnedUserId, UInt, events::TimelineEventType,
|
||||
CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId,
|
||||
OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, UInt, UserId, events::TimelineEventType,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::value::RawValue as RawJsonValue;
|
||||
|
@ -27,12 +26,12 @@ pub use self::{
|
|||
Count as PduCount, Id as PduId, Pdu as PduEvent, RawId as RawPduId,
|
||||
builder::{Builder, Builder as PduBuilder},
|
||||
count::Count,
|
||||
event::Event,
|
||||
event_id::*,
|
||||
id::*,
|
||||
raw_id::*,
|
||||
state_key::{ShortStateKey, StateKey},
|
||||
};
|
||||
use super::Event;
|
||||
use crate::Result;
|
||||
|
||||
/// Persistent Data Unit (Event)
|
||||
|
@ -79,6 +78,36 @@ impl Pdu {
|
|||
}
|
||||
}
|
||||
|
||||
impl Event for Pdu {
|
||||
type Id = OwnedEventId;
|
||||
|
||||
fn event_id(&self) -> &Self::Id { &self.event_id }
|
||||
|
||||
fn room_id(&self) -> &RoomId { &self.room_id }
|
||||
|
||||
fn sender(&self) -> &UserId { &self.sender }
|
||||
|
||||
fn event_type(&self) -> &TimelineEventType { &self.kind }
|
||||
|
||||
fn content(&self) -> &RawJsonValue { &self.content }
|
||||
|
||||
fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch {
|
||||
MilliSecondsSinceUnixEpoch(self.origin_server_ts)
|
||||
}
|
||||
|
||||
fn state_key(&self) -> Option<&str> { self.state_key.as_deref() }
|
||||
|
||||
fn prev_events(&self) -> impl DoubleEndedIterator<Item = &Self::Id> + Send + '_ {
|
||||
self.prev_events.iter()
|
||||
}
|
||||
|
||||
fn auth_events(&self) -> impl DoubleEndedIterator<Item = &Self::Id> + Send + '_ {
|
||||
self.auth_events.iter()
|
||||
}
|
||||
|
||||
fn redacts(&self) -> Option<&Self::Id> { self.redacts.as_ref() }
|
||||
}
|
||||
|
||||
/// Prevent derived equality which wouldn't limit itself to event_id
|
||||
impl Eq for Pdu {}
|
||||
|
||||
|
@ -87,12 +116,12 @@ impl PartialEq for Pdu {
|
|||
fn eq(&self, other: &Self) -> bool { self.event_id == other.event_id }
|
||||
}
|
||||
|
||||
/// Ordering determined by the Pdu's ID, not the memory representations.
|
||||
impl PartialOrd for Pdu {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) }
|
||||
}
|
||||
|
||||
/// Ordering determined by the Pdu's ID, not the memory representations.
|
||||
impl Ord for Pdu {
|
||||
fn cmp(&self, other: &Self) -> Ordering { self.event_id.cmp(&other.event_id) }
|
||||
}
|
||||
|
||||
/// Ordering determined by the Pdu's ID, not the memory representations.
|
||||
impl PartialOrd for Pdu {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) }
|
||||
}
|
|
@ -10,35 +10,18 @@ use serde_json::{json, value::Value as JsonValue};
|
|||
|
||||
use crate::implement;
|
||||
|
||||
#[must_use]
|
||||
#[implement(super::Pdu)]
|
||||
pub fn to_sync_room_event(&self) -> Raw<AnySyncTimelineEvent> {
|
||||
let (redacts, content) = self.copy_redacts();
|
||||
let mut json = json!({
|
||||
"content": content,
|
||||
"type": self.kind,
|
||||
"event_id": self.event_id,
|
||||
"sender": self.sender,
|
||||
"origin_server_ts": self.origin_server_ts,
|
||||
});
|
||||
|
||||
if let Some(unsigned) = &self.unsigned {
|
||||
json["unsigned"] = json!(unsigned);
|
||||
}
|
||||
if let Some(state_key) = &self.state_key {
|
||||
json["state_key"] = json!(state_key);
|
||||
}
|
||||
if let Some(redacts) = &redacts {
|
||||
json["redacts"] = json!(redacts);
|
||||
}
|
||||
|
||||
serde_json::from_value(json).expect("Raw::from_value always works")
|
||||
}
|
||||
|
||||
/// This only works for events that are also AnyRoomEvents.
|
||||
#[must_use]
|
||||
#[implement(super::Pdu)]
|
||||
pub fn to_any_event(&self) -> Raw<AnyEphemeralRoomEvent> {
|
||||
pub fn into_any_event(self) -> Raw<AnyEphemeralRoomEvent> {
|
||||
serde_json::from_value(self.into_any_event_value()).expect("Raw::from_value always works")
|
||||
}
|
||||
|
||||
/// This only works for events that are also AnyRoomEvents.
|
||||
#[implement(super::Pdu)]
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn into_any_event_value(self) -> JsonValue {
|
||||
let (redacts, content) = self.copy_redacts();
|
||||
let mut json = json!({
|
||||
"content": content,
|
||||
|
@ -59,12 +42,24 @@ pub fn to_any_event(&self) -> Raw<AnyEphemeralRoomEvent> {
|
|||
json["redacts"] = json!(redacts);
|
||||
}
|
||||
|
||||
serde_json::from_value(json).expect("Raw::from_value always works")
|
||||
json
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
#[implement(super::Pdu)]
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn into_room_event(self) -> Raw<AnyTimelineEvent> { self.to_room_event() }
|
||||
|
||||
#[implement(super::Pdu)]
|
||||
#[must_use]
|
||||
pub fn to_room_event(&self) -> Raw<AnyTimelineEvent> {
|
||||
serde_json::from_value(self.to_room_event_value()).expect("Raw::from_value always works")
|
||||
}
|
||||
|
||||
#[implement(super::Pdu)]
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn to_room_event_value(&self) -> JsonValue {
|
||||
let (redacts, content) = self.copy_redacts();
|
||||
let mut json = json!({
|
||||
"content": content,
|
||||
|
@ -85,12 +80,25 @@ pub fn to_room_event(&self) -> Raw<AnyTimelineEvent> {
|
|||
json["redacts"] = json!(redacts);
|
||||
}
|
||||
|
||||
serde_json::from_value(json).expect("Raw::from_value always works")
|
||||
json
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
#[implement(super::Pdu)]
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn into_message_like_event(self) -> Raw<AnyMessageLikeEvent> { self.to_message_like_event() }
|
||||
|
||||
#[implement(super::Pdu)]
|
||||
#[must_use]
|
||||
pub fn to_message_like_event(&self) -> Raw<AnyMessageLikeEvent> {
|
||||
serde_json::from_value(self.to_message_like_event_value())
|
||||
.expect("Raw::from_value always works")
|
||||
}
|
||||
|
||||
#[implement(super::Pdu)]
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn to_message_like_event_value(&self) -> JsonValue {
|
||||
let (redacts, content) = self.copy_redacts();
|
||||
let mut json = json!({
|
||||
"content": content,
|
||||
|
@ -111,11 +119,55 @@ pub fn to_message_like_event(&self) -> Raw<AnyMessageLikeEvent> {
|
|||
json["redacts"] = json!(redacts);
|
||||
}
|
||||
|
||||
serde_json::from_value(json).expect("Raw::from_value always works")
|
||||
json
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
#[implement(super::Pdu)]
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn into_sync_room_event(self) -> Raw<AnySyncTimelineEvent> { self.to_sync_room_event() }
|
||||
|
||||
#[implement(super::Pdu)]
|
||||
#[must_use]
|
||||
pub fn to_sync_room_event(&self) -> Raw<AnySyncTimelineEvent> {
|
||||
serde_json::from_value(self.to_sync_room_event_value()).expect("Raw::from_value always works")
|
||||
}
|
||||
|
||||
#[implement(super::Pdu)]
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn to_sync_room_event_value(&self) -> JsonValue {
|
||||
let (redacts, content) = self.copy_redacts();
|
||||
let mut json = json!({
|
||||
"content": content,
|
||||
"type": self.kind,
|
||||
"event_id": self.event_id,
|
||||
"sender": self.sender,
|
||||
"origin_server_ts": self.origin_server_ts,
|
||||
});
|
||||
|
||||
if let Some(unsigned) = &self.unsigned {
|
||||
json["unsigned"] = json!(unsigned);
|
||||
}
|
||||
if let Some(state_key) = &self.state_key {
|
||||
json["state_key"] = json!(state_key);
|
||||
}
|
||||
if let Some(redacts) = &redacts {
|
||||
json["redacts"] = json!(redacts);
|
||||
}
|
||||
|
||||
json
|
||||
}
|
||||
|
||||
#[implement(super::Pdu)]
|
||||
#[must_use]
|
||||
pub fn into_state_event(self) -> Raw<AnyStateEvent> {
|
||||
serde_json::from_value(self.into_state_event_value()).expect("Raw::from_value always works")
|
||||
}
|
||||
|
||||
#[implement(super::Pdu)]
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn into_state_event_value(self) -> JsonValue {
|
||||
let mut json = json!({
|
||||
"content": self.content,
|
||||
|
@ -134,15 +186,17 @@ pub fn into_state_event_value(self) -> JsonValue {
|
|||
json
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
#[implement(super::Pdu)]
|
||||
pub fn into_state_event(self) -> Raw<AnyStateEvent> {
|
||||
serde_json::from_value(self.into_state_event_value()).expect("Raw::from_value always works")
|
||||
#[must_use]
|
||||
pub fn into_sync_state_event(self) -> Raw<AnySyncStateEvent> {
|
||||
serde_json::from_value(self.into_sync_state_event_value())
|
||||
.expect("Raw::from_value always works")
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
#[implement(super::Pdu)]
|
||||
pub fn to_sync_state_event(&self) -> Raw<AnySyncStateEvent> {
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn into_sync_state_event_value(self) -> JsonValue {
|
||||
let mut json = json!({
|
||||
"content": self.content,
|
||||
"type": self.kind,
|
||||
|
@ -156,39 +210,65 @@ pub fn to_sync_state_event(&self) -> Raw<AnySyncStateEvent> {
|
|||
json["unsigned"] = json!(unsigned);
|
||||
}
|
||||
|
||||
serde_json::from_value(json).expect("Raw::from_value always works")
|
||||
json
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
#[implement(super::Pdu)]
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn into_stripped_state_event(self) -> Raw<AnyStrippedStateEvent> {
|
||||
self.to_stripped_state_event()
|
||||
}
|
||||
|
||||
#[implement(super::Pdu)]
|
||||
#[must_use]
|
||||
pub fn to_stripped_state_event(&self) -> Raw<AnyStrippedStateEvent> {
|
||||
let json = json!({
|
||||
serde_json::from_value(self.to_stripped_state_event_value())
|
||||
.expect("Raw::from_value always works")
|
||||
}
|
||||
|
||||
#[implement(super::Pdu)]
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn to_stripped_state_event_value(&self) -> JsonValue {
|
||||
json!({
|
||||
"content": self.content,
|
||||
"type": self.kind,
|
||||
"sender": self.sender,
|
||||
"state_key": self.state_key,
|
||||
});
|
||||
|
||||
serde_json::from_value(json).expect("Raw::from_value always works")
|
||||
})
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
#[implement(super::Pdu)]
|
||||
pub fn to_stripped_spacechild_state_event(&self) -> Raw<HierarchySpaceChildEvent> {
|
||||
let json = json!({
|
||||
#[must_use]
|
||||
pub fn into_stripped_spacechild_state_event(self) -> Raw<HierarchySpaceChildEvent> {
|
||||
serde_json::from_value(self.into_stripped_spacechild_state_event_value())
|
||||
.expect("Raw::from_value always works")
|
||||
}
|
||||
|
||||
#[implement(super::Pdu)]
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn into_stripped_spacechild_state_event_value(self) -> JsonValue {
|
||||
json!({
|
||||
"content": self.content,
|
||||
"type": self.kind,
|
||||
"sender": self.sender,
|
||||
"state_key": self.state_key,
|
||||
"origin_server_ts": self.origin_server_ts,
|
||||
});
|
||||
|
||||
serde_json::from_value(json).expect("Raw::from_value always works")
|
||||
})
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
#[implement(super::Pdu)]
|
||||
#[must_use]
|
||||
pub fn into_member_event(self) -> Raw<StateEvent<RoomMemberEventContent>> {
|
||||
serde_json::from_value(self.into_member_event_value()).expect("Raw::from_value always works")
|
||||
}
|
||||
|
||||
#[implement(super::Pdu)]
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn into_member_event_value(self) -> JsonValue {
|
||||
let mut json = json!({
|
||||
"content": self.content,
|
||||
"type": self.kind,
|
||||
|
@ -204,5 +284,5 @@ pub fn into_member_event(self) -> Raw<StateEvent<RoomMemberEventContent>> {
|
|||
json["unsigned"] = json!(unsigned);
|
||||
}
|
||||
|
||||
serde_json::from_value(json).expect("Raw::from_value always works")
|
||||
json
|
||||
}
|
672
src/core/matrix/state_res/benches.rs
Normal file
672
src/core/matrix/state_res/benches.rs
Normal file
|
@ -0,0 +1,672 @@
|
|||
#[cfg(conduwuit_bench)]
|
||||
extern crate test;
|
||||
|
||||
use std::{
|
||||
borrow::Borrow,
|
||||
collections::{HashMap, HashSet},
|
||||
sync::{
|
||||
Arc,
|
||||
atomic::{AtomicU64, Ordering::SeqCst},
|
||||
},
|
||||
};
|
||||
|
||||
use futures::{future, future::ready};
|
||||
use maplit::{btreemap, hashmap, hashset};
|
||||
use ruma::{
|
||||
EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, Signatures, UserId,
|
||||
events::{
|
||||
StateEventType, TimelineEventType,
|
||||
pdu::{EventHash, Pdu, RoomV3Pdu},
|
||||
room::{
|
||||
join_rules::{JoinRule, RoomJoinRulesEventContent},
|
||||
member::{MembershipState, RoomMemberEventContent},
|
||||
},
|
||||
},
|
||||
int, room_id, uint, user_id,
|
||||
};
|
||||
use serde_json::{
|
||||
json,
|
||||
value::{RawValue as RawJsonValue, to_raw_value as to_raw_json_value},
|
||||
};
|
||||
|
||||
use self::event::PduEvent;
|
||||
use crate::state_res::{self as state_res, Error, Event, Result, StateMap};
|
||||
|
||||
static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0);
|
||||
|
||||
#[cfg(conduwuit_bench)]
|
||||
#[cfg_attr(conduwuit_bench, bench)]
|
||||
fn lexico_topo_sort(c: &mut test::Bencher) {
|
||||
let graph = hashmap! {
|
||||
event_id("l") => hashset![event_id("o")],
|
||||
event_id("m") => hashset![event_id("n"), event_id("o")],
|
||||
event_id("n") => hashset![event_id("o")],
|
||||
event_id("o") => hashset![], // "o" has zero outgoing edges but 4 incoming edges
|
||||
event_id("p") => hashset![event_id("o")],
|
||||
};
|
||||
|
||||
c.iter(|| {
|
||||
let _ = state_res::lexicographical_topological_sort(&graph, &|_| {
|
||||
future::ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0))))
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
#[cfg(conduwuit_bench)]
|
||||
#[cfg_attr(conduwuit_bench, bench)]
|
||||
fn resolution_shallow_auth_chain(c: &mut test::Bencher) {
|
||||
let parallel_fetches = 32;
|
||||
let mut store = TestStore(hashmap! {});
|
||||
|
||||
// build up the DAG
|
||||
let (state_at_bob, state_at_charlie, _) = store.set_up();
|
||||
|
||||
c.iter(|| async {
|
||||
let ev_map = store.0.clone();
|
||||
let state_sets = [&state_at_bob, &state_at_charlie];
|
||||
let fetch = |id: OwnedEventId| ready(ev_map.get(&id).map(Arc::clone));
|
||||
let exists = |id: OwnedEventId| ready(ev_map.get(&id).is_some());
|
||||
let auth_chain_sets: Vec<HashSet<_>> = state_sets
|
||||
.iter()
|
||||
.map(|map| {
|
||||
store
|
||||
.auth_event_ids(room_id(), map.values().cloned().collect())
|
||||
.unwrap()
|
||||
})
|
||||
.collect();
|
||||
|
||||
let _ = match state_res::resolve(
|
||||
&RoomVersionId::V6,
|
||||
state_sets.into_iter(),
|
||||
&auth_chain_sets,
|
||||
&fetch,
|
||||
&exists,
|
||||
parallel_fetches,
|
||||
)
|
||||
.await
|
||||
{
|
||||
| Ok(state) => state,
|
||||
| Err(e) => panic!("{e}"),
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
#[cfg(conduwuit_bench)]
|
||||
#[cfg_attr(conduwuit_bench, bench)]
|
||||
fn resolve_deeper_event_set(c: &mut test::Bencher) {
|
||||
let parallel_fetches = 32;
|
||||
let mut inner = INITIAL_EVENTS();
|
||||
let ban = BAN_STATE_SET();
|
||||
|
||||
inner.extend(ban);
|
||||
let store = TestStore(inner.clone());
|
||||
|
||||
let state_set_a = [
|
||||
inner.get(&event_id("CREATE")).unwrap(),
|
||||
inner.get(&event_id("IJR")).unwrap(),
|
||||
inner.get(&event_id("IMA")).unwrap(),
|
||||
inner.get(&event_id("IMB")).unwrap(),
|
||||
inner.get(&event_id("IMC")).unwrap(),
|
||||
inner.get(&event_id("MB")).unwrap(),
|
||||
inner.get(&event_id("PA")).unwrap(),
|
||||
]
|
||||
.iter()
|
||||
.map(|ev| {
|
||||
(
|
||||
(ev.event_type().clone().into(), ev.state_key().unwrap().into()),
|
||||
ev.event_id().to_owned(),
|
||||
)
|
||||
})
|
||||
.collect::<StateMap<_>>();
|
||||
|
||||
let state_set_b = [
|
||||
inner.get(&event_id("CREATE")).unwrap(),
|
||||
inner.get(&event_id("IJR")).unwrap(),
|
||||
inner.get(&event_id("IMA")).unwrap(),
|
||||
inner.get(&event_id("IMB")).unwrap(),
|
||||
inner.get(&event_id("IMC")).unwrap(),
|
||||
inner.get(&event_id("IME")).unwrap(),
|
||||
inner.get(&event_id("PA")).unwrap(),
|
||||
]
|
||||
.iter()
|
||||
.map(|ev| {
|
||||
(
|
||||
(ev.event_type().clone().into(), ev.state_key().unwrap().into()),
|
||||
ev.event_id().to_owned(),
|
||||
)
|
||||
})
|
||||
.collect::<StateMap<_>>();
|
||||
|
||||
c.iter(|| async {
|
||||
let state_sets = [&state_set_a, &state_set_b];
|
||||
let auth_chain_sets: Vec<HashSet<_>> = state_sets
|
||||
.iter()
|
||||
.map(|map| {
|
||||
store
|
||||
.auth_event_ids(room_id(), map.values().cloned().collect())
|
||||
.unwrap()
|
||||
})
|
||||
.collect();
|
||||
|
||||
let fetch = |id: OwnedEventId| ready(inner.get(&id).map(Arc::clone));
|
||||
let exists = |id: OwnedEventId| ready(inner.get(&id).is_some());
|
||||
let _ = match state_res::resolve(
|
||||
&RoomVersionId::V6,
|
||||
state_sets.into_iter(),
|
||||
&auth_chain_sets,
|
||||
&fetch,
|
||||
&exists,
|
||||
parallel_fetches,
|
||||
)
|
||||
.await
|
||||
{
|
||||
| Ok(state) => state,
|
||||
| Err(_) => panic!("resolution failed during benchmarking"),
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
//*/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPLEMENTATION DETAILS AHEAD
|
||||
//
|
||||
/////////////////////////////////////////////////////////////////////*/
|
||||
struct TestStore<E: Event>(HashMap<OwnedEventId, Arc<E>>);
|
||||
|
||||
#[allow(unused)]
|
||||
impl<E: Event> TestStore<E> {
|
||||
fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> Result<Arc<E>> {
|
||||
self.0
|
||||
.get(event_id)
|
||||
.map(Arc::clone)
|
||||
.ok_or_else(|| Error::NotFound(format!("{} not found", event_id)))
|
||||
}
|
||||
|
||||
/// Returns the events that correspond to the `event_ids` sorted in the same
|
||||
/// order.
|
||||
fn get_events(&self, room_id: &RoomId, event_ids: &[OwnedEventId]) -> Result<Vec<Arc<E>>> {
|
||||
let mut events = vec![];
|
||||
for id in event_ids {
|
||||
events.push(self.get_event(room_id, id)?);
|
||||
}
|
||||
Ok(events)
|
||||
}
|
||||
|
||||
/// Returns a Vec of the related auth events to the given `event`.
|
||||
fn auth_event_ids(&self, room_id: &RoomId, event_ids: Vec<E::Id>) -> Result<HashSet<E::Id>> {
|
||||
let mut result = HashSet::new();
|
||||
let mut stack = event_ids;
|
||||
|
||||
// DFS for auth event chain
|
||||
while !stack.is_empty() {
|
||||
let ev_id = stack.pop().unwrap();
|
||||
if result.contains(&ev_id) {
|
||||
continue;
|
||||
}
|
||||
|
||||
result.insert(ev_id.clone());
|
||||
|
||||
let event = self.get_event(room_id, ev_id.borrow())?;
|
||||
|
||||
stack.extend(event.auth_events().map(ToOwned::to_owned));
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Returns a vector representing the difference in auth chains of the given
|
||||
/// `events`.
|
||||
fn auth_chain_diff(
|
||||
&self,
|
||||
room_id: &RoomId,
|
||||
event_ids: Vec<Vec<E::Id>>,
|
||||
) -> Result<Vec<E::Id>> {
|
||||
let mut auth_chain_sets = vec![];
|
||||
for ids in event_ids {
|
||||
// TODO state store `auth_event_ids` returns self in the event ids list
|
||||
// when an event returns `auth_event_ids` self is not contained
|
||||
let chain = self
|
||||
.auth_event_ids(room_id, ids)?
|
||||
.into_iter()
|
||||
.collect::<HashSet<_>>();
|
||||
auth_chain_sets.push(chain);
|
||||
}
|
||||
|
||||
if let Some(first) = auth_chain_sets.first().cloned() {
|
||||
let common = auth_chain_sets
|
||||
.iter()
|
||||
.skip(1)
|
||||
.fold(first, |a, b| a.intersection(b).cloned().collect::<HashSet<_>>());
|
||||
|
||||
Ok(auth_chain_sets
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.filter(|id| !common.contains(id.borrow()))
|
||||
.collect())
|
||||
} else {
|
||||
Ok(vec![])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TestStore<PduEvent> {
	#[allow(clippy::type_complexity)]
	fn set_up(
		&mut self,
	) -> (StateMap<OwnedEventId>, StateMap<OwnedEventId>, StateMap<OwnedEventId>) {
		let create_event = to_pdu_event::<&EventId>(
			"CREATE",
			alice(),
			TimelineEventType::RoomCreate,
			Some(""),
			to_raw_json_value(&json!({ "creator": alice() })).unwrap(),
			&[],
			&[],
		);
		let cre = create_event.event_id().to_owned();
		self.0.insert(cre.clone(), Arc::clone(&create_event));

		let alice_mem = to_pdu_event(
			"IMA",
			alice(),
			TimelineEventType::RoomMember,
			Some(alice().to_string().as_str()),
			member_content_join(),
			&[cre.clone()],
			&[cre.clone()],
		);
		self.0
			.insert(alice_mem.event_id().to_owned(), Arc::clone(&alice_mem));

		let join_rules = to_pdu_event(
			"IJR",
			alice(),
			TimelineEventType::RoomJoinRules,
			Some(""),
			to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(),
			&[cre.clone(), alice_mem.event_id().to_owned()],
			&[alice_mem.event_id().to_owned()],
		);
		self.0
			.insert(join_rules.event_id().to_owned(), join_rules.clone());

		// Bob and Charlie join at the same time, so there is a fork
		// this will be represented in the state_sets when we resolve
		let bob_mem = to_pdu_event(
			"IMB",
			bob(),
			TimelineEventType::RoomMember,
			Some(bob().to_string().as_str()),
			member_content_join(),
			&[cre.clone(), join_rules.event_id().to_owned()],
			&[join_rules.event_id().to_owned()],
		);
		self.0
			.insert(bob_mem.event_id().to_owned(), bob_mem.clone());

		let charlie_mem = to_pdu_event(
			"IMC",
			charlie(),
			TimelineEventType::RoomMember,
			Some(charlie().to_string().as_str()),
			member_content_join(),
			&[cre, join_rules.event_id().to_owned()],
			&[join_rules.event_id().to_owned()],
		);
		self.0
			.insert(charlie_mem.event_id().to_owned(), charlie_mem.clone());

		let state_at_bob = [&create_event, &alice_mem, &join_rules, &bob_mem]
			.iter()
			.map(|ev| {
				(
					(ev.event_type().clone().into(), ev.state_key().unwrap().into()),
					ev.event_id().to_owned(),
				)
			})
			.collect::<StateMap<_>>();

		let state_at_charlie = [&create_event, &alice_mem, &join_rules, &charlie_mem]
			.iter()
			.map(|ev| {
				(
					(ev.event_type().clone().into(), ev.state_key().unwrap().into()),
					ev.event_id().to_owned(),
				)
			})
			.collect::<StateMap<_>>();

		let expected = [&create_event, &alice_mem, &join_rules, &bob_mem, &charlie_mem]
			.iter()
			.map(|ev| {
				(
					(ev.event_type().clone().into(), ev.state_key().unwrap().into()),
					ev.event_id().to_owned(),
				)
			})
			.collect::<StateMap<_>>();

		(state_at_bob, state_at_charlie, expected)
	}
}

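// Editor's sketch, not part of the diff: the three maps returned by `set_up`
// are keyed by (event type, state key). The fork means the first two agree on
// everything except which membership event (IMB vs IMC) they saw, while the
// expected resolved state contains both. `TestStore` is assumed to be the
// tuple struct wrapping the event HashMap, as `self.0` above suggests.
#[test]
fn set_up_fork_sketch() {
	let mut store: TestStore<PduEvent> = TestStore(HashMap::new());
	let (state_at_bob, state_at_charlie, expected) = store.set_up();
	// Both branches hold CREATE, IMA and IJR plus one membership event each...
	assert_eq!(state_at_bob.len(), state_at_charlie.len());
	// ...while the expected resolved state additionally holds the other join.
	assert!(expected.len() > state_at_bob.len());
}
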
fn event_id(id: &str) -> OwnedEventId {
	if id.contains('$') {
		return id.try_into().unwrap();
	}
	format!("${}:foo", id).try_into().unwrap()
}

fn alice() -> &'static UserId { user_id!("@alice:foo") }

fn bob() -> &'static UserId { user_id!("@bob:foo") }

fn charlie() -> &'static UserId { user_id!("@charlie:foo") }

fn ella() -> &'static UserId { user_id!("@ella:foo") }

fn room_id() -> &'static RoomId { room_id!("!test:foo") }

fn member_content_ban() -> Box<RawJsonValue> {
	to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Ban)).unwrap()
}

fn member_content_join() -> Box<RawJsonValue> {
	to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap()
}

fn to_pdu_event<S>(
	id: &str,
	sender: &UserId,
	ev_type: TimelineEventType,
	state_key: Option<&str>,
	content: Box<RawJsonValue>,
	auth_events: &[S],
	prev_events: &[S],
) -> Arc<PduEvent>
where
	S: AsRef<str>,
{
	// We don't care if the additions happen in order, just that each is atomic
	// (each event gets its own value).
	let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst);
	let id = if id.contains('$') {
		id.to_owned()
	} else {
		format!("${}:foo", id)
	};
	let auth_events = auth_events
		.iter()
		.map(AsRef::as_ref)
		.map(event_id)
		.collect::<Vec<_>>();
	let prev_events = prev_events
		.iter()
		.map(AsRef::as_ref)
		.map(event_id)
		.collect::<Vec<_>>();

	let state_key = state_key.map(ToOwned::to_owned);
	Arc::new(PduEvent {
		event_id: id.try_into().unwrap(),
		rest: Pdu::RoomV3Pdu(RoomV3Pdu {
			room_id: room_id().to_owned(),
			sender: sender.to_owned(),
			origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()),
			state_key,
			kind: ev_type,
			content,
			redacts: None,
			unsigned: btreemap! {},
			auth_events,
			prev_events,
			depth: uint!(0),
			hashes: EventHash::new(String::new()),
			signatures: Signatures::new(),
		}),
	})
}

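// Editor's usage sketch, not part of the diff: how the helper above is called
// in these tests. Short ids are expanded to "$<id>:foo", and auth/prev events
// are given by their short ids; the "TOPIC" event here is illustrative only.
#[test]
fn to_pdu_event_sketch() {
	let topic = to_pdu_event(
		"TOPIC",
		alice(),
		TimelineEventType::RoomTopic,
		Some(""),
		to_raw_json_value(&json!({ "topic": "test" })).unwrap(),
		&["CREATE", "IJR", "IPOWER"],
		&["START"],
	);
	assert_eq!(topic.event_id().as_str(), "$TOPIC:foo");
	assert_eq!(topic.auth_events().count(), 3);
}
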
// all graphs start with these input events
#[allow(non_snake_case)]
fn INITIAL_EVENTS() -> HashMap<OwnedEventId, Arc<PduEvent>> {
	vec![
		to_pdu_event::<&EventId>(
			"CREATE",
			alice(),
			TimelineEventType::RoomCreate,
			Some(""),
			to_raw_json_value(&json!({ "creator": alice() })).unwrap(),
			&[],
			&[],
		),
		to_pdu_event(
			"IMA",
			alice(),
			TimelineEventType::RoomMember,
			Some(alice().as_str()),
			member_content_join(),
			&["CREATE"],
			&["CREATE"],
		),
		to_pdu_event(
			"IPOWER",
			alice(),
			TimelineEventType::RoomPowerLevels,
			Some(""),
			to_raw_json_value(&json!({ "users": { alice(): 100 } })).unwrap(),
			&["CREATE", "IMA"],
			&["IMA"],
		),
		to_pdu_event(
			"IJR",
			alice(),
			TimelineEventType::RoomJoinRules,
			Some(""),
			to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(),
			&["CREATE", "IMA", "IPOWER"],
			&["IPOWER"],
		),
		to_pdu_event(
			"IMB",
			bob(),
			TimelineEventType::RoomMember,
			Some(bob().to_string().as_str()),
			member_content_join(),
			&["CREATE", "IJR", "IPOWER"],
			&["IJR"],
		),
		to_pdu_event(
			"IMC",
			charlie(),
			TimelineEventType::RoomMember,
			Some(charlie().to_string().as_str()),
			member_content_join(),
			&["CREATE", "IJR", "IPOWER"],
			&["IMB"],
		),
		to_pdu_event::<&EventId>(
			"START",
			charlie(),
			TimelineEventType::RoomTopic,
			Some(""),
			to_raw_json_value(&json!({})).unwrap(),
			&[],
			&[],
		),
		to_pdu_event::<&EventId>(
			"END",
			charlie(),
			TimelineEventType::RoomTopic,
			Some(""),
			to_raw_json_value(&json!({})).unwrap(),
			&[],
			&[],
		),
	]
	.into_iter()
	.map(|ev| (ev.event_id().to_owned(), ev))
	.collect()
}

// events for the ban scenario, layered on top of the INITIAL_EVENTS graph
#[allow(non_snake_case)]
fn BAN_STATE_SET() -> HashMap<OwnedEventId, Arc<PduEvent>> {
	vec![
		to_pdu_event(
			"PA",
			alice(),
			TimelineEventType::RoomPowerLevels,
			Some(""),
			to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(),
			&["CREATE", "IMA", "IPOWER"], // auth_events
			&["START"], // prev_events
		),
		to_pdu_event(
			"PB",
			alice(),
			TimelineEventType::RoomPowerLevels,
			Some(""),
			to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(),
			&["CREATE", "IMA", "IPOWER"],
			&["END"],
		),
		to_pdu_event(
			"MB",
			alice(),
			TimelineEventType::RoomMember,
			Some(ella().as_str()),
			member_content_ban(),
			&["CREATE", "IMA", "PB"],
			&["PA"],
		),
		to_pdu_event(
			"IME",
			ella(),
			TimelineEventType::RoomMember,
			Some(ella().as_str()),
			member_content_join(),
			&["CREATE", "IJR", "PA"],
			&["MB"],
		),
	]
	.into_iter()
	.map(|ev| (ev.event_id().to_owned(), ev))
	.collect()
}

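// Editor's sketch, not part of the diff: the ban fixtures reference events from
// the shared initial graph ("CREATE", "IJR", "START", ...), so a store for the
// ban scenario is built by merging the two maps. `TestStore` is assumed to be
// the tuple struct wrapping this HashMap, as used above; the function name is
// illustrative only.
#[allow(dead_code)]
fn ban_scenario_store() -> TestStore<PduEvent> {
	let mut events = INITIAL_EVENTS();
	events.extend(BAN_STATE_SET());
	TestStore(events)
}
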
/// Convenience trait for adding event type plus state key to state maps.
trait EventTypeExt {
	fn with_state_key(self, state_key: impl Into<String>) -> (StateEventType, String);
}

impl EventTypeExt for &TimelineEventType {
	fn with_state_key(self, state_key: impl Into<String>) -> (StateEventType, String) {
		(self.to_string().into(), state_key.into())
	}
}

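// Editor's usage sketch, not part of the diff: `with_state_key` builds the
// (event type, state key) pair used to index the expected state maps.
#[test]
fn with_state_key_sketch() {
	let (ty, key) = TimelineEventType::RoomMember.with_state_key(alice().as_str());
	assert_eq!(ty.to_string(), "m.room.member");
	assert_eq!(key, alice().as_str());
}
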
mod event {
	use ruma::{
		MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId,
		events::{TimelineEventType, pdu::Pdu},
	};
	use serde::{Deserialize, Serialize};
	use serde_json::value::RawValue as RawJsonValue;

	use super::Event;

	impl Event for PduEvent {
		type Id = OwnedEventId;

		fn event_id(&self) -> &Self::Id { &self.event_id }

		fn room_id(&self) -> &RoomId {
			match &self.rest {
				| Pdu::RoomV1Pdu(ev) => &ev.room_id,
				| Pdu::RoomV3Pdu(ev) => &ev.room_id,
				#[cfg(not(feature = "unstable-exhaustive-types"))]
				| _ => unreachable!("new PDU version"),
			}
		}

		fn sender(&self) -> &UserId {
			match &self.rest {
				| Pdu::RoomV1Pdu(ev) => &ev.sender,
				| Pdu::RoomV3Pdu(ev) => &ev.sender,
				#[cfg(not(feature = "unstable-exhaustive-types"))]
				| _ => unreachable!("new PDU version"),
			}
		}

		fn event_type(&self) -> &TimelineEventType {
			match &self.rest {
				| Pdu::RoomV1Pdu(ev) => &ev.kind,
				| Pdu::RoomV3Pdu(ev) => &ev.kind,
				#[cfg(not(feature = "unstable-exhaustive-types"))]
				| _ => unreachable!("new PDU version"),
			}
		}

		fn content(&self) -> &RawJsonValue {
			match &self.rest {
				| Pdu::RoomV1Pdu(ev) => &ev.content,
				| Pdu::RoomV3Pdu(ev) => &ev.content,
				#[cfg(not(feature = "unstable-exhaustive-types"))]
				| _ => unreachable!("new PDU version"),
			}
		}

		fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch {
			match &self.rest {
				| Pdu::RoomV1Pdu(ev) => ev.origin_server_ts,
				| Pdu::RoomV3Pdu(ev) => ev.origin_server_ts,
				#[cfg(not(feature = "unstable-exhaustive-types"))]
				| _ => unreachable!("new PDU version"),
			}
		}

		fn state_key(&self) -> Option<&str> {
			match &self.rest {
				| Pdu::RoomV1Pdu(ev) => ev.state_key.as_deref(),
				| Pdu::RoomV3Pdu(ev) => ev.state_key.as_deref(),
				#[cfg(not(feature = "unstable-exhaustive-types"))]
				| _ => unreachable!("new PDU version"),
			}
		}

		fn prev_events(&self) -> Box<dyn DoubleEndedIterator<Item = &Self::Id> + Send + '_> {
			match &self.rest {
				| Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)),
				| Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()),
				#[cfg(not(feature = "unstable-exhaustive-types"))]
				| _ => unreachable!("new PDU version"),
			}
		}

		fn auth_events(&self) -> Box<dyn DoubleEndedIterator<Item = &Self::Id> + Send + '_> {
			match &self.rest {
				| Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)),
				| Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()),
				#[cfg(not(feature = "unstable-exhaustive-types"))]
				| _ => unreachable!("new PDU version"),
			}
		}

		fn redacts(&self) -> Option<&Self::Id> {
			match &self.rest {
				| Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(),
				| Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(),
				#[cfg(not(feature = "unstable-exhaustive-types"))]
				| _ => unreachable!("new PDU version"),
			}
		}
	}

	#[derive(Clone, Debug, Deserialize, Serialize)]
	pub(crate) struct PduEvent {
		pub(crate) event_id: OwnedEventId,
		#[serde(flatten)]
		pub(crate) rest: Pdu,
	}
}
@@ -4,11 +4,13 @@ pub(crate) mod error;
pub mod event_auth;
mod power_levels;
mod room_version;
mod state_event;

#[cfg(test)]
mod test_utils;

#[cfg(test)]
mod benches;

use std::{
	borrow::Borrow,
	cmp::{Ordering, Reverse},

@@ -33,9 +35,12 @@ use self::power_levels::PowerLevelsContentFields;
pub use self::{
	event_auth::{auth_check, auth_types_for_event},
	room_version::RoomVersion,
	state_event::Event,
};
use crate::{debug, pdu::StateKey, trace, warn};
use crate::{
	debug,
	matrix::{event::Event, pdu::StateKey},
	trace, warn,
};

/// A mapping of event type and state_key to some value `T`, usually an
/// `EventId`.

@@ -146,7 +151,6 @@ where
		&event_fetch,
		parallel_fetches,
	)
	.boxed()
	.await?;

	debug!(count = sorted_control_levels.len(), "power events");

@@ -161,7 +165,6 @@ where
		&event_fetch,
		parallel_fetches,
	)
	.boxed()
	.await?;

	debug!(count = resolved_control.len(), "resolved power events");

@@ -189,7 +192,6 @@ where

	let sorted_left_events =
		mainline_sort(&events_to_resolve, power_event.cloned(), &event_fetch, parallel_fetches)
			.boxed()
			.await?;

	trace!(list = ?sorted_left_events, "events left, sorted");

@@ -201,7 +203,6 @@ where
		&event_fetch,
		parallel_fetches,
	)
	.boxed()
	.await?;

	// Add unconflicted state to the resolved state
Some files were not shown because too many files have changed in this diff.