Compare commits

...
This repository was archived on 2025-08-14. You can view files and clone it, but you cannot make any changes to its state, such as pushing or creating new issues, pull requests, or comments.

112 commits

Author SHA1 Message Date
strawberry
2847656ebe fix search result amount/count being incorrect
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:08 -04:00
strawberry
907df232ec bump deps, nix flake lockfile, cleanup some things, bump rust, provide macOS binaries, fix more build issues and macOS building
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:08 -04:00
Jason Volk
bdbe7c40d7 split client/sync
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:08 -04:00
Jason Volk
a14766be60 merge rooms state_compressor service and data
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:08 -04:00
Jason Volk
7546df4233 slightly cleanup appservice_in_room
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:08 -04:00
Jason Volk
6e113c8b43 merge remaining rooms state_cache data and service
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:08 -04:00
Jason Volk
e996c3d24a merge rooms user service and data
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:08 -04:00
Jason Volk
12431fd102 merge rooms state service and data
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:08 -04:00
Jason Volk
5dc57628ef add resolve_with_servers() to alias service; simplify api
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:08 -04:00
Jason Volk
0a5e1fdd46 add federation client for select high-timeout requests
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:08 -04:00
Jason Volk
aebf414f4d Refactor server_keys service/interface and related callsites
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:08 -04:00
Jason Volk
ef32656517 add random shuffle util
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:08 -04:00
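A minimal sketch of what such a shuffle utility might look like, assuming the rand crate (which appears in this tree's dependency graph); the name and signature are illustrative, not necessarily the crate's actual API:

use rand::seq::SliceRandom;

// Hypothetical helper: return the input's elements in random order
// without mutating the caller's slice (Fisher-Yates under the hood).
pub fn shuffle<T: Clone>(items: &[T]) -> Vec<T> {
    let mut out = items.to_vec();
    out.shuffle(&mut rand::thread_rng());
    out
}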
Jason Volk
6cc5086851 use string::EMPTY; minor formatting and misc cleanups
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:08 -04:00
Jason Volk
a2b876c5d3 add timepoint_from_now to complement timepoint_ago in utils
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:08 -04:00
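Going by the name, timepoint_from_now walks forward from the current time the way timepoint_ago walks backward. A hedged sketch using only std; the real signatures may differ:

use std::time::{Duration, SystemTime};

// Checked arithmetic, so absurdly large durations yield None
// instead of panicking.
pub fn timepoint_ago(ago: Duration) -> Option<SystemTime> {
    SystemTime::now().checked_sub(ago)
}

pub fn timepoint_from_now(ahead: Duration) -> Option<SystemTime> {
    SystemTime::now().checked_add(ahead)
}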
Jason Volk
b1534130ed Refactor for structured insertions
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:08 -04:00
Jason Volk
5bcf6429d7 re-scheme naming of stream iterator overloads
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:08 -04:00
Jason Volk
7b2eba7449 add IgnoreAll directive to deserializer
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:08 -04:00
Jason Volk
5b84247c55 add serialized insert interface
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:08 -04:00
Jason Volk
07d5a68582 further develop serializer for insertions
add JSON delegator to db serializer

consolidate writes through memfun; simplifications

Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:08 -04:00
Jason Volk
30b9859842 add document comments to config items
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:08 -04:00
Jason Volk
27f9c1c0c1 cleanup Config::load()
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:08 -04:00
Jason Volk
ff5527bc12 initial example-config generator
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:08 -04:00
Jason Volk
a5dc16b7c5 add macro util to determine if cargo build or check/clippy.
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
7a08d394a0 add non-allocating fixed-size random string generator
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
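One way to generate a fixed-size random string with zero heap allocation is to fill a stack-backed string type. A sketch assuming the arrayvec and rand crates (both present in the Cargo.lock below); all names here are illustrative:

use arrayvec::ArrayString;
use rand::Rng;

const ALPHABET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";

// N is the exact capacity; ArrayString lives entirely on the stack.
pub fn random_string<const N: usize>() -> ArrayString<N> {
    let mut rng = rand::thread_rng();
    let mut out = ArrayString::<N>::new();
    for _ in 0..N {
        out.push(ALPHABET[rng.gen_range(0..ALPHABET.len())] as char);
    }
    out
}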
Jason Volk
bb551ee346 add tuple access functor-macro
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
31ade205df add util to restore state on scope exit
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
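The usual Rust shape for restoring state on scope exit is a drop guard. A minimal sketch with hypothetical names:

// Runs the stored closure when dropped, i.e. on any scope exit:
// normal fall-through, early return, ? propagation, or panic unwind.
pub struct RestoreOnExit<F: FnMut()> {
    restore: F,
}

impl<F: FnMut()> Drop for RestoreOnExit<F> {
    fn drop(&mut self) {
        (self.restore)();
    }
}

pub fn restore_on_exit<F: FnMut()>(restore: F) -> RestoreOnExit<F> {
    RestoreOnExit { restore }
}

A caller saves the old value, mutates it, and binds the guard to a local so the closure puts things back when the local drops.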
Jason Volk
03f3316177 relax Sized bound for debug::type_name
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
1b1c2d1af5 sort rustfmt
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
e879404841 Add constructions and Default for PduBuilder
simplify various RoomMemberEventContent constructions

Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
15d6a616d6 misc cleanup
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
3dc4b56c42 additional database stream deserializations for serde_json::from_ elim
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
2e850a0db5 refactor various patterns for serde_json::from_ elim
bump ruma

Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
8306a390ce refactor to pdu.get_content() for serde_json::from_ elim
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
e869936040 refactor to room_state_get_content() for serde_json::from_ elim
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
a8dc8c2890 Add state_get_content(shortid) for serde_json::from elim
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
4104c8cd40 abstract account-data deserializations for serde_json::from_elim
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
4a121757ec abstract common patterns as core pdu memberfns
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
45a92e2f54 parallelize calculate_invite_state
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
961b36f2ad add is_not_found as Error member function; tweak interface; add doc comments
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
5cac7937bf add unwrap_or to TryFutureExtExt
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
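A plausible reading of that extension, sketched against the futures crate's TryFuture; the trait name follows the commit message, the body is an assumption:

use futures::{future::TryFuture, TryFutureExt};

// Resolve a fallible future to its Ok value, or to a caller-supplied
// default if it resolves to Err.
pub trait TryFutureExtExt: TryFuture + Sized {
    async fn unwrap_or(self, default: Self::Ok) -> Self::Ok;
}

impl<Fut: TryFuture> TryFutureExtExt for Fut {
    async fn unwrap_or(self, default: Self::Ok) -> Self::Ok {
        self.unwrap_or_else(move |_| default).await
    }
}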
Jason Volk
48c96d62b7 add mactors for true/false
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
4b13d1b220 add FlatOk trait to Result/Option suite
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
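FlatOk reads like a flattener for nested Option/Result shapes. A hedged sketch; the crate's real trait may cover more combinations:

// Collapse Option<Result<T, E>> or Result<Option<T>, E> into
// Option<T>, quietly dropping the error branch.
pub trait FlatOk<T> {
    fn flat_ok(self) -> Option<T>;
}

impl<T, E> FlatOk<T> for Option<Result<T, E>> {
    fn flat_ok(self) -> Option<T> {
        self.and_then(Result::ok)
    }
}

impl<T, E> FlatOk<T> for Result<Option<T>, E> {
    fn flat_ok(self) -> Option<T> {
        self.ok().flatten()
    }
}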
Jason Volk
f22e351397 catch panics at base functions to integrate with other fatal errors.
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
a98240749c split admin-room branch from build_and_append_pdu (fixes large stack warning)
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
dd08503833 use loop condition to account for loole channel close
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
1da5df73c2 fix unnecessary re-serializations
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
strawberry
3c4fc77df5 feature-gate direct TLS mode to make rustls/aws-lc-rs optional
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
b15e007fa3 dont answer the admin room ID over /_matrix/federation/v1/query/directory
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
acf4a38490 mark the server user bot as online/offline on shutdown/startup
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
morguldir
5eb40f8927 fix sliding sync room type filter regression
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
Jason Volk
1812e75580 fix aliasid_alias key deserialization
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
94937e5b46 fix trait-solver issue requiring recursion_limit increase
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
8a12255652 fix get_all_media_keys deserialization
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
13e2520593 consume all bytes for top-level Ignore; add comments/tweaks
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
strawberry
1d37f02f2d miniscule spaces code optimisations
still terrible though

Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
04e9401fc8 add m.call and m.call.member to list of permissions to set on public rooms
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
3b718073bf fix room directory regression
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
0bfbb2b963 add MSC4151 room reporting support
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
d09e9ade05 drop unnecessary error to debug_warn
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
b05cf75dc4 fix: dont add remote users for push targets, use hashset instead of vec
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
e71fb9c710 update last_seen_ip and last_seen_ts on updating device metadata
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
5e66638111 improve UX of admin media deletion commands, ignore errors by default, support deleting local media too
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
5d0e88409a remove unnecessary full type annos
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
bd7731cf7d dont send non-state events from ignored users over /context/{eventId}
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
243736e0b7 dont send events from ignored users over /messages
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
eb9a881f36 small doc style fix
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
7892a0ad26 disable log colours in the complement config
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
605260c210 add missing feat_sha256_media to fresh database creations
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
e468d4fc24 add config option to disable ANSI log colours
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
d6d8ae1e27 add back server name to error sending PDU to remote server
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
eab593f9b8 add support for reading a registration token from a file
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
a4b14c157a fix list_rooms admin command filters
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
336aac2a4a use ok_or_else for a rare error
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
226b839d68 dont allow sending/receiving room invites with ignored users
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
5d6b9d323d dont send non-state events from ignored users over sync
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
e4b8ab8efa docs: nixos and unix socket fail, jemalloc and hardened.nix
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
b2a8eb3d7a dont send read receipts and typing indicators from ignored users
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
bd6be69ccf send EDUs to appservices if in events
to_device is not supported yet

Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
3d0beabe35 bump ruma, cargo.lock, and deps
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
6e491a5fc4 enable jemalloc_stats feature by default
this was supposed to be enabled by default

Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
e7eef6f9a4 remove old "rocksdb" and "sha256_media" cargo features
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
1b59965da1 tiny micro-optimisations in some config stuff
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
002454a25b support reading TURN secret from a file (turn_secret_file)
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
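File-based secrets normally take precedence over the inline config value when both are set. A sketch under that assumption, with illustrative names mirroring the commit message:

use std::{fs, path::Path};

// Prefer turn_secret_file over the inline turn_secret, trimming the
// trailing newline that most editors append to the file.
pub fn effective_turn_secret(
    turn_secret: Option<&str>,
    turn_secret_file: Option<&Path>,
) -> Option<String> {
    if let Some(path) = turn_secret_file {
        return fs::read_to_string(path)
            .ok()
            .map(|s| s.trim_end().to_owned());
    }
    turn_secret.map(str::to_owned)
}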
strawberry
3a0bd8e0a7 allow taking multiple --config arguments to "include"/merge more config files
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
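Merging several --config files generally means layering them so keys in later files override earlier ones. A sketch using figment, which this tree already depends on per the Cargo.lock below; the function itself is illustrative:

use std::path::PathBuf;

use figment::{
    providers::{Format, Toml},
    Figment,
};

// Fold every --config path into one Figment; a key set in a later
// file wins over the same key from an earlier file.
pub fn layered_config(paths: &[PathBuf]) -> Figment {
    paths.iter().fold(Figment::new(), |figment, path| {
        figment.merge(Toml::file(path))
    })
}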
strawberry
278f4fa37e improve some general documentation
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
07fd77ae47 allow users to respond to polls by default (org.matrix.msc3381.poll.response)
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
strawberry
3b8f5f76ce drop target-cpu optimised builds
this seems too broken.

Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
Jason Volk
1a9b1be9b2 bump tonic
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
481fcb95eb optimize auth_chain short_id to event_id translation step
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
a11aa52a27 refactor multi-get to handle result type
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
17539e476a various cleanup tweaks/fixes
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
58c4894432 add rocksdb secondary; fix read_only mode.
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
ea7c7ffe09 additional stream tools
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
4a265a476c Add rocksdb logging integration with tracing.
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
72e52f41bf merge rooms/short Data w/ Service; optimize queries
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
2aba38dc93 add ArrayVec-backed serialized query overload; doc comments
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
df54e0c140 split remaining map suites
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
strawberry
a25e4d4012 add missing await to first admin room creation
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-10-21 16:56:07 -04:00
Jason Volk
a18b0d4804 minor auth_chain optimizations/cleanup
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
1c294a17d8 handle serde_json for deserialized()
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
452e10b329 Database Refactor
combine service/users data w/ mod unit

split sliding sync related out of service/users

instrument database entry points

remove increment crap from database interface

de-wrap all database get() calls

de-wrap all database insert() calls

de-wrap all database remove() calls

refactor database interface for async streaming

add query key serializer for database

implement Debug for result handle

add query deserializer for database

add deserialization trait for option handle

start a stream utils suite

de-wrap/asyncify/type-query count_one_time_keys()

de-wrap/asyncify users count

add admin query users command suite

de-wrap/asyncify users exists

de-wrap/partially asyncify user filter related

asyncify/de-wrap users device/keys related

asyncify/de-wrap user auth/misc related

asyncify/de-wrap users blurhash

asyncify/de-wrap account_data get; merge Data into Service

partial asyncify/de-wrap uiaa; merge Data into Service

partially asyncify/de-wrap transaction_ids get; merge Data into Service

partially asyncify/de-wrap key_backups; merge Data into Service

asyncify/de-wrap pusher service getters; merge Data into Service

asyncify/de-wrap rooms alias getters/some iterators

asyncify/de-wrap rooms directory getters/iterator

partially asyncify/de-wrap rooms lazy-loading

partially asyncify/de-wrap rooms metadata

asyncify/dewrap rooms outlier

asyncify/dewrap rooms pdu_metadata

dewrap/partially asyncify rooms read receipt

de-wrap rooms search service

de-wrap/partially asyncify rooms user service

partial de-wrap rooms state_compressor

de-wrap rooms state_cache

de-wrap room state et al

de-wrap rooms timeline service

additional users device/keys related

de-wrap/asyncify sender

asyncify services

refactor database to TryFuture/TryStream

refactor services for TryFuture/TryStream

asyncify api handlers

additional asyncification for admin module

abstract stream related; support reverse streams

additional stream conversions

asyncify state-res related

Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
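The thread running through the list above is replacing Result-wrapped, fully materialized getters with async streams. A toy sketch of the target shape, assuming the futures crate (added as a direct dependency in the Cargo.lock below); the data is a stand-in for a real RocksDB iterator:

use futures::stream::{self, Stream, StreamExt};

// After the refactor, a getter exposes rows lazily as a Stream
// instead of returning a Result<Vec<_>> built up front.
fn user_ids() -> impl Stream<Item = String> {
    stream::iter(vec![
        "@alice:example.org".to_owned(),
        "@bob:example.org".to_owned(),
    ])
}

async fn count_users() -> usize {
    user_ids().count().await
}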
Jason Volk
ede255a2f5 add UnwrapInfallible to Result
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
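UnwrapInfallible-style helpers exploit the fact that a Result whose error type is Infallible can never be Err, so unwrapping is total rather than a potential panic on a real error. A sketch:

use std::convert::Infallible;

pub trait UnwrapInfallible<T> {
    fn unwrap_infallible(self) -> T;
}

impl<T> UnwrapInfallible<T> for Result<T, Infallible> {
    fn unwrap_infallible(self) -> T {
        match self {
            Ok(value) => value,
            // No value of type Infallible can be constructed, so the
            // empty match proves this arm unreachable at compile time.
            Err(never) => match never {},
        }
    }
}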
Jason Volk
907a182ede re-export crates used by error macros
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
2e98a211c3 add is_not_found functor to error; tweak status code matcher
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
0f6d253cda add missing err! case
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
e587013084 add util functors for is_zero/is_equal; move clamp to math utils
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
76a191e79c move common_elements util into unit
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
e1b62785e3 add err log trait to Result
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
61d95edf77 add expected! macro to checked math expression suite
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
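Checked-math suites often pair checked_* operations with a macro that turns an overflow the caller has ruled out into a hard failure. A hedged sketch of what expected! could look like; the real macro's exact behavior isn't shown here:

// Evaluate a checked-arithmetic expression, panicking with a clear
// message if it overflowed where the caller asserted it cannot.
macro_rules! expected {
    ($expr:expr) => {
        $expr.expect("arithmetic overflow where none was expected")
    };
}

fn next_index(i: u64) -> u64 {
    expected!(i.checked_add(1))
}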
Jason Volk
070332c0d1 add MapExpect to Result
add DebugInspect to Result

move Result typedef into unit

Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
Jason Volk
67bba33ba7 add str traits for split, between, unquote; consolidate tests
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
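A sketch of str helpers matching those names, built on std's split_once and strip_prefix/strip_suffix; the signatures are illustrative:

pub trait StrExt {
    // The slice between the first `open` and the next `close` after it.
    fn between(&self, open: &str, close: &str) -> Option<&str>;
    // Strip one surrounding pair of double quotes, if present.
    fn unquote(&self) -> Option<&str>;
}

impl StrExt for str {
    fn between(&self, open: &str, close: &str) -> Option<&str> {
        let (_, rest) = self.split_once(open)?;
        let (inner, _) = rest.split_once(close)?;
        Some(inner)
    }

    fn unquote(&self) -> Option<&str> {
        self.strip_prefix('"')?.strip_suffix('"')
    }
}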
Jason Volk
8265dedd57 simplify service trait bounds and lifetimes
Signed-off-by: Jason Volk <jason@zemos.net>
2024-10-21 16:56:07 -04:00
299 changed files with 19301 additions and 15873 deletions

View file

@@ -69,7 +69,9 @@ jobs:
with:
diagnostic-endpoint: ""
extra-conf: |
show-trace = true
experimental-features = nix-command flakes
extra-experimental-features = nix-command flakes
accept-flake-config = true
- name: Enable Cachix binary cache
@@ -172,8 +174,8 @@ jobs:
strategy:
matrix:
include:
- target: aarch64-unknown-linux-musl
- target: x86_64-unknown-linux-musl
- target: aarch64-linux-musl
- target: x86_64-linux-musl
steps:
- name: Sync repository
uses: https://github.com/actions/checkout@v4

View file

@@ -16,7 +16,6 @@ on:
- 'docker/**'
branches:
- main
- change-ci-cache
tags:
- '*'
# Allows you to run this workflow manually from the Actions tab
@@ -24,7 +23,7 @@ on:
concurrency:
group: ${{ github.head_ref || github.ref_name }}
cancel-in-progress: true
cancel-in-progress: false
env:
# sccache only on main repo
@@ -51,25 +50,41 @@ env:
# Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps
NIX_CONFIG: |
show-trace = true
extra-substituters = https://attic.kennel.juneis.dog/conduit https://attic.kennel.juneis.dog/conduwuit https://cache.lix.systems https://conduwuit.cachix.org
extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://cache.lix.systems https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net
extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
# complement uses libolm
NIXPKGS_ALLOW_INSECURE: 1
experimental-features = nix-command flakes
extra-experimental-features = nix-command flakes
accept-flake-config = true
download-buffer-size = 134217728
permissions:
packages: write
contents: read
jobs:
tests:
name: Test
test_and_build:
name: Test and Build Artifacts
strategy:
matrix:
target: ["aarch64-linux-musl", "x86_64-linux-musl"]
runs-on: ubuntu-latest
env:
CARGO_PROFILE: "test"
steps:
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
- name: Free up more runner space
run: |
set +o pipefail
# large docker images
sudo docker image prune --all --force || true
# large packages
sudo apt-get purge -y '^llvm-.*' 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true
sudo apt-get autoremove -y
sudo apt-get clean
# large folders
sudo rm -rf /var/lib/apt/lists/* /usr/local/games /usr/local/sqlpackage /usr/local/.ghcup /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/local/share/vcpkg /usr/local/lib/python* /usr/local/lib/node_modules /usr/local/julia* /opt/mssql-tools /etc/skel /usr/share/vim /usr/share/postgresql /usr/share/man /usr/share/apache-maven-* /usr/share/R /usr/share/alsa /usr/share/miniconda /usr/share/grub /usr/share/gradle-* /usr/share/locale /usr/share/texinfo /usr/share/kotlinc /usr/share/swift /usr/share/doc /usr/share/az_9.3.0 /usr/share/sbt /usr/share/ri /usr/share/icons /usr/share/java /usr/share/fonts /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/lib/R /usr/lib/postgresql /usr/lib/heroku /usr/lib/gcc
set -o pipefail
- name: Sync repository
uses: actions/checkout@v4
@@ -85,7 +100,7 @@ jobs:
exit 1
fi
- uses: nixbuild/nix-quick-install-action@v28
- uses: nixbuild/nix-quick-install-action@master
- name: Restore and cache Nix store
uses: nix-community/cache-nix-action@v5.1.0
@@ -117,8 +132,10 @@ jobs:
- name: Apply Nix binary cache configuration
run: |
sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null <<EOF
extra-substituters = https://attic.kennel.juneis.dog/conduit https://attic.kennel.juneis.dog/conduwuit https://cache.lix.systems https://conduwuit.cachix.org
extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://cache.lix.systems https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net
extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
accept-flake-config = true
download-buffer-size = 134217728
EOF
- name: Use alternative Nix binary caches if specified
@@ -129,16 +146,19 @@ jobs:
extra-trusted-public-keys = ${{ env.ATTIC_PUBLIC_KEY }}
EOF
- name: Prepare build environment
- name: Prepare build environment (Linux)
run: |
echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc"
nix profile install --impure --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv
direnv allow
nix develop .#all-features --command true --impure
- name: Cache CI dependencies
- name: Cache CI dependencies (Linux)
run: |
bin/nix-build-and-cache ci
bin/nix-build-and-cache just '.#devShells.x86_64-linux.default'
bin/nix-build-and-cache just '.#devShells.x86_64-linux.all-features'
bin/nix-build-and-cache just '.#devShells.x86_64-linux.dynamic'
# use sccache for Rust
- name: Run sccache-cache
@@ -151,10 +171,14 @@ jobs:
cache-all-crates: "true"
- name: Run CI tests
env:
CARGO_PROFILE: "test"
run: |
direnv exec . engage > >(tee -a test_output.log)
- name: Run Complement tests
env:
CARGO_PROFILE: "test"
run: |
# the nix devshell sets $COMPLEMENT_SRC, so "/dev/null" is no-op
direnv exec . bin/complement "/dev/null" complement_test_logs.jsonl complement_test_results.jsonl > >(tee -a test_output.log)
@@ -198,94 +222,24 @@ jobs:
echo '# Complement diff results' >> $GITHUB_STEP_SUMMARY
echo '```diff' >> $GITHUB_STEP_SUMMARY
tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
fi
- name: Run cargo clean test artifacts
- name: Run cargo clean test artifacts to free up space
run: |
cargo clean --profile test
build:
name: Build
runs-on: ubuntu-latest
needs: tests
strategy:
matrix:
include:
- target: aarch64-unknown-linux-musl
- target: x86_64-unknown-linux-musl
steps:
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
- name: Sync repository
uses: actions/checkout@v4
- uses: nixbuild/nix-quick-install-action@v28
- name: Restore and cache Nix store
uses: nix-community/cache-nix-action@v5.1.0
with:
# restore and save a cache using this key
primary-key: nix-${{ runner.os }}-${{ matrix.target }}-${{ hashFiles('**/*.nix', '**/.lock') }}
# if there's no cache hit, restore a cache by this prefix
restore-prefixes-first-match: nix-${{ runner.os }}-
# collect garbage until Nix store size (in bytes) is at most this number
# before trying to save a new cache
gc-max-store-size-linux: 2073741824
# do purge caches
purge: true
# purge all versions of the cache
purge-prefixes: nix-${{ runner.os }}-
# created more than this number of seconds ago relative to the start of the `Post Restore` phase
purge-last-accessed: 86400
# except the version with the `primary-key`, if it exists
purge-primary-key: never
# always save the cache
save-always: true
- name: Enable Cachix binary cache
run: |
nix profile install nixpkgs#cachix
cachix use crane
cachix use nix-community
- name: Apply Nix binary cache configuration
run: |
sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null <<EOF
extra-substituters = https://attic.kennel.juneis.dog/conduit https://attic.kennel.juneis.dog/conduwuit https://cache.lix.systems https://conduwuit.cachix.org
extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
EOF
- name: Use alternative Nix binary caches if specified
if: ${{ (env.ATTIC_ENDPOINT != '') && (env.ATTIC_PUBLIC_KEY != '') }}
run: |
sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null <<EOF
extra-substituters = ${{ env.ATTIC_ENDPOINT }}
extra-trusted-public-keys = ${{ env.ATTIC_PUBLIC_KEY }}
EOF
- name: Prepare build environment
run: |
echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc"
nix profile install --impure --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv
direnv allow
nix develop .#all-features --command true --impure
# use sccache for Rust
- name: Run sccache-cache
if: (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]')
uses: mozilla-actions/sccache-action@main
# use rust-cache
- uses: Swatinem/rust-cache@v2
with:
cache-all-crates: "true"
- name: Build static ${{ matrix.target }}
run: |
CARGO_DEB_TARGET_TUPLE=$(echo ${{ matrix.target }} | grep -o -E '^([^-]*-){3}[^-]*')
if [[ ${{ matrix.target }} == "x86_64-linux-musl" ]]
then
CARGO_DEB_TARGET_TUPLE="x86_64-unknown-linux-musl"
elif [[ ${{ matrix.target }} == "aarch64-linux-musl" ]]
then
CARGO_DEB_TARGET_TUPLE="aarch64-unknown-linux-musl"
fi
SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct)
bin/nix-build-and-cache just .#static-${{ matrix.target }}-all-features
@@ -302,7 +256,7 @@ jobs:
# quick smoke test of the x86_64 static release binary
- name: Run x86_64 static release binary
run: |
# GH actions default runners are x86_64 only
# GH actions default ubuntu runners are x86_64 only
if file result/bin/conduit | grep x86-64; then
result/bin/conduit --version
fi
@@ -401,10 +355,86 @@ jobs:
if-no-files-found: error
compression-level: 0
build_mac_binaries:
name: Build MacOS Binaries
strategy:
matrix:
os: [macos-latest, macos-13]
runs-on: ${{ matrix.os }}
steps:
- name: Sync repository
uses: actions/checkout@v4
- name: Tag comparison check
if: ${{ startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') }}
run: |
# Tag mismatch with latest repo tag check to prevent potential downgrades
LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`)
if [ $LATEST_TAG != ${{ github.ref_name }} ]; then
echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.'
echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' >> $GITHUB_STEP_SUMMARY
exit 1
fi
# use sccache for Rust
- name: Run sccache-cache
if: (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]')
uses: mozilla-actions/sccache-action@main
# use rust-cache
- uses: Swatinem/rust-cache@v2
with:
cache-all-crates: "true"
# Nix can't do portable macOS builds yet
- name: Build macOS x86_64 binary
if: ${{ matrix.os == 'macos-13' }}
run: |
CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short HEAD)" cargo build --release
cp -v -f target/release/conduit conduwuit-macos-x86_64
otool -L conduwuit-macos-x86_64
# quick smoke test of the x86_64 macOS binary
- name: Run x86_64 macOS release binary
if: ${{ matrix.os == 'macos-13' }}
run: |
./conduwuit-macos-x86_64 --version
- name: Build macOS arm64 binary
if: ${{ matrix.os == 'macos-latest' }}
run: |
CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short HEAD)" cargo build --release
cp -v -f target/release/conduit conduwuit-macos-arm64
otool -L conduwuit-macos-arm64
# quick smoke test of the arm64 macOS binary
- name: Run arm64 macOS release binary
if: ${{ matrix.os == 'macos-latest' }}
run: |
./conduwuit-macos-arm64 --version
- name: Upload macOS x86_64 binary
if: ${{ matrix.os == 'macos-13' }}
uses: actions/upload-artifact@v4
with:
name: conduwuit-macos-x86_64
path: conduwuit-macos-x86_64
if-no-files-found: error
- name: Upload macOS arm64 binary
if: ${{ matrix.os == 'macos-latest' }}
uses: actions/upload-artifact@v4
with:
name: conduwuit-macos-arm64
path: conduwuit-macos-arm64
if-no-files-found: error
docker:
name: Docker publish
runs-on: ubuntu-latest
needs: build
needs: test_and_build
if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && github.event.pull_request.user.login != 'renovate[bot]'
env:
DOCKER_ARM64: docker.io/${{ github.repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-arm64v8
@@ -451,10 +481,10 @@ jobs:
- name: Move OCI images into position
run: |
mv -v oci-image-x86_64-unknown-linux-musl/*.tar.gz oci-image-amd64.tar.gz
mv -v oci-image-aarch64-unknown-linux-musl/*.tar.gz oci-image-arm64v8.tar.gz
mv -v oci-image-x86_64-unknown-linux-musl-debug/*.tar.gz oci-image-amd64-debug.tar.gz
mv -v oci-image-aarch64-unknown-linux-musl-debug/*.tar.gz oci-image-arm64v8-debug.tar.gz
mv -v oci-image-x86_64-linux-musl/*.tar.gz oci-image-amd64.tar.gz
mv -v oci-image-aarch64-linux-musl/*.tar.gz oci-image-arm64v8.tar.gz
mv -v oci-image-x86_64-linux-musl-debug/*.tar.gz oci-image-amd64-debug.tar.gz
mv -v oci-image-aarch64-linux-musl-debug/*.tar.gz oci-image-arm64v8-debug.tar.gz
- name: Load and push amd64 image
if: ${{ (vars.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }}

View file

@@ -24,8 +24,12 @@ env:
# Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps
NIX_CONFIG: |
show-trace = true
extra-substituters = https://attic.kennel.juneis.dog/conduit https://attic.kennel.juneis.dog/conduwuit https://cache.lix.systems https://conduwuit.cachix.org
extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://cache.lix.systems https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net
extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
experimental-features = nix-command flakes
extra-experimental-features = nix-command flakes
accept-flake-config = true
download-buffer-size = 134217728
# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
@@ -89,8 +93,10 @@ jobs:
- name: Apply Nix binary cache configuration
run: |
sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null <<EOF
extra-substituters = https://attic.kennel.juneis.dog/conduit https://attic.kennel.juneis.dog/conduwuit https://cache.lix.systems https://conduwuit.cachix.org
extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://cache.lix.systems https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net
extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
accept-flake-config = true
download-buffer-size = 134217728
EOF
- name: Use alternative Nix binary caches if specified

View file

@@ -26,7 +26,7 @@ jobs:
uses: actions/checkout@v4
- name: Run Trivy code and vulnerability scanner on repo
uses: aquasecurity/trivy-action@0.24.0
uses: aquasecurity/trivy-action@0.28.0
with:
scan-type: repo
format: sarif
@@ -34,7 +34,7 @@ jobs:
severity: CRITICAL,HIGH,MEDIUM,LOW
- name: Run Trivy code and vulnerability scanner on filesystem
uses: aquasecurity/trivy-action@0.24.0
uses: aquasecurity/trivy-action@0.28.0
with:
scan-type: fs
format: sarif

View file

@@ -10,6 +10,13 @@ variables:
FF_USE_FASTZIP: true
# Print progress reports for cache and artifact transfers
TRANSFER_METER_FREQUENCY: 5s
NIX_CONFIG: |
show-trace = true
extra-substituters = https://attic.kennel.juneis.dog/conduit https://attic.kennel.juneis.dog/conduwuit https://cache.lix.systems https://conduwuit.cachix.org
extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
experimental-features = nix-command flakes
extra-experimental-features = nix-command flakes
accept-flake-config = true
# Avoid duplicate pipelines
# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
@@ -23,6 +30,13 @@ workflow:
before_script:
# Enable nix-command and flakes
- if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi
- if command -v nix > /dev/null; then echo "extra-experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi
# Accept flake config from "untrusted" users
- if command -v nix > /dev/null; then echo "accept-flake-config = true" >> /etc/nix/nix.conf; fi
# Increase download buffer size to 128MB
- if command -v nix > /dev/null; then echo "download-buffer-size = 134217728" >> /etc/nix/nix.conf; fi
# Add conduwuit binary cache
- if command -v nix > /dev/null; then echo "extra-substituters = https://attic.kennel.juneis.dog/conduwuit" >> /etc/nix/nix.conf; fi
@@ -47,6 +61,8 @@ before_script:
- if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi
- if command -v nix > /dev/null; then echo "extra-substituters = https://aseipp-nix-cache.freetls.fastly.net" >> /etc/nix/nix.conf; fi
# Install direnv and nix-direnv
- if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi
@@ -56,6 +72,10 @@ before_script:
# Set CARGO_HOME to a cacheable path
- export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo"
# /tmp on gitlab is weird and causes https://github.com/NixOS/nix/issues/11470
- export TEMP="$PWD"
- export TMPDIR="$PWD"
ci:
stage: ci
image: nixos/nix:2.24.9
@@ -85,29 +105,29 @@ artifacts:
stage: artifacts
image: nixos/nix:2.24.9
script:
- ./bin/nix-build-and-cache just .#static-x86_64-unknown-linux-musl
- cp result/bin/conduit x86_64-unknown-linux-musl
- ./bin/nix-build-and-cache just .#static-x86_64-linux-musl
- cp result/bin/conduit x86_64-linux-musl
- mkdir -p target/release
- cp result/bin/conduit target/release
- direnv exec . cargo deb --no-build --no-strip
- mv target/debian/*.deb x86_64-unknown-linux-musl.deb
- mv target/debian/*.deb x86_64-linux-musl.deb
# Since the OCI image package is based on the binary package, this has the
# fun side effect of uploading the normal binary too. Conduit users who are
# deploying with Nix can leverage this fact by adding our binary cache to
# their systems.
#
# Note that although we have an `oci-image-x86_64-unknown-linux-musl`
# Note that although we have an `oci-image-x86_64-linux-musl`
# output, we don't build it because it would be largely redundant to this
# one since it's all containerized anyway.
- ./bin/nix-build-and-cache just .#oci-image
- cp result oci-image-amd64.tar.gz
- ./bin/nix-build-and-cache just .#static-aarch64-unknown-linux-musl
- cp result/bin/conduit aarch64-unknown-linux-musl
- ./bin/nix-build-and-cache just .#static-aarch64-linux-musl
- cp result/bin/conduit aarch64-linux-musl
- ./bin/nix-build-and-cache just .#oci-image-aarch64-unknown-linux-musl
- ./bin/nix-build-and-cache just .#oci-image-aarch64-linux-musl
- cp result oci-image-arm64v8.tar.gz
- ./bin/nix-build-and-cache just .#book
@@ -115,9 +135,9 @@ artifacts:
- cp -r --dereference result public
artifacts:
paths:
- x86_64-unknown-linux-musl
- aarch64-unknown-linux-musl
- x86_64-unknown-linux-musl.deb
- x86_64-linux-musl
- aarch64-linux-musl
- x86_64-linux-musl.deb
- oci-image-amd64.tar.gz
- oci-image-arm64v8.tar.gz
- public

Cargo.lock generated (201 changed lines)
View file

@@ -91,9 +91,9 @@ checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002"
[[package]]
name = "async-compression"
version = "0.4.13"
version = "0.4.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e614738943d3f68c628ae3dbce7c3daffb196665f82f8c8ea6b65de73c79429"
checksum = "103db485efc3e41214fe4fda9f3dbeae2eb9082f48fd236e6095627a9422066e"
dependencies = [
"brotli",
"flate2",
@@ -222,9 +222,9 @@ dependencies = [
[[package]]
name = "axum-client-ip"
version = "0.6.0"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72188bed20deb981f3a4a9fe674e5980fd9e9c2bd880baa94715ad5d60d64c67"
checksum = "9eefda7e2b27e1bda4d6fa8a06b50803b8793769045918bc37ad062d48a6efac"
dependencies = [
"axum",
"forwarded-header-value",
@@ -290,7 +290,7 @@ dependencies = [
"hyper",
"hyper-util",
"pin-project-lite",
"rustls 0.23.14",
"rustls 0.23.15",
"rustls-pemfile",
"rustls-pki-types",
"tokio",
@@ -310,7 +310,7 @@ dependencies = [
"http",
"http-body-util",
"pin-project",
"rustls 0.23.14",
"rustls 0.23.15",
"tokio",
"tokio-rustls",
"tokio-util",
@@ -353,9 +353,9 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"
[[package]]
name = "bindgen"
version = "0.69.4"
version = "0.69.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0"
checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088"
dependencies = [
"bitflags 2.6.0",
"cexpr",
@@ -433,9 +433,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
[[package]]
name = "bytemuck"
version = "1.18.0"
version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94bbb0ad554ad961ddc5da507a12a29b14e4ae5bda06b19f575a3e6079d2e2ae"
checksum = "8334215b81e418a0a7bdb8ef0849474f40bb10c8b71f1c4ed315cff49f32494d"
[[package]]
name = "byteorder"
@@ -478,9 +478,9 @@ dependencies = [
[[package]]
name = "cc"
version = "1.1.25"
version = "1.1.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8d9e0b4957f635b8d3da819d0db5603620467ecf1f692d22a8c2717ce27e6d8"
checksum = "b16803a61b81d9eabb7eae2588776c4c1e584b738ede45fdbb4c972cec1e9945"
dependencies = [
"jobserver",
"libc",
@@ -539,9 +539,9 @@ dependencies = [
[[package]]
name = "clap"
version = "4.5.19"
version = "4.5.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7be5744db7978a28d9df86a214130d106a89ce49644cbc4e3f0c22c3fba30615"
checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8"
dependencies = [
"clap_builder",
"clap_derive",
@@ -549,9 +549,9 @@ dependencies = [
[[package]]
name = "clap_builder"
version = "4.5.19"
version = "4.5.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a5fbc17d3ef8278f55b282b2a2e75ae6f6c7d4bb70ed3d0382375104bfafdb4b"
checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54"
dependencies = [
"anstyle",
"clap_lex",
@@ -626,10 +626,11 @@ dependencies = [
"clap",
"conduit_api",
"conduit_core",
"conduit_database",
"conduit_macros",
"conduit_service",
"const-str",
"futures-util",
"futures",
"log",
"ruma",
"serde_json",
@@ -652,7 +653,7 @@ dependencies = [
"conduit_database",
"conduit_service",
"const-str",
"futures-util",
"futures",
"hmac",
"http",
"http-body-util",
@@ -689,6 +690,7 @@ dependencies = [
"cyborgtime",
"either",
"figment",
"futures",
"hardened_malloc-rs",
"http",
"http-body-util",
@@ -703,11 +705,11 @@ dependencies = [
"reqwest",
"ring",
"ruma",
"rustls 0.23.14",
"sanitize-filename",
"serde",
"serde_json",
"serde_regex",
"serde_yaml",
"thiserror",
"tikv-jemalloc-ctl",
"tikv-jemalloc-sys",
@@ -725,10 +727,14 @@ dependencies = [
name = "conduit_database"
version = "0.4.7"
dependencies = [
"arrayvec",
"conduit_core",
"const-str",
"futures",
"log",
"rust-rocksdb-uwu",
"serde",
"serde_json",
"tokio",
"tracing",
]
@@ -757,13 +763,14 @@ dependencies = [
"conduit_core",
"conduit_service",
"const-str",
"futures",
"http",
"http-body-util",
"hyper",
"hyper-util",
"log",
"ruma",
"rustls 0.23.14",
"rustls 0.23.15",
"sd-notify",
"sentry",
"sentry-tower",
@@ -785,7 +792,7 @@ dependencies = [
"conduit_core",
"conduit_database",
"const-str",
"futures-util",
"futures",
"hickory-resolver",
"http",
"image",
@@ -1284,6 +1291,20 @@ dependencies = [
"new_debug_unreachable",
]
[[package]]
name = "futures"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
dependencies = [
"futures-channel",
"futures-core",
"futures-io",
"futures-sink",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-channel"
version = "0.3.31"
@@ -1346,6 +1367,7 @@ version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
dependencies = [
"futures-channel",
"futures-core",
"futures-io",
"futures-macro",
@@ -1656,9 +1678,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
[[package]]
name = "hyper"
version = "1.4.1"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05"
checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a"
dependencies = [
"bytes",
"futures-channel",
@@ -1685,7 +1707,7 @@ dependencies = [
"http",
"hyper",
"hyper-util",
"rustls 0.23.14",
"rustls 0.23.15",
"rustls-native-certs",
"rustls-pki-types",
"tokio",
@@ -1749,9 +1771,9 @@ dependencies = [
[[package]]
name = "image"
version = "0.25.2"
version = "0.25.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99314c8a2152b8ddb211f924cdae532d8c5e4c8bb54728e12fff1b0cd5963a10"
checksum = "bc144d44a31d753b02ce64093d532f55ff8dc4ebf2ffb8a63c0dda691385acae"
dependencies = [
"bytemuck",
"byteorder-lite",
@@ -1766,9 +1788,9 @@ dependencies = [
[[package]]
name = "image-webp"
version = "0.1.3"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f79afb8cbee2ef20f59ccd477a218c12a93943d075b492015ecb1bb81f8ee904"
checksum = "e031e8e3d94711a9ccb5d6ea357439ef3dcbed361798bd4071dc4d9793fbe22f"
dependencies = [
"byteorder-lite",
"quick-error 2.0.1",
@@ -1874,9 +1896,9 @@ dependencies = [
[[package]]
name = "js-sys"
version = "0.3.70"
version = "0.3.72"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a"
checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9"
dependencies = [
"wasm-bindgen",
]
@@ -1971,9 +1993,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
[[package]]
name = "libc"
version = "0.2.159"
version = "0.2.160"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5"
checksum = "f0b21006cd1874ae9e650973c565615676dc4a274c965bb0a73796dac838ce4f"
[[package]]
name = "libloading"
@@ -2373,7 +2395,7 @@ dependencies = [
"glob",
"once_cell",
"opentelemetry",
"ordered-float 4.3.0",
"ordered-float 4.4.0",
"percent-encoding",
"rand",
"thiserror",
@@ -2392,9 +2414,9 @@ dependencies = [
[[package]]
name = "ordered-float"
version = "4.3.0"
version = "4.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44d501f1a72f71d3c063a6bbc8f7271fa73aa09fe5d6283b6571e2ed176a2537"
checksum = "83e7ccb95e240b7c9506a3d544f10d935e142cc90b0a1d56954fb44d89ad6b97"
dependencies = [
"num-traits",
]
@@ -2655,9 +2677,9 @@ dependencies = [
[[package]]
name = "proc-macro2"
version = "1.0.86"
version = "1.0.88"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
checksum = "7c3a7fc5db1e57d5a779a352c8cdb57b29aa4c40cc69c3a68a7fedc815fbf2f9"
dependencies = [
"unicode-ident",
]
@@ -2709,9 +2731,9 @@ dependencies = [
[[package]]
name = "pulldown-cmark"
version = "0.12.1"
version = "0.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "666f0f59e259aea2d72e6012290c09877a780935cc3c18b1ceded41f3890d59c"
checksum = "f86ba2052aebccc42cbbb3ed234b8b13ce76f75c3551a303cb2bcffcff12bb14"
dependencies = [
"bitflags 2.6.0",
"memchr",
@@ -2748,7 +2770,7 @@ dependencies = [
"quinn-proto",
"quinn-udp",
"rustc-hash 2.0.0",
"rustls 0.23.14",
"rustls 0.23.15",
"socket2",
"thiserror",
"tokio",
@@ -2765,7 +2787,7 @@ dependencies = [
"rand",
"ring",
"rustc-hash 2.0.0",
"rustls 0.23.14",
"rustls 0.23.15",
"slab",
"thiserror",
"tinyvec",
@@ -2905,7 +2927,7 @@ dependencies = [
"percent-encoding",
"pin-project-lite",
"quinn",
"rustls 0.23.14",
"rustls 0.23.15",
"rustls-native-certs",
"rustls-pemfile",
"rustls-pki-types",
@@ -2954,7 +2976,7 @@ dependencies = [
[[package]]
name = "ruma"
version = "0.10.1"
source = "git+https://github.com/girlbossceo/ruwuma?rev=b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad#b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad"
source = "git+https://github.com/girlbossceo/ruwuma?rev=9f6c48eea2239e4c065dde38534407ea2596ab99#9f6c48eea2239e4c065dde38534407ea2596ab99"
dependencies = [
"assign",
"js_int",
@@ -2976,7 +2998,7 @@ dependencies = [
[[package]]
name = "ruma-appservice-api"
version = "0.10.0"
source = "git+https://github.com/girlbossceo/ruwuma?rev=b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad#b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad"
source = "git+https://github.com/girlbossceo/ruwuma?rev=9f6c48eea2239e4c065dde38534407ea2596ab99#9f6c48eea2239e4c065dde38534407ea2596ab99"
dependencies = [
"js_int",
"ruma-common",
@@ -2988,7 +3010,7 @@ dependencies = [
[[package]]
name = "ruma-client-api"
version = "0.18.0"
source = "git+https://github.com/girlbossceo/ruwuma?rev=b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad#b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad"
source = "git+https://github.com/girlbossceo/ruwuma?rev=9f6c48eea2239e4c065dde38534407ea2596ab99#9f6c48eea2239e4c065dde38534407ea2596ab99"
dependencies = [
"as_variant",
"assign",
@@ -3011,7 +3033,7 @@ dependencies = [
[[package]]
name = "ruma-common"
version = "0.13.0"
source = "git+https://github.com/girlbossceo/ruwuma?rev=b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad#b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad"
source = "git+https://github.com/girlbossceo/ruwuma?rev=9f6c48eea2239e4c065dde38534407ea2596ab99#9f6c48eea2239e4c065dde38534407ea2596ab99"
dependencies = [
"as_variant",
"base64 0.22.1",
@@ -3041,7 +3063,7 @@ dependencies = [
[[package]]
name = "ruma-events"
version = "0.28.1"
source = "git+https://github.com/girlbossceo/ruwuma?rev=b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad#b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad"
source = "git+https://github.com/girlbossceo/ruwuma?rev=9f6c48eea2239e4c065dde38534407ea2596ab99#9f6c48eea2239e4c065dde38534407ea2596ab99"
dependencies = [
"as_variant",
"indexmap 2.6.0",
@@ -3065,7 +3087,7 @@ dependencies = [
[[package]]
name = "ruma-federation-api"
version = "0.9.0"
source = "git+https://github.com/girlbossceo/ruwuma?rev=b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad#b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad"
source = "git+https://github.com/girlbossceo/ruwuma?rev=9f6c48eea2239e4c065dde38534407ea2596ab99#9f6c48eea2239e4c065dde38534407ea2596ab99"
dependencies = [
"bytes",
"http",
@@ -3083,7 +3105,7 @@ dependencies = [
[[package]]
name = "ruma-identifiers-validation"
version = "0.9.5"
source = "git+https://github.com/girlbossceo/ruwuma?rev=b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad#b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad"
source = "git+https://github.com/girlbossceo/ruwuma?rev=9f6c48eea2239e4c065dde38534407ea2596ab99#9f6c48eea2239e4c065dde38534407ea2596ab99"
dependencies = [
"js_int",
"thiserror",
@@ -3092,7 +3114,7 @@ dependencies = [
[[package]]
name = "ruma-identity-service-api"
version = "0.9.0"
source = "git+https://github.com/girlbossceo/ruwuma?rev=b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad#b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad"
source = "git+https://github.com/girlbossceo/ruwuma?rev=9f6c48eea2239e4c065dde38534407ea2596ab99#9f6c48eea2239e4c065dde38534407ea2596ab99"
dependencies = [
"js_int",
"ruma-common",
@@ -3102,7 +3124,7 @@ dependencies = [
[[package]]
name = "ruma-macros"
version = "0.13.0"
source = "git+https://github.com/girlbossceo/ruwuma?rev=b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad#b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad"
source = "git+https://github.com/girlbossceo/ruwuma?rev=9f6c48eea2239e4c065dde38534407ea2596ab99#9f6c48eea2239e4c065dde38534407ea2596ab99"
dependencies = [
"cfg-if",
"once_cell",
@@ -3118,7 +3140,7 @@ dependencies = [
[[package]]
name = "ruma-push-gateway-api"
version = "0.9.0"
source = "git+https://github.com/girlbossceo/ruwuma?rev=b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad#b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad"
source = "git+https://github.com/girlbossceo/ruwuma?rev=9f6c48eea2239e4c065dde38534407ea2596ab99#9f6c48eea2239e4c065dde38534407ea2596ab99"
dependencies = [
"js_int",
"ruma-common",
@@ -3130,7 +3152,7 @@ dependencies = [
[[package]]
name = "ruma-server-util"
version = "0.3.0"
source = "git+https://github.com/girlbossceo/ruwuma?rev=b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad#b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad"
source = "git+https://github.com/girlbossceo/ruwuma?rev=9f6c48eea2239e4c065dde38534407ea2596ab99#9f6c48eea2239e4c065dde38534407ea2596ab99"
dependencies = [
"headers",
"http",
@@ -3143,7 +3165,7 @@ dependencies = [
[[package]]
name = "ruma-signatures"
version = "0.15.0"
source = "git+https://github.com/girlbossceo/ruwuma?rev=b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad#b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad"
source = "git+https://github.com/girlbossceo/ruwuma?rev=9f6c48eea2239e4c065dde38534407ea2596ab99#9f6c48eea2239e4c065dde38534407ea2596ab99"
dependencies = [
"base64 0.22.1",
"ed25519-dalek",
@@ -3159,9 +3181,10 @@ dependencies = [
[[package]]
name = "ruma-state-res"
version = "0.11.0"
source = "git+https://github.com/girlbossceo/ruwuma?rev=b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad#b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad"
source = "git+https://github.com/girlbossceo/ruwuma?rev=9f6c48eea2239e4c065dde38534407ea2596ab99#9f6c48eea2239e4c065dde38534407ea2596ab99"
dependencies = [
"itertools 0.12.1",
"futures-util",
"itertools 0.13.0",
"js_int",
"ruma-common",
"ruma-events",
@@ -3173,8 +3196,8 @@ dependencies = [
[[package]]
name = "rust-librocksdb-sys"
version = "0.26.0+9.6.1"
source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=22f942609921ecf837399e1c2fe6fcb8dbb1095e#22f942609921ecf837399e1c2fe6fcb8dbb1095e"
version = "0.27.0+9.7.2"
source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=86920c73d54d52bd59f0d770cdefece3fba745db#86920c73d54d52bd59f0d770cdefece3fba745db"
dependencies = [
"bindgen",
"bzip2-sys",
@@ -3191,7 +3214,7 @@ dependencies = [
[[package]]
name = "rust-rocksdb"
version = "0.30.0"
source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=22f942609921ecf837399e1c2fe6fcb8dbb1095e#22f942609921ecf837399e1c2fe6fcb8dbb1095e"
source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=86920c73d54d52bd59f0d770cdefece3fba745db#86920c73d54d52bd59f0d770cdefece3fba745db"
dependencies = [
"libc",
"rust-librocksdb-sys",
@@ -3261,9 +3284,9 @@ dependencies = [
[[package]]
name = "rustls"
version = "0.23.14"
version = "0.23.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8"
checksum = "5fbb44d7acc4e873d613422379f69f237a1b141928c02f6bc6ccfddddc2d7993"
dependencies = [
"aws-lc-rs",
"log",
@@ -3299,9 +3322,9 @@ dependencies = [
[[package]]
name = "rustls-pki-types"
version = "1.9.0"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55"
checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b"
[[package]]
name = "rustls-webpki"
@ -3317,9 +3340,9 @@ dependencies = [
[[package]]
name = "rustversion"
version = "1.0.17"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6"
checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248"
[[package]]
name = "rustyline-async"
@ -3354,9 +3377,9 @@ dependencies = [
[[package]]
name = "schannel"
version = "0.1.24"
version = "0.1.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b"
checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1"
dependencies = [
"windows-sys 0.59.0",
]
@@ -3572,9 +3595,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.128"
version = "1.0.129"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8"
checksum = "6dbcf9b78a125ee667ae19388837dd12294b858d101fdd393cb9d5501ef09eb2"
dependencies = [
"itoa",
"memchr",
@@ -3885,9 +3908,9 @@ dependencies = [
[[package]]
name = "termimad"
version = "0.30.0"
version = "0.30.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "920e7c4671e79f3d9df269da9c8edf0dbc580044fd727d3594f7bfba5eb6107a"
checksum = "22117210909e9dfff30a558f554c7fb3edb198ef614e7691386785fb7679677c"
dependencies = [
"coolor",
"crokey",
@@ -4082,7 +4105,7 @@ version = "0.26.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4"
dependencies = [
"rustls 0.23.14",
"rustls 0.23.15",
"rustls-pki-types",
"tokio",
]
@@ -4451,7 +4474,7 @@ dependencies = [
"base64 0.22.1",
"log",
"once_cell",
"rustls 0.23.14",
"rustls 0.23.15",
"rustls-pki-types",
"url",
"webpki-roots",
@ -4483,9 +4506,9 @@ checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9"
[[package]]
name = "uuid"
version = "1.10.0"
version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314"
checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a"
dependencies = [
"getrandom",
"serde",
@ -4526,9 +4549,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasm-bindgen"
version = "0.2.93"
version = "0.2.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5"
checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e"
dependencies = [
"cfg-if",
"once_cell",
@ -4537,9 +4560,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.93"
version = "0.2.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b"
checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358"
dependencies = [
"bumpalo",
"log",
@ -4552,9 +4575,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-futures"
version = "0.4.43"
version = "0.4.45"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed"
checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b"
dependencies = [
"cfg-if",
"js-sys",
@ -4564,9 +4587,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.93"
version = "0.2.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf"
checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
@ -4574,9 +4597,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.93"
version = "0.2.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836"
checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68"
dependencies = [
"proc-macro2",
"quote",
@ -4587,15 +4610,15 @@ dependencies = [
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.93"
version = "0.2.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484"
checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d"
[[package]]
name = "web-sys"
version = "0.3.70"
version = "0.3.72"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0"
checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112"
dependencies = [
"js-sys",
"wasm-bindgen",


@ -19,7 +19,7 @@ license = "Apache-2.0"
# See also `rust-toolchain.toml`
readme = "README.md"
repository = "https://github.com/girlbossceo/conduwuit"
rust-version = "1.81.0"
rust-version = "1.82.0"
version = "0.4.7"
[workspace.metadata.crane]
@ -101,14 +101,13 @@ features = ["typed-header", "tracing"]
[workspace.dependencies.axum-server]
version = "0.7.1"
default-features = false
features = ["tls-rustls"]
# to listen on both HTTP and HTTPS if listening on TLS directly from conduwuit for complement or sytest
[workspace.dependencies.axum-server-dual-protocol]
version = "0.7"
[workspace.dependencies.axum-client-ip]
version = "0.6.0"
version = "0.6.1"
[workspace.dependencies.tower]
version = "0.5.1"
@ -200,7 +199,7 @@ default-features = false
# used for conduit's CLI and admin room command parsing
[workspace.dependencies.clap]
version = "4.5.15"
version = "4.5.20"
default-features = false
features = [
"std",
@ -211,9 +210,10 @@ features = [
"string",
]
[workspace.dependencies.futures-util]
[workspace.dependencies.futures]
version = "0.3.30"
default-features = false
features = ["std"]
[workspace.dependencies.tokio]
version = "1.40.0"
@ -248,7 +248,7 @@ features = ["alloc", "std"]
default-features = false
[workspace.dependencies.hyper]
version = "1.4.1"
version = "1.5.0"
default-features = false
features = [
"server",
@ -315,7 +315,7 @@ version = "0.1.2"
[workspace.dependencies.ruma]
git = "https://github.com/girlbossceo/ruwuma"
#branch = "conduwuit-changes"
rev = "b6f82a72b6c0899d8ac8e53206d375c2c6f0a2ad"
rev = "9f6c48eea2239e4c065dde38534407ea2596ab99"
features = [
"compat",
"rand",
@ -330,16 +330,19 @@ features = [
"ring-compat",
"identifiers-validation",
"unstable-unspecified",
"unstable-msc2409",
"unstable-msc2448",
"unstable-msc2666",
"unstable-msc2867",
"unstable-msc2870",
"unstable-msc3026",
"unstable-msc3061",
"unstable-msc3245",
"unstable-msc3266",
"unstable-msc3381", # polls
"unstable-msc3489", # beacon / live location
"unstable-msc3575",
"unstable-msc4075",
"unstable-msc4121",
"unstable-msc4125",
"unstable-msc4186",
@ -444,7 +447,7 @@ version = "0.4.3"
default-features = false
[workspace.dependencies.termimad]
version = "0.30.0"
version = "0.30.1"
default-features = false
[workspace.dependencies.checked_ops]
@ -459,8 +462,7 @@ features = ["full", "extra-traits"]
version = "1.0.36"
[workspace.dependencies.proc-macro2]
version = "1.0.86"
version = "1.0.88"
#
# Patches
@ -717,12 +719,16 @@ opt-level = 'z'
# primarily used for CI
[profile.test]
inherits = "dev"
strip = false
opt-level = 0
codegen-units = 16
incremental = false
[profile.test.package.'*']
inherits = "dev"
debug = 0
strip = false
opt-level = 0
codegen-units = 16
incremental = false
@ -810,6 +816,7 @@ significant_drop_tightening = { level = "allow", priority = 1 } # TODO
pedantic = { level = "warn", priority = -1 }
## some sadness
too_long_first_doc_paragraph = { level = "allow", priority = 1 }
doc_markdown = { level = "allow", priority = 1 }
enum_glob_use = { level = "allow", priority = 1 }
if_not_else = { level = "allow", priority = 1 }
@ -821,6 +828,7 @@ missing_panics_doc = { level = "allow", priority = 1 }
module_name_repetitions = { level = "allow", priority = 1 }
no_effect_underscore_binding = { level = "allow", priority = 1 }
similar_names = { level = "allow", priority = 1 }
single_match_else = { level = "allow", priority = 1 }
struct_field_names = { level = "allow", priority = 1 }
unnecessary_wraps = { level = "allow", priority = 1 }
unused_async = { level = "allow", priority = 1 }


@ -9,7 +9,7 @@ Artifacts](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml/bad
<!-- ANCHOR_END: catchphrase -->
Visit the [Conduwuit documentation](https://conduwuit.puppyirl.gay/) for more
Visit the [conduwuit documentation](https://conduwuit.puppyirl.gay/) for more
information.
<!-- ANCHOR: body -->
@ -23,9 +23,9 @@ to communicate with users outside of Matrix, like a community on Discord.
#### What is the goal?
An efficient Matrix homeserver that's easy to set up and just works. You can
install it on a mini-computer like the Raspberry Pi to host Matrix for your
family, friends or company.
A high-performance and efficient Matrix homeserver that's easy to set up and
just works. You can install it on a mini-computer like the Raspberry Pi to
host Matrix for your family, friends or company.
#### Can I try it out?
@ -42,9 +42,28 @@ transfem.dev is also listed at
#### What is the current status?
conduwuit is a hard fork of Conduit which is in beta, meaning you can join and
participate in most Matrix rooms, but not all features are supported and you
might run into bugs from time to time.
conduwuit is technically a hard fork of Conduit, which is in Beta. The Beta status
was initially inherited from Conduit; however, over time this Beta status is rapidly
becoming less and less relevant as our codebase diverges more and more.
conduwuit is quite stable and very usable as a daily driver and for a low-to-medium
sized homeserver. There is still a lot more work to be done, but it is in a far
better place than the project was in early 2024.
#### How is conduwuit funded? Is conduwuit sustainable?
conduwuit has no external funding. It is made possible purely in my free time with
contributors, also working in their free time, and only by user donations.
conduwuit has existed since around November 2023, but [only became more publicly known
in March/April 2024](https://matrix.org/blog/2024/04/26/this-week-in-matrix-2024-04-26/#conduwuit-website)
and we have no plans to stop or slow down any time soon!
#### Can I migrate or switch from Conduit?
conduwuit is a complete drop-in replacement for Conduit. As long as you are using RocksDB,
the only "migration" you need to do is replace the binary or container image. There
are no additional steps or risks involved in switching to conduwuit.
<!-- ANCHOR_END: body -->
@ -59,6 +78,10 @@ If you run into any question, feel free to
#### Donate
conduwuit development is purely made possible by myself and contributors. I do
not get paid to work on this, and I work on it in my free time. Donations are
heavily appreciated! 💜🥺
- Liberapay: <https://liberapay.com/girlbossceo>
- Ko-fi: <https://ko-fi.com/puppygock>
- GitHub Sponsors: <https://github.com/sponsors/girlbossceo>


@ -15,7 +15,7 @@ LOG_FILE="$2"
# A `.jsonl` file to write test results to
RESULTS_FILE="$3"
OCI_IMAGE="complement-conduit:main"
OCI_IMAGE="complement-conduwuit:main"
# Complement tests that are skipped due to flakiness/reliability issues
SKIPPED_COMPLEMENT_TESTS='-skip=TestClientSpacesSummary.*|TestJoinFederatedRoomFromApplicationServiceBridgeUser.*|TestJumpToDateEndpoint.*'
@ -34,7 +34,7 @@ toplevel="$(git rev-parse --show-toplevel)"
pushd "$toplevel" > /dev/null
bin/nix-build-and-cache just .#static-complement
bin/nix-build-and-cache just .#linux-complement
docker load < result
popd > /dev/null


@ -26,7 +26,13 @@ just() {
"$ATTIC_TOKEN"
# Find all output paths of the installables and their build dependencies
readarray -t derivations < <(nix path-info --derivation "$@")
#readarray -t derivations < <(nix path-info --derivation "$@")
derivations=()
while IFS=$'\n' read derivation; do
derivations+=("$derivation")
done < <(nix path-info --derivation "$@")
cache=()
for derivation in "${derivations[@]}"; do
cache+=(
@ -77,8 +83,8 @@ ci() {
--inputs-from "$toplevel"
# Keep sorted
"$toplevel#devShells.x86_64-linux.default"
"$toplevel#devShells.x86_64-linux.all-features"
#"$toplevel#devShells.x86_64-linux.default"
#"$toplevel#devShells.x86_64-linux.all-features"
attic#default
cachix#default
nixpkgs#direnv


@ -2,6 +2,6 @@ array-size-threshold = 4096
cognitive-complexity-threshold = 94 # TODO reduce me ALARA
excessive-nesting-threshold = 11 # TODO reduce me to 4 or 5
future-size-threshold = 7745 # TODO reduce me ALARA
stack-size-threshold = 144000 # reduce me ALARA
stack-size-threshold = 196608 # reduce me ALARA
too-many-lines-threshold = 700 # TODO reduce me to <= 100
type-complexity-threshold = 250 # reduce me to ~200


@ -195,11 +195,14 @@ allow_guests_auto_join_rooms = false
# Enables registration. If set to false, no users can register on this
# server.
#
# If set to true without a token configured, users can register with no form of 2nd-
# step only if you set
# `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` to
# true in your config. If you would like
# registration only via token reg, please configure the `registration_token` key.
# true in your config.
#
# If you would like registration only via token reg, please configure
# `registration_token` or `registration_token_file`.
allow_registration = false
# Please note that an open registration homeserver with no second-step verification
# is highly prone to abuse and potential defederation by homeservers, including
@ -208,7 +211,14 @@ allow_registration = false
# A static registration token that new users will have to provide when creating
# an account. If unset and `allow_registration` is true, registration is open
# without any condition. YOU NEED TO EDIT THIS.
registration_token = "change this token for something specific to your server"
registration_token = "change this token/string here or set registration_token_file"
# Path to a file on the system that gets read for the registration token
#
# conduwuit must be able to access the file, and it must not be empty
#
# no default
#registration_token_file = "/etc/conduwuit/.reg_token"
# controls whether federation is allowed or not
# defaults to true
@ -344,7 +354,7 @@ allow_profile_lookup_federation_requests = true
# Controls the max log level for admin command log captures (logs generated from running admin commands)
#
# Defaults to "info" on release builds, else "debug" on debug builds
#admin_log_capture = info
#admin_log_capture = "info"
# Allows admins to enter commands in rooms other than #admins by prefixing with \!admin. The reply
# will be publicly visible to the room, originating from the sender.
@ -363,6 +373,11 @@ allow_profile_lookup_federation_requests = true
# Defaults to "info"
#log = "info"
# controls whether logs will be output with ANSI colours
#
# defaults to true
#log_colors = true
# controls whether encrypted rooms and events are allowed (default true)
#allow_encryption = false
@ -845,9 +860,20 @@ allow_profile_lookup_federation_requests = true
# vector list of TURN URIs/servers to use
#
# replace "example.turn.uri" with your TURN domain, such as the coturn "realm".
# if using TURN over TLS, replace "turn:" with "turns:"
#
# No default
#turn_uris = ["turn:example.turn.uri?transport=udp", "turn:example.turn.uri?transport=tcp"]
# TURN secret to use, read from the file path specified
#
# this takes priority over "turn_secret", and falls back to "turn_secret" if the file is invalid or
# fails to open.
#
# no default
#turn_secret_file = "/path/to/secret.txt"
# TURN secret to use for generating the HMAC-SHA1 hash as part of username and password generation
#
# this is more secure, but if needed you can use traditional username/password below.


@ -27,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"]
[dependencies.rust-rocksdb]
git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1"
rev = "22f942609921ecf837399e1c2fe6fcb8dbb1095e"
rev = "86920c73d54d52bd59f0d770cdefece3fba745db"
#branch = "master"
default-features = false


@ -16,7 +16,7 @@ services:
CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit
CONDUWUIT_DATABASE_BACKEND: rocksdb
CONDUWUIT_PORT: 6167 # should match the loadbalancer traefik label
CONDUWUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
CONDUWUIT_ALLOW_REGISTRATION: 'true'
CONDUWUIT_ALLOW_FEDERATION: 'true'
CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true'


@ -32,7 +32,7 @@ services:
CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit
CONDUWUIT_DATABASE_BACKEND: rocksdb
CONDUWUIT_PORT: 6167
CONDUWUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
CONDUWUIT_ALLOW_REGISTRATION: 'true'
CONDUWUIT_ALLOW_FEDERATION: 'true'
CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true'


@ -15,7 +15,8 @@ services:
CONDUWUIT_SERVER_NAME: your.server.name.example # EDIT THIS
CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]'
CONDUWUIT_ALLOW_REGISTRATION: 'false' # After setting a secure registration token, you can enable this
CONDUWUIT_REGISTRATION_TOKEN: # This is a token you can use to register on the server
CONDUWUIT_REGISTRATION_TOKEN: "" # This is a token you can use to register on the server
#CONDUWUIT_REGISTRATION_TOKEN_FILE: "" # Alternatively you can configure a path to a token file to read
CONDUWUIT_ADDRESS: 0.0.0.0
CONDUWUIT_PORT: 6167 # you need to match this with the traefik load balancer label if you want to change it
CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit
@ -23,7 +24,6 @@ services:
### Uncomment and change values as desired, note that conduwuit has plenty of config options, so you should check out the example config too
# Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
# CONDUWUIT_LOG: info # default is: "warn,state_res=warn"
# CONDUWUIT_ALLOW_JAEGER: 'false'
# CONDUWUIT_ALLOW_ENCRYPTION: 'true'
# CONDUWUIT_ALLOW_FEDERATION: 'true'
# CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
@ -31,7 +31,7 @@ services:
# CONDUWUIT_ALLOW_OUTGOING_PRESENCE: true
# CONDUWUIT_ALLOW_LOCAL_PRESENCE: true
# CONDUWUIT_WORKERS: 10
# CONDUWUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
# CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
# CONDUWUIT_NEW_USER_DISPLAYNAME_SUFFIX = "🏳️‍⚧️"
# We need some way to serve the client and server .well-known json. The simplest way is via the CONDUWUIT_WELL_KNOWN


@ -16,7 +16,7 @@ services:
CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit
CONDUWUIT_DATABASE_BACKEND: rocksdb
CONDUWUIT_PORT: 6167
CONDUWUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
CONDUWUIT_ALLOW_REGISTRATION: 'true'
CONDUWUIT_ALLOW_FEDERATION: 'true'
CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true'


@ -13,18 +13,38 @@ what you need.
Prebuilt fully static musl binaries can be downloaded from the latest tagged
release [here](https://github.com/girlbossceo/conduwuit/releases/latest) or
`main` CI branch workflow artifact output. These also include Debian packages.
`main` CI branch workflow artifact output. These also include Debian/Ubuntu packages.
These binaries have jemalloc and io_uring statically linked and included with
them.
them, so no additional dynamic dependencies need to be installed.
Alternatively, you may compile the binary yourself. We recommend using
Nix (or [Lix](https://lix.systems) to build conduwuit as this has the most guaranteed
reproducibiltiy and easiest to get a build environment and output going.
Nix (or [Lix](https://lix.systems)) to build conduwuit as this has the most guaranteed
reproducibility and is the easiest way to get a build environment and output going. This also
allows easy cross-compilation.
You can run the `nix build -L .#static-x86_64-linux-musl-all-features` or
`nix build -L .#static-aarch64-linux-musl-all-features` commands based
on architecture to cross-compile the necessary static binary located at
`result/bin/conduit`. This is reproducible with the static binaries produced in our CI.
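For example, building and inspecting the x86_64 static binary looks like this:

```bash
# build the fully static x86_64 musl binary with all features
nix build -L .#static-x86_64-linux-musl-all-features

# the binary is exposed through the `result` symlink
ls -l result/bin/conduit
```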
Otherwise, follow standard Rust project build guides (installing git and cloning
the repo, getting the Rust toolchain via rustup, installing LLVM toolchain +
libclang for RocksDB, installing liburing for io_uring and RocksDB, etc).
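As a rough sketch of such a manual build (the distro package names below are
assumptions for Debian/Ubuntu; adjust for your system):

```bash
# get the sources
git clone https://github.com/girlbossceo/conduwuit.git
cd conduwuit

# Rust toolchain via rustup (the pinned version comes from rust-toolchain.toml)
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh

# native dependencies for RocksDB and io_uring
sudo apt install clang libclang-dev liburing-dev

# produce a release binary under target/release/
cargo build --release
```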
## Migrating from Conduit
As mentioned in the README, there are few to no steps needed to migrate
from Conduit. As long as you are using the RocksDB database backend, just
replace the binary / container image / etc.
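For a typical systemd-based install, that swap can be as small as the following
sketch (the unit name and paths are assumptions; adjust to your setup):

```bash
# stop the running Conduit, swap in the conduwuit binary, start it again
sudo systemctl stop conduit
sudo install -m 0755 ./conduwuit /usr/local/bin/conduit
sudo systemctl start conduit
```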
**Note**: If you are relying on Conduit's "automatic delegation" feature,
this will **NOT** work on conduwuit and you must configure delegation manually.
This is not a mistake and no support for this feature will be added.
See the `[global.well_known]` config section, or configure your web server
appropriately to send the delegation responses.
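As a hedged sketch, delegation served by conduwuit itself can look like this in
the config (the domains are placeholders; check the example config for the
authoritative keys):

```toml
[global.well_known]
# hypothetical values; replace with your actual domains
client = "https://matrix.example.com"
server = "matrix.example.com:443"
```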
## Adding a conduwuit user
While conduwuit can run as any user, it is better to use dedicated users for
@ -82,20 +102,40 @@ sudo chown -R root:root /etc/conduwuit sudo chmod -R 755 /etc/conduwuit
If you use the default database path you also need to run this:
```bash
sudo mkdir -p /var/lib/conduwuit/ sudo chown -R conduwuit:conduwuit /var/lib/conduwuit/
sudo mkdir -p /var/lib/conduwuit/
sudo chown -R conduwuit:conduwuit /var/lib/conduwuit/
sudo chmod 700 /var/lib/conduwuit/
```
## Setting up the Reverse Proxy
Refer to the documentation or various online guides for your chosen reverse proxy
software. A [Caddy](https://caddyserver.com/) example will be provided as this
software. There are many examples of basic Apache/Nginx reverse proxy setups
out there.
A [Caddy](https://caddyserver.com/) example will be provided as this
is the recommended reverse proxy for new users and is very trivial to use
(handles TLS, reverse proxy headers, etc transparently with proper defaults).
Lighttpd is not supported as it seems to mess with the `X-Matrix` Authorization
header, making federation non-functional. If using Apache, you need to use
`nocanon` to prevent this.
`nocanon` in your `ProxyPass` directive to prevent this (note that Apache
isn't very good as a general reverse proxy).
Nginx users may need to set `proxy_buffering off;` if there are issues with
uploading media like images.
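For reference, a minimal Nginx location block along those lines might look like
the following (the upstream address assumes conduwuit's default port used
elsewhere in this guide):

```nginx
location /_matrix/ {
    proxy_pass http://127.0.0.1:6167;
    proxy_set_header Host $host;
    # avoid buffering large media uploads
    proxy_buffering off;
}
```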
You will need to reverse proxy everything under the following routes:
- `/_matrix/` - core Matrix C-S and S-S APIs
- `/_conduwuit/` - ad-hoc conduwuit routes such as `/local_user_count` and
`/server_version`
You can optionally reverse proxy the following individual routes:
- `/.well-known/matrix/client` and `/.well-known/matrix/server` if using
conduwuit to perform delegation
- `/.well-known/matrix/support` if using conduwuit to send the homeserver admin
contact and support page (formerly known as MSC1929)
- `/` if you would like to see `hewwo from conduwuit woof!` at the root
### Caddy
@ -144,6 +184,9 @@ curl https://your.server.name/_conduwuit/server_version
# If using port 8448
curl https://your.server.name:8448/_conduwuit/server_version
# If federation is enabled
curl https://your.server.name:8448/_matrix/federation/v1/version
```
- To check if your server can talk with other homeservers, you can use the


@ -8,6 +8,8 @@ conduwuit can be acquired by Nix (or [Lix][lix]) from various places:
A community maintained NixOS package is available at [`conduwuit`](https://search.nixos.org/packages?channel=unstable&show=conduwuit&from=0&size=50&sort=relevance&type=packages&query=conduwuit)
### Binary cache
A binary cache for conduwuit that the CI/CD publishes to is available at the
following places (both are the same, just with different names):
@ -31,6 +33,8 @@ conduwuit:lYPVh7o1hLu1idH4Xt2QHaRa49WRGSAqzcfFd94aOTw=
If specifying a Git remote URL in your flake, you can use any remotes that
are specified on the README (the mirrors), such as GitHub: `github:girlbossceo/conduwuit`
### NixOS module
The `flake.nix` and `default.nix` do not currently provide a NixOS module (contributions
welcome!), so [`services.matrix-conduit`][module] from Nixpkgs can be used to configure
conduwuit.
@ -39,7 +43,35 @@ If you want to run the latest code, you should get conduwuit from the `flake.nix
or `default.nix` and set [`services.matrix-conduit.package`][package]
appropriately to use conduwuit instead of Conduit.
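A sketch of what that override can look like, assuming `conduwuit` is a flake
input for this repository passed into your NixOS configuration:

```nix
# the input name and attribute path are assumptions; see the flake's outputs
services.matrix-conduit.package =
  inputs.conduwuit.packages.${pkgs.system}.default;
```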
### UNIX sockets
Due to the lack of a conduwuit NixOS module, when using the `services.matrix-conduit` module
it is not possible to use UNIX sockets. This is because the UNIX socket option does not exist
in Conduit, and their module forces listening on `[::1]:6167` by default if unspecified.
Additionally, the [`matrix-conduit` systemd unit][systemd-unit] in the module does not allow
the `AF_UNIX` socket address family in its `RestrictAddressFamilies=` setting, which
prevents the service from accessing or creating UNIX sockets.
There is no known workaround for these issues. A conduwuit NixOS configuration module must be
developed and published by the community.
### jemalloc and hardened profile
conduwuit uses jemalloc by default. This may interfere with the [`hardened.nix` profile][hardened.nix]
because it uses `scudo` by default. You must either disable/hide `scudo` from conduwuit, or
disable jemalloc like so:
```nix
let
conduwuit = pkgs.unstable.conduwuit.override {
enableJemalloc = false;
};
in
```
[lix]: https://lix.systems/
[module]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit
[package]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit.package
[hardened.nix]: https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/profiles/hardened.nix#L22
[systemd-unit]: https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/matrix/conduit.nix#L132


@ -21,6 +21,22 @@ These same values need to be set in conduwuit. See the [example
config](configuration/examples.md) in the TURN section for configuring these and
restart conduwuit after.
`turn_secret`, or a path in `turn_secret_file`, must be set to the value of your
coturn `static-auth-secret`; alternatively, use `turn_username` and `turn_password`
if using legacy username:password TURN authentication (not preferred).
`turn_uris` must be the list of TURN URIs you would like to send to the client.
Typically you will just replace the example domain `example.turn.uri` with the
`realm` you set from the example config.
If you are using TURN over TLS, you can replace `turn:` with `turns:` in the
`turn_uris` config option to instruct clients to attempt to connect to
TURN over TLS. This is highly recommended.
If you need unauthenticated access to the TURN URIs, or if some clients are
having trouble, you can enable `turn_guest_access` in conduwuit, which disables
authentication for the TURN URI endpoint `/_matrix/client/v3/voip/turnServer`.
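Putting the above together, a minimal sketch of the relevant conduwuit settings
(the domain and secret are placeholders):

```toml
# TLS ("turns:") URIs first where available
turn_uris = [
    "turns:example.turn.uri?transport=tcp",
    "turn:example.turn.uri?transport=udp",
]

# either set the secret inline...
turn_secret = "your coturn static-auth-secret"
# ...or read it from a file instead
#turn_secret_file = "/etc/conduwuit/turn_secret"
```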
### Run
Run the [Coturn](https://hub.docker.com/r/coturn/coturn) image using

flake.lock (generated)

@ -5,16 +5,15 @@
"crane": "crane",
"flake-compat": "flake-compat",
"flake-parts": "flake-parts",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs",
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1725300620,
"narHash": "sha256-IdM+pZ6BnmD3o1fTJZ2BD43k7dwi1BbVfLDLpM1nE5s=",
"lastModified": 1729116596,
"narHash": "sha256-NnLMLIXGZtAscUF4dCShksuQ1nOGF6Y2dEeyj0rBbUg=",
"owner": "zhaofengli",
"repo": "attic",
"rev": "bea72d75b6165dfb529ba0c39cc6c7e9c7f0d234",
"rev": "2b05b7d986cf6009b1c1ef7daa4961cd1a658782",
"type": "github"
},
"original": {
@ -29,14 +28,14 @@
"devenv": "devenv",
"flake-compat": "flake-compat_3",
"git-hooks": "git-hooks",
"nixpkgs": "nixpkgs_3"
"nixpkgs": "nixpkgs_4"
},
"locked": {
"lastModified": 1724232775,
"narHash": "sha256-6u2DycIEgrgNYlLxyGqdFVmBNiKIitnQKJ1pbRP5oko=",
"lastModified": 1728672398,
"narHash": "sha256-KxuGSoVUFnQLB2ZcYODW7AVPAh9JqRlD5BrfsC/Q4qs=",
"owner": "cachix",
"repo": "cachix",
"rev": "03b6cb3f953097bff378fb8b9ea094bd091a4ec7",
"rev": "aac51f698309fd0f381149214b7eee213c66ef0a",
"type": "github"
},
"original": {
@ -54,12 +53,51 @@
"devenv",
"flake-compat"
],
"git-hooks": [
"cachix",
"devenv",
"pre-commit-hooks"
],
"nixpkgs": [
"cachix",
"devenv",
"nixpkgs"
]
},
"locked": {
"lastModified": 1726520618,
"narHash": "sha256-jOsaBmJ/EtX5t/vbylCdS7pWYcKGmWOKg4QKUzKr6dA=",
"owner": "cachix",
"repo": "cachix",
"rev": "695525f9086542dfb09fde0871dbf4174abbf634",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "cachix",
"type": "github"
}
},
"cachix_3": {
"inputs": {
"devenv": "devenv_3",
"flake-compat": [
"cachix",
"devenv",
"cachix",
"devenv",
"flake-compat"
],
"nixpkgs": [
"cachix",
"devenv",
"cachix",
"devenv",
"nixpkgs"
],
"pre-commit-hooks": [
"cachix",
"devenv",
"cachix",
"devenv",
"pre-commit-hooks"
@ -82,11 +120,11 @@
"complement": {
"flake": false,
"locked": {
"lastModified": 1724347376,
"narHash": "sha256-y0e/ULDJ92IhNQZsS/06g0s+AYZ82aJfrIO9qEse94c=",
"lastModified": 1727346895,
"narHash": "sha256-S7EuoeTuvMvk1yo7WiVa9GKiNlnP8kWKsmHYJ4qk2Cc=",
"owner": "matrix-org",
"repo": "complement",
"rev": "39733c1b2f8314800776748cc7164f9a34650686",
"rev": "c246a092fda4178b0c97d1ab02f64f8b55ca139a",
"type": "github"
},
"original": {
@ -119,11 +157,11 @@
},
"crane_2": {
"locked": {
"lastModified": 1725409566,
"narHash": "sha256-PrtLmqhM6UtJP7v7IGyzjBFhbG4eOAHT6LPYOFmYfbk=",
"lastModified": 1729273024,
"narHash": "sha256-Mb5SemVsootkn4Q2IiY0rr9vrXdCCpQ9HnZeD/J3uXs=",
"owner": "ipetkov",
"repo": "crane",
"rev": "7e4586bad4e3f8f97a9271def747cf58c4b68f3c",
"rev": "fa8b7445ddadc37850ed222718ca86622be01967",
"type": "github"
},
"original": {
@ -140,7 +178,7 @@
"cachix",
"flake-compat"
],
"nix": "nix_2",
"nix": "nix_3",
"nixpkgs": [
"cachix",
"nixpkgs"
@ -150,6 +188,43 @@
"git-hooks"
]
},
"locked": {
"lastModified": 1727963652,
"narHash": "sha256-os0EDjn7QVXL6RtHNb9TrZLXVm2Tc5/nZKk3KpbTzd8=",
"owner": "cachix",
"repo": "devenv",
"rev": "cb0052e25dbcc8267b3026160dc73cddaac7d5fd",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "devenv",
"type": "github"
}
},
"devenv_2": {
"inputs": {
"cachix": "cachix_3",
"flake-compat": [
"cachix",
"devenv",
"cachix",
"flake-compat"
],
"nix": "nix_2",
"nixpkgs": [
"cachix",
"devenv",
"cachix",
"nixpkgs"
],
"pre-commit-hooks": [
"cachix",
"devenv",
"cachix",
"git-hooks"
]
},
"locked": {
"lastModified": 1723156315,
"narHash": "sha256-0JrfahRMJ37Rf1i0iOOn+8Z4CLvbcGNwa2ChOAVrp/8=",
@ -164,9 +239,11 @@
"type": "github"
}
},
"devenv_2": {
"devenv_3": {
"inputs": {
"flake-compat": [
"cachix",
"devenv",
"cachix",
"devenv",
"cachix",
@ -176,6 +253,8 @@
"nixpkgs": "nixpkgs_2",
"poetry2nix": "poetry2nix",
"pre-commit-hooks": [
"cachix",
"devenv",
"cachix",
"devenv",
"cachix",
@ -205,11 +284,11 @@
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1725690497,
"narHash": "sha256-5fT+96rV7Hx29HG+4/oBbr3V+yExKuLN2vcBcPbVBlU=",
"lastModified": 1729375822,
"narHash": "sha256-bRo4xVwUhvJ4Gz+OhWMREFMdBOYSw4Yi1Apj01ebbug=",
"owner": "nix-community",
"repo": "fenix",
"rev": "4b8d964df93d1f918ee6c4f003b3548c432cc866",
"rev": "2853e7d9b5c52a148a9fb824bfe4f9f433f557ab",
"type": "github"
},
"original": {
@ -305,27 +384,32 @@
"type": "github"
}
},
"flake-utils": {
"flake-parts_2": {
"inputs": {
"systems": "systems"
"nixpkgs-lib": [
"cachix",
"devenv",
"nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"lastModified": 1712014858,
"narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "9126214d0a59633752a136528f5f3b9aa8565b7d",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-utils_2": {
"flake-utils": {
"inputs": {
"systems": "systems_2"
"systems": "systems"
},
"locked": {
"lastModified": 1689068808,
@ -341,16 +425,31 @@
"type": "github"
}
},
"flake-utils_3": {
"inputs": {
"systems": "systems_3"
},
"flake-utils_2": {
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"lastModified": 1667395993,
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_3": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1726560853,
"narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a",
"type": "github"
},
"original": {
@ -374,11 +473,11 @@
"nixpkgs-stable": "nixpkgs-stable_2"
},
"locked": {
"lastModified": 1723202784,
"narHash": "sha256-qbhjc/NEGaDbyy0ucycubq4N3//gDFFH3DOmp1D3u1Q=",
"lastModified": 1727854478,
"narHash": "sha256-/odH2nUMAwkMgOS2nG2z0exLQNJS4S2LfMW0teqU7co=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "c7012d0c18567c889b948781bc74a501e92275d1",
"rev": "5f58871c9657b5fc0a7f65670fe2ba99c26c1d79",
"type": "github"
},
"original": {
@ -409,14 +508,30 @@
"type": "github"
}
},
"libgit2": {
"flake": false,
"locked": {
"lastModified": 1697646580,
"narHash": "sha256-oX4Z3S9WtJlwvj0uH9HlYcWv+x1hqp8mhXl7HsLu2f0=",
"owner": "libgit2",
"repo": "libgit2",
"rev": "45fd9ed7ae1a9b74b957ef4f337bc3c8b3df01b5",
"type": "github"
},
"original": {
"owner": "libgit2",
"repo": "libgit2",
"type": "github"
}
},
"liburing": {
"flake": false,
"locked": {
"lastModified": 1725659644,
"narHash": "sha256-WjnpmopfvFoUbubIu9bki+Y6P4YXDfvnW4+72hniq3g=",
"lastModified": 1729374610,
"narHash": "sha256-RHJYCoCjgSPSsSpM4Ea5GQHdap6Ffa7X0XOgh2SMAHY=",
"owner": "axboe",
"repo": "liburing",
"rev": "0fe5c09195c0918f89582dd6ff098a58a0bdf62a",
"rev": "01b9255806be89ca9001a268c6efbb179c80504e",
"type": "github"
},
"original": {
@ -434,6 +549,8 @@
"devenv",
"cachix",
"devenv",
"cachix",
"devenv",
"nixpkgs"
],
"nixpkgs-regression": "nixpkgs-regression"
@ -476,6 +593,8 @@
"devenv",
"cachix",
"devenv",
"cachix",
"devenv",
"poetry2nix",
"nixpkgs"
]
@ -497,11 +616,15 @@
"nix_2": {
"inputs": {
"flake-compat": [
"cachix",
"devenv",
"cachix",
"devenv",
"flake-compat"
],
"nixpkgs": [
"cachix",
"devenv",
"cachix",
"devenv",
"nixpkgs"
@ -523,13 +646,42 @@
"type": "github"
}
},
"nix_3": {
"inputs": {
"flake-compat": [
"cachix",
"devenv",
"flake-compat"
],
"flake-parts": "flake-parts_2",
"libgit2": "libgit2",
"nixpkgs": "nixpkgs_3",
"nixpkgs-23-11": "nixpkgs-23-11",
"nixpkgs-regression": "nixpkgs-regression_3",
"pre-commit-hooks": "pre-commit-hooks"
},
"locked": {
"lastModified": 1727438425,
"narHash": "sha256-X8ES7I1cfNhR9oKp06F6ir4Np70WGZU5sfCOuNBEwMg=",
"owner": "domenkozar",
"repo": "nix",
"rev": "f6c5ae4c1b2e411e6b1e6a8181cc84363d6a7546",
"type": "github"
},
"original": {
"owner": "domenkozar",
"ref": "devenv-2.24",
"repo": "nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1724999960,
"narHash": "sha256-LB3jqSGW5u1ZcUcX6vO/qBOq5oXHlmOCxsTXGMEitp4=",
"lastModified": 1726042813,
"narHash": "sha256-LnNKCCxnwgF+575y0pxUdlGZBO/ru1CtGHIqQVfvjlA=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "b96f849e725333eb2b1c7f1cb84ff102062468ba",
"rev": "159be5db480d1df880a0135ca0bfed84c2f88353",
"type": "github"
},
"original": {
@ -539,6 +691,22 @@
"type": "github"
}
},
"nixpkgs-23-11": {
"locked": {
"lastModified": 1717159533,
"narHash": "sha256-oamiKNfr2MS6yH64rUn99mIZjc45nGJlj9eGth/3Xuw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "a62e6edd6d5e1fa0329b8653c801147986f8d446",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "a62e6edd6d5e1fa0329b8653c801147986f8d446",
"type": "github"
}
},
"nixpkgs-regression": {
"locked": {
"lastModified": 1643052045,
@ -571,6 +739,22 @@
"type": "github"
}
},
"nixpkgs-regression_3": {
"locked": {
"lastModified": 1643052045,
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1724316499,
@ -621,11 +805,27 @@
},
"nixpkgs_3": {
"locked": {
"lastModified": 1722813957,
"narHash": "sha256-IAoYyYnED7P8zrBFMnmp7ydaJfwTnwcnqxUElC1I26Y=",
"lastModified": 1717432640,
"narHash": "sha256-+f9c4/ZX5MWDOuB1rKoWj+lBNm0z0rs4CK47HBLxy1o=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "cb9a96f23c491c081b38eab96d22fa958043c9fa",
"rev": "88269ab3044128b7c2f4c7d68448b2fb50456870",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "release-24.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_4": {
"locked": {
"lastModified": 1727802920,
"narHash": "sha256-HP89HZOT0ReIbI7IJZJQoJgxvB2Tn28V6XS3MNKnfLs=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "27e30d177e57d912d614c88c622dcfdb2e6e6515",
"type": "github"
},
"original": {
@ -635,13 +835,13 @@
"type": "github"
}
},
"nixpkgs_4": {
"nixpkgs_5": {
"locked": {
"lastModified": 1725534445,
"narHash": "sha256-Yd0FK9SkWy+ZPuNqUgmVPXokxDgMJoGuNpMEtkfcf84=",
"lastModified": 1729265718,
"narHash": "sha256-4HQI+6LsO3kpWTYuVGIzhJs1cetFcwT7quWCk/6rqeo=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9bb1e7571aadf31ddb4af77fc64b2d59580f9a39",
"rev": "ccc0c2126893dd20963580b6478d1a10a4512185",
"type": "github"
},
"original": {
@ -653,13 +853,15 @@
},
"poetry2nix": {
"inputs": {
"flake-utils": "flake-utils_2",
"flake-utils": "flake-utils",
"nix-github-actions": "nix-github-actions",
"nixpkgs": [
"cachix",
"devenv",
"cachix",
"devenv",
"cachix",
"devenv",
"nixpkgs"
]
},
@ -677,19 +879,59 @@
"type": "github"
}
},
"pre-commit-hooks": {
"inputs": {
"flake-compat": [
"cachix",
"devenv",
"nix"
],
"flake-utils": "flake-utils_2",
"gitignore": [
"cachix",
"devenv",
"nix"
],
"nixpkgs": [
"cachix",
"devenv",
"nix",
"nixpkgs"
],
"nixpkgs-stable": [
"cachix",
"devenv",
"nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1712897695,
"narHash": "sha256-nMirxrGteNAl9sWiOhoN5tIHyjBbVi5e2tgZUgZlK3Y=",
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"rev": "40e6053ecb65fcbf12863338a6dcefb3f55f1bf8",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"type": "github"
}
},
"rocksdb": {
"flake": false,
"locked": {
"lastModified": 1725811807,
"narHash": "sha256-k6aubvLSJwW7CtIxBRSlM6Z8UiJDCdFUgDWPYLV47Qk=",
"lastModified": 1729189920,
"narHash": "sha256-uUqD4vwXxBMsM+6BTWKxEURCXByvDvV8QXunny3QlXU=",
"owner": "girlbossceo",
"repo": "rocksdb",
"rev": "347d50e212b24d98b5ad9841404ff80c1bb873f0",
"rev": "4ca9758edc373edb9e3e65e3662ae88aafb9d137",
"type": "github"
},
"original": {
"owner": "girlbossceo",
"ref": "v9.6.1",
"ref": "v9.7.2",
"repo": "rocksdb",
"type": "github"
}
@ -705,18 +947,18 @@
"flake-utils": "flake-utils_3",
"liburing": "liburing",
"nix-filter": "nix-filter",
"nixpkgs": "nixpkgs_4",
"nixpkgs": "nixpkgs_5",
"rocksdb": "rocksdb"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1725630423,
"narHash": "sha256-gNCLk3Zg7JlAwmWbVHTH6f3+iqdeQ4fheOotCZy8x5M=",
"lastModified": 1729255720,
"narHash": "sha256-yODOuZxBkS0UfqMa6nmbqNbVfIbsu0tYLbV5vZzmsqI=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "08c7bbc2dbe4dcc8968484f1a0e1e6fe7a1d4f6d",
"rev": "72b214fbfbe6f7b95a7877b962783bd42062cc0a",
"type": "github"
},
"original": {
@ -755,21 +997,6 @@
"repo": "default",
"type": "github"
}
},
"systems_3": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",


@ -9,7 +9,7 @@
flake-utils.url = "github:numtide/flake-utils?ref=main";
nix-filter.url = "github:numtide/nix-filter?ref=main";
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable";
rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.6.1"; flake = false; };
rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.7.2"; flake = false; };
liburing = { url = "github:axboe/liburing?ref=master"; flake = false; };
};
@ -18,7 +18,6 @@
let
pkgsHost = import inputs.nixpkgs{
inherit system;
config.permittedInsecurePackages = [ "olm-3.2.16" ];
};
pkgsHostStatic = pkgsHost.pkgsStatic;
@ -27,7 +26,7 @@
file = ./rust-toolchain.toml;
# See also `rust-toolchain.toml`
sha256 = "sha256-VZZnlyP69+Y3crrLHQyJirqlHrTtGTsyiSnZB8jEvVo=";
sha256 = "sha256-yMuSb5eQPO/bHv+Bcf/US8LVMbf/G/0MSfiPwBhiPpk=";
};
mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: {
@ -38,10 +37,19 @@
inherit inputs;
main = self.callPackage ./nix/pkgs/main {};
oci-image = self.callPackage ./nix/pkgs/oci-image {};
tini = pkgs.tini.overrideAttrs {
# newer clang/gcc is unhappy with tini-static: <https://3.dog/~strawberry/pb/c8y4>
patches = [ (pkgs.fetchpatch {
url = "https://patch-diff.githubusercontent.com/raw/krallin/tini/pull/224.patch";
hash = "sha256-4bTfAhRyIT71VALhHY13hUgbjLEUyvgkIJMt3w9ag3k=";
})
];
};
liburing = pkgs.liburing.overrideAttrs {
# Tests weren't building
outputs = [ "out" "dev" "man" ];
buildFlags = [ "library"];
buildFlags = [ "library" ];
src = inputs.liburing;
};
rocksdb = (pkgs.rocksdb.override {
@ -88,6 +96,17 @@
scopeHost = mkScope pkgsHost;
scopeHostStatic = mkScope pkgsHostStatic;
scopeCrossLinux = mkScope pkgsHost.pkgsLinux.pkgsStatic;
mkCrossScope = crossSystem:
let pkgsCrossStatic = (import inputs.nixpkgs {
inherit system;
crossSystem = {
config = crossSystem;
};
}).pkgsStatic;
in
mkScope pkgsCrossStatic;
mkDevShell = scope: scope.pkgs.mkShell {
env = scope.main.env // {
@ -100,9 +119,9 @@
# code.
COMPLEMENT_SRC = inputs.complement.outPath;
# Needed for Complement
CGO_CFLAGS = "-I${scope.pkgs.olm}/include";
CGO_LDFLAGS = "-L${scope.pkgs.olm}/lib";
# Needed for Complement: <https://github.com/golang/go/issues/52690>
CGO_CFLAGS = "-Wl,--no-gc-sections";
CGO_LDFLAGS = "-Wl,--no-gc-sections";
};
# Development tools
@ -116,9 +135,6 @@
toolchain
]
++ (with pkgsHost.pkgs; [
engage
cargo-audit
liburing
# Required by hardened-malloc.rs dep
binutils
@ -126,9 +142,15 @@
# Needed for producing Debian packages
cargo-deb
# Needed for CI
cargo-audit
# Needed for CI to check validity of produced Debian packages (dpkg-deb)
dpkg
# Needed for CI
engage
# Needed for Complement
go
@ -149,12 +171,22 @@
# needed so we can get rid of gcc and other unused deps that bloat OCI images
removeReferencesTo
])
]
# liburing is Linux-exclusive
++ lib.optional stdenv.hostPlatform.isLinux liburing
# needed to build Rust applications on macOS
++ lib.optionals stdenv.hostPlatform.isDarwin [
# https://github.com/NixOS/nixpkgs/issues/206242
# ld: library not found for -liconv
libiconv
# https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell
# https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612
pkgsBuildHost.darwin.apple_sdk.frameworks.Security
])
++ scope.main.buildInputs
++ scope.main.propagatedBuildInputs
++ scope.main.nativeBuildInputs;
meta.broken = scope.main.meta.broken;
};
in
{
@ -228,6 +260,8 @@
complement = scopeHost.complement;
static-complement = scopeHostStatic.complement;
# macOS containers don't exist, so the complement images must be forced to linux
linux-complement = (mkCrossScope "${pkgsHost.hostPlatform.qemuArch}-linux-musl").complement;
}
//
builtins.listToAttrs
@ -236,14 +270,7 @@
(crossSystem:
let
binaryName = "static-${crossSystem}";
pkgsCrossStatic =
(import inputs.nixpkgs {
inherit system;
crossSystem = {
config = crossSystem;
};
}).pkgsStatic;
scopeCrossStatic = mkScope pkgsCrossStatic;
scopeCrossStatic = mkCrossScope crossSystem;
in
[
# An output for a statically-linked binary
@ -373,11 +400,20 @@
};
};
}
# An output for a complement OCI image for the specified platform
{
name = "complement-${crossSystem}";
value = scopeCrossStatic.complement;
}
]
)
[
"x86_64-unknown-linux-musl"
"aarch64-unknown-linux-musl"
#"x86_64-apple-darwin"
#"aarch64-apple-darwin"
"x86_64-linux-gnu"
"x86_64-linux-musl"
"aarch64-linux-musl"
]
)
);


@ -16,6 +16,7 @@ url_preview_domain_contains_allowlist = ["*"]
media_compat_file_link = false
media_startup_check = false
rocksdb_direct_io = false
log_colors = false
[global.tls]
certs = "/certificate.crt"


@ -18,6 +18,15 @@ let
all_features = true;
disable_release_max_log_level = true;
disable_features = [
# no reason to use jemalloc for complement, just has compatibility/build issues
"jemalloc"
# console/CLI stuff isn't used or relevant for complement
"console"
"tokio_console"
# sentry telemetry isn't useful for complement, disabled by default anyways
"sentry_telemetry"
# the containers don't use or need systemd signal support
"systemd"
# this is non-functional on nix for some reason
"hardened_malloc"
# dont include experimental features
@ -57,7 +66,7 @@ let
in
dockerTools.buildImage {
name = "complement-${main.pname}";
name = "complement-conduwuit";
tag = "main";
copyToRoot = buildEnv {
@ -78,7 +87,7 @@ dockerTools.buildImage {
"${lib.getExe start}"
];
Entrypoint = if !stdenv.isDarwin
Entrypoint = if !stdenv.hostPlatform.isDarwin
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
# are handled as expected
then [ "${lib.getExe' tini "tini"}" "--" ]


@ -13,12 +13,6 @@ lib.optionalAttrs stdenv.hostPlatform.isStatic {
lib.concatStringsSep
" "
([]
++ lib.optionals
stdenv.targetPlatform.isx86_64
[ "-C" "target-cpu=x86-64-v2" ]
++ lib.optionals
stdenv.targetPlatform.isAarch64
[ "-C" "target-cpu=cortex-a53" ] # cortex-a53 == ARMv8-A
# This disables PIE for static builds, which isn't great in terms
# of security. Unfortunately, my hand is forced because nixpkgs'
# `libstdc++.a` is built without `-fPIE`, which precludes us from
@ -41,7 +35,7 @@ lib.optionalAttrs stdenv.hostPlatform.isStatic {
# including it here. Linkers are weird.
(stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64)
&& stdenv.hostPlatform.isStatic
&& !stdenv.isDarwin
&& !stdenv.hostPlatform.isDarwin
&& !stdenv.cc.bintools.isLLVM
)
[
@ -58,7 +52,7 @@ lib.optionalAttrs stdenv.hostPlatform.isStatic {
# even covers the case of build scripts that need native code compiled and
# run on the build platform (I think).
#
# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80
# [0]: https://github.com/NixOS/nixpkgs/blob/nixpkgs-unstable/pkgs/build-support/rust/lib/default.nix#L48-L68
//
(
let
@ -74,19 +68,23 @@ lib.optionalAttrs stdenv.hostPlatform.isStatic {
{
"CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" =
envVars.linkerForTarget;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForTarget;
}
)
//
(
let
inherit (stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
shouldUseLLD = platform: platform.isAarch64 && platform.isStatic && !stdenv.hostPlatform.isDarwin;
linkerForHost = if shouldUseLLD stdenv.targetPlatform
&& !stdenv.cc.bintools.isLLVM
then "${pkgsBuildHost.llvmPackages.bintools}/bin/${stdenv.cc.targetPrefix}ld.lld"
else envVars.ccForHost;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForHost;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = linkerForHost;
CARGO_BUILD_TARGET = rustcTarget;
}
)
@ -98,7 +96,7 @@ lib.optionalAttrs stdenv.hostPlatform.isStatic {
{
"CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForBuild;
HOST_CC = "${pkgsBuildHost.stdenv.cc}/bin/cc";
HOST_CXX = "${pkgsBuildHost.stdenv.cc}/bin/c++";
}


@ -40,7 +40,7 @@ features'' = lib.subtractLists disable_features' features';
featureEnabled = feature : builtins.elem feature features'';
enableLiburing = featureEnabled "io_uring" && !stdenv.isDarwin;
enableLiburing = featureEnabled "io_uring" && !stdenv.hostPlatform.isDarwin;
# This derivation will set the JEMALLOC_OVERRIDE variable, causing the
# tikv-jemalloc-sys crate to use the nixpkgs jemalloc instead of building it's
@ -72,35 +72,13 @@ buildDepsOnlyEnv =
# jemalloc symbols are prefixed.
#
# [1]: https://github.com/tikv/jemallocator/blob/ab0676d77e81268cd09b059260c75b38dbef2d51/jemalloc-sys/src/env.rs#L17
enableJemalloc = featureEnabled "jemalloc" && !stdenv.isDarwin;
enableJemalloc = featureEnabled "jemalloc" && !stdenv.hostPlatform.isDarwin;
# for some reason enableLiburing in nixpkgs rocksdb is default true
# which breaks Darwin entirely
enableLiburing = enableLiburing;
}).overrideAttrs (old: {
# TODO: static rocksdb fails to build on darwin, also see <https://github.com/NixOS/nixpkgs/issues/320448>
# build log at <https://girlboss.ceo/~strawberry/pb/JjGH>
meta.broken = stdenv.hostPlatform.isStatic && stdenv.isDarwin;
enableLiburing = enableLiburing;
sse42Support = stdenv.targetPlatform.isx86_64;
cmakeFlags = if stdenv.targetPlatform.isx86_64
then lib.subtractLists [ "-DPORTABLE=1" ] old.cmakeFlags
++ lib.optionals stdenv.targetPlatform.isx86_64 [
"-DPORTABLE=x86-64-v2"
"-DUSE_SSE=1"
"-DHAVE_SSE=1"
"-DHAVE_SSE42=1"
]
else if stdenv.targetPlatform.isAarch64
then lib.subtractLists [ "-DPORTABLE=1" ] old.cmakeFlags
++ lib.optionals stdenv.targetPlatform.isAarch64 [
# cortex-a53 == ARMv8-A
"-DPORTABLE=armv8-a"
]
else old.cmakeFlags;
});
in
{
@ -127,11 +105,7 @@ buildPackageEnv = {
# Only needed in static stdenv because these are transitive dependencies of rocksdb
CARGO_BUILD_RUSTFLAGS = buildDepsOnlyEnv.CARGO_BUILD_RUSTFLAGS
+ lib.optionalString (enableLiburing && stdenv.hostPlatform.isStatic)
" -L${lib.getLib liburing}/lib -luring"
+ lib.optionalString stdenv.targetPlatform.isx86_64
" -Ctarget-cpu=x86-64-v2"
+ lib.optionalString stdenv.targetPlatform.isAarch64
" -Ctarget-cpu=cortex-a53"; # cortex-a53 == ARMv8-A
" -L${lib.getLib liburing}/lib -luring";
};
@ -159,7 +133,17 @@ commonAttrs = {
dontStrip = profile == "dev" || profile == "test";
dontPatchELF = profile == "dev" || profile == "test";
buildInputs = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys';
buildInputs = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys'
# needed to build Rust applications on macOS
++ lib.optionals stdenv.hostPlatform.isDarwin [
# https://github.com/NixOS/nixpkgs/issues/206242
# ld: library not found for -liconv
libiconv
# https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell
# https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612
pkgsBuildHost.darwin.apple_sdk.frameworks.Security
];
nativeBuildInputs = [
# bindgen needs the build platform's libclang. Apparently due to "splicing
@ -176,8 +160,10 @@ commonAttrs = {
# needed so we can get rid of gcc and other unused deps that bloat OCI images
removeReferencesTo
]
++ lib.optionals stdenv.isDarwin [
# needed to build Rust applications on macOS
++ lib.optionals stdenv.hostPlatform.isDarwin [
# https://github.com/NixOS/nixpkgs/issues/206242
# ld: library not found for -liconv
libiconv
# https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell
@ -189,7 +175,7 @@ commonAttrs = {
#
# <https://github.com/input-output-hk/haskell.nix/issues/829>
postInstall = with pkgsBuildHost; ''
find "$out" -type f -exec remove-references-to -t ${stdenv.cc} -t ${gcc} -t ${libgcc} -t ${linuxHeaders} -t ${libidn2} -t ${libunistring} '{}' +
find "$out" -type f -exec remove-references-to -t ${stdenv.cc} -t ${rustc.unwrapped} -t ${rustc} '{}' +
'';
};
in


@ -16,7 +16,7 @@ dockerTools.buildLayeredImage {
dockerTools.caCertificates
];
config = {
Entrypoint = if !stdenv.isDarwin
Entrypoint = if !stdenv.hostPlatform.isDarwin
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
# are handled as expected
then [ "${lib.getExe' tini "tini"}" "--" ]


@ -2,8 +2,6 @@
#
# Other files that need upkeep when this changes:
#
# * `.gitlab-ci.yml`
# * `.github/workflows/ci.yml`
# * `Cargo.toml`
# * `flake.nix`
#
@ -11,13 +9,20 @@
# If you're having trouble making the relevant changes, bug a maintainer.
[toolchain]
channel = "1.81.0"
channel = "1.82.0"
profile = "minimal"
components = [
# For rust-analyzer
"rust-src",
"rust-analyzer",
# For CI and editors
"rustfmt",
"clippy",
]
targets = [
#"x86_64-apple-darwin",
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
"aarch64-unknown-linux-musl",
#"aarch64-apple-darwin",
]


@ -1,28 +1,27 @@
edition = "2021"
array_width = 80
chain_width = 60
comment_width = 80
condense_wildcard_suffixes = true
edition = "2021"
fn_call_width = 80
fn_params_layout = "Compressed"
fn_single_line = true
format_code_in_doc_comments = true
format_macro_bodies = true
format_macro_matchers = true
format_strings = true
hex_literal_case = "Upper"
max_width = 120
tab_spaces = 4
array_width = 80
comment_width = 80
wrap_comments = true
fn_params_layout = "Compressed"
fn_call_width = 80
fn_single_line = true
group_imports = "StdExternalCrate"
hard_tabs = true
match_block_trailing_comma = true
hex_literal_case = "Upper"
imports_granularity = "Crate"
match_block_trailing_comma = true
max_width = 120
newline_style = "Unix"
normalize_comments = false
reorder_impl_items = true
reorder_imports = true
group_imports = "StdExternalCrate"
newline_style = "Unix"
tab_spaces = 4
use_field_init_shorthand = true
use_small_heuristics = "Off"
use_try_shorthand = true
chain_width = 60
wrap_comments = true


@ -29,10 +29,11 @@ release_max_log_level = [
clap.workspace = true
conduit-api.workspace = true
conduit-core.workspace = true
conduit-database.workspace = true
conduit-macros.workspace = true
conduit-service.workspace = true
const-str.workspace = true
futures-util.workspace = true
futures.workspace = true
log.workspace = true
ruma.workspace = true
serde_json.workspace = true


@ -1,5 +1,6 @@
use conduit::Result;
use conduit_macros::implement;
use futures::StreamExt;
use ruma::events::room::message::RoomMessageEventContent;
use crate::Command;
@ -10,14 +11,12 @@ use crate::Command;
#[implement(Command, params = "<'_>")]
pub(super) async fn check_all_users(&self) -> Result<RoomMessageEventContent> {
let timer = tokio::time::Instant::now();
let results = self.services.users.db.iter();
let users = self.services.users.iter().collect::<Vec<_>>().await;
let query_time = timer.elapsed();
let users = results.collect::<Vec<_>>();
let total = users.len();
let err_count = users.iter().filter(|user| user.is_err()).count();
let ok_count = users.iter().filter(|user| user.is_ok()).count();
// the rewritten users iterator is infallible, so no entries can be errors here
let err_count = users.iter().filter(|_user| false).count();
let ok_count = users.iter().filter(|_user| true).count();
let message = format!(
"Database query completed in {query_time:?}:\n\n```\nTotal entries: {total:?}\nFailure/Invalid user count: \


@ -1,18 +1,17 @@
use std::{
collections::{BTreeMap, HashMap},
collections::HashMap,
fmt::Write,
sync::Arc,
time::{Instant, SystemTime},
};
use api::client::validate_and_add_event_id;
use conduit::{debug, debug_error, err, info, trace, utils, warn, Error, PduEvent, Result};
use conduit::{debug_error, err, info, trace, utils, utils::string::EMPTY, warn, Error, PduEvent, Result};
use futures::StreamExt;
use ruma::{
api::{client::error::ErrorKind, federation::event::get_room_state},
events::room::message::RoomMessageEventContent,
CanonicalJsonObject, EventId, OwnedRoomOrAliasId, RoomId, RoomVersionId, ServerName,
};
use tokio::sync::RwLock;
use tracing_subscriber::EnvFilter;
use crate::admin_command;
@ -26,37 +25,39 @@ pub(super) async fn echo(&self, message: Vec<String>) -> Result<RoomMessageEvent
#[admin_command]
pub(super) async fn get_auth_chain(&self, event_id: Box<EventId>) -> Result<RoomMessageEventContent> {
let event_id = Arc::<EventId>::from(event_id);
if let Some(event) = self.services.rooms.timeline.get_pdu_json(&event_id)? {
let room_id_str = event
.get("room_id")
.and_then(|val| val.as_str())
.ok_or_else(|| Error::bad_database("Invalid event in database"))?;
let Ok(event) = self.services.rooms.timeline.get_pdu_json(&event_id).await else {
return Ok(RoomMessageEventContent::notice_plain("Event not found."));
};
let room_id = <&RoomId>::try_from(room_id_str)
.map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
let room_id_str = event
.get("room_id")
.and_then(|val| val.as_str())
.ok_or_else(|| Error::bad_database("Invalid event in database"))?;
let start = Instant::now();
let count = self
.services
.rooms
.auth_chain
.event_ids_iter(room_id, vec![event_id])
.await?
.count();
let room_id = <&RoomId>::try_from(room_id_str)
.map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
let elapsed = start.elapsed();
Ok(RoomMessageEventContent::text_plain(format!(
"Loaded auth chain with length {count} in {elapsed:?}"
)))
} else {
Ok(RoomMessageEventContent::text_plain("Event not found."))
}
let start = Instant::now();
let count = self
.services
.rooms
.auth_chain
.event_ids_iter(room_id, &[&event_id])
.await?
.count()
.await;
let elapsed = start.elapsed();
Ok(RoomMessageEventContent::text_plain(format!(
"Loaded auth chain with length {count} in {elapsed:?}"
)))
}
#[admin_command]
pub(super) async fn parse_pdu(&self) -> Result<RoomMessageEventContent> {
if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```"
if self.body.len() < 2
|| !self.body[0].trim().starts_with("```")
|| self.body.last().unwrap_or(&EMPTY).trim() != "```"
{
return Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
@ -91,13 +92,16 @@ pub(super) async fn get_pdu(&self, event_id: Box<EventId>) -> Result<RoomMessage
.services
.rooms
.timeline
.get_non_outlier_pdu_json(&event_id)?;
if pdu_json.is_none() {
.get_non_outlier_pdu_json(&event_id)
.await;
if pdu_json.is_err() {
outlier = true;
pdu_json = self.services.rooms.timeline.get_pdu_json(&event_id)?;
pdu_json = self.services.rooms.timeline.get_pdu_json(&event_id).await;
}
match pdu_json {
Some(json) => {
Ok(json) => {
let json_text = serde_json::to_string_pretty(&json).expect("canonical json is valid json");
Ok(RoomMessageEventContent::notice_markdown(format!(
"{}\n```json\n{}\n```",
@ -109,7 +113,7 @@ pub(super) async fn get_pdu(&self, event_id: Box<EventId>) -> Result<RoomMessage
json_text
)))
},
None => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")),
Err(_) => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")),
}
}
@ -130,7 +134,9 @@ pub(super) async fn get_remote_pdu_list(
));
}
if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```"
if self.body.len() < 2
|| !self.body[0].trim().starts_with("```")
|| self.body.last().unwrap_or(&EMPTY).trim() != "```"
{
return Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
@ -157,7 +163,8 @@ pub(super) async fn get_remote_pdu_list(
.send_message(RoomMessageEventContent::text_plain(format!(
"Failed to get remote PDU, ignoring error: {e}"
)))
.await;
.await
.ok();
warn!("Failed to get remote PDU, ignoring error: {e}");
} else {
success_count = success_count.saturating_add(1);
@ -196,6 +203,7 @@ pub(super) async fn get_remote_pdu(
&server,
ruma::api::federation::event::get_event::v1::Request {
event_id: event_id.clone().into(),
include_unredacted_content: None,
},
)
.await
@ -210,12 +218,14 @@ pub(super) async fn get_remote_pdu(
})?;
trace!("Attempting to parse PDU: {:?}", &response.pdu);
let parsed_pdu = {
let _parsed_pdu = {
let parsed_result = self
.services
.rooms
.event_handler
.parse_incoming_pdu(&response.pdu);
.parse_incoming_pdu(&response.pdu)
.await;
let (event_id, value, room_id) = match parsed_result {
Ok(t) => t,
Err(e) => {
@ -230,22 +240,11 @@ pub(super) async fn get_remote_pdu(
vec![(event_id, value, room_id)]
};
let pub_key_map = RwLock::new(BTreeMap::new());
debug!("Attempting to fetch homeserver signing keys for {server}");
self.services
.server_keys
.fetch_required_signing_keys(parsed_pdu.iter().map(|(_event_id, event, _room_id)| event), &pub_key_map)
.await
.unwrap_or_else(|e| {
warn!("Could not fetch all signatures for PDUs from {server}: {e:?}");
});
info!("Attempting to handle event ID {event_id} as backfilled PDU");
self.services
.rooms
.timeline
.backfill_pdu(&server, response.pdu, &pub_key_map)
.backfill_pdu(&server, response.pdu)
.await?;
let json_text = serde_json::to_string_pretty(&json).expect("canonical json is valid json");
@ -333,9 +332,12 @@ pub(super) async fn ping(&self, server: Box<ServerName>) -> Result<RoomMessageEv
#[admin_command]
pub(super) async fn force_device_list_updates(&self) -> Result<RoomMessageEventContent> {
// Force E2EE device list updates for all users
for user_id in self.services.users.iter().filter_map(Result::ok) {
self.services.users.mark_device_key_update(&user_id)?;
}
self.services
.users
.stream()
.for_each(|user_id| self.services.users.mark_device_key_update(user_id))
.await;
Ok(RoomMessageEventContent::text_plain(
"Marked all devices for all users as having new keys to update",
))
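`force_device_list_updates` above swaps the fallible iterator (`iter().filter_map(Result::ok)`) for a user stream driven by `for_each`. A sketch with a hypothetical future-returning `mark_device_key_update` in place of the real service call:

```rs
use futures::{stream, StreamExt};

// Hypothetical stand-in for the service's future-returning update call.
async fn mark_device_key_update(user_id: &str) {
	println!("queued device list update for {user_id}");
}

#[tokio::main]
async fn main() {
	stream::iter(["@alice:example.org", "@bob:example.org"])
		// `for_each` awaits the future produced for each item, in order.
		.for_each(|user_id| mark_device_key_update(user_id))
		.await;
}
```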
@ -419,12 +421,10 @@ pub(super) async fn sign_json(&self) -> Result<RoomMessageEventContent> {
let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n");
match serde_json::from_str(&string) {
Ok(mut value) => {
ruma::signatures::sign_json(
self.services.globals.server_name().as_str(),
self.services.globals.keypair(),
&mut value,
)
.expect("our request json is what ruma expects");
self.services
.server_keys
.sign_json(&mut value)
.expect("our request json is what ruma expects");
let json_text = serde_json::to_string_pretty(&value).expect("canonical json is valid json");
Ok(RoomMessageEventContent::text_plain(json_text))
},
@ -442,27 +442,31 @@ pub(super) async fn verify_json(&self) -> Result<RoomMessageEventContent> {
}
let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n");
match serde_json::from_str(&string) {
Ok(value) => {
let pub_key_map = RwLock::new(BTreeMap::new());
self.services
.server_keys
.fetch_required_signing_keys([&value], &pub_key_map)
.await?;
let pub_key_map = pub_key_map.read().await;
match ruma::signatures::verify_json(&pub_key_map, &value) {
Ok(()) => Ok(RoomMessageEventContent::text_plain("Signature correct")),
Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Signature verification failed: {e}"
))),
}
match serde_json::from_str::<CanonicalJsonObject>(&string) {
Ok(value) => match self.services.server_keys.verify_json(&value, None).await {
Ok(()) => Ok(RoomMessageEventContent::text_plain("Signature correct")),
Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Signature verification failed: {e}"
))),
},
Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))),
}
}
#[admin_command]
pub(super) async fn verify_pdu(&self, event_id: Box<EventId>) -> Result<RoomMessageEventContent> {
let mut event = self.services.rooms.timeline.get_pdu_json(&event_id).await?;
event.remove("event_id");
let msg = match self.services.server_keys.verify_event(&event, None).await {
Ok(ruma::signatures::Verified::Signatures) => "signatures OK, but content hash failed (redaction).",
Ok(ruma::signatures::Verified::All) => "signatures and hashes OK.",
Err(e) => return Err(e),
};
Ok(RoomMessageEventContent::notice_plain(msg))
}
#[admin_command]
#[tracing::instrument(skip(self))]
pub(super) async fn first_pdu_in_room(&self, room_id: Box<RoomId>) -> Result<RoomMessageEventContent> {
@ -470,7 +474,8 @@ pub(super) async fn first_pdu_in_room(&self, room_id: Box<RoomId>) -> Result<Roo
.services
.rooms
.state_cache
.server_in_room(&self.services.globals.config.server_name, &room_id)?
.server_in_room(&self.services.globals.config.server_name, &room_id)
.await
{
return Ok(RoomMessageEventContent::text_plain(
"We are not participating in the room / we don't know about the room ID.",
@ -481,8 +486,9 @@ pub(super) async fn first_pdu_in_room(&self, room_id: Box<RoomId>) -> Result<Roo
.services
.rooms
.timeline
.first_pdu_in_room(&room_id)?
.ok_or_else(|| Error::bad_database("Failed to find the first PDU in database"))?;
.first_pdu_in_room(&room_id)
.await
.map_err(|_| Error::bad_database("Failed to find the first PDU in database"))?;
Ok(RoomMessageEventContent::text_plain(format!("{first_pdu:?}")))
}
@ -494,7 +500,8 @@ pub(super) async fn latest_pdu_in_room(&self, room_id: Box<RoomId>) -> Result<Ro
.services
.rooms
.state_cache
.server_in_room(&self.services.globals.config.server_name, &room_id)?
.server_in_room(&self.services.globals.config.server_name, &room_id)
.await
{
return Ok(RoomMessageEventContent::text_plain(
"We are not participating in the room / we don't know about the room ID.",
@ -505,8 +512,9 @@ pub(super) async fn latest_pdu_in_room(&self, room_id: Box<RoomId>) -> Result<Ro
.services
.rooms
.timeline
.latest_pdu_in_room(&room_id)?
.ok_or_else(|| Error::bad_database("Failed to find the latest PDU in database"))?;
.latest_pdu_in_room(&room_id)
.await
.map_err(|_| Error::bad_database("Failed to find the latest PDU in database"))?;
Ok(RoomMessageEventContent::text_plain(format!("{latest_pdu:?}")))
}
@ -520,7 +528,8 @@ pub(super) async fn force_set_room_state_from_server(
.services
.rooms
.state_cache
.server_in_room(&self.services.globals.config.server_name, &room_id)?
.server_in_room(&self.services.globals.config.server_name, &room_id)
.await
{
return Ok(RoomMessageEventContent::text_plain(
"We are not participating in the room / we don't know about the room ID.",
@ -531,13 +540,13 @@ pub(super) async fn force_set_room_state_from_server(
.services
.rooms
.timeline
.latest_pdu_in_room(&room_id)?
.ok_or_else(|| Error::bad_database("Failed to find the latest PDU in database"))?;
.latest_pdu_in_room(&room_id)
.await
.map_err(|_| Error::bad_database("Failed to find the latest PDU in database"))?;
let room_version = self.services.rooms.state.get_room_version(&room_id)?;
let room_version = self.services.rooms.state.get_room_version(&room_id).await?;
let mut state: HashMap<u64, Arc<EventId>> = HashMap::new();
let pub_key_map = RwLock::new(BTreeMap::new());
let remote_state_response = self
.services
@ -551,30 +560,28 @@ pub(super) async fn force_set_room_state_from_server(
)
.await?;
let mut events = Vec::with_capacity(remote_state_response.pdus.len());
for pdu in remote_state_response.pdus.clone() {
events.push(match self.services.rooms.event_handler.parse_incoming_pdu(&pdu) {
match self
.services
.rooms
.event_handler
.parse_incoming_pdu(&pdu)
.await
{
Ok(t) => t,
Err(e) => {
warn!("Could not parse PDU, ignoring: {e}");
continue;
},
});
};
}
info!("Fetching required signing keys for all the state events we got");
self.services
.server_keys
.fetch_required_signing_keys(events.iter().map(|(_event_id, event, _room_id)| event), &pub_key_map)
.await?;
info!("Going through room_state response PDUs");
for result in remote_state_response
.pdus
.iter()
.map(|pdu| validate_and_add_event_id(self.services, pdu, &room_version, &pub_key_map))
{
for result in remote_state_response.pdus.iter().map(|pdu| {
self.services
.server_keys
.validate_and_add_event_id(pdu, &room_version)
}) {
let Ok((event_id, value)) = result.await else {
continue;
};
@ -587,23 +594,26 @@ pub(super) async fn force_set_room_state_from_server(
self.services
.rooms
.outlier
.add_pdu_outlier(&event_id, &value)?;
.add_pdu_outlier(&event_id, &value);
if let Some(state_key) = &pdu.state_key {
let shortstatekey = self
.services
.rooms
.short
.get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key)?;
.get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key)
.await;
state.insert(shortstatekey, pdu.event_id.clone());
}
}
info!("Going through auth_chain response");
for result in remote_state_response
.auth_chain
.iter()
.map(|pdu| validate_and_add_event_id(self.services, pdu, &room_version, &pub_key_map))
{
for result in remote_state_response.auth_chain.iter().map(|pdu| {
self.services
.server_keys
.validate_and_add_event_id(pdu, &room_version)
}) {
let Ok((event_id, value)) = result.await else {
continue;
};
@ -611,7 +621,7 @@ pub(super) async fn force_set_room_state_from_server(
self.services
.rooms
.outlier
.add_pdu_outlier(&event_id, &value)?;
.add_pdu_outlier(&event_id, &value);
}
let new_room_state = self
@ -626,7 +636,8 @@ pub(super) async fn force_set_room_state_from_server(
.services
.rooms
.state_compressor
.save_state(room_id.clone().as_ref(), new_room_state)?;
.save_state(room_id.clone().as_ref(), new_room_state)
.await?;
let state_lock = self.services.rooms.state.mutex.lock(&room_id).await;
self.services
@ -642,7 +653,8 @@ pub(super) async fn force_set_room_state_from_server(
self.services
.rooms
.state_cache
.update_joined_count(&room_id)?;
.update_joined_count(&room_id)
.await;
drop(state_lock);
@ -653,10 +665,33 @@ pub(super) async fn force_set_room_state_from_server(
#[admin_command]
pub(super) async fn get_signing_keys(
&self, server_name: Option<Box<ServerName>>, _cached: bool,
&self, server_name: Option<Box<ServerName>>, notary: Option<Box<ServerName>>, query: bool,
) -> Result<RoomMessageEventContent> {
let server_name = server_name.unwrap_or_else(|| self.services.server.config.server_name.clone().into());
let signing_keys = self.services.globals.signing_keys_for(&server_name)?;
if let Some(notary) = notary {
let signing_keys = self
.services
.server_keys
.notary_request(&notary, &server_name)
.await?;
return Ok(RoomMessageEventContent::notice_markdown(format!(
"```rs\n{signing_keys:#?}\n```"
)));
}
let signing_keys = if query {
self.services
.server_keys
.server_request(&server_name)
.await?
} else {
self.services
.server_keys
.signing_keys_for(&server_name)
.await?
};
Ok(RoomMessageEventContent::notice_markdown(format!(
"```rs\n{signing_keys:#?}\n```"
@ -664,34 +699,20 @@ pub(super) async fn get_signing_keys(
}
#[admin_command]
#[allow(dead_code)]
pub(super) async fn get_verify_keys(
&self, server_name: Option<Box<ServerName>>, cached: bool,
) -> Result<RoomMessageEventContent> {
pub(super) async fn get_verify_keys(&self, server_name: Option<Box<ServerName>>) -> Result<RoomMessageEventContent> {
let server_name = server_name.unwrap_or_else(|| self.services.server.config.server_name.clone().into());
let mut out = String::new();
if cached {
writeln!(out, "| Key ID | VerifyKey |")?;
writeln!(out, "| --- | --- |")?;
for (key_id, verify_key) in self.services.globals.verify_keys_for(&server_name)? {
writeln!(out, "| {key_id} | {verify_key:?} |")?;
}
return Ok(RoomMessageEventContent::notice_markdown(out));
}
let signature_ids: Vec<String> = Vec::new();
let keys = self
.services
.server_keys
.fetch_signing_keys_for_server(&server_name, signature_ids)
.await?;
.verify_keys_for(&server_name)
.await;
let mut out = String::new();
writeln!(out, "| Key ID | Public Key |")?;
writeln!(out, "| --- | --- |")?;
for (key_id, key) in keys {
writeln!(out, "| {key_id} | {key} |")?;
writeln!(out, "| {key_id} | {key:?} |")?;
}
Ok(RoomMessageEventContent::notice_markdown(out))
@ -814,7 +835,7 @@ pub(super) async fn database_stats(
&self, property: Option<String>, map: Option<String>,
) -> Result<RoomMessageEventContent> {
let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned());
let map_name = map.as_ref().map_or(utils::string::EMPTY, String::as_str);
let map_name = map.as_ref().map_or(EMPTY, String::as_str);
let mut out = String::new();
for (name, map) in self.services.db.iter_maps() {


@ -80,8 +80,16 @@ pub(super) enum DebugCommand {
GetSigningKeys {
server_name: Option<Box<ServerName>>,
#[arg(long)]
notary: Option<Box<ServerName>>,
#[arg(short, long)]
cached: bool,
query: bool,
},
/// - Get and display signing keys from local cache or remote server.
GetVerifyKeys {
server_name: Option<Box<ServerName>>,
},
/// - Sends a federation request to the remote server's
@ -119,6 +127,13 @@ pub(super) enum DebugCommand {
/// the command.
VerifyJson,
/// - Verify PDU
///
/// This re-verifies a PDU existing in the database found by ID.
VerifyPdu {
event_id: Box<EventId>,
},
/// - Prints the very first PDU in the specified room (typically
/// m.room.create)
FirstPduInRoom {


@ -1,19 +1,20 @@
use std::fmt::Write;
use conduit::Result;
use futures::StreamExt;
use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId, RoomId, ServerName, UserId};
use crate::{admin_command, escape_html, get_room_info};
#[admin_command]
pub(super) async fn disable_room(&self, room_id: Box<RoomId>) -> Result<RoomMessageEventContent> {
self.services.rooms.metadata.disable_room(&room_id, true)?;
self.services.rooms.metadata.disable_room(&room_id, true);
Ok(RoomMessageEventContent::text_plain("Room disabled."))
}
#[admin_command]
pub(super) async fn enable_room(&self, room_id: Box<RoomId>) -> Result<RoomMessageEventContent> {
self.services.rooms.metadata.disable_room(&room_id, false)?;
self.services.rooms.metadata.disable_room(&room_id, false);
Ok(RoomMessageEventContent::text_plain("Room enabled."))
}
@ -85,7 +86,7 @@ pub(super) async fn remote_user_in_rooms(&self, user_id: Box<UserId>) -> Result<
));
}
if !self.services.users.exists(&user_id)? {
if !self.services.users.exists(&user_id).await {
return Ok(RoomMessageEventContent::text_plain(
"Remote user does not exist in our database.",
));
@ -96,9 +97,9 @@ pub(super) async fn remote_user_in_rooms(&self, user_id: Box<UserId>) -> Result<
.rooms
.state_cache
.rooms_joined(&user_id)
.filter_map(Result::ok)
.map(|room_id| get_room_info(self.services, &room_id))
.collect();
.then(|room_id| get_room_info(self.services, room_id))
.collect()
.await;
if rooms.is_empty() {
return Ok(RoomMessageEventContent::text_plain("User is not in any rooms."));
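`remote_user_in_rooms` above now collects room info with `then`, which maps each stream item through an async function before `collect().await`. A self-contained sketch with a stand-in lookup (the real `get_room_info` is a conduwuit helper):

```rs
use futures::{stream, StreamExt};

// Hypothetical async lookup standing in for `get_room_info`.
async fn get_room_info(room_id: &str) -> (String, u64) {
	(room_id.to_owned(), 1) // (room id, joined member count)
}

#[tokio::main]
async fn main() {
	let rooms: Vec<(String, u64)> = stream::iter(["!a:example.org", "!b:example.org"])
		.then(|room_id| get_room_info(room_id))
		.collect()
		.await;

	assert_eq!(rooms.len(), 2);
}
```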


@ -1,6 +1,6 @@
use std::time::Duration;
use conduit::{debug, info, trace, utils::time::parse_timepoint_ago, warn, Result};
use conduit::{debug, debug_info, debug_warn, error, info, trace, utils::time::parse_timepoint_ago, Result};
use conduit_service::media::Dim;
use ruma::{
events::room::message::RoomMessageEventContent, EventId, Mxc, MxcUri, OwnedMxcUri, OwnedServerName, ServerName,
@ -19,7 +19,7 @@ pub(super) async fn delete(
}
if let Some(mxc) = mxc {
debug!("Got MXC URL: {mxc}");
trace!("Got MXC URL: {mxc}");
self.services
.media
.delete(&mxc.as_str().try_into()?)
@ -28,14 +28,15 @@ pub(super) async fn delete(
return Ok(RoomMessageEventContent::text_plain(
"Deleted the MXC from our database and on our filesystem.",
));
} else if let Some(event_id) = event_id {
debug!("Got event ID to delete media from: {event_id}");
}
let mut mxc_urls = vec![];
let mut mxc_deletion_count: usize = 0;
if let Some(event_id) = event_id {
trace!("Got event ID to delete media from: {event_id}");
let mut mxc_urls = Vec::with_capacity(4);
// parsing the PDU for any MXC URLs begins here
if let Some(event_json) = self.services.rooms.timeline.get_pdu_json(&event_id)? {
if let Ok(event_json) = self.services.rooms.timeline.get_pdu_json(&event_id).await {
if let Some(content_key) = event_json.get("content") {
debug!("Event ID has \"content\".");
let content_obj = content_key.as_object();
@ -124,18 +125,28 @@ pub(super) async fn delete(
}
if mxc_urls.is_empty() {
// we shouldn't get here (should have errored earlier) but just in case for
// whatever reason we do...
info!("Parsed event ID {event_id} but did not contain any MXC URLs.");
return Ok(RoomMessageEventContent::text_plain("Parsed event ID but found no MXC URLs."));
}
let mut mxc_deletion_count: usize = 0;
for mxc_url in mxc_urls {
self.services
match self
.services
.media
.delete(&mxc_url.as_str().try_into()?)
.await?;
mxc_deletion_count = mxc_deletion_count.saturating_add(1);
.await
{
Ok(()) => {
debug_info!("Successfully deleted {mxc_url} from filesystem and database");
mxc_deletion_count = mxc_deletion_count.saturating_add(1);
},
Err(e) => {
debug_warn!("Failed to delete {mxc_url}, ignoring error and skipping: {e}");
continue;
},
}
}
return Ok(RoomMessageEventContent::text_plain(format!(
@ -158,34 +169,62 @@ pub(super) async fn delete_list(&self) -> Result<RoomMessageEventContent> {
));
}
let mut failed_parsed_mxcs: usize = 0;
let mxc_list = self
.body
.to_vec()
.drain(1..self.body.len().checked_sub(1).unwrap())
.collect::<Vec<_>>();
.filter_map(|mxc_s| {
mxc_s
.try_into()
.inspect_err(|e| {
debug_warn!("Failed to parse user-provided MXC URI: {e}");
failed_parsed_mxcs = failed_parsed_mxcs.saturating_add(1);
})
.ok()
})
.collect::<Vec<Mxc<'_>>>();
let mut mxc_deletion_count: usize = 0;
for mxc in mxc_list {
debug!("Deleting MXC {mxc} in bulk");
self.services.media.delete(&mxc.try_into()?).await?;
mxc_deletion_count = mxc_deletion_count
.checked_add(1)
.expect("mxc_deletion_count should not get this high");
for mxc in &mxc_list {
trace!(%failed_parsed_mxcs, %mxc_deletion_count, "Deleting MXC {mxc} in bulk");
match self.services.media.delete(mxc).await {
Ok(()) => {
debug_info!("Successfully deleted {mxc} from filesystem and database");
mxc_deletion_count = mxc_deletion_count.saturating_add(1);
},
Err(e) => {
debug_warn!("Failed to delete {mxc}, ignoring error and skipping: {e}");
continue;
},
}
}
Ok(RoomMessageEventContent::text_plain(format!(
"Finished bulk MXC deletion, deleted {mxc_deletion_count} total MXCs from our database and the filesystem.",
"Finished bulk MXC deletion, deleted {mxc_deletion_count} total MXCs from our database and the filesystem. \
{failed_parsed_mxcs} MXCs failed to be parsed from the database.",
)))
}
#[admin_command]
pub(super) async fn delete_past_remote_media(&self, duration: String, force: bool) -> Result<RoomMessageEventContent> {
pub(super) async fn delete_past_remote_media(
&self, duration: String, before: bool, after: bool, yes_i_want_to_delete_local_media: bool,
) -> Result<RoomMessageEventContent> {
if before && after {
return Ok(RoomMessageEventContent::text_plain(
"Please only pick one argument, --before or --after.",
));
}
assert!(!(before && after), "--before and --after should not be specified together");
let duration = parse_timepoint_ago(&duration)?;
let deleted_count = self
.services
.media
.delete_all_remote_media_at_after_time(duration, force)
.delete_all_remote_media_at_after_time(duration, before, after, yes_i_want_to_delete_local_media)
.await?;
Ok(RoomMessageEventContent::text_plain(format!(
@ -194,14 +233,10 @@ pub(super) async fn delete_past_remote_media(&self, duration: String, force: boo
}
#[admin_command]
pub(super) async fn delete_all_from_user(&self, username: String, force: bool) -> Result<RoomMessageEventContent> {
pub(super) async fn delete_all_from_user(&self, username: String) -> Result<RoomMessageEventContent> {
let user_id = parse_local_user_id(self.services, &username)?;
let deleted_count = self
.services
.media
.delete_from_user(&user_id, force)
.await?;
let deleted_count = self.services.media.delete_from_user(&user_id).await?;
Ok(RoomMessageEventContent::text_plain(format!(
"Deleted {deleted_count} total files.",
@ -210,34 +245,36 @@ pub(super) async fn delete_all_from_user(&self, username: String, force: bool) -
#[admin_command]
pub(super) async fn delete_all_from_server(
&self, server_name: Box<ServerName>, force: bool,
&self, server_name: Box<ServerName>, yes_i_want_to_delete_local_media: bool,
) -> Result<RoomMessageEventContent> {
if server_name == self.services.globals.server_name() {
return Ok(RoomMessageEventContent::text_plain("This command only works for remote media."));
if server_name == self.services.globals.server_name() && !yes_i_want_to_delete_local_media {
return Ok(RoomMessageEventContent::text_plain(
"This command only works for remote media by default.",
));
}
let Ok(all_mxcs) = self.services.media.get_all_mxcs().await else {
let Ok(all_mxcs) = self
.services
.media
.get_all_mxcs()
.await
.inspect_err(|e| error!("Failed to get MXC URIs from our database: {e}"))
else {
return Ok(RoomMessageEventContent::text_plain("Failed to get MXC URIs from our database"));
};
let mut deleted_count: usize = 0;
for mxc in all_mxcs {
let mxc_server_name = match mxc.server_name() {
Ok(server_name) => server_name,
Err(e) => {
if force {
warn!("Failed to parse MXC {mxc} server name from database, ignoring error and skipping: {e}");
continue;
}
return Ok(RoomMessageEventContent::text_plain(format!(
"Failed to parse MXC {mxc} server name from database: {e}",
)));
},
let Ok(mxc_server_name) = mxc.server_name().inspect_err(|e| {
debug_warn!("Failed to parse MXC {mxc} server name from database, ignoring error and skipping: {e}");
}) else {
continue;
};
if mxc_server_name != server_name || self.services.globals.server_is_ours(mxc_server_name) {
if mxc_server_name != server_name
|| (self.services.globals.server_is_ours(mxc_server_name) && !yes_i_want_to_delete_local_media)
{
trace!("skipping MXC URI {mxc}");
continue;
}
@ -249,12 +286,8 @@ pub(super) async fn delete_all_from_server(
deleted_count = deleted_count.saturating_add(1);
},
Err(e) => {
if force {
warn!("Failed to delete {mxc}, ignoring error and skipping: {e}");
continue;
}
return Ok(RoomMessageEventContent::text_plain(format!("Failed to delete MXC {mxc}: {e}")));
debug_warn!("Failed to delete {mxc}, ignoring error and skipping: {e}");
continue;
},
}
}
@ -267,7 +300,7 @@ pub(super) async fn delete_all_from_server(
#[admin_command]
pub(super) async fn get_file_info(&self, mxc: OwnedMxcUri) -> Result<RoomMessageEventContent> {
let mxc: Mxc<'_> = mxc.as_str().try_into()?;
let metadata = self.services.media.get_metadata(&mxc);
let metadata = self.services.media.get_metadata(&mxc).await;
Ok(RoomMessageEventContent::notice_markdown(format!("```\n{metadata:#?}\n```")))
}
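The bulk-delete rewrite above tolerates bad entries: each parse failure is logged via `inspect_err`, counted, and dropped by `filter_map` instead of aborting the whole command. A sketch of that shape with a hypothetical scheme check in place of the real `Mxc` conversion:

```rs
fn main() {
	let mut failed_parsed_mxcs: usize = 0;
	let body = ["mxc://example.org/abcd", "not-an-mxc-uri"];

	let mxc_list: Vec<&str> = body
		.iter()
		.filter_map(|s| {
			// Hypothetical parse; the real code converts into `Mxc<'_>`.
			s.starts_with("mxc://")
				.then_some(*s)
				.ok_or("missing mxc:// scheme")
				.inspect_err(|e| {
					eprintln!("Failed to parse user-provided MXC URI: {e}");
					failed_parsed_mxcs = failed_parsed_mxcs.saturating_add(1);
				})
				.ok()
		})
		.collect();

	assert_eq!((mxc_list.len(), failed_parsed_mxcs), (1, 1));
}
```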


@ -10,7 +10,7 @@ use crate::admin_command_dispatch;
#[derive(Debug, Subcommand)]
pub(super) enum MediaCommand {
/// - Deletes a single media file from our database and on the filesystem
/// via a single MXC URL
/// via a single MXC URL or event ID (not redacted)
Delete {
/// The MXC URL to delete
#[arg(long)]
@ -23,37 +23,44 @@ pub(super) enum MediaCommand {
},
/// - Deletes a codeblock list of MXC URLs from our database and on the
/// filesystem
/// filesystem. This will always ignore errors.
DeleteList,
/// - Deletes all remote media in the last X amount of time using filesystem
/// metadata first created at date.
/// - Deletes all remote media in the last/after "X" time using filesystem
/// metadata first created at date, or fallback to last modified date.
/// This will always ignore errors by default.
///
/// Synapse
DeletePastRemoteMedia {
/// - The duration (at or after), e.g. "5m" to delete all media in the
/// past 5 minutes
/// - The duration (at or after/before), e.g. "5m" to delete all media
/// in the past or up to 5 minutes
duration: String,
/// Continues deleting remote media if an undeletable object is found
#[arg(short, long)]
force: bool,
#[arg(long, short)]
before: bool,
#[arg(long, short)]
after: bool,
/// Long argument to delete local media
#[arg(long)]
yes_i_want_to_delete_local_media: bool,
},
/// - Deletes all the local media from a local user on our server
/// - Deletes all the local media from a local user on our server. This will
/// always ignore errors by default.
DeleteAllFromUser {
username: String,
/// Continues deleting media if an undeletable object is found
#[arg(short, long)]
force: bool,
},
/// - Deletes all remote media from the specified remote server
/// - Deletes all remote media from the specified remote server. This will
/// always ignore errors by default.
DeleteAllFromServer {
server_name: Box<ServerName>,
/// Continues deleting media if an undeletable object is found
#[arg(short, long)]
force: bool,
/// Long argument to delete local media
#[arg(long)]
yes_i_want_to_delete_local_media: bool,
},
GetFileInfo {
@ -82,10 +89,10 @@ pub(super) enum MediaCommand {
#[arg(short, long, default_value("10000"))]
timeout: u32,
#[arg(short, long)]
#[arg(short, long, default_value("800"))]
width: u32,
#[arg(short, long)]
#[arg(short, long, default_value("800"))]
height: u32,
},
}
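The `default_value("800")` attributes above make clap fall back to 800 (parsed into the `u32` fields) when `--width`/`--height` are omitted. A sketch with a hypothetical `Thumbnail` parser; note that in a default clap setup, `short` on `height` would collide with the auto-generated `-h` help flag, so it is dropped here:

```rs
use clap::Parser;

#[derive(Parser)]
struct Thumbnail {
	#[arg(short, long, default_value("800"))]
	width: u32,

	// No `short` here: `-h` is taken by clap's help flag by default.
	#[arg(long, default_value("800"))]
	height: u32,
}

fn main() {
	let args = Thumbnail::parse_from(["thumbnail"]);
	assert_eq!((args.width, args.height), (800, 800));
}
```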


@ -17,7 +17,7 @@ use conduit::{
utils::string::{collect_stream, common_prefix},
warn, Error, Result,
};
use futures_util::future::FutureExt;
use futures::future::FutureExt;
use ruma::{
events::{
relation::InReplyTo,


@ -1,9 +1,6 @@
use clap::Subcommand;
use conduit::Result;
use ruma::{
events::{room::message::RoomMessageEventContent, RoomAccountDataEventType},
RoomId, UserId,
};
use ruma::{events::room::message::RoomMessageEventContent, RoomId, UserId};
use crate::Command;
@ -25,7 +22,7 @@ pub(crate) enum AccountDataCommand {
/// Full user ID
user_id: Box<UserId>,
/// Account data event type
kind: RoomAccountDataEventType,
kind: String,
/// Optional room ID of the account data
room_id: Option<Box<RoomId>>,
},
@ -44,7 +41,8 @@ pub(super) async fn process(subcommand: AccountDataCommand, context: &Command<'_
let timer = tokio::time::Instant::now();
let results = services
.account_data
.changes_since(room_id.as_deref(), &user_id, since)?;
.changes_since(room_id.as_deref(), &user_id, since)
.await?;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -59,7 +57,8 @@ pub(super) async fn process(subcommand: AccountDataCommand, context: &Command<'_
let timer = tokio::time::Instant::now();
let results = services
.account_data
.get(room_id.as_deref(), &user_id, kind)?;
.get_raw(room_id.as_deref(), &user_id, &kind)
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(


@ -29,7 +29,9 @@ pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_>
let results = services
.appservice
.db
.get_registration(appservice_id.as_ref());
.get_registration(appservice_id.as_ref())
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -38,7 +40,7 @@ pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_>
},
AppserviceCommand::All => {
let timer = tokio::time::Instant::now();
let results = services.appservice.all();
let results = services.appservice.all().await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(


@ -13,8 +13,6 @@ pub(crate) enum GlobalsCommand {
LastCheckForUpdatesId,
LoadKeypair,
/// - This returns an empty `Ok(BTreeMap<..>)` when there are no keys found
/// for the server.
SigningKeysFor {
@ -29,7 +27,7 @@ pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) -
match subcommand {
GlobalsCommand::DatabaseVersion => {
let timer = tokio::time::Instant::now();
let results = services.globals.db.database_version();
let results = services.globals.db.database_version().await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -47,16 +45,7 @@ pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) -
},
GlobalsCommand::LastCheckForUpdatesId => {
let timer = tokio::time::Instant::now();
let results = services.updates.last_check_for_updates_id();
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
)))
},
GlobalsCommand::LoadKeypair => {
let timer = tokio::time::Instant::now();
let results = services.globals.db.load_keypair();
let results = services.updates.last_check_for_updates_id().await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -67,7 +56,7 @@ pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) -
origin,
} => {
let timer = tokio::time::Instant::now();
let results = services.globals.db.verify_keys_for(&origin);
let results = services.server_keys.verify_keys_for(&origin).await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(


@ -1,5 +1,6 @@
use clap::Subcommand;
use conduit::Result;
use futures::StreamExt;
use ruma::{events::room::message::RoomMessageEventContent, UserId};
use crate::Command;
@ -30,7 +31,7 @@ pub(super) async fn process(subcommand: PresenceCommand, context: &Command<'_>)
user_id,
} => {
let timer = tokio::time::Instant::now();
let results = services.presence.db.get_presence(&user_id)?;
let results = services.presence.db.get_presence(&user_id).await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -42,7 +43,7 @@ pub(super) async fn process(subcommand: PresenceCommand, context: &Command<'_>)
} => {
let timer = tokio::time::Instant::now();
let results = services.presence.db.presence_since(since);
let presence_since: Vec<(_, _, _)> = results.collect();
let presence_since: Vec<(_, _, _)> = results.collect().await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(


@ -21,7 +21,7 @@ pub(super) async fn process(subcommand: PusherCommand, context: &Command<'_>) ->
user_id,
} => {
let timer = tokio::time::Instant::now();
let results = services.pusher.get_pushers(&user_id)?;
let results = services.pusher.get_pushers(&user_id).await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(


@ -1,5 +1,6 @@
use clap::Subcommand;
use conduit::Result;
use futures::StreamExt;
use ruma::{events::room::message::RoomMessageEventContent, RoomAliasId, RoomId};
use crate::Command;
@ -31,7 +32,7 @@ pub(super) async fn process(subcommand: RoomAliasCommand, context: &Command<'_>)
alias,
} => {
let timer = tokio::time::Instant::now();
let results = services.rooms.alias.resolve_local_alias(&alias);
let results = services.rooms.alias.resolve_local_alias(&alias).await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -42,8 +43,13 @@ pub(super) async fn process(subcommand: RoomAliasCommand, context: &Command<'_>)
room_id,
} => {
let timer = tokio::time::Instant::now();
let results = services.rooms.alias.local_aliases_for_room(&room_id);
let aliases: Vec<_> = results.collect();
let aliases: Vec<_> = services
.rooms
.alias
.local_aliases_for_room(&room_id)
.map(ToOwned::to_owned)
.collect()
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -52,8 +58,13 @@ pub(super) async fn process(subcommand: RoomAliasCommand, context: &Command<'_>)
},
RoomAliasCommand::AllLocalAliases => {
let timer = tokio::time::Instant::now();
let results = services.rooms.alias.all_local_aliases();
let aliases: Vec<_> = results.collect();
let aliases = services
.rooms
.alias
.all_local_aliases()
.map(|(room_id, alias)| (room_id.to_owned(), alias.to_owned()))
.collect::<Vec<_>>()
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(


@ -1,5 +1,6 @@
use clap::Subcommand;
use conduit::Result;
use futures::StreamExt;
use ruma::{events::room::message::RoomMessageEventContent, RoomId, ServerName, UserId};
use crate::Command;
@ -86,7 +87,11 @@ pub(super) async fn process(
room_id,
} => {
let timer = tokio::time::Instant::now();
let result = services.rooms.state_cache.server_in_room(&server, &room_id);
let result = services
.rooms
.state_cache
.server_in_room(&server, &room_id)
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -97,7 +102,13 @@ pub(super) async fn process(
room_id,
} => {
let timer = tokio::time::Instant::now();
let results: Result<Vec<_>> = services.rooms.state_cache.room_servers(&room_id).collect();
let results: Vec<_> = services
.rooms
.state_cache
.room_servers(&room_id)
.map(ToOwned::to_owned)
.collect()
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -108,7 +119,13 @@ pub(super) async fn process(
server,
} => {
let timer = tokio::time::Instant::now();
let results: Result<Vec<_>> = services.rooms.state_cache.server_rooms(&server).collect();
let results: Vec<_> = services
.rooms
.state_cache
.server_rooms(&server)
.map(ToOwned::to_owned)
.collect()
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -119,7 +136,13 @@ pub(super) async fn process(
room_id,
} => {
let timer = tokio::time::Instant::now();
let results: Result<Vec<_>> = services.rooms.state_cache.room_members(&room_id).collect();
let results: Vec<_> = services
.rooms
.state_cache
.room_members(&room_id)
.map(ToOwned::to_owned)
.collect()
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -134,7 +157,9 @@ pub(super) async fn process(
.rooms
.state_cache
.local_users_in_room(&room_id)
.collect();
.map(ToOwned::to_owned)
.collect()
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -149,7 +174,9 @@ pub(super) async fn process(
.rooms
.state_cache
.active_local_users_in_room(&room_id)
.collect();
.map(ToOwned::to_owned)
.collect()
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -160,7 +187,7 @@ pub(super) async fn process(
room_id,
} => {
let timer = tokio::time::Instant::now();
let results = services.rooms.state_cache.room_joined_count(&room_id);
let results = services.rooms.state_cache.room_joined_count(&room_id).await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -171,7 +198,11 @@ pub(super) async fn process(
room_id,
} => {
let timer = tokio::time::Instant::now();
let results = services.rooms.state_cache.room_invited_count(&room_id);
let results = services
.rooms
.state_cache
.room_invited_count(&room_id)
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -182,11 +213,13 @@ pub(super) async fn process(
room_id,
} => {
let timer = tokio::time::Instant::now();
let results: Result<Vec<_>> = services
let results: Vec<_> = services
.rooms
.state_cache
.room_useroncejoined(&room_id)
.collect();
.map(ToOwned::to_owned)
.collect()
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -197,11 +230,13 @@ pub(super) async fn process(
room_id,
} => {
let timer = tokio::time::Instant::now();
let results: Result<Vec<_>> = services
let results: Vec<_> = services
.rooms
.state_cache
.room_members_invited(&room_id)
.collect();
.map(ToOwned::to_owned)
.collect()
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -216,7 +251,8 @@ pub(super) async fn process(
let results = services
.rooms
.state_cache
.get_invite_count(&room_id, &user_id);
.get_invite_count(&room_id, &user_id)
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -231,7 +267,8 @@ pub(super) async fn process(
let results = services
.rooms
.state_cache
.get_left_count(&room_id, &user_id);
.get_left_count(&room_id, &user_id)
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -242,7 +279,13 @@ pub(super) async fn process(
user_id,
} => {
let timer = tokio::time::Instant::now();
let results: Result<Vec<_>> = services.rooms.state_cache.rooms_joined(&user_id).collect();
let results: Vec<_> = services
.rooms
.state_cache
.rooms_joined(&user_id)
.map(ToOwned::to_owned)
.collect()
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -253,7 +296,12 @@ pub(super) async fn process(
user_id,
} => {
let timer = tokio::time::Instant::now();
let results: Result<Vec<_>> = services.rooms.state_cache.rooms_invited(&user_id).collect();
let results: Vec<_> = services
.rooms
.state_cache
.rooms_invited(&user_id)
.collect()
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -264,7 +312,12 @@ pub(super) async fn process(
user_id,
} => {
let timer = tokio::time::Instant::now();
let results: Result<Vec<_>> = services.rooms.state_cache.rooms_left(&user_id).collect();
let results: Vec<_> = services
.rooms
.state_cache
.rooms_left(&user_id)
.collect()
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -276,7 +329,11 @@ pub(super) async fn process(
room_id,
} => {
let timer = tokio::time::Instant::now();
let results = services.rooms.state_cache.invite_state(&user_id, &room_id);
let results = services
.rooms
.state_cache
.invite_state(&user_id, &room_id)
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
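Nearly every state-cache query above repeats one shape: the query now yields a stream of borrowed items, so `map(ToOwned::to_owned)` runs before `collect().await` to keep owned results alive past the query. A minimal sketch:

```rs
use futures::{stream, StreamExt};

#[tokio::main]
async fn main() {
	// Stand-in for a query stream yielding borrowed `&ServerName`-like items.
	let servers: Vec<String> = stream::iter(["example.org", "matrix.org"])
		.map(ToOwned::to_owned)
		.collect()
		.await;

	assert_eq!(servers, ["example.org", "matrix.org"]);
}
```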


@ -1,5 +1,6 @@
use clap::Subcommand;
use conduit::Result;
use futures::StreamExt;
use ruma::{events::room::message::RoomMessageEventContent, ServerName, UserId};
use service::sending::Destination;
@ -68,7 +69,7 @@ pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) -
SendingCommand::ActiveRequests => {
let timer = tokio::time::Instant::now();
let results = services.sending.db.active_requests();
let active_requests: Result<Vec<(_, _, _)>> = results.collect();
let active_requests = results.collect::<Vec<_>>().await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -133,7 +134,7 @@ pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) -
},
};
let queued_requests = results.collect::<Result<Vec<(_, _)>>>();
let queued_requests = results.collect::<Vec<_>>().await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -199,7 +200,7 @@ pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) -
},
};
let active_requests = results.collect::<Result<Vec<(_, _)>>>();
let active_requests = results.collect::<Vec<_>>().await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
@ -210,7 +211,7 @@ pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) -
server_name,
} => {
let timer = tokio::time::Instant::now();
let results = services.sending.db.get_latest_educount(&server_name);
let results = services.sending.db.get_latest_educount(&server_name).await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(


@ -1,29 +1,344 @@
use clap::Subcommand;
use conduit::Result;
use ruma::events::room::message::RoomMessageEventContent;
use futures::stream::StreamExt;
use ruma::{events::room::message::RoomMessageEventContent, OwnedDeviceId, OwnedRoomId, OwnedUserId};
use crate::Command;
use crate::{admin_command, admin_command_dispatch};
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
/// All the getters and iterators from src/database/key_value/users.rs
pub(crate) enum UsersCommand {
Iter,
CountUsers,
IterUsers,
PasswordHash {
user_id: OwnedUserId,
},
ListDevices {
user_id: OwnedUserId,
},
ListDevicesMetadata {
user_id: OwnedUserId,
},
GetDeviceMetadata {
user_id: OwnedUserId,
device_id: OwnedDeviceId,
},
GetDevicesVersion {
user_id: OwnedUserId,
},
CountOneTimeKeys {
user_id: OwnedUserId,
device_id: OwnedDeviceId,
},
GetDeviceKeys {
user_id: OwnedUserId,
device_id: OwnedDeviceId,
},
GetUserSigningKey {
user_id: OwnedUserId,
},
GetMasterKey {
user_id: OwnedUserId,
},
GetToDeviceEvents {
user_id: OwnedUserId,
device_id: OwnedDeviceId,
},
GetLatestBackup {
user_id: OwnedUserId,
},
GetLatestBackupVersion {
user_id: OwnedUserId,
},
GetBackupAlgorithm {
user_id: OwnedUserId,
version: String,
},
GetAllBackups {
user_id: OwnedUserId,
version: String,
},
GetRoomBackups {
user_id: OwnedUserId,
version: String,
room_id: OwnedRoomId,
},
GetBackupSession {
user_id: OwnedUserId,
version: String,
room_id: OwnedRoomId,
session_id: String,
},
}
/// All the getters and iterators in key_value/users.rs
pub(super) async fn process(subcommand: UsersCommand, context: &Command<'_>) -> Result<RoomMessageEventContent> {
let services = context.services;
#[admin_command]
async fn get_backup_session(
&self, user_id: OwnedUserId, version: String, room_id: OwnedRoomId, session_id: String,
) -> Result<RoomMessageEventContent> {
let timer = tokio::time::Instant::now();
let result = self
.services
.key_backups
.get_session(&user_id, &version, &room_id, &session_id)
.await;
let query_time = timer.elapsed();
match subcommand {
UsersCommand::Iter => {
let timer = tokio::time::Instant::now();
let results = services.users.db.iter();
let users = results.collect::<Vec<_>>();
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{users:#?}\n```"
)))
},
}
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
}
#[admin_command]
async fn get_room_backups(
&self, user_id: OwnedUserId, version: String, room_id: OwnedRoomId,
) -> Result<RoomMessageEventContent> {
let timer = tokio::time::Instant::now();
let result = self
.services
.key_backups
.get_room(&user_id, &version, &room_id)
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
}
#[admin_command]
async fn get_all_backups(&self, user_id: OwnedUserId, version: String) -> Result<RoomMessageEventContent> {
let timer = tokio::time::Instant::now();
let result = self.services.key_backups.get_all(&user_id, &version).await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
}
#[admin_command]
async fn get_backup_algorithm(&self, user_id: OwnedUserId, version: String) -> Result<RoomMessageEventContent> {
let timer = tokio::time::Instant::now();
let result = self
.services
.key_backups
.get_backup(&user_id, &version)
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
}
#[admin_command]
async fn get_latest_backup_version(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
let timer = tokio::time::Instant::now();
let result = self
.services
.key_backups
.get_latest_backup_version(&user_id)
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
}
#[admin_command]
async fn get_latest_backup(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
let timer = tokio::time::Instant::now();
let result = self.services.key_backups.get_latest_backup(&user_id).await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
}
#[admin_command]
async fn iter_users(&self) -> Result<RoomMessageEventContent> {
let timer = tokio::time::Instant::now();
let result: Vec<OwnedUserId> = self.services.users.stream().map(Into::into).collect().await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
}
#[admin_command]
async fn count_users(&self) -> Result<RoomMessageEventContent> {
let timer = tokio::time::Instant::now();
let result = self.services.users.count().await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
}
#[admin_command]
async fn password_hash(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
let timer = tokio::time::Instant::now();
let result = self.services.users.password_hash(&user_id).await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
}
#[admin_command]
async fn list_devices(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
let timer = tokio::time::Instant::now();
let devices = self
.services
.users
.all_device_ids(&user_id)
.map(ToOwned::to_owned)
.collect::<Vec<_>>()
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{devices:#?}\n```"
)))
}
#[admin_command]
async fn list_devices_metadata(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
let timer = tokio::time::Instant::now();
let devices = self
.services
.users
.all_devices_metadata(&user_id)
.collect::<Vec<_>>()
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{devices:#?}\n```"
)))
}
#[admin_command]
async fn get_device_metadata(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result<RoomMessageEventContent> {
let timer = tokio::time::Instant::now();
let device = self
.services
.users
.get_device_metadata(&user_id, &device_id)
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```"
)))
}
#[admin_command]
async fn get_devices_version(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
let timer = tokio::time::Instant::now();
let device = self.services.users.get_devicelist_version(&user_id).await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```"
)))
}
#[admin_command]
async fn count_one_time_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result<RoomMessageEventContent> {
let timer = tokio::time::Instant::now();
let result = self
.services
.users
.count_one_time_keys(&user_id, &device_id)
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
}
#[admin_command]
async fn get_device_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result<RoomMessageEventContent> {
let timer = tokio::time::Instant::now();
let result = self
.services
.users
.get_device_keys(&user_id, &device_id)
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
}
#[admin_command]
async fn get_user_signing_key(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
let timer = tokio::time::Instant::now();
let result = self.services.users.get_user_signing_key(&user_id).await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
}
#[admin_command]
async fn get_master_key(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
let timer = tokio::time::Instant::now();
let result = self
.services
.users
.get_master_key(None, &user_id, &|_| true)
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
}
#[admin_command]
async fn get_to_device_events(
&self, user_id: OwnedUserId, device_id: OwnedDeviceId,
) -> Result<RoomMessageEventContent> {
let timer = tokio::time::Instant::now();
let result = self
.services
.users
.get_to_device_events(&user_id, &device_id)
.collect::<Vec<_>>()
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
}
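Each rewritten handler in this file shares the same timing scaffold around its awaited query. A sketch with a stand-in query in place of the service call:

```rs
use tokio::time::Instant;

// Hypothetical stand-in for e.g. `self.services.users.count().await`.
async fn count_users() -> usize {
	3
}

#[tokio::main]
async fn main() {
	let timer = Instant::now();
	let result = count_users().await;
	let query_time = timer.elapsed();

	println!("Query completed in {query_time:?}: {result:#?}");
}
```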


@ -2,7 +2,8 @@ use std::fmt::Write;
use clap::Subcommand;
use conduit::Result;
use ruma::{events::room::message::RoomMessageEventContent, RoomAliasId, RoomId};
use futures::StreamExt;
use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId};
use crate::{escape_html, Command};
@ -66,8 +67,8 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) ->
force,
room_id,
..
} => match (force, services.rooms.alias.resolve_local_alias(&room_alias)) {
(true, Ok(Some(id))) => match services
} => match (force, services.rooms.alias.resolve_local_alias(&room_alias).await) {
(true, Ok(id)) => match services
.rooms
.alias
.set_alias(&room_alias, &room_id, server_user)
@ -77,10 +78,10 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) ->
))),
Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Failed to remove alias: {err}"))),
},
(false, Ok(Some(id))) => Ok(RoomMessageEventContent::text_plain(format!(
(false, Ok(id)) => Ok(RoomMessageEventContent::text_plain(format!(
"Refusing to overwrite in use alias for {id}, use -f or --force to overwrite"
))),
(_, Ok(None)) => match services
(_, Err(_)) => match services
.rooms
.alias
.set_alias(&room_alias, &room_id, server_user)
@ -88,12 +89,11 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) ->
Ok(()) => Ok(RoomMessageEventContent::text_plain("Successfully set alias")),
Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Failed to remove alias: {err}"))),
},
(_, Err(err)) => Ok(RoomMessageEventContent::text_plain(format!("Unable to lookup alias: {err}"))),
},
RoomAliasCommand::Remove {
..
} => match services.rooms.alias.resolve_local_alias(&room_alias) {
Ok(Some(id)) => match services
} => match services.rooms.alias.resolve_local_alias(&room_alias).await {
Ok(id) => match services
.rooms
.alias
.remove_alias(&room_alias, server_user)
@ -102,15 +102,13 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) ->
Ok(()) => Ok(RoomMessageEventContent::text_plain(format!("Removed alias from {id}"))),
Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Failed to remove alias: {err}"))),
},
Ok(None) => Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")),
Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to lookup alias: {err}"))),
Err(_) => Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")),
},
RoomAliasCommand::Which {
..
} => match services.rooms.alias.resolve_local_alias(&room_alias) {
Ok(Some(id)) => Ok(RoomMessageEventContent::text_plain(format!("Alias resolves to {id}"))),
Ok(None) => Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")),
Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to lookup alias: {err}"))),
} => match services.rooms.alias.resolve_local_alias(&room_alias).await {
Ok(id) => Ok(RoomMessageEventContent::text_plain(format!("Alias resolves to {id}"))),
Err(_) => Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")),
},
RoomAliasCommand::List {
..
@ -121,67 +119,63 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) ->
room_id,
} => {
if let Some(room_id) = room_id {
let aliases = services
let aliases: Vec<OwnedRoomAliasId> = services
.rooms
.alias
.local_aliases_for_room(&room_id)
.collect::<Result<Vec<_>, _>>();
match aliases {
Ok(aliases) => {
let plain_list = aliases.iter().fold(String::new(), |mut output, alias| {
writeln!(output, "- {alias}").expect("should be able to write to string buffer");
output
});
.map(Into::into)
.collect()
.await;
let html_list = aliases.iter().fold(String::new(), |mut output, alias| {
writeln!(output, "<li>{}</li>", escape_html(alias.as_ref()))
.expect("should be able to write to string buffer");
output
});
let plain_list = aliases.iter().fold(String::new(), |mut output, alias| {
writeln!(output, "- {alias}").expect("should be able to write to string buffer");
output
});
let plain = format!("Aliases for {room_id}:\n{plain_list}");
let html = format!("Aliases for {room_id}:\n<ul>{html_list}</ul>");
Ok(RoomMessageEventContent::text_html(plain, html))
},
Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to list aliases: {err}"))),
}
let html_list = aliases.iter().fold(String::new(), |mut output, alias| {
writeln!(output, "<li>{}</li>", escape_html(alias.as_ref()))
.expect("should be able to write to string buffer");
output
});
let plain = format!("Aliases for {room_id}:\n{plain_list}");
let html = format!("Aliases for {room_id}:\n<ul>{html_list}</ul>");
Ok(RoomMessageEventContent::text_html(plain, html))
} else {
let aliases = services
.rooms
.alias
.all_local_aliases()
.collect::<Result<Vec<_>, _>>();
match aliases {
Ok(aliases) => {
let server_name = services.globals.server_name();
let plain_list = aliases
.iter()
.fold(String::new(), |mut output, (alias, id)| {
writeln!(output, "- `{alias}` -> #{id}:{server_name}")
.expect("should be able to write to string buffer");
output
});
.map(|(room_id, localpart)| (room_id.into(), localpart.into()))
.collect::<Vec<(OwnedRoomId, String)>>()
.await;
let html_list = aliases
.iter()
.fold(String::new(), |mut output, (alias, id)| {
writeln!(
output,
"<li><code>{}</code> -> #{}:{}</li>",
escape_html(alias.as_ref()),
escape_html(id.as_ref()),
server_name
)
.expect("should be able to write to string buffer");
output
});
let server_name = services.globals.server_name();
let plain_list = aliases
.iter()
.fold(String::new(), |mut output, (alias, id)| {
writeln!(output, "- `{alias}` -> #{id}:{server_name}")
.expect("should be able to write to string buffer");
output
});
let plain = format!("Aliases:\n{plain_list}");
let html = format!("Aliases:\n<ul>{html_list}</ul>");
Ok(RoomMessageEventContent::text_html(plain, html))
},
Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Unable to list room aliases: {e}"))),
}
let html_list = aliases
.iter()
.fold(String::new(), |mut output, (alias, id)| {
writeln!(
output,
"<li><code>{}</code> -> #{}:{}</li>",
escape_html(alias.as_ref()),
escape_html(id),
server_name
)
.expect("should be able to write to string buffer");
output
});
let plain = format!("Aliases:\n{plain_list}");
let html = format!("Aliases:\n<ul>{html_list}</ul>");
Ok(RoomMessageEventContent::text_html(plain, html))
}
},
}
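`resolve_local_alias` above moves from `Result<Option<_>>` to a plain `Result`, so the match arms collapse from `Ok(Some(_))`/`Ok(None)` to `Ok(_)`/`Err(_)`. A sketch with a hypothetical resolver:

```rs
// Hypothetical resolver: "not found" is now an `Err`, not `Ok(None)`.
async fn resolve_local_alias(alias: &str) -> Result<String, &'static str> {
	(alias == "#known:example.org")
		.then(|| "!room:example.org".to_owned())
		.ok_or("alias not found")
}

#[tokio::main]
async fn main() {
	match resolve_local_alias("#known:example.org").await {
		Ok(id) => println!("Alias resolves to {id}"),
		Err(_) => println!("Alias isn't in use."),
	}
}
```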


@ -1,5 +1,6 @@
use conduit::Result;
use ruma::events::room::message::RoomMessageEventContent;
use futures::StreamExt;
use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId};
use crate::{admin_command, get_room_info, PAGE_SIZE};
@ -14,37 +15,16 @@ pub(super) async fn list_rooms(
.rooms
.metadata
.iter_ids()
.filter_map(|room_id| {
room_id
.ok()
.filter(|room_id| {
if exclude_disabled
&& self
.services
.rooms
.metadata
.is_disabled(room_id)
.unwrap_or(false)
{
return false;
}
if exclude_banned
&& self
.services
.rooms
.metadata
.is_banned(room_id)
.unwrap_or(false)
{
return false;
}
true
})
.map(|room_id| get_room_info(self.services, &room_id))
.filter_map(|room_id| async move {
(!exclude_disabled || !self.services.rooms.metadata.is_disabled(room_id).await).then_some(room_id)
})
.collect::<Vec<_>>();
.filter_map(|room_id| async move {
(!exclude_banned || !self.services.rooms.metadata.is_banned(room_id).await).then_some(room_id)
})
.then(|room_id| get_room_info(self.services, room_id))
.collect::<Vec<_>>()
.await;
rooms.sort_by_key(|r| r.1);
rooms.reverse();
@ -74,3 +54,10 @@ pub(super) async fn list_rooms(
Ok(RoomMessageEventContent::notice_markdown(output_plain))
}
#[admin_command]
pub(super) async fn exists(&self, room_id: OwnedRoomId) -> Result<RoomMessageEventContent> {
let result = self.services.rooms.metadata.exists(&room_id).await;
Ok(RoomMessageEventContent::notice_markdown(format!("{result}")))
}
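The rewritten list_rooms filters asynchronously: each filter_map closure returns a future of an Option, so then_some keeps or drops the room once the awaited check resolves. A self-contained sketch of that shape; is_disabled and is_banned are stand-ins for the metadata service calls, not the real API:

```rust
use futures::{stream, StreamExt};

async fn is_disabled(_room_id: &str) -> bool { false } // stand-in for metadata.is_disabled
async fn is_banned(_room_id: &str) -> bool { false } // stand-in for metadata.is_banned

async fn visible_rooms(
    ids: Vec<&'static str>,
    exclude_disabled: bool,
    exclude_banned: bool,
) -> Vec<&'static str> {
    stream::iter(ids)
        // Each predicate awaits a lookup, then keeps the item with then_some.
        .filter_map(|room_id| async move {
            (!exclude_disabled || !is_disabled(room_id).await).then_some(room_id)
        })
        .filter_map(|room_id| async move {
            (!exclude_banned || !is_banned(room_id).await).then_some(room_id)
        })
        .collect()
        .await
}
```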

View file

@ -2,7 +2,8 @@ use std::fmt::Write;
use clap::Subcommand;
use conduit::Result;
use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId, RoomId};
use futures::StreamExt;
use ruma::{events::room::message::RoomMessageEventContent, RoomId};
use crate::{escape_html, get_room_info, Command, PAGE_SIZE};
@ -31,36 +32,37 @@ pub(super) async fn process(command: RoomDirectoryCommand, context: &Command<'_>
match command {
RoomDirectoryCommand::Publish {
room_id,
} => match services.rooms.directory.set_public(&room_id) {
Ok(()) => Ok(RoomMessageEventContent::text_plain("Room published")),
Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to update room: {err}"))),
} => {
services.rooms.directory.set_public(&room_id);
Ok(RoomMessageEventContent::notice_plain("Room published"))
},
RoomDirectoryCommand::Unpublish {
room_id,
} => match services.rooms.directory.set_not_public(&room_id) {
Ok(()) => Ok(RoomMessageEventContent::text_plain("Room unpublished")),
Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to update room: {err}"))),
} => {
services.rooms.directory.set_not_public(&room_id);
Ok(RoomMessageEventContent::notice_plain("Room unpublished"))
},
RoomDirectoryCommand::List {
page,
} => {
// TODO: i know there's a way to do this with clap, but i can't seem to find it
let page = page.unwrap_or(1);
let mut rooms = services
let mut rooms: Vec<_> = services
.rooms
.directory
.public_rooms()
.filter_map(Result::ok)
.map(|id: OwnedRoomId| get_room_info(services, &id))
.collect::<Vec<_>>();
.then(|room_id| get_room_info(services, room_id))
.collect()
.await;
rooms.sort_by_key(|r| r.1);
rooms.reverse();
let rooms = rooms
let rooms: Vec<_> = rooms
.into_iter()
.skip(page.saturating_sub(1).saturating_mul(PAGE_SIZE))
.take(PAGE_SIZE)
.collect::<Vec<_>>();
.collect();
if rooms.is_empty() {
return Ok(RoomMessageEventContent::text_plain("No more rooms."));
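Pagination in the List branch is plain skip/take with saturating arithmetic, so page numbers of 0 or near usize::MAX degrade gracefully instead of overflowing. A sketch; the PAGE_SIZE value here is an assumption, the real constant is defined elsewhere in the admin crate:

```rust
const PAGE_SIZE: usize = 100; // assumed value for illustration

fn page_of<T>(rooms: Vec<T>, page: usize) -> Vec<T> {
    rooms
        .into_iter()
        // Page 1 starts at offset 0; saturating ops avoid underflow/overflow.
        .skip(page.saturating_sub(1).saturating_mul(PAGE_SIZE))
        .take(PAGE_SIZE)
        .collect()
}
```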

View file

@ -1,5 +1,6 @@
use clap::Subcommand;
use conduit::Result;
use conduit::{utils::ReadyExt, Result};
use futures::StreamExt;
use ruma::{events::room::message::RoomMessageEventContent, RoomId};
use crate::{admin_command, admin_command_dispatch};
@ -32,46 +33,40 @@ async fn list_joined_members(&self, room_id: Box<RoomId>, local_only: bool) -> R
.rooms
.state_accessor
.get_name(&room_id)
.ok()
.flatten()
.unwrap_or_else(|| room_id.to_string());
.await
.unwrap_or_else(|_| room_id.to_string());
let members = self
let member_info: Vec<_> = self
.services
.rooms
.state_cache
.room_members(&room_id)
.filter_map(|member| {
if local_only {
member
.ok()
.filter(|user| self.services.globals.user_is_local(user))
} else {
member.ok()
}
});
let member_info = members
.into_iter()
.map(|user_id| {
(
user_id.clone(),
.ready_filter(|user_id| {
local_only
.then(|| self.services.globals.user_is_local(user_id))
.unwrap_or(true)
})
.map(ToOwned::to_owned)
.filter_map(|user_id| async move {
Some((
self.services
.users
.displayname(&user_id)
.unwrap_or(None)
.unwrap_or_else(|| user_id.to_string()),
)
.await
.unwrap_or_else(|_| user_id.to_string()),
user_id,
))
})
.collect::<Vec<_>>();
.collect()
.await;
let output_plain = format!(
"{} Members in Room \"{}\":\n```\n{}\n```",
member_info.len(),
room_name,
member_info
.iter()
.map(|(mxid, displayname)| format!("{mxid} | {displayname}"))
.into_iter()
.map(|(displayname, mxid)| format!("{mxid} | {displayname}"))
.collect::<Vec<_>>()
.join("\n")
);
@ -81,11 +76,12 @@ async fn list_joined_members(&self, room_id: Box<RoomId>, local_only: bool) -> R
#[admin_command]
async fn view_room_topic(&self, room_id: Box<RoomId>) -> Result<RoomMessageEventContent> {
let Some(room_topic) = self
let Ok(room_topic) = self
.services
.rooms
.state_accessor
.get_room_topic(&room_id)?
.get_room_topic(&room_id)
.await
else {
return Ok(RoomMessageEventContent::text_plain("Room does not have a room topic set."));
};
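ready_filter, from conduit's ReadyExt, applies a synchronous predicate to an async stream without allocating a future per item; with the plain futures crate the closest equivalent wraps the predicate in future::ready. A rough sketch, assuming string user IDs:

```rust
use futures::{future, stream, StreamExt};

// Keep only users whose ID ends with our server name; the predicate itself is
// synchronous, so it is wrapped in an already-completed future.
async fn local_members(members: Vec<String>, server_name: &str) -> Vec<String> {
    stream::iter(members)
        .filter(|user_id| future::ready(user_id.ends_with(server_name)))
        .collect()
        .await
}
```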

View file

@ -6,6 +6,7 @@ mod moderation;
use clap::Subcommand;
use conduit::Result;
use ruma::OwnedRoomId;
use self::{
alias::RoomAliasCommand, directory::RoomDirectoryCommand, info::RoomInfoCommand, moderation::RoomModerationCommand,
@ -49,4 +50,9 @@ pub(super) enum RoomCommand {
#[command(subcommand)]
/// - Manage the room directory
Directory(RoomDirectoryCommand),
/// - Check if we know about a room
Exists {
room_id: OwnedRoomId,
},
}
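The new Exists variant relies on clap's derive to parse the positional argument. A stripped-down sketch using a String field; the real command parses into ruma's OwnedRoomId via its FromStr impl:

```rust
use clap::Subcommand;

#[derive(Debug, Subcommand)]
enum RoomCommand {
    /// - Check if we know about a room
    Exists {
        room_id: String, // OwnedRoomId in the actual command
    },
}
```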

View file

@ -1,6 +1,11 @@
use api::client::leave_room;
use clap::Subcommand;
use conduit::{debug, error, info, warn, Result};
use conduit::{
debug, error, info,
utils::{IterStream, ReadyExt},
warn, Result,
};
use futures::StreamExt;
use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId, RoomAliasId, RoomId, RoomOrAliasId};
use crate::{admin_command, admin_command_dispatch, get_room_info};
@ -76,7 +81,7 @@ async fn ban_room(
let admin_room_alias = &self.services.globals.admin_alias;
if let Some(admin_room_id) = self.services.admin.get_admin_room()? {
if let Ok(admin_room_id) = self.services.admin.get_admin_room().await {
if room.to_string().eq(&admin_room_id) || room.to_string().eq(admin_room_alias) {
return Ok(RoomMessageEventContent::text_plain("Not allowed to ban the admin room."));
}
@ -95,7 +100,7 @@ async fn ban_room(
debug!("Room specified is a room ID, banning room ID");
self.services.rooms.metadata.ban_room(&room_id, true)?;
self.services.rooms.metadata.ban_room(&room_id, true);
room_id
} else if room.is_room_alias_id() {
@ -114,7 +119,13 @@ async fn ban_room(
get_alias_helper to fetch room ID remotely"
);
let room_id = if let Some(room_id) = self.services.rooms.alias.resolve_local_alias(&room_alias)? {
let room_id = if let Ok(room_id) = self
.services
.rooms
.alias
.resolve_local_alias(&room_alias)
.await
{
room_id
} else {
debug!("We don't have this room alias to a room ID locally, attempting to fetch room ID over federation");
@ -138,7 +149,7 @@ async fn ban_room(
}
};
self.services.rooms.metadata.ban_room(&room_id, true)?;
self.services.rooms.metadata.ban_room(&room_id, true);
room_id
} else {
@ -150,56 +161,40 @@ async fn ban_room(
debug!("Making all users leave the room {}", &room);
if force {
for local_user in self
let mut users = self
.services
.rooms
.state_cache
.room_members(&room_id)
.filter_map(|user| {
user.ok().filter(|local_user| {
self.services.globals.user_is_local(local_user)
// additional wrapped check here is to avoid adding remote users
// who are in the admin room to the list of local users (would
// fail auth check)
&& (self.services.globals.user_is_local(local_user)
// since this is a force operation, assume user is an admin
// if somehow this fails
&& self.services
.users
.is_admin(local_user)
.unwrap_or(true))
})
}) {
.ready_filter(|user| self.services.globals.user_is_local(user))
.boxed();
while let Some(local_user) = users.next().await {
debug!(
"Attempting leave for user {} in room {} (forced, ignoring all errors, evicting admins too)",
&local_user, &room_id
"Attempting leave for user {local_user} in room {room_id} (forced, ignoring all errors, evicting \
admins too)",
);
if let Err(e) = leave_room(self.services, &local_user, &room_id, None).await {
if let Err(e) = leave_room(self.services, local_user, &room_id, None).await {
warn!(%e, "Failed to leave room");
}
}
} else {
for local_user in self
let mut users = self
.services
.rooms
.state_cache
.room_members(&room_id)
.filter_map(|user| {
user.ok().filter(|local_user| {
local_user.server_name() == self.services.globals.server_name()
// additional wrapped check here is to avoid adding remote users
// who are in the admin room to the list of local users (would fail auth check)
&& (local_user.server_name()
== self.services.globals.server_name()
&& !self.services
.users
.is_admin(local_user)
.unwrap_or(false))
})
}) {
.ready_filter(|user| self.services.globals.user_is_local(user))
.boxed();
while let Some(local_user) = users.next().await {
if self.services.users.is_admin(local_user).await {
continue;
}
debug!("Attempting leave for user {} in room {}", &local_user, &room_id);
if let Err(e) = leave_room(self.services, &local_user, &room_id, None).await {
if let Err(e) = leave_room(self.services, local_user, &room_id, None).await {
error!(
"Error attempting to make local user {} leave room {} during room banning: {}",
&local_user, &room_id, e
@ -214,12 +209,14 @@ async fn ban_room(
}
// remove any local aliases, ignore errors
for ref local_alias in self
for local_alias in &self
.services
.rooms
.alias
.local_aliases_for_room(&room_id)
.filter_map(Result::ok)
.map(ToOwned::to_owned)
.collect::<Vec<_>>()
.await
{
_ = self
.services
@ -230,10 +227,10 @@ async fn ban_room(
}
// unpublish from room directory, ignore errors
_ = self.services.rooms.directory.set_not_public(&room_id);
self.services.rooms.directory.set_not_public(&room_id);
if disable_federation {
self.services.rooms.metadata.disable_room(&room_id, true)?;
self.services.rooms.metadata.disable_room(&room_id, true);
return Ok(RoomMessageEventContent::text_plain(
"Room banned, removed all our local users, and disabled incoming federation with room.",
));
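Every eviction loop in this hunk follows one shape: filter the member stream down to local users, box it so the stream type can be held across awaits, then drive it with while let, logging rather than propagating per-user failures. A sketch with a stubbed leave operation:

```rust
use futures::{future, stream, StreamExt};

async fn leave_room(user_id: &str) -> Result<(), String> {
    println!("{user_id} left"); // stand-in for the real leave_room helper
    Ok(())
}

async fn evict_local_users(members: Vec<String>, server_name: &str) {
    let mut users = stream::iter(members)
        .filter(move |user_id| future::ready(user_id.ends_with(server_name)))
        .boxed();
    while let Some(user_id) = users.next().await {
        if let Err(e) = leave_room(&user_id).await {
            eprintln!("failed to leave: {e}"); // forced bans tolerate failures
        }
    }
}
```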
@ -268,7 +265,7 @@ async fn ban_list_of_rooms(&self, force: bool, disable_federation: bool) -> Resu
for &room in &rooms_s {
match <&RoomOrAliasId>::try_from(room) {
Ok(room_alias_or_id) => {
if let Some(admin_room_id) = self.services.admin.get_admin_room()? {
if let Ok(admin_room_id) = self.services.admin.get_admin_room().await {
if room.to_owned().eq(&admin_room_id) || room.to_owned().eq(admin_room_alias) {
info!("User specified admin room in bulk ban list, ignoring");
continue;
@ -300,43 +297,48 @@ async fn ban_list_of_rooms(&self, force: bool, disable_federation: bool) -> Resu
if room_alias_or_id.is_room_alias_id() {
match RoomAliasId::parse(room_alias_or_id) {
Ok(room_alias) => {
let room_id =
if let Some(room_id) = self.services.rooms.alias.resolve_local_alias(&room_alias)? {
room_id
} else {
debug!(
"We don't have this room alias to a room ID locally, attempting to fetch room \
ID over federation"
);
let room_id = if let Ok(room_id) = self
.services
.rooms
.alias
.resolve_local_alias(&room_alias)
.await
{
room_id
} else {
debug!(
"We don't have this room alias to a room ID locally, attempting to fetch room ID \
over federation"
);
match self
.services
.rooms
.alias
.resolve_alias(&room_alias, None)
.await
{
Ok((room_id, servers)) => {
debug!(
?room_id,
?servers,
"Got federation response fetching room ID for {room}",
);
room_id
},
Err(e) => {
// don't fail if force blocking
if force {
warn!("Failed to resolve room alias {room} to a room ID: {e}");
continue;
}
return Ok(RoomMessageEventContent::text_plain(format!(
"Failed to resolve room alias {room} to a room ID: {e}"
)));
},
}
};
room_ids.push(room_id);
},
@ -374,74 +376,52 @@ async fn ban_list_of_rooms(&self, force: bool, disable_federation: bool) -> Resu
}
for room_id in room_ids {
if self
.services
.rooms
.metadata
.ban_room(&room_id, true)
.is_ok()
{
debug!("Banned {room_id} successfully");
room_ban_count = room_ban_count.saturating_add(1);
}
self.services.rooms.metadata.ban_room(&room_id, true);
debug!("Banned {room_id} successfully");
room_ban_count = room_ban_count.saturating_add(1);
debug!("Making all users leave the room {}", &room_id);
if force {
for local_user in self
let mut users = self
.services
.rooms
.state_cache
.room_members(&room_id)
.filter_map(|user| {
user.ok().filter(|local_user| {
local_user.server_name() == self.services.globals.server_name()
// additional wrapped check here is to avoid adding remote
// users who are in the admin room to the list of local
// users (would fail auth check)
&& (local_user.server_name()
== self.services.globals.server_name()
// since this is a force operation, assume user is an
// admin if somehow this fails
&& self.services
.users
.is_admin(local_user)
.unwrap_or(true))
})
}) {
.ready_filter(|user| self.services.globals.user_is_local(user))
.boxed();
while let Some(local_user) = users.next().await {
debug!(
"Attempting leave for user {} in room {} (forced, ignoring all errors, evicting admins too)",
&local_user, room_id
"Attempting leave for user {local_user} in room {room_id} (forced, ignoring all errors, evicting \
admins too)",
);
if let Err(e) = leave_room(self.services, &local_user, &room_id, None).await {
if let Err(e) = leave_room(self.services, local_user, &room_id, None).await {
warn!(%e, "Failed to leave room");
}
}
} else {
for local_user in self
let mut users = self
.services
.rooms
.state_cache
.room_members(&room_id)
.filter_map(|user| {
user.ok().filter(|local_user| {
local_user.server_name() == self.services.globals.server_name()
// additional wrapped check here is to avoid adding remote
// users who are in the admin room to the list of local
// users (would fail auth check)
&& (local_user.server_name()
== self.services.globals.server_name()
&& !self.services
.users
.is_admin(local_user)
.unwrap_or(false))
})
}) {
debug!("Attempting leave for user {} in room {}", &local_user, &room_id);
if let Err(e) = leave_room(self.services, &local_user, &room_id, None).await {
.ready_filter(|user| self.services.globals.user_is_local(user))
.boxed();
while let Some(local_user) = users.next().await {
if self.services.users.is_admin(local_user).await {
continue;
}
debug!("Attempting leave for user {local_user} in room {room_id}");
if let Err(e) = leave_room(self.services, local_user, &room_id, None).await {
error!(
"Error attempting to make local user {} leave room {} during bulk room banning: {}",
&local_user, &room_id, e
"Error attempting to make local user {local_user} leave room {room_id} during bulk room \
banning: {e}",
);
return Ok(RoomMessageEventContent::text_plain(format!(
"Error attempting to make local user {} leave room {} during room banning (room is still \
banned but not removing any more users and not banning any more rooms): {}\nIf you would \
@ -453,26 +433,26 @@ async fn ban_list_of_rooms(&self, force: bool, disable_federation: bool) -> Resu
}
// remove any local aliases, ignore errors
for ref local_alias in self
.services
self.services
.rooms
.alias
.local_aliases_for_room(&room_id)
.filter_map(Result::ok)
{
_ = self
.services
.rooms
.alias
.remove_alias(local_alias, &self.services.globals.server_user)
.await;
}
.map(ToOwned::to_owned)
.for_each(|local_alias| async move {
self.services
.rooms
.alias
.remove_alias(&local_alias, &self.services.globals.server_user)
.await
.ok();
})
.await;
// unpublish from room directory, ignore errors
_ = self.services.rooms.directory.set_not_public(&room_id);
self.services.rooms.directory.set_not_public(&room_id);
if disable_federation {
self.services.rooms.metadata.disable_room(&room_id, true)?;
self.services.rooms.metadata.disable_room(&room_id, true);
}
}
@ -503,7 +483,7 @@ async fn unban_room(&self, enable_federation: bool, room: Box<RoomOrAliasId>) ->
debug!("Room specified is a room ID, unbanning room ID");
self.services.rooms.metadata.ban_room(&room_id, false)?;
self.services.rooms.metadata.ban_room(&room_id, false);
room_id
} else if room.is_room_alias_id() {
@ -522,7 +502,13 @@ async fn unban_room(&self, enable_federation: bool, room: Box<RoomOrAliasId>) ->
get_alias_helper to fetch room ID remotely"
);
let room_id = if let Some(room_id) = self.services.rooms.alias.resolve_local_alias(&room_alias)? {
let room_id = if let Ok(room_id) = self
.services
.rooms
.alias
.resolve_local_alias(&room_alias)
.await
{
room_id
} else {
debug!("We don't have this room alias to a room ID locally, attempting to fetch room ID over federation");
@ -546,7 +532,7 @@ async fn unban_room(&self, enable_federation: bool, room: Box<RoomOrAliasId>) ->
}
};
self.services.rooms.metadata.ban_room(&room_id, false)?;
self.services.rooms.metadata.ban_room(&room_id, false);
room_id
} else {
@ -557,7 +543,7 @@ async fn unban_room(&self, enable_federation: bool, room: Box<RoomOrAliasId>) ->
};
if enable_federation {
self.services.rooms.metadata.disable_room(&room_id, false)?;
self.services.rooms.metadata.disable_room(&room_id, false);
return Ok(RoomMessageEventContent::text_plain("Room unbanned."));
}
@ -569,45 +555,42 @@ async fn unban_room(&self, enable_federation: bool, room: Box<RoomOrAliasId>) ->
#[admin_command]
async fn list_banned_rooms(&self, no_details: bool) -> Result<RoomMessageEventContent> {
let rooms = self
let room_ids: Vec<OwnedRoomId> = self
.services
.rooms
.metadata
.list_banned_rooms()
.collect::<Result<Vec<_>, _>>();
.map(Into::into)
.collect()
.await;
match rooms {
Ok(room_ids) => {
if room_ids.is_empty() {
return Ok(RoomMessageEventContent::text_plain("No rooms are banned."));
}
let mut rooms = room_ids
.into_iter()
.map(|room_id| get_room_info(self.services, &room_id))
.collect::<Vec<_>>();
rooms.sort_by_key(|r| r.1);
rooms.reverse();
let output_plain = format!(
"Rooms Banned ({}):\n```\n{}\n```",
rooms.len(),
rooms
.iter()
.map(|(id, members, name)| if no_details {
format!("{id}")
} else {
format!("{id}\tMembers: {members}\tName: {name}")
})
.collect::<Vec<_>>()
.join("\n")
);
Ok(RoomMessageEventContent::notice_markdown(output_plain))
},
Err(e) => {
error!("Failed to list banned rooms: {e}");
Ok(RoomMessageEventContent::text_plain(format!("Unable to list banned rooms: {e}")))
},
if room_ids.is_empty() {
return Ok(RoomMessageEventContent::text_plain("No rooms are banned."));
}
let mut rooms = room_ids
.iter()
.stream()
.then(|room_id| get_room_info(self.services, room_id))
.collect::<Vec<_>>()
.await;
rooms.sort_by_key(|r| r.1);
rooms.reverse();
let output_plain = format!(
"Rooms Banned ({}):\n```\n{}\n```",
rooms.len(),
rooms
.iter()
.map(|(id, members, name)| if no_details {
format!("{id}")
} else {
format!("{id}\tMembers: {members}\tName: {name}")
})
.collect::<Vec<_>>()
.join("\n")
);
Ok(RoomMessageEventContent::notice_markdown(output_plain))
}
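list_banned_rooms lifts a plain iterator into a stream with conduit's IterStream::stream(), then runs an async lookup per item with then. With plain futures, stream::iter gives the same shape; room_info here is a stand-in for get_room_info:

```rust
use futures::{stream, StreamExt};

async fn room_info(room_id: &str) -> (String, u64) {
    (room_id.to_owned(), 0) // stand-in for get_room_info
}

async fn banned_room_infos(room_ids: &[String]) -> Vec<(String, u64)> {
    stream::iter(room_ids)
        .then(|room_id| room_info(room_id))
        .collect()
        .await
}
```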

View file

@ -1,7 +1,9 @@
use std::{collections::BTreeMap, fmt::Write as _};
use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room};
use conduit::{error, info, utils, warn, PduBuilder, Result};
use conduit::{error, info, is_equal_to, utils, warn, PduBuilder, Result};
use conduit_api::client::{leave_all_rooms, update_avatar_url, update_displayname};
use futures::StreamExt;
use ruma::{
events::{
room::{
@ -10,11 +12,10 @@ use ruma::{
redaction::RoomRedactionEventContent,
},
tag::{TagEvent, TagEventContent, TagInfo},
RoomAccountDataEventType, StateEventType, TimelineEventType,
RoomAccountDataEventType, StateEventType,
},
EventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId,
};
use serde_json::value::to_raw_value;
use crate::{
admin_command, get_room_info,
@ -25,16 +26,19 @@ const AUTO_GEN_PASSWORD_LENGTH: usize = 25;
#[admin_command]
pub(super) async fn list_users(&self) -> Result<RoomMessageEventContent> {
match self.services.users.list_local_users() {
Ok(users) => {
let mut plain_msg = format!("Found {} local user account(s):\n```\n", users.len());
plain_msg += users.join("\n").as_str();
plain_msg += "\n```";
Ok(RoomMessageEventContent::notice_markdown(plain_msg))
},
Err(e) => Ok(RoomMessageEventContent::text_plain(e.to_string())),
}
let users = self
.services
.users
.list_local_users()
.map(ToString::to_string)
.collect::<Vec<_>>()
.await;
let mut plain_msg = format!("Found {} local user account(s):\n```\n", users.len());
plain_msg += users.join("\n").as_str();
plain_msg += "\n```";
Ok(RoomMessageEventContent::notice_markdown(plain_msg))
}
#[admin_command]
@ -42,7 +46,7 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
// Validate user id
let user_id = parse_local_user_id(self.services, &username)?;
if self.services.users.exists(&user_id)? {
if self.services.users.exists(&user_id).await {
return Ok(RoomMessageEventContent::text_plain(format!("Userid {user_id} already exists")));
}
@ -77,23 +81,25 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
self.services
.users
.set_displayname(&user_id, Some(displayname))
.await?;
.set_displayname(&user_id, Some(displayname));
// Initial account data
self.services.account_data.update(
None,
&user_id,
ruma::events::GlobalAccountDataEventType::PushRules
.to_string()
.into(),
&serde_json::to_value(ruma::events::push_rules::PushRulesEvent {
content: ruma::events::push_rules::PushRulesEventContent {
global: ruma::push::Ruleset::server_default(&user_id),
},
})
.expect("to json value always works"),
)?;
self.services
.account_data
.update(
None,
&user_id,
ruma::events::GlobalAccountDataEventType::PushRules
.to_string()
.into(),
&serde_json::to_value(ruma::events::push_rules::PushRulesEvent {
content: ruma::events::push_rules::PushRulesEventContent {
global: ruma::push::Ruleset::server_default(&user_id),
},
})
.expect("to json value always works"),
)
.await?;
if !self.services.globals.config.auto_join_rooms.is_empty() {
for room in &self.services.globals.config.auto_join_rooms {
@ -101,7 +107,8 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
.services
.rooms
.state_cache
.server_in_room(self.services.globals.server_name(), room)?
.server_in_room(self.services.globals.server_name(), room)
.await
{
warn!("Skipping room {room} to automatically join as we have never joined before.");
continue;
@ -135,13 +142,14 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
// if this account creation is from the CLI / --execute, invite the first user
// to admin room
if let Some(admin_room) = self.services.admin.get_admin_room()? {
if let Ok(admin_room) = self.services.admin.get_admin_room().await {
if self
.services
.rooms
.state_cache
.room_joined_count(&admin_room)?
== Some(1)
.room_joined_count(&admin_room)
.await
.is_ok_and(is_equal_to!(1))
{
self.services.admin.make_user_admin(&user_id).await?;
@ -167,7 +175,7 @@ pub(super) async fn deactivate(&self, no_leave_rooms: bool, user_id: String) ->
));
}
self.services.users.deactivate_account(&user_id)?;
self.services.users.deactivate_account(&user_id).await?;
if !no_leave_rooms {
self.services
@ -175,17 +183,22 @@ pub(super) async fn deactivate(&self, no_leave_rooms: bool, user_id: String) ->
.send_message(RoomMessageEventContent::text_plain(format!(
"Making {user_id} leave all rooms after deactivation..."
)))
.await;
.await
.ok();
let all_joined_rooms: Vec<OwnedRoomId> = self
.services
.rooms
.state_cache
.rooms_joined(&user_id)
.filter_map(Result::ok)
.collect();
.map(Into::into)
.collect()
.await;
full_user_deactivate(self.services, &user_id, all_joined_rooms).await?;
full_user_deactivate(self.services, &user_id, &all_joined_rooms).await?;
update_displayname(self.services, &user_id, None, &all_joined_rooms).await?;
update_avatar_url(self.services, &user_id, None, None, &all_joined_rooms).await?;
leave_all_rooms(self.services, &user_id).await;
}
Ok(RoomMessageEventContent::text_plain(format!(
@ -238,15 +251,16 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) ->
let mut admins = Vec::new();
for username in usernames {
match parse_active_local_user_id(self.services, username) {
match parse_active_local_user_id(self.services, username).await {
Ok(user_id) => {
if self.services.users.is_admin(&user_id)? && !force {
if self.services.users.is_admin(&user_id).await && !force {
self.services
.admin
.send_message(RoomMessageEventContent::text_plain(format!(
"{username} is an admin and --force is not set, skipping over"
)))
.await;
.await
.ok();
admins.push(username);
continue;
}
@ -258,7 +272,8 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) ->
.send_message(RoomMessageEventContent::text_plain(format!(
"{username} is the server service account, skipping over"
)))
.await;
.await
.ok();
continue;
}
@ -270,7 +285,8 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) ->
.send_message(RoomMessageEventContent::text_plain(format!(
"{username} is not a valid username, skipping over: {e}"
)))
.await;
.await
.ok();
continue;
},
}
@ -279,7 +295,7 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) ->
let mut deactivation_count: usize = 0;
for user_id in user_ids {
match self.services.users.deactivate_account(&user_id) {
match self.services.users.deactivate_account(&user_id).await {
Ok(()) => {
deactivation_count = deactivation_count.saturating_add(1);
if !no_leave_rooms {
@ -289,16 +305,26 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) ->
.rooms
.state_cache
.rooms_joined(&user_id)
.filter_map(Result::ok)
.collect();
full_user_deactivate(self.services, &user_id, all_joined_rooms).await?;
.map(Into::into)
.collect()
.await;
full_user_deactivate(self.services, &user_id, &all_joined_rooms).await?;
update_displayname(self.services, &user_id, None, &all_joined_rooms)
.await
.ok();
update_avatar_url(self.services, &user_id, None, None, &all_joined_rooms)
.await
.ok();
leave_all_rooms(self.services, &user_id).await;
}
},
Err(e) => {
self.services
.admin
.send_message(RoomMessageEventContent::text_plain(format!("Failed deactivating user: {e}")))
.await;
.await
.ok();
},
}
}
@ -326,9 +352,9 @@ pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result<RoomMess
.rooms
.state_cache
.rooms_joined(&user_id)
.filter_map(Result::ok)
.map(|room_id| get_room_info(self.services, &room_id))
.collect();
.then(|room_id| get_room_info(self.services, room_id))
.collect()
.await;
if rooms.is_empty() {
return Ok(RoomMessageEventContent::text_plain("User is not in any rooms."));
@ -355,13 +381,18 @@ pub(super) async fn force_join_room(
&self, user_id: String, room_id: OwnedRoomOrAliasId,
) -> Result<RoomMessageEventContent> {
let user_id = parse_local_user_id(self.services, &user_id)?;
let room_id = self.services.rooms.alias.resolve(&room_id).await?;
let (room_id, servers) = self
.services
.rooms
.alias
.resolve_with_servers(&room_id, None)
.await?;
assert!(
self.services.globals.user_is_local(&user_id),
"Parsed user_id must be a local user"
);
join_room_by_id_helper(self.services, &user_id, &room_id, None, &[], None, &None).await?;
join_room_by_id_helper(self.services, &user_id, &room_id, None, &servers, None, &None).await?;
Ok(RoomMessageEventContent::notice_markdown(format!(
"{user_id} has been joined to {room_id}.",
@ -404,10 +435,9 @@ pub(super) async fn force_demote(
.services
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomPowerLevels, "")?
.as_ref()
.and_then(|event| serde_json::from_str(event.content.get()).ok()?)
.and_then(|content: RoomPowerLevelsEventContent| content.into());
.room_state_get_content::<RoomPowerLevelsEventContent>(&room_id, &StateEventType::RoomPowerLevels, "")
.await
.ok();
let user_can_demote_self = room_power_levels
.as_ref()
@ -417,9 +447,9 @@ pub(super) async fn force_demote(
.services
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomCreate, "")?
.as_ref()
.is_some_and(|event| event.sender == user_id);
.room_state_get(&room_id, &StateEventType::RoomCreate, "")
.await
.is_ok_and(|event| event.sender == user_id);
if !user_can_demote_self {
return Ok(RoomMessageEventContent::notice_markdown(
@ -435,14 +465,7 @@ pub(super) async fn force_demote(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomPowerLevels,
content: to_raw_value(&power_levels_content).expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
PduBuilder::state(String::new(), &power_levels_content),
&user_id,
&room_id,
&state_lock,
@ -473,33 +496,33 @@ pub(super) async fn make_user_admin(&self, user_id: String) -> Result<RoomMessag
pub(super) async fn put_room_tag(
&self, user_id: String, room_id: Box<RoomId>, tag: String,
) -> Result<RoomMessageEventContent> {
let user_id = parse_active_local_user_id(self.services, &user_id)?;
let user_id = parse_active_local_user_id(self.services, &user_id).await?;
let event = self
.services
.account_data
.get(Some(&room_id), &user_id, RoomAccountDataEventType::Tag)?;
let mut tags_event = event.map_or_else(
|| TagEvent {
content: TagEventContent {
tags: BTreeMap::new(),
},
},
|e| serde_json::from_str(e.get()).expect("Bad account data in database for user {user_id}"),
);
let mut tags_event = self
.services
.account_data
.get_room(&room_id, &user_id, RoomAccountDataEventType::Tag)
.await
.unwrap_or(TagEvent {
content: TagEventContent {
tags: BTreeMap::new(),
},
});
tags_event
.content
.tags
.insert(tag.clone().into(), TagInfo::new());
self.services.account_data.update(
Some(&room_id),
&user_id,
RoomAccountDataEventType::Tag,
&serde_json::to_value(tags_event).expect("to json value always works"),
)?;
self.services
.account_data
.update(
Some(&room_id),
&user_id,
RoomAccountDataEventType::Tag,
&serde_json::to_value(tags_event).expect("to json value always works"),
)
.await?;
Ok(RoomMessageEventContent::text_plain(format!(
"Successfully updated room account data for {user_id} and room {room_id} with tag {tag}"
@ -510,30 +533,30 @@ pub(super) async fn put_room_tag(
pub(super) async fn delete_room_tag(
&self, user_id: String, room_id: Box<RoomId>, tag: String,
) -> Result<RoomMessageEventContent> {
let user_id = parse_active_local_user_id(self.services, &user_id)?;
let user_id = parse_active_local_user_id(self.services, &user_id).await?;
let event = self
.services
.account_data
.get(Some(&room_id), &user_id, RoomAccountDataEventType::Tag)?;
let mut tags_event = event.map_or_else(
|| TagEvent {
content: TagEventContent {
tags: BTreeMap::new(),
},
},
|e| serde_json::from_str(e.get()).expect("Bad account data in database for user {user_id}"),
);
let mut tags_event = self
.services
.account_data
.get_room(&room_id, &user_id, RoomAccountDataEventType::Tag)
.await
.unwrap_or(TagEvent {
content: TagEventContent {
tags: BTreeMap::new(),
},
});
tags_event.content.tags.remove(&tag.clone().into());
self.services.account_data.update(
Some(&room_id),
&user_id,
RoomAccountDataEventType::Tag,
&serde_json::to_value(tags_event).expect("to json value always works"),
)?;
self.services
.account_data
.update(
Some(&room_id),
&user_id,
RoomAccountDataEventType::Tag,
&serde_json::to_value(tags_event).expect("to json value always works"),
)
.await?;
Ok(RoomMessageEventContent::text_plain(format!(
"Successfully updated room account data for {user_id} and room {room_id}, deleting room tag {tag}"
@ -542,21 +565,18 @@ pub(super) async fn delete_room_tag(
#[admin_command]
pub(super) async fn get_room_tags(&self, user_id: String, room_id: Box<RoomId>) -> Result<RoomMessageEventContent> {
let user_id = parse_active_local_user_id(self.services, &user_id)?;
let user_id = parse_active_local_user_id(self.services, &user_id).await?;
let event = self
.services
.account_data
.get(Some(&room_id), &user_id, RoomAccountDataEventType::Tag)?;
let tags_event = event.map_or_else(
|| TagEvent {
content: TagEventContent {
tags: BTreeMap::new(),
},
},
|e| serde_json::from_str(e.get()).expect("Bad account data in database for user {user_id}"),
);
let tags_event = self
.services
.account_data
.get_room(&room_id, &user_id, RoomAccountDataEventType::Tag)
.await
.unwrap_or(TagEvent {
content: TagEventContent {
tags: BTreeMap::new(),
},
});
Ok(RoomMessageEventContent::notice_markdown(format!(
"```\n{:#?}\n```",
@ -566,11 +586,12 @@ pub(super) async fn get_room_tags(&self, user_id: String, room_id: Box<RoomId>)
#[admin_command]
pub(super) async fn redact_event(&self, event_id: Box<EventId>) -> Result<RoomMessageEventContent> {
let Some(event) = self
let Ok(event) = self
.services
.rooms
.timeline
.get_non_outlier_pdu(&event_id)?
.get_non_outlier_pdu(&event_id)
.await
else {
return Ok(RoomMessageEventContent::text_plain("Event does not exist in our database."));
};
@ -599,16 +620,11 @@ pub(super) async fn redact_event(&self, event_id: Box<EventId>) -> Result<RoomMe
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomRedaction,
content: to_raw_value(&RoomRedactionEventContent {
redacts: Some(event.event_id.clone()),
reason: Some(reason),
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: None,
redacts: Some(event.event_id),
timestamp: None,
},
PduBuilder {
redacts: Some(event.event_id.clone()),
..PduBuilder::timeline(&RoomRedactionEventContent {
redacts: Some(event.event_id.clone().into()),
reason: Some(reason),
})
},
&sender_user,
&room_id,
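Both PduBuilder call sites in this file move from a full struct literal to a constructor plus struct-update syntax. A generic sketch of the idiom; the fields and constructor below are illustrative assumptions, not conduit's actual PduBuilder definition:

```rust
#[derive(Default)]
struct PduBuilder {
    event_type: String,
    content: String,
    state_key: Option<String>,
    redacts: Option<String>,
}

impl PduBuilder {
    // Constructor covering the common fields; callers override the rest with
    // struct-update syntax, as in `PduBuilder { redacts: ..., ..base }`.
    fn state(state_key: String, event_type: &str, content: &str) -> Self {
        Self {
            event_type: event_type.to_owned(),
            content: content.to_owned(),
            state_key: Some(state_key),
            ..Self::default()
        }
    }
}

fn redaction_like(content_json: &str, redacts: String) -> PduBuilder {
    PduBuilder {
        redacts: Some(redacts),
        ..PduBuilder::state(String::new(), "m.room.redaction", content_json)
    }
}
```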

View file

@ -8,23 +8,21 @@ pub(crate) fn escape_html(s: &str) -> String {
.replace('>', "&gt;")
}
pub(crate) fn get_room_info(services: &Services, id: &RoomId) -> (OwnedRoomId, u64, String) {
pub(crate) async fn get_room_info(services: &Services, room_id: &RoomId) -> (OwnedRoomId, u64, String) {
(
id.into(),
room_id.into(),
services
.rooms
.state_cache
.room_joined_count(id)
.ok()
.flatten()
.room_joined_count(room_id)
.await
.unwrap_or(0),
services
.rooms
.state_accessor
.get_name(id)
.ok()
.flatten()
.unwrap_or_else(|| id.to_string()),
.get_name(room_id)
.await
.unwrap_or_else(|_| room_id.to_string()),
)
}
@ -46,14 +44,14 @@ pub(crate) fn parse_local_user_id(services: &Services, user_id: &str) -> Result<
}
/// Parses user ID that is an active (not guest or deactivated) local user
pub(crate) fn parse_active_local_user_id(services: &Services, user_id: &str) -> Result<OwnedUserId> {
pub(crate) async fn parse_active_local_user_id(services: &Services, user_id: &str) -> Result<OwnedUserId> {
let user_id = parse_local_user_id(services, user_id)?;
if !services.users.exists(&user_id)? {
if !services.users.exists(&user_id).await {
return Err!("User {user_id:?} does not exist on this server.");
}
if services.users.is_deactivated(&user_id)? {
if services.users.is_deactivated(&user_id).await? {
return Err!("User {user_id:?} is deactivated.");
}
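get_room_info is now async and swallows lookup failures with unwrap_or / unwrap_or_else rather than returning a Result. The same fallback shape, with stubbed lookups standing in for the state_cache and state_accessor calls:

```rust
async fn joined_count(_room_id: &str) -> Result<u64, ()> {
    Err(()) // stand-in for state_cache.room_joined_count
}

async fn room_name(_room_id: &str) -> Result<String, ()> {
    Err(()) // stand-in for state_accessor.get_name
}

async fn room_info(room_id: &str) -> (String, u64, String) {
    (
        room_id.to_owned(),
        joined_count(room_id).await.unwrap_or(0),
        room_name(room_id).await.unwrap_or_else(|()| room_id.to_owned()),
    )
}
```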

View file

@ -45,7 +45,7 @@ conduit-core.workspace = true
conduit-database.workspace = true
conduit-service.workspace = true
const-str.workspace = true
futures-util.workspace = true
futures.workspace = true
hmac.workspace = true
http.workspace = true
http-body-util.workspace = true

View file

@ -2,7 +2,8 @@ use std::fmt::Write;
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduit::{debug_info, error, info, utils, warn, Error, PduBuilder, Result};
use conduit::{debug_info, error, info, is_equal_to, utils, utils::ReadyExt, warn, Error, PduBuilder, Result};
use futures::{FutureExt, StreamExt};
use register::RegistrationKind;
use ruma::{
api::client::{
@ -20,11 +21,10 @@ use ruma::{
message::RoomMessageEventContent,
power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
},
GlobalAccountDataEventType, StateEventType, TimelineEventType,
GlobalAccountDataEventType, StateEventType,
},
push, OwnedRoomId, UserId,
};
use serde_json::value::to_raw_value;
use service::Services;
use super::{join_room_by_id_helper, DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
@ -55,7 +55,7 @@ pub(crate) async fn get_register_available_route(
.ok_or(Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
// Check if username is creative enough
if services.users.exists(&user_id)? {
if services.users.exists(&user_id).await {
return Err(Error::BadRequest(ErrorKind::UserInUse, "Desired user ID is already taken."));
}
@ -110,7 +110,7 @@ pub(crate) async fn register_route(
if is_guest
&& (!services.globals.allow_guest_registration()
|| (services.globals.allow_registration() && services.globals.config.registration_token.is_some()))
|| (services.globals.allow_registration() && services.globals.registration_token.is_some()))
{
info!(
"Guest registration disabled / registration enabled with token configured, rejecting guest registration \
@ -125,7 +125,7 @@ pub(crate) async fn register_route(
// forbid guests from registering if there is not a real admin user yet. give
// generic user error.
if is_guest && services.users.count()? < 2 {
if is_guest && services.users.count().await < 2 {
warn!(
"Guest account attempted to register before a real admin user has been registered, rejecting \
registration. Guest's initial device name: {:?}",
@ -142,7 +142,7 @@ pub(crate) async fn register_route(
.filter(|user_id| !user_id.is_historical() && services.globals.user_is_local(user_id))
.ok_or(Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
if services.users.exists(&proposed_user_id)? {
if services.users.exists(&proposed_user_id).await {
return Err(Error::BadRequest(ErrorKind::UserInUse, "Desired user ID is already taken."));
}
@ -162,7 +162,7 @@ pub(crate) async fn register_route(
services.globals.server_name(),
)
.unwrap();
if !services.users.exists(&proposed_user_id)? {
if !services.users.exists(&proposed_user_id).await {
break proposed_user_id;
}
},
@ -182,7 +182,7 @@ pub(crate) async fn register_route(
// UIAA
let mut uiaainfo;
let skip_auth = if services.globals.config.registration_token.is_some() {
let skip_auth = if services.globals.registration_token.is_some() {
// Registration token required
uiaainfo = UiaaInfo {
flows: vec![AuthFlow {
@ -210,12 +210,15 @@ pub(crate) async fn register_route(
if !skip_auth {
if let Some(auth) = &body.auth {
let (worked, uiaainfo) = services.uiaa.try_auth(
&UserId::parse_with_server_name("", services.globals.server_name()).expect("we know this is valid"),
"".into(),
auth,
&uiaainfo,
)?;
let (worked, uiaainfo) = services
.uiaa
.try_auth(
&UserId::parse_with_server_name("", services.globals.server_name()).expect("we know this is valid"),
"".into(),
auth,
&uiaainfo,
)
.await?;
if !worked {
return Err(Error::Uiaa(uiaainfo));
}
@ -227,7 +230,7 @@ pub(crate) async fn register_route(
"".into(),
&uiaainfo,
&json,
)?;
);
return Err(Error::Uiaa(uiaainfo));
} else {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
@ -255,21 +258,23 @@ pub(crate) async fn register_route(
services
.users
.set_displayname(&user_id, Some(displayname.clone()))
.await?;
.set_displayname(&user_id, Some(displayname.clone()));
// Initial account data
services.account_data.update(
None,
&user_id,
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(ruma::events::push_rules::PushRulesEvent {
content: ruma::events::push_rules::PushRulesEventContent {
global: push::Ruleset::server_default(&user_id),
},
})
.expect("to json always works"),
)?;
services
.account_data
.update(
None,
&user_id,
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(ruma::events::push_rules::PushRulesEvent {
content: ruma::events::push_rules::PushRulesEventContent {
global: push::Ruleset::server_default(&user_id),
},
})
.expect("to json always works"),
)
.await?;
// Inhibit login does not work for guests
if !is_guest && body.inhibit_login {
@ -294,13 +299,16 @@ pub(crate) async fn register_route(
let token = utils::random_string(TOKEN_LENGTH);
// Create device for this account
services.users.create_device(
&user_id,
&device_id,
&token,
body.initial_device_display_name.clone(),
Some(client.to_string()),
)?;
services
.users
.create_device(
&user_id,
&device_id,
&token,
body.initial_device_display_name.clone(),
Some(client.to_string()),
)
.await?;
debug_info!(%user_id, %device_id, "User account was created");
@ -316,7 +324,8 @@ pub(crate) async fn register_route(
"New user \"{user_id}\" registered on this server from IP {client} and device display name \
\"{device_display_name}\""
)))
.await;
.await
.ok();
} else {
info!("New user \"{user_id}\" registered on this server.");
services
@ -324,7 +333,8 @@ pub(crate) async fn register_route(
.send_message(RoomMessageEventContent::notice_plain(format!(
"New user \"{user_id}\" registered on this server from IP {client}"
)))
.await;
.await
.ok();
}
}
@ -339,24 +349,31 @@ pub(crate) async fn register_route(
"Guest user \"{user_id}\" with device display name \"{device_display_name}\" registered on this \
server from IP {client}"
)))
.await;
.await
.ok();
} else {
services
.admin
.send_message(RoomMessageEventContent::notice_plain(format!(
"Guest user \"{user_id}\" with no device display name registered on this server from IP {client}",
)))
.await;
.await
.ok();
}
}
// If this is the first real user, grant them admin privileges except for guest
// users Note: the server user, @conduit:servername, is generated first
if !is_guest {
if let Some(admin_room) = services.admin.get_admin_room()? {
if services.rooms.state_cache.room_joined_count(&admin_room)? == Some(1) {
if let Ok(admin_room) = services.admin.get_admin_room().await {
if services
.rooms
.state_cache
.room_joined_count(&admin_room)
.await
.is_ok_and(is_equal_to!(1))
{
services.admin.make_user_admin(&user_id).await?;
warn!("Granting {user_id} admin privileges as the first user");
}
}
@ -370,7 +387,8 @@ pub(crate) async fn register_route(
if !services
.rooms
.state_cache
.server_in_room(services.globals.server_name(), room)?
.server_in_room(services.globals.server_name(), room)
.await
{
warn!("Skipping room {room} to automatically join as we have never joined before.");
continue;
@ -386,6 +404,7 @@ pub(crate) async fn register_route(
None,
&body.appservice_info,
)
.boxed()
.await
{
// don't return this error so we don't fail registrations
@ -449,16 +468,20 @@ pub(crate) async fn change_password_route(
if let Some(auth) = &body.auth {
let (worked, uiaainfo) = services
.uiaa
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
.try_auth(sender_user, sender_device, auth, &uiaainfo)
.await?;
if !worked {
return Err(Error::Uiaa(uiaainfo));
}
// Success!
} else if let Some(json) = body.json_body {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
services
.uiaa
.create(sender_user, sender_device, &uiaainfo, &json)?;
.create(sender_user, sender_device, &uiaainfo, &json);
return Err(Error::Uiaa(uiaainfo));
} else {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
@ -470,14 +493,12 @@ pub(crate) async fn change_password_route(
if body.logout_devices {
// Logout all devices except the current one
for id in services
services
.users
.all_device_ids(sender_user)
.filter_map(Result::ok)
.filter(|id| id != sender_device)
{
services.users.remove_device(sender_user, &id)?;
}
.ready_filter(|id| id != sender_device)
.for_each(|id| services.users.remove_device(sender_user, id))
.await;
}
info!("User {sender_user} changed their password.");
@ -486,7 +507,8 @@ pub(crate) async fn change_password_route(
.send_message(RoomMessageEventContent::notice_plain(format!(
"User {sender_user} changed their password."
)))
.await;
.await
.ok();
Ok(change_password::v3::Response {})
}
@ -505,7 +527,7 @@ pub(crate) async fn whoami_route(
Ok(whoami::v3::Response {
user_id: sender_user.clone(),
device_id,
is_guest: services.users.is_deactivated(sender_user)? && body.appservice_info.is_none(),
is_guest: services.users.is_deactivated(sender_user).await? && body.appservice_info.is_none(),
})
}
@ -546,7 +568,9 @@ pub(crate) async fn deactivate_route(
if let Some(auth) = &body.auth {
let (worked, uiaainfo) = services
.uiaa
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
.try_auth(sender_user, sender_device, auth, &uiaainfo)
.await?;
if !worked {
return Err(Error::Uiaa(uiaainfo));
}
@ -555,7 +579,8 @@ pub(crate) async fn deactivate_route(
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
services
.uiaa
.create(sender_user, sender_device, &uiaainfo, &json)?;
.create(sender_user, sender_device, &uiaainfo, &json);
return Err(Error::Uiaa(uiaainfo));
} else {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
@ -566,10 +591,14 @@ pub(crate) async fn deactivate_route(
.rooms
.state_cache
.rooms_joined(sender_user)
.filter_map(Result::ok)
.collect();
.map(Into::into)
.collect()
.await;
full_user_deactivate(&services, sender_user, all_joined_rooms).await?;
super::update_displayname(&services, sender_user, None, &all_joined_rooms).await?;
super::update_avatar_url(&services, sender_user, None, None, &all_joined_rooms).await?;
full_user_deactivate(&services, sender_user, &all_joined_rooms).await?;
info!("User {sender_user} deactivated their account.");
services
@ -577,7 +606,8 @@ pub(crate) async fn deactivate_route(
.send_message(RoomMessageEventContent::notice_plain(format!(
"User {sender_user} deactivated their account."
)))
.await;
.await
.ok();
Ok(deactivate::v3::Response {
id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport,
@ -636,7 +666,7 @@ pub(crate) async fn request_3pid_management_token_via_msisdn_route(
pub(crate) async fn check_registration_token_validity(
State(services): State<crate::State>, body: Ruma<check_registration_token_validity::v1::Request>,
) -> Result<check_registration_token_validity::v1::Response> {
let Some(reg_token) = services.globals.config.registration_token.clone() else {
let Some(reg_token) = services.globals.registration_token.clone() else {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"Server does not allow token registration.",
@ -656,34 +686,27 @@ pub(crate) async fn check_registration_token_validity(
/// - Removing all profile data
/// - Leaving all rooms (and forgets all of them)
pub async fn full_user_deactivate(
services: &Services, user_id: &UserId, all_joined_rooms: Vec<OwnedRoomId>,
services: &Services, user_id: &UserId, all_joined_rooms: &[OwnedRoomId],
) -> Result<()> {
services.users.deactivate_account(user_id)?;
services.users.deactivate_account(user_id).await?;
super::update_displayname(services, user_id, None, all_joined_rooms).await?;
super::update_avatar_url(services, user_id, None, None, all_joined_rooms).await?;
super::update_displayname(services, user_id, None, all_joined_rooms.clone()).await?;
super::update_avatar_url(services, user_id, None, None, all_joined_rooms.clone()).await?;
let all_profile_keys = services
services
.users
.all_profile_keys(user_id)
.filter_map(Result::ok);
for (profile_key, _profile_value) in all_profile_keys {
if let Err(e) = services.users.set_profile_key(user_id, &profile_key, None) {
warn!("Failed removing {user_id} profile key {profile_key}: {e}");
}
}
.ready_for_each(|(profile_key, _)| services.users.set_profile_key(user_id, &profile_key, None))
.await;
for room_id in all_joined_rooms {
let state_lock = services.rooms.state.mutex.lock(&room_id).await;
let state_lock = services.rooms.state.mutex.lock(room_id).await;
let room_power_levels = services
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomPowerLevels, "")?
.as_ref()
.and_then(|event| serde_json::from_str(event.content.get()).ok()?)
.and_then(|content: RoomPowerLevelsEventContent| content.into());
.room_state_get_content::<RoomPowerLevelsEventContent>(room_id, &StateEventType::RoomPowerLevels, "")
.await
.ok();
let user_can_demote_self = room_power_levels
.as_ref()
@ -692,9 +715,9 @@ pub async fn full_user_deactivate(
}) || services
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomCreate, "")?
.as_ref()
.is_some_and(|event| event.sender == user_id);
.room_state_get(room_id, &StateEventType::RoomCreate, "")
.await
.is_ok_and(|event| event.sender == user_id);
if user_can_demote_self {
let mut power_levels_content = room_power_levels.unwrap_or_default();
@ -705,16 +728,9 @@ pub async fn full_user_deactivate(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomPowerLevels,
content: to_raw_value(&power_levels_content).expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
PduBuilder::state(String::new(), &power_levels_content),
user_id,
&room_id,
room_id,
&state_lock,
)
.await
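The first-user-admin check now reads room_joined_count(..).await.is_ok_and(is_equal_to!(1)): the admin room holds exactly one member (the server user) until the first human registers. is_equal_to! is conduit's comparator macro; a plain closure expresses the same check:

```rust
async fn admin_room_joined_count() -> Result<u64, ()> {
    Ok(1) // stand-in lookup; only the server user has joined so far
}

async fn should_grant_admin(is_guest: bool) -> bool {
    // Guests never get admin; the first real user does.
    !is_guest && admin_room_joined_count().await.is_ok_and(|count| count == 1)
}
```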

View file

@ -1,11 +1,9 @@
use axum::extract::State;
use conduit::{debug, Error, Result};
use conduit::{debug, Err, Result};
use futures::StreamExt;
use rand::seq::SliceRandom;
use ruma::{
api::client::{
alias::{create_alias, delete_alias, get_alias},
error::ErrorKind,
},
api::client::alias::{create_alias, delete_alias, get_alias},
OwnedServerName, RoomAliasId, RoomId,
};
use service::Services;
@ -33,16 +31,17 @@ pub(crate) async fn create_alias_route(
.forbidden_alias_names()
.is_match(body.room_alias.alias())
{
return Err(Error::BadRequest(ErrorKind::forbidden(), "Room alias is forbidden."));
return Err!(Request(Forbidden("Room alias is forbidden.")));
}
if services
.rooms
.alias
.resolve_local_alias(&body.room_alias)?
.is_some()
.resolve_local_alias(&body.room_alias)
.await
.is_ok()
{
return Err(Error::Conflict("Alias already exists."));
return Err!(Conflict("Alias already exists."));
}
services
@ -87,39 +86,32 @@ pub(crate) async fn get_alias_route(
State(services): State<crate::State>, body: Ruma<get_alias::v3::Request>,
) -> Result<get_alias::v3::Response> {
let room_alias = body.body.room_alias;
let servers = None;
let Ok((room_id, pre_servers)) = services
.rooms
.alias
.resolve_alias(&room_alias, servers.as_ref())
.await
else {
return Err(Error::BadRequest(ErrorKind::NotFound, "Room with alias not found."));
};
let Ok((room_id, servers)) = services.rooms.alias.resolve_alias(&room_alias, None).await else {
return Err!(Request(NotFound("Room with alias not found.")));
};
let servers = room_available_servers(&services, &room_id, &room_alias, &pre_servers);
let servers = room_available_servers(&services, &room_id, &room_alias, servers).await;
debug!(?room_alias, ?room_id, "available servers: {servers:?}");
Ok(get_alias::v3::Response::new(room_id, servers))
}
fn room_available_servers(
services: &Services, room_id: &RoomId, room_alias: &RoomAliasId, pre_servers: &Option<Vec<OwnedServerName>>,
async fn room_available_servers(
services: &Services, room_id: &RoomId, room_alias: &RoomAliasId, pre_servers: Vec<OwnedServerName>,
) -> Vec<OwnedServerName> {
// find active servers in room state cache to suggest
let mut servers: Vec<OwnedServerName> = services
.rooms
.state_cache
.room_servers(room_id)
.filter_map(Result::ok)
.collect();
.map(ToOwned::to_owned)
.collect()
.await;
// push any servers we want in the list already (e.g. responded remote alias
// servers, room alias server itself)
if let Some(pre_servers) = pre_servers {
servers.extend(pre_servers.clone());
};
servers.extend(pre_servers);
servers.sort_unstable();
servers.dedup();
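room_available_servers builds its suggestion list in three steps: collect the room's active servers, append any pre-resolved servers, then sort and dedup. Sketched with plain strings:

```rust
fn available_servers(mut servers: Vec<String>, pre_servers: Vec<String>) -> Vec<String> {
    // e.g. servers that answered the remote alias resolution
    servers.extend(pre_servers);
    servers.sort_unstable();
    servers.dedup(); // dedup only drops adjacent duplicates, hence the sort first
    servers
}
```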

View file

@ -1,18 +1,16 @@
use axum::extract::State;
use conduit::{err, Err};
use ruma::{
api::client::{
backup::{
add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, create_backup_version,
delete_backup_keys, delete_backup_keys_for_room, delete_backup_keys_for_session, delete_backup_version,
get_backup_info, get_backup_keys, get_backup_keys_for_room, get_backup_keys_for_session,
get_latest_backup_info, update_backup_version,
},
error::ErrorKind,
api::client::backup::{
add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, create_backup_version,
delete_backup_keys, delete_backup_keys_for_room, delete_backup_keys_for_session, delete_backup_version,
get_backup_info, get_backup_keys, get_backup_keys_for_room, get_backup_keys_for_session,
get_latest_backup_info, update_backup_version,
},
UInt,
};
use crate::{Error, Result, Ruma};
use crate::{Result, Ruma};
/// # `POST /_matrix/client/r0/room_keys/version`
///
@ -40,7 +38,8 @@ pub(crate) async fn update_backup_version_route(
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
services
.key_backups
.update_backup(sender_user, &body.version, &body.algorithm)?;
.update_backup(sender_user, &body.version, &body.algorithm)
.await?;
Ok(update_backup_version::v3::Response {})
}
@ -55,14 +54,15 @@ pub(crate) async fn get_latest_backup_info_route(
let (version, algorithm) = services
.key_backups
.get_latest_backup(sender_user)?
.ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Key backup does not exist."))?;
.get_latest_backup(sender_user)
.await
.map_err(|_| err!(Request(NotFound("Key backup does not exist."))))?;
Ok(get_latest_backup_info::v3::Response {
algorithm,
count: (UInt::try_from(services.key_backups.count_keys(sender_user, &version)?)
count: (UInt::try_from(services.key_backups.count_keys(sender_user, &version).await)
.expect("user backup keys count should not be that high")),
etag: services.key_backups.get_etag(sender_user, &version)?,
etag: services.key_backups.get_etag(sender_user, &version).await,
version,
})
}
@ -76,18 +76,21 @@ pub(crate) async fn get_backup_info_route(
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let algorithm = services
.key_backups
.get_backup(sender_user, &body.version)?
.ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Key backup does not exist."))?;
.get_backup(sender_user, &body.version)
.await
.map_err(|_| err!(Request(NotFound("Key backup does not exist at version {:?}", body.version))))?;
Ok(get_backup_info::v3::Response {
algorithm,
count: (UInt::try_from(
services
.key_backups
.count_keys(sender_user, &body.version)?,
)
.expect("user backup keys count should not be that high")),
etag: services.key_backups.get_etag(sender_user, &body.version)?,
count: services
.key_backups
.count_keys(sender_user, &body.version)
.await
.try_into()?,
etag: services
.key_backups
.get_etag(sender_user, &body.version)
.await,
version: body.version.clone(),
})
}
@ -105,7 +108,8 @@ pub(crate) async fn delete_backup_version_route(
services
.key_backups
.delete_backup(sender_user, &body.version)?;
.delete_backup(sender_user, &body.version)
.await;
Ok(delete_backup_version::v3::Response {})
}
@ -123,34 +127,36 @@ pub(crate) async fn add_backup_keys_route(
) -> Result<add_backup_keys::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if Some(&body.version)
!= services
.key_backups
.get_latest_backup_version(sender_user)?
.as_ref()
if services
.key_backups
.get_latest_backup_version(sender_user)
.await
.is_ok_and(|version| version != body.version)
{
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"You may only manipulate the most recently created version of the backup.",
));
return Err!(Request(InvalidParam(
"You may only manipulate the most recently created version of the backup."
)));
}
for (room_id, room) in &body.rooms {
for (session_id, key_data) in &room.sessions {
services
.key_backups
.add_key(sender_user, &body.version, room_id, session_id, key_data)?;
.add_key(sender_user, &body.version, room_id, session_id, key_data)
.await?;
}
}
Ok(add_backup_keys::v3::Response {
count: (UInt::try_from(
services
.key_backups
.count_keys(sender_user, &body.version)?,
)
.expect("user backup keys count should not be that high")),
etag: services.key_backups.get_etag(sender_user, &body.version)?,
count: services
.key_backups
.count_keys(sender_user, &body.version)
.await
.try_into()?,
etag: services
.key_backups
.get_etag(sender_user, &body.version)
.await,
})
}
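The count fields in these responses drop UInt::try_from(..).expect(..) in favor of .try_into()?, turning an oversized count into an error instead of a panic. The same fallible narrowing with std types:

```rust
use std::num::TryFromIntError;

fn count_field(count: usize) -> Result<u32, TryFromIntError> {
    // Errors instead of panicking if the key count exceeds u32::MAX.
    count.try_into()
}
```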
@ -167,32 +173,34 @@ pub(crate) async fn add_backup_keys_for_room_route(
) -> Result<add_backup_keys_for_room::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if Some(&body.version)
!= services
.key_backups
.get_latest_backup_version(sender_user)?
.as_ref()
if services
.key_backups
.get_latest_backup_version(sender_user)
.await
.is_ok_and(|version| version != body.version)
{
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"You may only manipulate the most recently created version of the backup.",
));
return Err!(Request(InvalidParam(
"You may only manipulate the most recently created version of the backup."
)));
}
for (session_id, key_data) in &body.sessions {
services
.key_backups
.add_key(sender_user, &body.version, &body.room_id, session_id, key_data)?;
.add_key(sender_user, &body.version, &body.room_id, session_id, key_data)
.await?;
}
Ok(add_backup_keys_for_room::v3::Response {
count: (UInt::try_from(
services
.key_backups
.count_keys(sender_user, &body.version)?,
)
.expect("user backup keys count should not be that high")),
etag: services.key_backups.get_etag(sender_user, &body.version)?,
count: services
.key_backups
.count_keys(sender_user, &body.version)
.await
.try_into()?,
etag: services
.key_backups
.get_etag(sender_user, &body.version)
.await,
})
}
@ -209,30 +217,32 @@ pub(crate) async fn add_backup_keys_for_session_route(
) -> Result<add_backup_keys_for_session::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if Some(&body.version)
!= services
.key_backups
.get_latest_backup_version(sender_user)?
.as_ref()
if services
.key_backups
.get_latest_backup_version(sender_user)
.await
.is_ok_and(|version| version != body.version)
{
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"You may only manipulate the most recently created version of the backup.",
));
return Err!(Request(InvalidParam(
"You may only manipulate the most recently created version of the backup."
)));
}
services
.key_backups
.add_key(sender_user, &body.version, &body.room_id, &body.session_id, &body.session_data)?;
.add_key(sender_user, &body.version, &body.room_id, &body.session_id, &body.session_data)
.await?;
Ok(add_backup_keys_for_session::v3::Response {
count: (UInt::try_from(
services
.key_backups
.count_keys(sender_user, &body.version)?,
)
.expect("user backup keys count should not be that high")),
etag: services.key_backups.get_etag(sender_user, &body.version)?,
count: services
.key_backups
.count_keys(sender_user, &body.version)
.await
.try_into()?,
etag: services
.key_backups
.get_etag(sender_user, &body.version)
.await,
})
}
@ -244,7 +254,10 @@ pub(crate) async fn get_backup_keys_route(
) -> Result<get_backup_keys::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let rooms = services.key_backups.get_all(sender_user, &body.version)?;
let rooms = services
.key_backups
.get_all(sender_user, &body.version)
.await;
Ok(get_backup_keys::v3::Response {
rooms,
@ -261,7 +274,8 @@ pub(crate) async fn get_backup_keys_for_room_route(
let sessions = services
.key_backups
.get_room(sender_user, &body.version, &body.room_id)?;
.get_room(sender_user, &body.version, &body.room_id)
.await;
Ok(get_backup_keys_for_room::v3::Response {
sessions,
@ -278,8 +292,9 @@ pub(crate) async fn get_backup_keys_for_session_route(
let key_data = services
.key_backups
.get_session(sender_user, &body.version, &body.room_id, &body.session_id)?
.ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Backup key not found for this user's session."))?;
.get_session(sender_user, &body.version, &body.room_id, &body.session_id)
.await
.map_err(|_| err!(Request(NotFound(debug_error!("Backup key not found for this user's session.")))))?;
Ok(get_backup_keys_for_session::v3::Response {
key_data,
@ -296,16 +311,19 @@ pub(crate) async fn delete_backup_keys_route(
services
.key_backups
.delete_all_keys(sender_user, &body.version)?;
.delete_all_keys(sender_user, &body.version)
.await;
Ok(delete_backup_keys::v3::Response {
count: (UInt::try_from(
services
.key_backups
.count_keys(sender_user, &body.version)?,
)
.expect("user backup keys count should not be that high")),
etag: services.key_backups.get_etag(sender_user, &body.version)?,
count: services
.key_backups
.count_keys(sender_user, &body.version)
.await
.try_into()?,
etag: services
.key_backups
.get_etag(sender_user, &body.version)
.await,
})
}
@ -319,16 +337,19 @@ pub(crate) async fn delete_backup_keys_for_room_route(
services
.key_backups
.delete_room_keys(sender_user, &body.version, &body.room_id)?;
.delete_room_keys(sender_user, &body.version, &body.room_id)
.await;
Ok(delete_backup_keys_for_room::v3::Response {
count: (UInt::try_from(
services
.key_backups
.count_keys(sender_user, &body.version)?,
)
.expect("user backup keys count should not be that high")),
etag: services.key_backups.get_etag(sender_user, &body.version)?,
count: services
.key_backups
.count_keys(sender_user, &body.version)
.await
.try_into()?,
etag: services
.key_backups
.get_etag(sender_user, &body.version)
.await,
})
}
@ -342,15 +363,18 @@ pub(crate) async fn delete_backup_keys_for_session_route(
services
.key_backups
.delete_room_key(sender_user, &body.version, &body.room_id, &body.session_id)?;
.delete_room_key(sender_user, &body.version, &body.room_id, &body.session_id)
.await;
Ok(delete_backup_keys_for_session::v3::Response {
count: (UInt::try_from(
services
.key_backups
.count_keys(sender_user, &body.version)?,
)
.expect("user backup keys count should not be that high")),
etag: services.key_backups.get_etag(sender_user, &body.version)?,
count: services
.key_backups
.count_keys(sender_user, &body.version)
.await
.try_into()?,
etag: services
.key_backups
.get_etag(sender_user, &body.version)
.await,
})
}


@ -3,7 +3,8 @@ use std::collections::BTreeMap;
use axum::extract::State;
use ruma::{
api::client::discovery::get_capabilities::{
self, Capabilities, RoomVersionStability, RoomVersionsCapability, ThirdPartyIdChangesCapability,
self, Capabilities, GetLoginTokenCapability, RoomVersionStability, RoomVersionsCapability,
ThirdPartyIdChangesCapability,
},
RoomVersionId,
};
@ -43,6 +44,11 @@ pub(crate) async fn get_capabilities_route(
enabled: false,
};
// we don't support generating tokens yet
capabilities.get_login_token = GetLoginTokenCapability {
enabled: false,
};
// MSC4133 capability
capabilities
.set("uk.tcpip.msc4133.profile_fields", json!({"enabled": true}))


@ -1,4 +1,5 @@
use axum::extract::State;
use conduit::err;
use ruma::{
api::client::{
config::{get_global_account_data, get_room_account_data, set_global_account_data, set_room_account_data},
@ -25,7 +26,8 @@ pub(crate) async fn set_global_account_data_route(
&body.sender_user,
&body.event_type.to_string(),
body.data.json(),
)?;
)
.await?;
Ok(set_global_account_data::v3::Response {})
}
@ -42,7 +44,8 @@ pub(crate) async fn set_room_account_data_route(
&body.sender_user,
&body.event_type.to_string(),
body.data.json(),
)?;
)
.await?;
Ok(set_room_account_data::v3::Response {})
}
@ -55,17 +58,14 @@ pub(crate) async fn get_global_account_data_route(
) -> Result<get_global_account_data::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let event: Box<RawJsonValue> = services
let account_data: ExtractGlobalEventContent = services
.account_data
.get(None, sender_user, body.event_type.to_string().into())?
.ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
let account_data = serde_json::from_str::<ExtractGlobalEventContent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
.content;
.get_global(sender_user, body.event_type.clone())
.await
.map_err(|_| err!(Request(NotFound("Data not found."))))?;
Ok(get_global_account_data::v3::Response {
account_data,
account_data: account_data.content,
})
}
@ -77,21 +77,18 @@ pub(crate) async fn get_room_account_data_route(
) -> Result<get_room_account_data::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let event: Box<RawJsonValue> = services
let account_data: ExtractRoomEventContent = services
.account_data
.get(Some(&body.room_id), sender_user, body.event_type.clone())?
.ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
let account_data = serde_json::from_str::<ExtractRoomEventContent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
.content;
.get_room(&body.room_id, sender_user, body.event_type.clone())
.await
.map_err(|_| err!(Request(NotFound("Data not found."))))?;
Ok(get_room_account_data::v3::Response {
account_data,
account_data: account_data.content,
})
}
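
Both account-data getters now deserialize straight into an `Extract…EventContent` wrapper instead of hand-parsing the raw event. An illustrative, self-contained version of that extraction — the wrapper's field type is simplified here to `serde_json::Value`:

use serde::Deserialize;
use serde_json::value::RawValue;

// Illustrative mirror of the wrapper the handler deserializes into:
// keep only the `content` field of the stored account-data event.
#[derive(Deserialize)]
struct ExtractGlobalEventContent {
    content: serde_json::Value,
}

fn extract_content(raw_event: &RawValue) -> Result<serde_json::Value, serde_json::Error> {
    // The event is stored as `{"type": ..., "content": ...}`; deserializing
    // through the wrapper discards everything but `content`.
    serde_json::from_str::<ExtractGlobalEventContent>(raw_event.get()).map(|e| e.content)
}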
fn set_account_data(
async fn set_account_data(
services: &Services, room_id: Option<&RoomId>, sender_user: &Option<OwnedUserId>, event_type: &str,
data: &RawJsonValue,
) -> Result<()> {
@ -100,15 +97,18 @@ fn set_account_data(
let data: serde_json::Value =
serde_json::from_str(data.get()).map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?;
services.account_data.update(
room_id,
sender_user,
event_type.into(),
&json!({
"type": event_type,
"content": data,
}),
)?;
services
.account_data
.update(
room_id,
sender_user,
event_type.into(),
&json!({
"type": event_type,
"content": data,
}),
)
.await?;
Ok(())
}


@ -1,15 +1,16 @@
use std::collections::HashSet;
use axum::extract::State;
use conduit::{err, error, Err};
use futures::StreamExt;
use ruma::{
api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions},
events::StateEventType,
api::client::{context::get_context, filter::LazyLoadOptions},
events::{StateEventType, TimelineEventType::*},
};
use tracing::error;
use crate::{Error, Result, Ruma};
use crate::{Result, Ruma};
/// # `GET /_matrix/client/r0/rooms/{roomId}/context`
/// # `GET /_matrix/client/r0/rooms/{roomId}/context/{eventId}`
///
/// Allows loading room history around an event.
///
@ -30,39 +31,38 @@ pub(crate) async fn get_context_route(
LazyLoadOptions::Disabled => (false, cfg!(feature = "element_hacks")),
};
let mut lazy_loaded = HashSet::new();
let mut lazy_loaded = HashSet::with_capacity(100);
let base_token = services
.rooms
.timeline
.get_pdu_count(&body.event_id)?
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Base event id not found."))?;
.get_pdu_count(&body.event_id)
.await
.map_err(|_| err!(Request(NotFound("Base event id not found."))))?;
let base_event = services
.rooms
.timeline
.get_pdu(&body.event_id)?
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Base event not found."))?;
.get_pdu(&body.event_id)
.await
.map_err(|_| err!(Request(NotFound("Base event not found."))))?;
let room_id = base_event.room_id.clone();
let room_id = &base_event.room_id;
if !services
.rooms
.state_accessor
.user_can_see_event(sender_user, &room_id, &body.event_id)?
.user_can_see_event(sender_user, room_id, &body.event_id)
.await
{
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"You don't have permission to view this event.",
));
return Err!(Request(Forbidden("You don't have permission to view this event.")));
}
if !services.rooms.lazy_loading.lazy_load_was_sent_before(
sender_user,
sender_device,
&room_id,
&base_event.sender,
)? || lazy_load_send_redundant
if !services
.rooms
.lazy_loading
.lazy_load_was_sent_before(sender_user, sender_device, room_id, &base_event.sender)
.await || lazy_load_send_redundant
{
lazy_loaded.insert(base_event.sender.as_str().to_owned());
}
@ -75,25 +75,45 @@ pub(crate) async fn get_context_route(
let events_before: Vec<_> = services
.rooms
.timeline
.pdus_until(sender_user, &room_id, base_token)?
.pdus_until(sender_user, room_id, base_token)
.await?
.take(limit / 2)
.filter_map(Result::ok) // Remove buggy events
.filter(|(_, pdu)| {
.filter_map(|(count, pdu)| async move {
// list of safe and common non-state events to ignore
if matches!(
&pdu.kind,
RoomMessage
| Sticker | CallInvite
| CallNotify | RoomEncrypted
| Image | File | Audio
| Voice | Video | UnstablePollStart
| PollStart | KeyVerificationStart
| Reaction | Emote
| Location
) && services
.users
.user_is_ignored(&pdu.sender, sender_user)
.await
{
return None;
}
services
.rooms
.state_accessor
.user_can_see_event(sender_user, &room_id, &pdu.event_id)
.unwrap_or(false)
.user_can_see_event(sender_user, room_id, &pdu.event_id)
.await
.then_some((count, pdu))
})
.collect();
.collect()
.await;
for (_, event) in &events_before {
if !services.rooms.lazy_loading.lazy_load_was_sent_before(
sender_user,
sender_device,
&room_id,
&event.sender,
)? || lazy_load_send_redundant
if !services
.rooms
.lazy_loading
.lazy_load_was_sent_before(sender_user, sender_device, room_id, &event.sender)
.await || lazy_load_send_redundant
{
lazy_loaded.insert(event.sender.as_str().to_owned());
}
@ -103,33 +123,48 @@ pub(crate) async fn get_context_route(
.last()
.map_or_else(|| base_token.stringify(), |(count, _)| count.stringify());
let events_before: Vec<_> = events_before
.into_iter()
.map(|(_, pdu)| pdu.to_room_event())
.collect();
let events_after: Vec<_> = services
.rooms
.timeline
.pdus_after(sender_user, &room_id, base_token)?
.pdus_after(sender_user, room_id, base_token)
.await?
.take(limit / 2)
.filter_map(Result::ok) // Remove buggy events
.filter(|(_, pdu)| {
.filter_map(|(count, pdu)| async move {
// list of safe and common non-state events to ignore
if matches!(
&pdu.kind,
RoomMessage
| Sticker | CallInvite
| CallNotify | RoomEncrypted
| Image | File | Audio
| Voice | Video | UnstablePollStart
| PollStart | KeyVerificationStart
| Reaction | Emote
| Location
) && services
.users
.user_is_ignored(&pdu.sender, sender_user)
.await
{
return None;
}
services
.rooms
.state_accessor
.user_can_see_event(sender_user, &room_id, &pdu.event_id)
.unwrap_or(false)
.user_can_see_event(sender_user, room_id, &pdu.event_id)
.await
.then_some((count, pdu))
})
.collect();
.collect()
.await;
for (_, event) in &events_after {
if !services.rooms.lazy_loading.lazy_load_was_sent_before(
sender_user,
sender_device,
&room_id,
&event.sender,
)? || lazy_load_send_redundant
if !services
.rooms
.lazy_loading
.lazy_load_was_sent_before(sender_user, sender_device, room_id, &event.sender)
.await || lazy_load_send_redundant
{
lazy_loaded.insert(event.sender.as_str().to_owned());
}
@ -142,12 +177,14 @@ pub(crate) async fn get_context_route(
events_after
.last()
.map_or(&*body.event_id, |(_, e)| &*e.event_id),
)?
)
.await
.map_or(
services
.rooms
.state
.get_room_shortstatehash(&room_id)?
.get_room_shortstatehash(room_id)
.await
.expect("All rooms have state"),
|hash| hash,
);
@ -156,35 +193,32 @@ pub(crate) async fn get_context_route(
.rooms
.state_accessor
.state_full_ids(shortstatehash)
.await?;
.await
.map_err(|e| err!(Database("State not found: {e}")))?;
let end_token = events_after
.last()
.map_or_else(|| base_token.stringify(), |(count, _)| count.stringify());
let events_after: Vec<_> = events_after
.into_iter()
.map(|(_, pdu)| pdu.to_room_event())
.collect();
let mut state = Vec::with_capacity(state_ids.len());
for (shortstatekey, id) in state_ids {
let (event_type, state_key) = services
.rooms
.short
.get_statekey_from_short(shortstatekey)?;
.get_statekey_from_short(shortstatekey)
.await?;
if event_type != StateEventType::RoomMember {
let Some(pdu) = services.rooms.timeline.get_pdu(&id)? else {
error!("Pdu in state not found: {}", id);
let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else {
error!("Pdu in state not found: {id}");
continue;
};
state.push(pdu.to_state_event());
} else if !lazy_load_enabled || lazy_loaded.contains(&state_key) {
let Some(pdu) = services.rooms.timeline.get_pdu(&id)? else {
error!("Pdu in state not found: {}", id);
let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else {
error!("Pdu in state not found: {id}");
continue;
};
@ -195,9 +229,15 @@ pub(crate) async fn get_context_route(
Ok(get_context::v3::Response {
start: Some(start_token),
end: Some(end_token),
events_before,
events_before: events_before
.iter()
.map(|(_, pdu)| pdu.to_room_event())
.collect(),
event: Some(base_event),
events_after,
events_after: events_after
.iter()
.map(|(_, pdu)| pdu.to_room_event())
.collect(),
state,
})
}
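
Both directions of the context walk now share one shape: an async `filter_map` over the PDU stream that drops common timeline events from ignored senders and keeps an event only if the requester may see it. A minimal sketch of that combinator with the `futures` crate; `is_ignored` and `can_see` are hypothetical async predicates standing in for the service calls:

use futures::{stream, StreamExt};

// Hypothetical async predicates standing in for the service calls.
async fn is_ignored(_sender: &str) -> bool { false }
async fn can_see(_event_id: &str) -> bool { true }

async fn visible_events(events: Vec<(u64, String, String)>) -> Vec<(u64, String)> {
    stream::iter(events)
        .filter_map(|(count, sender, event_id)| async move {
            // Drop timeline events from users the requester ignores.
            if is_ignored(&sender).await {
                return None;
            }
            // Keep the event only if history visibility allows it.
            can_see(&event_id).await.then_some((count, event_id))
        })
        .collect()
        .await
}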


@ -1,8 +1,14 @@
use axum::extract::State;
use ruma::api::client::{
device::{self, delete_device, delete_devices, get_device, get_devices, update_device},
error::ErrorKind,
uiaa::{AuthFlow, AuthType, UiaaInfo},
use axum_client_ip::InsecureClientIp;
use conduit::{err, Err};
use futures::StreamExt;
use ruma::{
api::client::{
device::{self, delete_device, delete_devices, get_device, get_devices, update_device},
error::ErrorKind,
uiaa::{AuthFlow, AuthType, UiaaInfo},
},
MilliSecondsSinceUnixEpoch,
};
use super::SESSION_ID_LENGTH;
@ -19,8 +25,8 @@ pub(crate) async fn get_devices_route(
let devices: Vec<device::Device> = services
.users
.all_devices_metadata(sender_user)
.filter_map(Result::ok) // Filter out buggy devices
.collect();
.collect()
.await;
Ok(get_devices::v3::Response {
devices,
@ -37,8 +43,9 @@ pub(crate) async fn get_device_route(
let device = services
.users
.get_device_metadata(sender_user, &body.body.device_id)?
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;
.get_device_metadata(sender_user, &body.body.device_id)
.await
.map_err(|_| err!(Request(NotFound("Device not found."))))?;
Ok(get_device::v3::Response {
device,
@ -48,21 +55,29 @@ pub(crate) async fn get_device_route(
/// # `PUT /_matrix/client/r0/devices/{deviceId}`
///
/// Updates the metadata on a given device of the sender user.
#[tracing::instrument(skip_all, fields(%client), name = "update_device")]
pub(crate) async fn update_device_route(
State(services): State<crate::State>, body: Ruma<update_device::v3::Request>,
State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp,
body: Ruma<update_device::v3::Request>,
) -> Result<update_device::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let mut device = services
.users
.get_device_metadata(sender_user, &body.device_id)?
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;
.get_device_metadata(sender_user, &body.device_id)
.await
.map_err(|_| err!(Request(NotFound("Device not found."))))?;
device.display_name.clone_from(&body.display_name);
device.last_seen_ip.clone_from(&Some(client.to_string()));
device
.last_seen_ts
.clone_from(&Some(MilliSecondsSinceUnixEpoch::now()));
services
.users
.update_device_metadata(sender_user, &body.device_id, &device)?;
.update_device_metadata(sender_user, &body.device_id, &device)
.await?;
Ok(update_device::v3::Response {})
}
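
The update now also stamps the caller's IP (from `InsecureClientIp`) and a fresh timestamp onto the device record before persisting. A sketch of that mutation, assuming a simplified device record with optional last-seen fields:

use std::net::IpAddr;
use std::time::{SystemTime, UNIX_EPOCH};

// Simplified device record; the real type carries more fields.
struct Device {
    display_name: Option<String>,
    last_seen_ip: Option<String>,
    last_seen_ts: Option<u64>, // milliseconds since the Unix epoch
}

fn touch_device(device: &mut Device, display_name: Option<String>, client: IpAddr) {
    device.display_name = display_name;
    device.last_seen_ip = Some(client.to_string());
    device.last_seen_ts = Some(
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("system clock set after 1970")
            .as_millis() as u64,
    );
}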
@ -97,22 +112,28 @@ pub(crate) async fn delete_device_route(
if let Some(auth) = &body.auth {
let (worked, uiaainfo) = services
.uiaa
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
.try_auth(sender_user, sender_device, auth, &uiaainfo)
.await?;
if !worked {
return Err(Error::Uiaa(uiaainfo));
return Err!(Uiaa(uiaainfo));
}
// Success!
} else if let Some(json) = body.json_body {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
services
.uiaa
.create(sender_user, sender_device, &uiaainfo, &json)?;
return Err(Error::Uiaa(uiaainfo));
.create(sender_user, sender_device, &uiaainfo, &json);
return Err!(Uiaa(uiaainfo));
} else {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
return Err!(Request(NotJson("Not json.")));
}
services.users.remove_device(sender_user, &body.device_id)?;
services
.users
.remove_device(sender_user, &body.device_id)
.await;
Ok(delete_device::v3::Response {})
}
@ -149,7 +170,9 @@ pub(crate) async fn delete_devices_route(
if let Some(auth) = &body.auth {
let (worked, uiaainfo) = services
.uiaa
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
.try_auth(sender_user, sender_device, auth, &uiaainfo)
.await?;
if !worked {
return Err(Error::Uiaa(uiaainfo));
}
@ -158,14 +181,15 @@ pub(crate) async fn delete_devices_route(
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
services
.uiaa
.create(sender_user, sender_device, &uiaainfo, &json)?;
.create(sender_user, sender_device, &uiaainfo, &json);
return Err(Error::Uiaa(uiaainfo));
} else {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
}
for device_id in &body.devices {
services.users.remove_device(sender_user, device_id)?;
services.users.remove_device(sender_user, device_id).await;
}
Ok(delete_devices::v3::Response {})
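
Both delete endpoints walk the same UIAA state machine: attached auth is verified with `try_auth`, a bare JSON body opens a fresh UIAA session (note `create` is no longer fallible), and anything else is rejected as not-JSON. A condensed, illustrative control flow with hypothetical types:

// Condensed, illustrative control flow of the UIAA handling above.
enum UiaaOutcome {
    Authed,            // try_auth succeeded: proceed with the deletion
    Challenge(String), // new or incomplete session: respond 401 with UIAA info
    NotJson,           // neither auth nor a JSON body was supplied
}

fn uiaa_flow(auth: Option<&str>, json_body: Option<&str>, auth_ok: bool) -> UiaaOutcome {
    if auth.is_some() {
        if auth_ok {
            UiaaOutcome::Authed
        } else {
            UiaaOutcome::Challenge("retry with remaining stages".to_owned())
        }
    } else if json_body.is_some() {
        // Here a random session id would be generated and persisted.
        UiaaOutcome::Challenge("fresh session".to_owned())
    } else {
        UiaaOutcome::NotJson
    }
}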


@ -1,6 +1,7 @@
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduit::{err, info, warn, Err, Error, Result};
use conduit::{info, warn, Err, Error, Result};
use futures::{StreamExt, TryFutureExt};
use ruma::{
api::{
client::{
@ -18,7 +19,7 @@ use ruma::{
},
StateEventType,
},
uint, RoomId, ServerName, UInt, UserId,
uint, OwnedRoomId, RoomId, ServerName, UInt, UserId,
};
use service::Services;
@ -119,16 +120,22 @@ pub(crate) async fn set_room_visibility_route(
) -> Result<set_room_visibility::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if !services.rooms.metadata.exists(&body.room_id)? {
if !services.rooms.metadata.exists(&body.room_id).await {
// Return 404 if the room doesn't exist
return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found"));
}
if services.users.is_deactivated(sender_user).unwrap_or(false) && body.appservice_info.is_none() {
if services
.users
.is_deactivated(sender_user)
.await
.unwrap_or(false)
&& body.appservice_info.is_none()
{
return Err!(Request(Forbidden("Guests cannot publish to room directories")));
}
if !user_can_publish_room(&services, sender_user, &body.room_id)? {
if !user_can_publish_room(&services, sender_user, &body.room_id).await? {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"User is not allowed to publish this room",
@ -137,7 +144,7 @@ pub(crate) async fn set_room_visibility_route(
match &body.visibility {
room::Visibility::Public => {
if services.globals.config.lockdown_public_room_directory && !services.users.is_admin(sender_user)? {
if services.globals.config.lockdown_public_room_directory && !services.users.is_admin(sender_user).await {
info!(
"Non-admin user {sender_user} tried to publish {0} to the room directory while \
\"lockdown_public_room_directory\" is enabled",
@ -158,14 +165,14 @@ pub(crate) async fn set_room_visibility_route(
));
}
services.rooms.directory.set_public(&body.room_id)?;
services.rooms.directory.set_public(&body.room_id);
services
.admin
.send_text(&format!("{sender_user} made {} public to the room directory", body.room_id))
.await;
info!("{sender_user} made {0} public to the room directory", body.room_id);
},
room::Visibility::Private => services.rooms.directory.set_not_public(&body.room_id)?,
room::Visibility::Private => services.rooms.directory.set_not_public(&body.room_id),
_ => {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
@ -183,13 +190,13 @@ pub(crate) async fn set_room_visibility_route(
pub(crate) async fn get_room_visibility_route(
State(services): State<crate::State>, body: Ruma<get_room_visibility::v3::Request>,
) -> Result<get_room_visibility::v3::Response> {
if !services.rooms.metadata.exists(&body.room_id)? {
if !services.rooms.metadata.exists(&body.room_id).await {
// Return 404 if the room doesn't exist
return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found"));
}
Ok(get_room_visibility::v3::Response {
visibility: if services.rooms.directory.is_public_room(&body.room_id)? {
visibility: if services.rooms.directory.is_public_room(&body.room_id).await {
room::Visibility::Public
} else {
room::Visibility::Private
@ -248,101 +255,41 @@ pub(crate) async fn get_public_rooms_filtered_helper(
}
}
let mut all_rooms: Vec<_> = services
let mut all_rooms: Vec<PublicRoomsChunk> = services
.rooms
.directory
.public_rooms()
.map(|room_id| {
let room_id = room_id?;
let chunk = PublicRoomsChunk {
canonical_alias: services
.rooms
.state_accessor
.get_canonical_alias(&room_id)?,
name: services.rooms.state_accessor.get_name(&room_id)?,
num_joined_members: services
.rooms
.state_cache
.room_joined_count(&room_id)?
.unwrap_or_else(|| {
warn!("Room {} has no member count", room_id);
0
})
.try_into()
.expect("user count should not be that big"),
topic: services
.rooms
.state_accessor
.get_room_topic(&room_id)
.unwrap_or(None),
world_readable: services.rooms.state_accessor.is_world_readable(&room_id)?,
guest_can_join: services
.rooms
.state_accessor
.guest_can_join(&room_id)?,
avatar_url: services
.rooms
.state_accessor
.get_avatar(&room_id)?
.into_option()
.unwrap_or_default()
.url,
join_rule: services
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomJoinRules, "")?
.map(|s| {
serde_json::from_str(s.content.get())
.map(|c: RoomJoinRulesEventContent| match c.join_rule {
JoinRule::Public => Some(PublicRoomJoinRule::Public),
JoinRule::Knock => Some(PublicRoomJoinRule::Knock),
_ => None,
})
.map_err(|e| {
err!(Database(error!("Invalid room join rule event in database: {e}")))
})
})
.transpose()?
.flatten()
.ok_or_else(|| Error::bad_database("Missing room join rule event for room."))?,
room_type: services
.rooms
.state_accessor
.get_room_type(&room_id)?,
room_id,
};
Ok(chunk)
})
.filter_map(|r: Result<_>| r.ok()) // Filter out buggy rooms
.filter(|chunk| {
.map(ToOwned::to_owned)
.then(|room_id| public_rooms_chunk(services, room_id))
.filter_map(|chunk| async move {
if let Some(query) = filter.generic_search_term.as_ref().map(|q| q.to_lowercase()) {
if let Some(name) = &chunk.name {
if name.as_str().to_lowercase().contains(&query) {
return true;
return Some(chunk);
}
}
if let Some(topic) = &chunk.topic {
if topic.to_lowercase().contains(&query) {
return true;
return Some(chunk);
}
}
if let Some(canonical_alias) = &chunk.canonical_alias {
if canonical_alias.as_str().to_lowercase().contains(&query) {
return true;
return Some(chunk);
}
}
false
} else {
// No search term
true
return None;
}
// No search term
Some(chunk)
})
// We need to collect all, so we can sort by member count
.collect();
.collect()
.await;
all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members));
@ -385,22 +332,23 @@ pub(crate) async fn get_public_rooms_filtered_helper(
/// Check whether the user can publish to the room directory via power levels of
/// room history visibility event or room creator
fn user_can_publish_room(services: &Services, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
if let Some(event) = services
async fn user_can_publish_room(services: &Services, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
if let Ok(event) = services
.rooms
.state_accessor
.room_state_get(room_id, &StateEventType::RoomPowerLevels, "")?
.room_state_get(room_id, &StateEventType::RoomPowerLevels, "")
.await
{
serde_json::from_str(event.content.get())
.map_err(|_| Error::bad_database("Invalid event content for m.room.power_levels"))
.map(|content: RoomPowerLevelsEventContent| {
RoomPowerLevels::from(content).user_can_send_state(user_id, StateEventType::RoomHistoryVisibility)
})
} else if let Some(event) =
services
.rooms
.state_accessor
.room_state_get(room_id, &StateEventType::RoomCreate, "")?
} else if let Ok(event) = services
.rooms
.state_accessor
.room_state_get(room_id, &StateEventType::RoomCreate, "")
.await
{
Ok(event.sender == user_id)
} else {
@ -410,3 +358,61 @@ fn user_can_publish_room(services: &Services, user_id: &UserId, room_id: &RoomId
));
}
}
async fn public_rooms_chunk(services: &Services, room_id: OwnedRoomId) -> PublicRoomsChunk {
PublicRoomsChunk {
canonical_alias: services
.rooms
.state_accessor
.get_canonical_alias(&room_id)
.await
.ok(),
name: services.rooms.state_accessor.get_name(&room_id).await.ok(),
num_joined_members: services
.rooms
.state_cache
.room_joined_count(&room_id)
.await
.unwrap_or(0)
.try_into()
.expect("joined count overflows ruma UInt"),
topic: services
.rooms
.state_accessor
.get_room_topic(&room_id)
.await
.ok(),
world_readable: services
.rooms
.state_accessor
.is_world_readable(&room_id)
.await,
guest_can_join: services.rooms.state_accessor.guest_can_join(&room_id).await,
avatar_url: services
.rooms
.state_accessor
.get_avatar(&room_id)
.await
.into_option()
.unwrap_or_default()
.url,
join_rule: services
.rooms
.state_accessor
.room_state_get_content(&room_id, &StateEventType::RoomJoinRules, "")
.map_ok(|c: RoomJoinRulesEventContent| match c.join_rule {
JoinRule::Public => PublicRoomJoinRule::Public,
JoinRule::Knock => PublicRoomJoinRule::Knock,
_ => "invite".into(),
})
.await
.unwrap_or_default(),
room_type: services
.rooms
.state_accessor
.get_room_type(&room_id)
.await
.ok(),
room_id,
}
}
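
One behavioral nuance in `public_rooms_chunk` worth flagging: the old code surfaced only `public`/`knock` join rules and failed outright on a missing or malformed join-rules event, while the new version maps every other rule to `invite` and falls back to the default on error. The mapping in isolation, with illustrative stand-ins for ruma's enums:

// Illustrative stand-ins for ruma's JoinRule / PublicRoomJoinRule.
enum JoinRule { Public, Knock, Invite, Restricted }

#[derive(Default)]
enum PublicRoomJoinRule { Public, Knock, #[default] Invite }

// Only public/knock survive as themselves; every other rule
// is published to the directory as "invite".
fn publishable(rule: JoinRule) -> PublicRoomJoinRule {
    match rule {
        JoinRule::Public => PublicRoomJoinRule::Public,
        JoinRule::Knock => PublicRoomJoinRule::Knock,
        _ => PublicRoomJoinRule::Invite,
    }
}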


@ -1,10 +1,8 @@
use axum::extract::State;
use ruma::api::client::{
error::ErrorKind,
filter::{create_filter, get_filter},
};
use conduit::err;
use ruma::api::client::filter::{create_filter, get_filter};
use crate::{Error, Result, Ruma};
use crate::{Result, Ruma};
/// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}`
///
@ -15,11 +13,13 @@ pub(crate) async fn get_filter_route(
State(services): State<crate::State>, body: Ruma<get_filter::v3::Request>,
) -> Result<get_filter::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let Some(filter) = services.users.get_filter(sender_user, &body.filter_id)? else {
return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found."));
};
Ok(get_filter::v3::Response::new(filter))
services
.users
.get_filter(sender_user, &body.filter_id)
.await
.map(get_filter::v3::Response::new)
.map_err(|_| err!(Request(NotFound("Filter not found."))))
}
/// # `PUT /_matrix/client/r0/user/{userId}/filter`
@ -29,7 +29,8 @@ pub(crate) async fn create_filter_route(
State(services): State<crate::State>, body: Ruma<create_filter::v3::Request>,
) -> Result<create_filter::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
Ok(create_filter::v3::Response::new(
services.users.create_filter(sender_user, &body.filter)?,
))
let filter_id = services.users.create_filter(sender_user, &body.filter);
Ok(create_filter::v3::Response::new(filter_id))
}


@ -4,8 +4,8 @@ use std::{
};
use axum::extract::State;
use conduit::{utils, utils::math::continue_exponential_backoff_secs, Err, Error, Result};
use futures_util::{stream::FuturesUnordered, StreamExt};
use conduit::{err, utils, utils::math::continue_exponential_backoff_secs, Err, Error, Result};
use futures::{stream::FuturesUnordered, StreamExt};
use ruma::{
api::{
client::{
@ -21,7 +21,10 @@ use ruma::{
use serde_json::json;
use super::SESSION_ID_LENGTH;
use crate::{service::Services, Ruma};
use crate::{
service::{users::parse_master_key, Services},
Ruma,
};
/// # `POST /_matrix/client/r0/keys/upload`
///
@ -39,7 +42,8 @@ pub(crate) async fn upload_keys_route(
for (key_key, key_value) in &body.one_time_keys {
services
.users
.add_one_time_key(sender_user, sender_device, key_key, key_value)?;
.add_one_time_key(sender_user, sender_device, key_key, key_value)
.await?;
}
if let Some(device_keys) = &body.device_keys {
@ -47,19 +51,22 @@ pub(crate) async fn upload_keys_route(
// This check is needed to assure that signatures are kept
if services
.users
.get_device_keys(sender_user, sender_device)?
.is_none()
.get_device_keys(sender_user, sender_device)
.await
.is_err()
{
services
.users
.add_device_keys(sender_user, sender_device, device_keys)?;
.add_device_keys(sender_user, sender_device, device_keys)
.await;
}
}
Ok(upload_keys::v3::Response {
one_time_key_counts: services
.users
.count_one_time_keys(sender_user, sender_device)?,
.count_one_time_keys(sender_user, sender_device)
.await,
})
}
@ -120,7 +127,9 @@ pub(crate) async fn upload_signing_keys_route(
if let Some(auth) = &body.auth {
let (worked, uiaainfo) = services
.uiaa
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
.try_auth(sender_user, sender_device, auth, &uiaainfo)
.await?;
if !worked {
return Err(Error::Uiaa(uiaainfo));
}
@ -129,20 +138,24 @@ pub(crate) async fn upload_signing_keys_route(
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
services
.uiaa
.create(sender_user, sender_device, &uiaainfo, &json)?;
.create(sender_user, sender_device, &uiaainfo, &json);
return Err(Error::Uiaa(uiaainfo));
} else {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
}
if let Some(master_key) = &body.master_key {
services.users.add_cross_signing_keys(
sender_user,
master_key,
&body.self_signing_key,
&body.user_signing_key,
true, // notify so that other users see the new keys
)?;
services
.users
.add_cross_signing_keys(
sender_user,
master_key,
&body.self_signing_key,
&body.user_signing_key,
true, // notify so that other users see the new keys
)
.await?;
}
Ok(upload_signing_keys::v3::Response {})
@ -179,9 +192,11 @@ pub(crate) async fn upload_signatures_route(
.ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Invalid signature value."))?
.to_owned(),
);
services
.users
.sign_key(user_id, key_id, signature, sender_user)?;
.sign_key(user_id, key_id, signature, sender_user)
.await?;
}
}
}
@ -204,56 +219,51 @@ pub(crate) async fn get_key_changes_route(
let mut device_list_updates = HashSet::new();
let from = body
.from
.parse()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?;
let to = body
.to
.parse()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?;
device_list_updates.extend(
services
.users
.keys_changed(
sender_user.as_str(),
body.from
.parse()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?,
Some(
body.to
.parse()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?,
),
)
.filter_map(Result::ok),
.keys_changed(sender_user.as_str(), from, Some(to))
.map(ToOwned::to_owned)
.collect::<Vec<_>>()
.await,
);
for room_id in services
.rooms
.state_cache
.rooms_joined(sender_user)
.filter_map(Result::ok)
{
let mut rooms_joined = services.rooms.state_cache.rooms_joined(sender_user).boxed();
while let Some(room_id) = rooms_joined.next().await {
device_list_updates.extend(
services
.users
.keys_changed(
room_id.as_ref(),
body.from
.parse()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?,
Some(
body.to
.parse()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?,
),
)
.filter_map(Result::ok),
.keys_changed(room_id.as_str(), from, Some(to))
.map(ToOwned::to_owned)
.collect::<Vec<_>>()
.await,
);
}
Ok(get_key_changes::v3::Response {
changed: device_list_updates.into_iter().collect(),
left: Vec::new(), // TODO
})
}
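
The `from`/`to` tokens are now parsed once up front and reused for every `keys_changed` call instead of being re-parsed per room. The hoisted parse in miniature (assuming numeric tokens, as the handler does):

// Sketch of the hoisted token parsing: fail fast once, reuse everywhere.
fn parse_range(from: &str, to: &str) -> Result<(u64, u64), String> {
    let from = from.parse().map_err(|_| "Invalid `from`.".to_owned())?;
    let to = to.parse().map_err(|_| "Invalid `to`.".to_owned())?;
    Ok((from, to))
}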
pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool + Send>(
pub(crate) async fn get_keys_helper<F>(
services: &Services, sender_user: Option<&UserId>, device_keys_input: &BTreeMap<OwnedUserId, Vec<OwnedDeviceId>>,
allowed_signatures: F, include_display_names: bool,
) -> Result<get_keys::v3::Response> {
) -> Result<get_keys::v3::Response>
where
F: Fn(&UserId) -> bool + Send + Sync,
{
let mut master_keys = BTreeMap::new();
let mut self_signing_keys = BTreeMap::new();
let mut user_signing_keys = BTreeMap::new();
@ -274,56 +284,60 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool + Send>(
if device_ids.is_empty() {
let mut container = BTreeMap::new();
for device_id in services.users.all_device_ids(user_id) {
let device_id = device_id?;
if let Some(mut keys) = services.users.get_device_keys(user_id, &device_id)? {
let mut devices = services.users.all_device_ids(user_id).boxed();
while let Some(device_id) = devices.next().await {
if let Ok(mut keys) = services.users.get_device_keys(user_id, device_id).await {
let metadata = services
.users
.get_device_metadata(user_id, &device_id)?
.ok_or_else(|| Error::bad_database("all_device_keys contained nonexistent device."))?;
.get_device_metadata(user_id, device_id)
.await
.map_err(|_| err!(Database("all_device_keys contained nonexistent device.")))?;
add_unsigned_device_display_name(&mut keys, metadata, include_display_names)
.map_err(|_| Error::bad_database("invalid device keys in database"))?;
.map_err(|_| err!(Database("invalid device keys in database")))?;
container.insert(device_id, keys);
container.insert(device_id.to_owned(), keys);
}
}
device_keys.insert(user_id.to_owned(), container);
} else {
for device_id in device_ids {
let mut container = BTreeMap::new();
if let Some(mut keys) = services.users.get_device_keys(user_id, device_id)? {
if let Ok(mut keys) = services.users.get_device_keys(user_id, device_id).await {
let metadata = services
.users
.get_device_metadata(user_id, device_id)?
.ok_or(Error::BadRequest(
ErrorKind::InvalidParam,
"Tried to get keys for nonexistent device.",
))?;
.get_device_metadata(user_id, device_id)
.await
.map_err(|_| err!(Request(InvalidParam("Tried to get keys for nonexistent device."))))?;
add_unsigned_device_display_name(&mut keys, metadata, include_display_names)
.map_err(|_| Error::bad_database("invalid device keys in database"))?;
.map_err(|_| err!(Database("invalid device keys in database")))?;
container.insert(device_id.to_owned(), keys);
}
device_keys.insert(user_id.to_owned(), container);
}
}
if let Some(master_key) = services
if let Ok(master_key) = services
.users
.get_master_key(sender_user, user_id, &allowed_signatures)?
.get_master_key(sender_user, user_id, &allowed_signatures)
.await
{
master_keys.insert(user_id.to_owned(), master_key);
}
if let Some(self_signing_key) =
services
.users
.get_self_signing_key(sender_user, user_id, &allowed_signatures)?
if let Ok(self_signing_key) = services
.users
.get_self_signing_key(sender_user, user_id, &allowed_signatures)
.await
{
self_signing_keys.insert(user_id.to_owned(), self_signing_key);
}
if Some(user_id) == sender_user {
if let Some(user_signing_key) = services.users.get_user_signing_key(user_id)? {
if let Ok(user_signing_key) = services.users.get_user_signing_key(user_id).await {
user_signing_keys.insert(user_id.to_owned(), user_signing_key);
}
}
@ -386,23 +400,26 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool + Send>(
while let Some((server, response)) = futures.next().await {
if let Ok(Ok(response)) = response {
for (user, masterkey) in response.master_keys {
let (master_key_id, mut master_key) = services.users.parse_master_key(&user, &masterkey)?;
let (master_key_id, mut master_key) = parse_master_key(&user, &masterkey)?;
if let Some(our_master_key) =
services
.users
.get_key(&master_key_id, sender_user, &user, &allowed_signatures)?
if let Ok(our_master_key) = services
.users
.get_key(&master_key_id, sender_user, &user, &allowed_signatures)
.await
{
let (_, our_master_key) = services.users.parse_master_key(&user, &our_master_key)?;
let (_, our_master_key) = parse_master_key(&user, &our_master_key)?;
master_key.signatures.extend(our_master_key.signatures);
}
let json = serde_json::to_value(master_key).expect("to_value always works");
let raw = serde_json::from_value(json).expect("Raw::from_value always works");
services.users.add_cross_signing_keys(
&user, &raw, &None, &None,
false, /* Don't notify. A notification would trigger another key request resulting in an
* endless loop */
)?;
services
.users
.add_cross_signing_keys(
&user, &raw, &None, &None,
false, /* Don't notify. A notification would trigger another key request resulting in an
* endless loop */
)
.await?;
master_keys.insert(user.clone(), raw);
}
@ -465,9 +482,10 @@ pub(crate) async fn claim_keys_helper(
let mut container = BTreeMap::new();
for (device_id, key_algorithm) in map {
if let Some(one_time_keys) = services
if let Ok(one_time_keys) = services
.users
.take_one_time_key(user_id, device_id, key_algorithm)?
.take_one_time_key(user_id, device_id, key_algorithm)
.await
{
let mut c = BTreeMap::new();
c.insert(one_time_keys.0, one_time_keys.1);

File diff suppressed because it is too large


@ -1,21 +1,26 @@
use std::collections::{BTreeMap, HashSet};
use axum::extract::State;
use conduit::PduCount;
use conduit::{
err,
utils::{IterStream, ReadyExt},
Err, PduCount,
};
use futures::{FutureExt, StreamExt};
use ruma::{
api::client::{
error::ErrorKind,
filter::{RoomEventFilter, UrlFilter},
message::{get_message_events, send_message_event},
},
events::{MessageLikeEventType, StateEventType},
RoomId, UserId,
events::{MessageLikeEventType, StateEventType, TimelineEventType::*},
UserId,
};
use serde_json::{from_str, Value};
use service::rooms::timeline::PdusIterItem;
use crate::{
service::{pdu::PduBuilder, Services},
utils, Error, PduEvent, Result, Ruma,
utils, Result, Ruma,
};
/// # `PUT /_matrix/client/v3/rooms/{roomId}/send/{eventType}/{txnId}`
@ -30,65 +35,60 @@ use crate::{
pub(crate) async fn send_message_event_route(
State(services): State<crate::State>, body: Ruma<send_message_event::v3::Request>,
) -> Result<send_message_event::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_user = body.sender_user.as_deref().expect("user is authenticated");
let sender_device = body.sender_device.as_deref();
let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
let appservice_info = body.appservice_info.as_ref();
// Forbid m.room.encrypted if encryption is disabled
if MessageLikeEventType::RoomEncrypted == body.event_type && !services.globals.allow_encryption() {
return Err(Error::BadRequest(ErrorKind::forbidden(), "Encryption has been disabled"));
return Err!(Request(Forbidden("Encryption has been disabled")));
}
if body.event_type == MessageLikeEventType::CallInvite && services.rooms.directory.is_public_room(&body.room_id)? {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"Room call invites are not allowed in public rooms",
));
let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
if body.event_type == MessageLikeEventType::CallInvite
&& services.rooms.directory.is_public_room(&body.room_id).await
{
return Err!(Request(Forbidden("Room call invites are not allowed in public rooms")));
}
// Check if this is a new transaction id
if let Some(response) = services
if let Ok(response) = services
.transaction_ids
.existing_txnid(sender_user, sender_device, &body.txn_id)?
.existing_txnid(sender_user, sender_device, &body.txn_id)
.await
{
// The client might have sent a txnid of the /sendToDevice endpoint
// This txnid has no response associated with it
if response.is_empty() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Tried to use txn id already used for an incompatible endpoint.",
));
return Err!(Request(InvalidParam(
"Tried to use txn id already used for an incompatible endpoint."
)));
}
let event_id = utils::string_from_bytes(&response)
.map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?
.try_into()
.map_err(|_| Error::bad_database("Invalid event id in txnid data."))?;
return Ok(send_message_event::v3::Response {
event_id,
event_id: utils::string_from_bytes(&response)
.map(TryInto::try_into)
.map_err(|e| err!(Database("Invalid event_id in txnid data: {e:?}")))??,
});
}
let mut unsigned = BTreeMap::new();
unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into());
let content =
from_str(body.body.body.json().get()).map_err(|e| err!(Request(BadJson("Invalid JSON body: {e}"))))?;
let event_id = services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: body.event_type.to_string().into(),
content: from_str(body.body.body.json().get())
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?,
event_type: body.event_type.clone().into(),
content,
unsigned: Some(unsigned),
state_key: None,
redacts: None,
timestamp: if body.appservice_info.is_some() {
body.timestamp
} else {
None
},
timestamp: appservice_info.and(body.timestamp),
..Default::default()
},
sender_user,
&body.room_id,
@ -98,11 +98,13 @@ pub(crate) async fn send_message_event_route(
services
.transaction_ids
.add_txnid(sender_user, sender_device, &body.txn_id, event_id.as_bytes())?;
.add_txnid(sender_user, sender_device, &body.txn_id, event_id.as_bytes());
drop(state_lock);
Ok(send_message_event::v3::Response::new((*event_id).to_owned()))
Ok(send_message_event::v3::Response {
event_id: event_id.into(),
})
}
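
The transaction-id replay check now treats a stored id as the `Ok` path and decodes it fallibly rather than via `expect`. A compact sketch of the dedup logic over a hypothetical in-memory store, with empty bytes marking a txn id burned by an incompatible endpoint, as the comment above describes:

use std::collections::HashMap;

// Hypothetical txn-id store: txn id -> raw bytes of the stored event id.
fn check_txn_id(store: &HashMap<String, Vec<u8>>, txn_id: &str) -> Result<Option<String>, String> {
    match store.get(txn_id) {
        // New transaction: proceed to build and append the PDU.
        None => Ok(None),
        // Txn id was used by an incompatible endpoint (e.g. /sendToDevice).
        Some(bytes) if bytes.is_empty() => {
            Err("Tried to use txn id already used for an incompatible endpoint.".to_owned())
        },
        // Replay: decode the stored event id fallibly and return it.
        Some(bytes) => String::from_utf8(bytes.clone())
            .map(Some)
            .map_err(|e| format!("Invalid event_id in txnid data: {e:?}")),
    }
}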
/// # `GET /_matrix/client/r0/rooms/{roomId}/messages`
@ -117,8 +119,12 @@ pub(crate) async fn get_message_events_route(
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
let from = match body.from.clone() {
Some(from) => PduCount::try_from_string(&from)?,
let room_id = &body.room_id;
let filter = &body.filter;
let limit = usize::try_from(body.limit).unwrap_or(10).min(100);
let from = match body.from.as_ref() {
Some(from) => PduCount::try_from_string(from)?,
None => match body.dir {
ruma::api::Direction::Forward => PduCount::min(),
ruma::api::Direction::Backward => PduCount::max(),
@ -133,30 +139,25 @@ pub(crate) async fn get_message_events_route(
services
.rooms
.lazy_loading
.lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)
.await?;
let limit = usize::try_from(body.limit).unwrap_or(10).min(100);
let next_token;
.lazy_load_confirm_delivery(sender_user, sender_device, room_id, from);
let mut resp = get_message_events::v3::Response::new();
let mut lazy_loaded = HashSet::new();
let next_token;
match body.dir {
ruma::api::Direction::Forward => {
let events_after: Vec<_> = services
let events_after: Vec<PdusIterItem> = services
.rooms
.timeline
.pdus_after(sender_user, &body.room_id, from)?
.filter_map(Result::ok) // Filter out buggy events
.filter(|(_, pdu)| { contains_url_filter(pdu, &body.filter) && visibility_filter(&services, pdu, sender_user, &body.room_id)
})
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
.pdus_after(sender_user, room_id, from)
.await?
.ready_filter_map(|item| contains_url_filter(item, filter))
.filter_map(|item| visibility_filter(&services, item, sender_user))
.ready_take_while(|(count, _)| Some(*count) != to) // Stop at `to`
.take(limit)
.collect();
.collect()
.boxed()
.await;
for (_, event) in &events_after {
/* TODO: Remove the not "element_hacks" check when these are resolved:
@ -164,24 +165,48 @@ pub(crate) async fn get_message_events_route(
* https://github.com/vector-im/element-web/issues/21034
*/
if !cfg!(feature = "element_hacks")
&& !services.rooms.lazy_loading.lazy_load_was_sent_before(
sender_user,
sender_device,
&body.room_id,
&event.sender,
)? {
&& !services
.rooms
.lazy_loading
.lazy_load_was_sent_before(sender_user, sender_device, room_id, &event.sender)
.await
{
lazy_loaded.insert(event.sender.clone());
}
lazy_loaded.insert(event.sender.clone());
if cfg!(feature = "element_hacks") {
lazy_loaded.insert(event.sender.clone());
}
}
next_token = events_after.last().map(|(count, _)| count).copied();
let events_after: Vec<_> = events_after
.into_iter()
.map(|(_, pdu)| pdu.to_room_event())
.collect();
.stream()
.filter_map(|(_, pdu)| async move {
// list of safe and common non-state events to ignore
if matches!(
&pdu.kind,
RoomMessage
| Sticker | CallInvite
| CallNotify | RoomEncrypted
| Image | File | Audio
| Voice | Video | UnstablePollStart
| PollStart | KeyVerificationStart
| Reaction | Emote | Location
) && services
.users
.user_is_ignored(&pdu.sender, sender_user)
.await
{
return None;
}
Some(pdu.to_room_event())
})
.collect()
.await;
resp.start = from.stringify();
resp.end = next_token.map(|count| count.stringify());
@ -191,17 +216,43 @@ pub(crate) async fn get_message_events_route(
services
.rooms
.timeline
.backfill_if_required(&body.room_id, from)
.backfill_if_required(room_id, from)
.boxed()
.await?;
let events_before: Vec<_> = services
let events_before: Vec<PdusIterItem> = services
.rooms
.timeline
.pdus_until(sender_user, &body.room_id, from)?
.filter_map(Result::ok) // Filter out buggy events
.filter(|(_, pdu)| {contains_url_filter(pdu, &body.filter) && visibility_filter(&services, pdu, sender_user, &body.room_id)})
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
.pdus_until(sender_user, room_id, from)
.await?
.ready_filter_map(|item| contains_url_filter(item, filter))
.filter_map(|(count, pdu)| async move {
// list of safe and common non-state events to ignore
if matches!(
&pdu.kind,
RoomMessage
| Sticker | CallInvite
| CallNotify | RoomEncrypted
| Image | File | Audio
| Voice | Video | UnstablePollStart
| PollStart | KeyVerificationStart
| Reaction | Emote | Location
) && services
.users
.user_is_ignored(&pdu.sender, sender_user)
.await
{
return None;
}
Some((count, pdu))
})
.filter_map(|item| visibility_filter(&services, item, sender_user))
.ready_take_while(|(count, _)| Some(*count) != to) // Stop at `to`
.take(limit)
.collect();
.collect()
.boxed()
.await;
for (_, event) in &events_before {
/* TODO: Remove the not "element_hacks" check when these are resolved:
@ -209,16 +260,18 @@ pub(crate) async fn get_message_events_route(
* https://github.com/vector-im/element-web/issues/21034
*/
if !cfg!(feature = "element_hacks")
&& !services.rooms.lazy_loading.lazy_load_was_sent_before(
sender_user,
sender_device,
&body.room_id,
&event.sender,
)? {
&& !services
.rooms
.lazy_loading
.lazy_load_was_sent_before(sender_user, sender_device, room_id, &event.sender)
.await
{
lazy_loaded.insert(event.sender.clone());
}
lazy_loaded.insert(event.sender.clone());
if cfg!(feature = "element_hacks") {
lazy_loaded.insert(event.sender.clone());
}
}
next_token = events_before.last().map(|(count, _)| count).copied();
@ -234,49 +287,61 @@ pub(crate) async fn get_message_events_route(
},
}
resp.state = Vec::new();
for ll_id in &lazy_loaded {
if let Some(member_event) =
resp.state = lazy_loaded
.iter()
.stream()
.filter_map(|ll_user_id| async move {
services
.rooms
.state_accessor
.room_state_get(&body.room_id, &StateEventType::RoomMember, ll_id.as_str())?
{
resp.state.push(member_event.to_state_event());
}
}
.room_state_get(room_id, &StateEventType::RoomMember, ll_user_id.as_str())
.await
.map(|member_event| member_event.to_state_event())
.ok()
})
.collect()
.await;
// remove the feature check when we are sure clients like element can handle it
if !cfg!(feature = "element_hacks") {
if let Some(next_token) = next_token {
services
.rooms
.lazy_loading
.lazy_load_mark_sent(sender_user, sender_device, &body.room_id, lazy_loaded, next_token)
.await;
services.rooms.lazy_loading.lazy_load_mark_sent(
sender_user,
sender_device,
room_id,
lazy_loaded,
next_token,
);
}
}
Ok(resp)
}
fn visibility_filter(services: &Services, pdu: &PduEvent, user_id: &UserId, room_id: &RoomId) -> bool {
async fn visibility_filter(services: &Services, item: PdusIterItem, user_id: &UserId) -> Option<PdusIterItem> {
let (_, pdu) = &item;
services
.rooms
.state_accessor
.user_can_see_event(user_id, room_id, &pdu.event_id)
.unwrap_or(false)
.user_can_see_event(user_id, &pdu.room_id, &pdu.event_id)
.await
.then_some(item)
}
fn contains_url_filter(pdu: &PduEvent, filter: &RoomEventFilter) -> bool {
fn contains_url_filter(item: PdusIterItem, filter: &RoomEventFilter) -> Option<PdusIterItem> {
let (_, pdu) = &item;
if filter.url_filter.is_none() {
return true;
return Some(item);
}
let content: Value = from_str(pdu.content.get()).unwrap();
match filter.url_filter {
let res = match filter.url_filter {
Some(UrlFilter::EventsWithoutUrl) => !content["url"].is_string(),
Some(UrlFilter::EventsWithUrl) => content["url"].is_string(),
None => true,
}
};
res.then_some(item)
}
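
Both predicates now return `Option<PdusIterItem>` so they slot directly into `filter_map`/`ready_filter_map` instead of a boolean `filter`. The URL filter in that shape, with a simplified item type; note the `unwrap` on the stored content is carried over from the old code:

use serde_json::Value;

type Item = (u64, Value); // simplified (count, pdu-content) pair

// Option-returning predicate: `None` drops the item, `Some` keeps it —
// exactly the shape `filter_map` expects.
fn contains_url_filter(item: Item, events_with_url: Option<bool>) -> Option<Item> {
    let Some(want_url) = events_with_url else {
        return Some(item); // no URL filter configured
    };
    let has_url = item.1["url"].is_string();
    (has_url == want_url).then_some(item)
}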


@ -52,7 +52,7 @@ pub(super) use keys::*;
pub(super) use media::*;
pub(super) use media_legacy::*;
pub(super) use membership::*;
pub use membership::{join_room_by_id_helper, leave_all_rooms, leave_room, validate_and_add_event_id};
pub use membership::{join_room_by_id_helper, leave_all_rooms, leave_room};
pub(super) use message::*;
pub(super) use openid::*;
pub(super) use presence::*;


@ -28,7 +28,8 @@ pub(crate) async fn set_presence_route(
services
.presence
.set_presence(sender_user, &body.presence, None, None, body.status_msg.clone())?;
.set_presence(sender_user, &body.presence, None, None, body.status_msg.clone())
.await?;
Ok(set_presence::v3::Response {})
}
@ -49,14 +50,15 @@ pub(crate) async fn get_presence_route(
let mut presence_event = None;
for _room_id in services
let has_shared_rooms = services
.rooms
.user
.get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])?
{
if let Some(presence) = services.presence.get_presence(&body.user_id)? {
.has_shared_rooms(sender_user, &body.user_id)
.await;
if has_shared_rooms {
if let Ok(presence) = services.presence.get_presence(&body.user_id).await {
presence_event = Some(presence);
break;
}
}


@ -1,5 +1,10 @@
use axum::extract::State;
use conduit::{pdu::PduBuilder, warn, Err, Error, Result};
use conduit::{
pdu::PduBuilder,
utils::{stream::TryIgnore, IterStream},
warn, Err, Error, Result,
};
use futures::{StreamExt, TryStreamExt};
use ruma::{
api::{
client::{
@ -8,11 +13,10 @@ use ruma::{
},
federation,
},
events::{room::member::RoomMemberEventContent, StateEventType, TimelineEventType},
events::{room::member::RoomMemberEventContent, StateEventType},
presence::PresenceState,
OwnedMxcUri, OwnedRoomId, UserId,
};
use serde_json::value::to_raw_value;
use service::Services;
use crate::Ruma;
@ -35,16 +39,18 @@ pub(crate) async fn set_displayname_route(
.rooms
.state_cache
.rooms_joined(&body.user_id)
.filter_map(Result::ok)
.collect();
.map(ToOwned::to_owned)
.collect()
.await;
update_displayname(&services, &body.user_id, body.displayname.clone(), all_joined_rooms).await?;
update_displayname(&services, &body.user_id, body.displayname.clone(), &all_joined_rooms).await?;
if services.globals.allow_local_presence() {
// Presence update
services
.presence
.ping_presence(&body.user_id, &PresenceState::Online)?;
.ping_presence(&body.user_id, &PresenceState::Online)
.await?;
}
Ok(set_display_name::v3::Response {})
@ -72,22 +78,19 @@ pub(crate) async fn get_displayname_route(
)
.await
{
if !services.users.exists(&body.user_id)? {
if !services.users.exists(&body.user_id).await {
services.users.create(&body.user_id, None)?;
}
services
.users
.set_displayname(&body.user_id, response.displayname.clone())
.await?;
.set_displayname(&body.user_id, response.displayname.clone());
services
.users
.set_avatar_url(&body.user_id, response.avatar_url.clone())
.await?;
.set_avatar_url(&body.user_id, response.avatar_url.clone());
services
.users
.set_blurhash(&body.user_id, response.blurhash.clone())
.await?;
.set_blurhash(&body.user_id, response.blurhash.clone());
return Ok(get_display_name::v3::Response {
displayname: response.displayname,
@ -95,14 +98,14 @@ pub(crate) async fn get_displayname_route(
}
}
if !services.users.exists(&body.user_id)? {
if !services.users.exists(&body.user_id).await {
// Return 404 if this user doesn't exist and we couldn't fetch it over
// federation
return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found."));
}
Ok(get_display_name::v3::Response {
displayname: services.users.displayname(&body.user_id)?,
displayname: services.users.displayname(&body.user_id).await.ok(),
})
}
@ -124,15 +127,16 @@ pub(crate) async fn set_avatar_url_route(
.rooms
.state_cache
.rooms_joined(&body.user_id)
.filter_map(Result::ok)
.collect();
.map(ToOwned::to_owned)
.collect()
.await;
update_avatar_url(
&services,
&body.user_id,
body.avatar_url.clone(),
body.blurhash.clone(),
all_joined_rooms,
&all_joined_rooms,
)
.await?;
@ -140,7 +144,9 @@ pub(crate) async fn set_avatar_url_route(
// Presence update
services
.presence
.ping_presence(&body.user_id, &PresenceState::Online)?;
.ping_presence(&body.user_id, &PresenceState::Online)
.await
.ok();
}
Ok(set_avatar_url::v3::Response {})
@ -168,22 +174,21 @@ pub(crate) async fn get_avatar_url_route(
)
.await
{
if !services.users.exists(&body.user_id)? {
if !services.users.exists(&body.user_id).await {
services.users.create(&body.user_id, None)?;
}
services
.users
.set_displayname(&body.user_id, response.displayname.clone())
.await?;
.set_displayname(&body.user_id, response.displayname.clone());
services
.users
.set_avatar_url(&body.user_id, response.avatar_url.clone())
.await?;
.set_avatar_url(&body.user_id, response.avatar_url.clone());
services
.users
.set_blurhash(&body.user_id, response.blurhash.clone())
.await?;
.set_blurhash(&body.user_id, response.blurhash.clone());
return Ok(get_avatar_url::v3::Response {
avatar_url: response.avatar_url,
@ -192,15 +197,15 @@ pub(crate) async fn get_avatar_url_route(
}
}
if !services.users.exists(&body.user_id)? {
if !services.users.exists(&body.user_id).await {
// Return 404 if this user doesn't exist and we couldn't fetch it over
// federation
return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found."));
}
Ok(get_avatar_url::v3::Response {
avatar_url: services.users.avatar_url(&body.user_id)?,
blurhash: services.users.blurhash(&body.user_id)?,
avatar_url: services.users.avatar_url(&body.user_id).await.ok(),
blurhash: services.users.blurhash(&body.user_id).await.ok(),
})
}
@ -226,31 +231,30 @@ pub(crate) async fn get_profile_route(
)
.await
{
if !services.users.exists(&body.user_id)? {
if !services.users.exists(&body.user_id).await {
services.users.create(&body.user_id, None)?;
}
services
.users
.set_displayname(&body.user_id, response.displayname.clone())
.await?;
.set_displayname(&body.user_id, response.displayname.clone());
services
.users
.set_avatar_url(&body.user_id, response.avatar_url.clone())
.await?;
.set_avatar_url(&body.user_id, response.avatar_url.clone());
services
.users
.set_blurhash(&body.user_id, response.blurhash.clone())
.await?;
.set_blurhash(&body.user_id, response.blurhash.clone());
services
.users
.set_timezone(&body.user_id, response.tz.clone())
.await?;
.set_timezone(&body.user_id, response.tz.clone());
for (profile_key, profile_key_value) in &response.custom_profile_fields {
services
.users
.set_profile_key(&body.user_id, profile_key, Some(profile_key_value.clone()))?;
.set_profile_key(&body.user_id, profile_key, Some(profile_key_value.clone()));
}
return Ok(get_profile::v3::Response {
@ -263,134 +267,108 @@ pub(crate) async fn get_profile_route(
}
}
if !services.users.exists(&body.user_id)? {
if !services.users.exists(&body.user_id).await {
// Return 404 if this user doesn't exist and we couldn't fetch it over
// federation
return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found."));
}
Ok(get_profile::v3::Response {
avatar_url: services.users.avatar_url(&body.user_id)?,
blurhash: services.users.blurhash(&body.user_id)?,
displayname: services.users.displayname(&body.user_id)?,
tz: services.users.timezone(&body.user_id)?,
avatar_url: services.users.avatar_url(&body.user_id).await.ok(),
blurhash: services.users.blurhash(&body.user_id).await.ok(),
displayname: services.users.displayname(&body.user_id).await.ok(),
tz: services.users.timezone(&body.user_id).await.ok(),
custom_profile_fields: services
.users
.all_profile_keys(&body.user_id)
.filter_map(Result::ok)
.collect(),
.collect()
.await,
})
}
pub async fn update_displayname(
services: &Services, user_id: &UserId, displayname: Option<String>, all_joined_rooms: Vec<OwnedRoomId>,
services: &Services, user_id: &UserId, displayname: Option<String>, all_joined_rooms: &[OwnedRoomId],
) -> Result<()> {
let current_display_name = services.users.displayname(user_id).unwrap_or_default();
let current_display_name = services.users.displayname(user_id).await.ok();
if displayname == current_display_name {
return Ok(());
}
services
.users
.set_displayname(user_id, displayname.clone())
.await?;
services.users.set_displayname(user_id, displayname.clone());
// Send a new join membership event into all joined rooms
let all_joined_rooms: Vec<_> = all_joined_rooms
.iter()
.map(|room_id| {
Ok::<_, Error>((
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
displayname: displayname.clone(),
join_authorized_via_users_server: None,
..serde_json::from_str(
services
.rooms
.state_accessor
.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())?
.ok_or_else(|| {
Error::bad_database("Tried to send display name update for user not in the room.")
})?
.content
.get(),
)
.map_err(|_| Error::bad_database("Database contains invalid PDU."))?
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(user_id.to_string()),
redacts: None,
timestamp: None,
},
room_id,
))
})
.filter_map(Result::ok)
.collect();
let mut joined_rooms = Vec::new();
for room_id in all_joined_rooms {
let Ok(content) = services
.rooms
.state_accessor
.room_state_get_content(room_id, &StateEventType::RoomMember, user_id.as_str())
.await
else {
continue;
};
update_all_rooms(services, all_joined_rooms, user_id).await;
let pdu = PduBuilder::state(
user_id.to_string(),
&RoomMemberEventContent {
displayname: displayname.clone(),
join_authorized_via_users_server: None,
..content
},
);
joined_rooms.push((pdu, room_id));
}
update_all_rooms(services, joined_rooms, user_id).await;
Ok(())
}
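
`update_displayname` is the first of many call sites switching from a fully spelled-out `PduBuilder { .. }` literal to a `PduBuilder::state(state_key, &content)` constructor. A sketch of what such a helper plausibly looks like; the field set here is illustrative, not the crate's actual definition:

```rust
use serde::Serialize;
use serde_json::value::{to_raw_value, RawValue};

// Illustrative subset of the builder; the real one also carries the event
// type, unsigned data, redacts, and timestamp.
#[derive(Default)]
struct PduBuilder {
    content: Option<Box<RawValue>>,
    state_key: Option<String>,
}

impl PduBuilder {
    // Serialize the typed content once, set the state key, default the rest.
    fn state<T: Serialize>(state_key: String, content: &T) -> Self {
        Self {
            content: Some(to_raw_value(content).expect("event content serializes")),
            state_key: Some(state_key),
        }
    }
}

#[derive(Serialize)]
struct RoomTopicContent {
    topic: String,
}

fn main() {
    let pdu = PduBuilder::state(String::new(), &RoomTopicContent { topic: "hi".into() });
    assert_eq!(pdu.state_key.as_deref(), Some(""));
    assert!(pdu.content.is_some());
}
```

The win is that every call site stops repeating `unsigned: None, redacts: None, timestamp: None` and the `to_raw_value(..).expect(..)` boilerplate.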
pub async fn update_avatar_url(
services: &Services, user_id: &UserId, avatar_url: Option<OwnedMxcUri>, blurhash: Option<String>,
all_joined_rooms: Vec<OwnedRoomId>,
all_joined_rooms: &[OwnedRoomId],
) -> Result<()> {
let current_avatar_url = services.users.avatar_url(user_id).unwrap_or_default();
let current_blurhash = services.users.blurhash(user_id).unwrap_or_default();
let current_avatar_url = services.users.avatar_url(user_id).await.ok();
let current_blurhash = services.users.blurhash(user_id).await.ok();
if current_avatar_url == avatar_url && current_blurhash == blurhash {
return Ok(());
}
services
.users
.set_avatar_url(user_id, avatar_url.clone())
.await?;
services
.users
.set_blurhash(user_id, blurhash.clone())
.await?;
services.users.set_avatar_url(user_id, avatar_url.clone());
services.users.set_blurhash(user_id, blurhash.clone());
// Send a new join membership event into all joined rooms
let avatar_url = &avatar_url;
let blurhash = &blurhash;
let all_joined_rooms: Vec<_> = all_joined_rooms
.iter()
.map(|room_id| {
Ok::<_, Error>((
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
avatar_url: avatar_url.clone(),
blurhash: blurhash.clone(),
join_authorized_via_users_server: None,
..serde_json::from_str(
services
.rooms
.state_accessor
.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())?
.ok_or_else(|| {
Error::bad_database("Tried to send avatar URL update for user not in the room.")
})?
.content
.get(),
)
.map_err(|_| Error::bad_database("Database contains invalid PDU."))?
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(user_id.to_string()),
redacts: None,
timestamp: None,
.try_stream()
.and_then(|room_id: &OwnedRoomId| async move {
let content = services
.rooms
.state_accessor
.room_state_get_content(room_id, &StateEventType::RoomMember, user_id.as_str())
.await?;
let pdu = PduBuilder::state(
user_id.to_string(),
&RoomMemberEventContent {
avatar_url: avatar_url.clone(),
blurhash: blurhash.clone(),
join_authorized_via_users_server: None,
..content
},
room_id,
))
);
Ok((pdu, room_id))
})
.filter_map(Result::ok)
.collect();
.ignore_err()
.collect()
.await;
update_all_rooms(services, all_joined_rooms, user_id).await;

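`update_avatar_url` takes the stream route instead of the explicit loop used in `update_displayname`: the room list is lifted into a `TryStream`, the fallible per-room lookup runs in `and_then`, and failures are dropped with `ignore_err()`. The same shape with stock `futures` combinators, where `filter_map(.. r.ok() ..)` stands in for the crate's `ignore_err()`:

```rust
use futures::{stream, StreamExt, TryStreamExt};

#[tokio::main]
async fn main() {
    let room_ids = ["!a:example.org", "!b:example.org", "!c:example.org"];

    let pdus: Vec<(String, &str)> = stream::iter(room_ids.iter().copied().map(Ok::<_, ()>))
        .and_then(|room_id| async move {
            // Stand-in for room_state_get_content: fails when the user has no
            // member event in the room, which should skip the room, not abort.
            if room_id == "!b:example.org" {
                Err(())
            } else {
                Ok((format!("membership pdu for {room_id}"), room_id))
            }
        })
        .filter_map(|r| async move { r.ok() }) // the crate's `.ignore_err()`
        .collect()
        .await;

    assert_eq!(pdus.len(), 2);
}
```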

@ -5,7 +5,7 @@ use ruma::{
error::ErrorKind,
push::{
delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled, get_pushrules_all,
set_pusher, set_pushrule, set_pushrule_actions, set_pushrule_enabled, RuleScope,
set_pusher, set_pushrule, set_pushrule_actions, set_pushrule_enabled,
},
},
events::{
@ -13,7 +13,7 @@ use ruma::{
GlobalAccountDataEventType,
},
push::{InsertPushRuleError, RemovePushRuleError, Ruleset},
CanonicalJsonObject,
CanonicalJsonObject, CanonicalJsonValue,
};
use service::Services;
@ -27,49 +27,30 @@ pub(crate) async fn get_pushrules_all_route(
) -> Result<get_pushrules_all::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let global_ruleset: Ruleset;
let Ok(event) =
services
.account_data
.get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())
let Some(content_value) = services
.account_data
.get_global::<CanonicalJsonObject>(sender_user, GlobalAccountDataEventType::PushRules)
.await
.ok()
.and_then(|event| event.get("content").cloned())
.filter(CanonicalJsonValue::is_object)
else {
// push rules event doesn't exist, create it and return default
return recreate_push_rules_and_return(&services, sender_user);
};
if let Some(event) = event {
let value = serde_json::from_str::<CanonicalJsonObject>(event.get())
.map_err(|e| err!(Database(warn!("Invalid push rules account data event in database: {e}"))))?;
let Some(content_value) = value.get("content") else {
// user somehow has a push rule event with no content key, recreate it and
// return server default silently
return recreate_push_rules_and_return(&services, sender_user);
};
if content_value.to_string().is_empty() {
// user somehow has a push rule event with empty content, recreate it and return
// server default silently
return recreate_push_rules_and_return(&services, sender_user);
}
let account_data_content = serde_json::from_value::<PushRulesEventContent>(content_value.clone().into())
.map_err(|e| err!(Database(warn!("Invalid push rules account data event in database: {e}"))))?;
global_ruleset = account_data_content.global;
} else {
// user somehow has non-existent push rule event. recreate it and return server
// default silently
return recreate_push_rules_and_return(&services, sender_user);
}
return recreate_push_rules_and_return(&services, sender_user).await;
};
let account_data_content = serde_json::from_value::<PushRulesEventContent>(content_value.into())
.map_err(|e| err!(Database(warn!("Invalid push rules account data event in database: {e}"))))?;
let global_ruleset: Ruleset = account_data_content.global;
Ok(get_pushrules_all::v3::Response {
global: global_ruleset,
})
}
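
All of the push-rules routes below repeat the same change: the untyped `account_data.get(..)` plus a hand-rolled `serde_json::from_str` becomes one typed `get_global::<T>()` call. A sketch of the accessor shape being assumed, with a string-keyed map standing in for the database:

```rust
use serde::de::DeserializeOwned;
use std::collections::HashMap;

struct AccountData {
    // (user_id, event_type) -> raw JSON event
    store: HashMap<(String, String), String>,
}

impl AccountData {
    // One typed accessor replaces get() + from_str() at every call site; a
    // missing event and invalid JSON both surface as Err.
    async fn get_global<T: DeserializeOwned>(&self, user: &str, event_type: &str) -> Result<T, String> {
        let raw = self
            .store
            .get(&(user.to_owned(), event_type.to_owned()))
            .ok_or_else(|| format!("{event_type} event not found"))?;
        serde_json::from_str(raw).map_err(|e| format!("invalid account data: {e}"))
    }
}

#[tokio::main]
async fn main() {
    let mut store = HashMap::new();
    store.insert(
        ("@a:example.org".to_owned(), "m.push_rules".to_owned()),
        r#"{"global": {}}"#.to_owned(),
    );
    let data = AccountData { store };
    let rules: serde_json::Value = data.get_global("@a:example.org", "m.push_rules").await.unwrap();
    assert!(rules.get("global").is_some());
}
```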
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
/// # `GET /_matrix/client/r0/pushrules/global/{kind}/{ruleId}`
///
/// Retrieves a single specified push rule for this user.
pub(crate) async fn get_pushrule_route(
@ -77,16 +58,14 @@ pub(crate) async fn get_pushrule_route(
) -> Result<get_pushrule::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let event = services
let event: PushRulesEvent = services
.account_data
.get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())?
.ok_or(Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?;
.get_global(sender_user, GlobalAccountDataEventType::PushRules)
.await
.map_err(|_| err!(Request(NotFound("PushRules event not found."))))?;
let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
.content;
let rule = account_data
let rule = event
.content
.global
.get(body.kind.clone(), &body.rule_id)
.map(Into::into);
@ -100,7 +79,7 @@ pub(crate) async fn get_pushrule_route(
}
}
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
/// # `PUT /_matrix/client/r0/pushrules/global/{kind}/{ruleId}`
///
/// Creates a single specified push rule for this user.
pub(crate) async fn set_pushrule_route(
@ -109,20 +88,11 @@ pub(crate) async fn set_pushrule_route(
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let body = body.body;
if body.scope != RuleScope::Global {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Scopes other than 'global' are not supported.",
));
}
let event = services
let mut account_data: PushRulesEvent = services
.account_data
.get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())?
.ok_or(Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?;
let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?;
.get_global(sender_user, GlobalAccountDataEventType::PushRules)
.await
.map_err(|_| err!(Request(NotFound("PushRules event not found."))))?;
if let Err(error) =
account_data
@ -155,17 +125,20 @@ pub(crate) async fn set_pushrule_route(
return Err(err);
}
services.account_data.update(
None,
sender_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(account_data).expect("to json value always works"),
)?;
services
.account_data
.update(
None,
sender_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(account_data).expect("to json value always works"),
)
.await?;
Ok(set_pushrule::v3::Response {})
}
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions`
/// # `GET /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/actions`
///
/// Gets the actions of a single specified push rule for this user.
pub(crate) async fn get_pushrule_actions_route(
@ -173,34 +146,25 @@ pub(crate) async fn get_pushrule_actions_route(
) -> Result<get_pushrule_actions::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if body.scope != RuleScope::Global {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Scopes other than 'global' are not supported.",
));
}
let event = services
let event: PushRulesEvent = services
.account_data
.get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())?
.ok_or(Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?;
.get_global(sender_user, GlobalAccountDataEventType::PushRules)
.await
.map_err(|_| err!(Request(NotFound("PushRules event not found."))))?;
let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
.content;
let global = account_data.global;
let actions = global
let actions = event
.content
.global
.get(body.kind.clone(), &body.rule_id)
.map(|rule| rule.actions().to_owned())
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Push rule not found."))?;
.ok_or(err!(Request(NotFound("Push rule not found."))))?;
Ok(get_pushrule_actions::v3::Response {
actions,
})
}
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions`
/// # `PUT /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/actions`
///
/// Sets the actions of a single specified push rule for this user.
pub(crate) async fn set_pushrule_actions_route(
@ -208,20 +172,11 @@ pub(crate) async fn set_pushrule_actions_route(
) -> Result<set_pushrule_actions::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if body.scope != RuleScope::Global {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Scopes other than 'global' are not supported.",
));
}
let event = services
let mut account_data: PushRulesEvent = services
.account_data
.get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())?
.ok_or(Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?;
let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?;
.get_global(sender_user, GlobalAccountDataEventType::PushRules)
.await
.map_err(|_| err!(Request(NotFound("PushRules event not found."))))?;
if account_data
.content
@ -232,17 +187,20 @@ pub(crate) async fn set_pushrule_actions_route(
return Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found."));
}
services.account_data.update(
None,
sender_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(account_data).expect("to json value always works"),
)?;
services
.account_data
.update(
None,
sender_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(account_data).expect("to json value always works"),
)
.await?;
Ok(set_pushrule_actions::v3::Response {})
}
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled`
/// # `GET /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/enabled`
///
/// Gets the enabled status of a single specified push rule for this user.
pub(crate) async fn get_pushrule_enabled_route(
@ -250,33 +208,25 @@ pub(crate) async fn get_pushrule_enabled_route(
) -> Result<get_pushrule_enabled::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if body.scope != RuleScope::Global {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Scopes other than 'global' are not supported.",
));
}
let event = services
let event: PushRulesEvent = services
.account_data
.get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())?
.ok_or(Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?;
.get_global(sender_user, GlobalAccountDataEventType::PushRules)
.await
.map_err(|_| err!(Request(NotFound("PushRules event not found."))))?;
let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?;
let global = account_data.content.global;
let enabled = global
let enabled = event
.content
.global
.get(body.kind.clone(), &body.rule_id)
.map(ruma::push::AnyPushRuleRef::enabled)
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Push rule not found."))?;
.ok_or(err!(Request(NotFound("Push rule not found."))))?;
Ok(get_pushrule_enabled::v3::Response {
enabled,
})
}
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled`
/// # `PUT /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/enabled`
///
/// Sets the enabled status of a single specified push rule for this user.
pub(crate) async fn set_pushrule_enabled_route(
@ -284,20 +234,11 @@ pub(crate) async fn set_pushrule_enabled_route(
) -> Result<set_pushrule_enabled::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if body.scope != RuleScope::Global {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Scopes other than 'global' are not supported.",
));
}
let event = services
let mut account_data: PushRulesEvent = services
.account_data
.get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())?
.ok_or(Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?;
let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?;
.get_global(sender_user, GlobalAccountDataEventType::PushRules)
.await
.map_err(|_| err!(Request(NotFound("PushRules event not found."))))?;
if account_data
.content
@ -308,17 +249,20 @@ pub(crate) async fn set_pushrule_enabled_route(
return Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found."));
}
services.account_data.update(
None,
sender_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(account_data).expect("to json value always works"),
)?;
services
.account_data
.update(
None,
sender_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(account_data).expect("to json value always works"),
)
.await?;
Ok(set_pushrule_enabled::v3::Response {})
}
/// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
/// # `DELETE /_matrix/client/r0/pushrules/global/{kind}/{ruleId}`
///
/// Deletes a single specified push rule for this user.
pub(crate) async fn delete_pushrule_route(
@ -326,20 +270,11 @@ pub(crate) async fn delete_pushrule_route(
) -> Result<delete_pushrule::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if body.scope != RuleScope::Global {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Scopes other than 'global' are not supported.",
));
}
let event = services
let mut account_data: PushRulesEvent = services
.account_data
.get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())?
.ok_or(Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?;
let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?;
.get_global(sender_user, GlobalAccountDataEventType::PushRules)
.await
.map_err(|_| err!(Request(NotFound("PushRules event not found."))))?;
if let Err(error) = account_data
.content
@ -357,12 +292,15 @@ pub(crate) async fn delete_pushrule_route(
return Err(err);
}
services.account_data.update(
None,
sender_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(account_data).expect("to json value always works"),
)?;
services
.account_data
.update(
None,
sender_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(account_data).expect("to json value always works"),
)
.await?;
Ok(delete_pushrule::v3::Response {})
}
@ -376,7 +314,7 @@ pub(crate) async fn get_pushers_route(
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
Ok(get_pushers::v3::Response {
pushers: services.pusher.get_pushers(sender_user)?,
pushers: services.pusher.get_pushers(sender_user).await,
})
}
@ -390,27 +328,30 @@ pub(crate) async fn set_pushers_route(
) -> Result<set_pusher::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
services.pusher.set_pusher(sender_user, &body.action)?;
services.pusher.set_pusher(sender_user, &body.action);
Ok(set_pusher::v3::Response::default())
}
/// user somehow has bad push rules, which must always exist per spec,
/// so recreate them and return the server defaults silently
fn recreate_push_rules_and_return(
async fn recreate_push_rules_and_return(
services: &Services, sender_user: &ruma::UserId,
) -> Result<get_pushrules_all::v3::Response> {
services.account_data.update(
None,
sender_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(PushRulesEvent {
content: PushRulesEventContent {
global: Ruleset::server_default(sender_user),
},
})
.expect("to json always works"),
)?;
services
.account_data
.update(
None,
sender_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(PushRulesEvent {
content: PushRulesEventContent {
global: Ruleset::server_default(sender_user),
},
})
.expect("to json always works"),
)
.await?;
Ok(get_pushrules_all::v3::Response {
global: Ruleset::server_default(sender_user),

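A second pattern running through the push file: `Error::BadRequest(ErrorKind::NotFound, "...")` becomes `err!(Request(NotFound("...")))`. A toy macro in that spirit, covering only the one shape used above (the crate's real `err!` is far more general):

```rust
#[derive(Debug, PartialEq)]
enum Error {
    Request(&'static str, String),
}

// Only handles `Request(NotFound(..))`; purely illustrative.
macro_rules! err {
    (Request(NotFound($msg:expr))) => {
        Error::Request("M_NOT_FOUND", $msg.to_owned())
    };
}

fn main() {
    let e = err!(Request(NotFound("PushRules event not found.")));
    assert_eq!(e, Error::Request("M_NOT_FOUND", "PushRules event not found.".to_owned()));
}
```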

@ -31,27 +31,32 @@ pub(crate) async fn set_read_marker_route(
event_id: fully_read.clone(),
},
};
services.account_data.update(
Some(&body.room_id),
sender_user,
RoomAccountDataEventType::FullyRead,
&serde_json::to_value(fully_read_event).expect("to json value always works"),
)?;
services
.account_data
.update(
Some(&body.room_id),
sender_user,
RoomAccountDataEventType::FullyRead,
&serde_json::to_value(fully_read_event).expect("to json value always works"),
)
.await?;
}
if body.private_read_receipt.is_some() || body.read_receipt.is_some() {
services
.rooms
.user
.reset_notification_counts(sender_user, &body.room_id)?;
.reset_notification_counts(sender_user, &body.room_id);
}
if let Some(event) = &body.private_read_receipt {
let count = services
.rooms
.timeline
.get_pdu_count(event)?
.ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Event does not exist."))?;
.get_pdu_count(event)
.await
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event does not exist."))?;
let count = match count {
PduCount::Backfilled(_) => {
return Err(Error::BadRequest(
@ -64,7 +69,7 @@ pub(crate) async fn set_read_marker_route(
services
.rooms
.read_receipt
.private_read_set(&body.room_id, sender_user, count)?;
.private_read_set(&body.room_id, sender_user, count);
}
if let Some(event) = &body.read_receipt {
@ -83,14 +88,18 @@ pub(crate) async fn set_read_marker_route(
let mut receipt_content = BTreeMap::new();
receipt_content.insert(event.to_owned(), receipts);
services.rooms.read_receipt.readreceipt_update(
sender_user,
&body.room_id,
&ruma::events::receipt::ReceiptEvent {
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
room_id: body.room_id.clone(),
},
)?;
services
.rooms
.read_receipt
.readreceipt_update(
sender_user,
&body.room_id,
&ruma::events::receipt::ReceiptEvent {
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
room_id: body.room_id.clone(),
},
)
.await;
}
Ok(set_read_marker::v3::Response {})
@ -111,7 +120,7 @@ pub(crate) async fn create_receipt_route(
services
.rooms
.user
.reset_notification_counts(sender_user, &body.room_id)?;
.reset_notification_counts(sender_user, &body.room_id);
}
match body.receipt_type {
@ -121,12 +130,15 @@ pub(crate) async fn create_receipt_route(
event_id: body.event_id.clone(),
},
};
services.account_data.update(
Some(&body.room_id),
sender_user,
RoomAccountDataEventType::FullyRead,
&serde_json::to_value(fully_read_event).expect("to json value always works"),
)?;
services
.account_data
.update(
Some(&body.room_id),
sender_user,
RoomAccountDataEventType::FullyRead,
&serde_json::to_value(fully_read_event).expect("to json value always works"),
)
.await?;
},
create_receipt::v3::ReceiptType::Read => {
let mut user_receipts = BTreeMap::new();
@ -143,21 +155,27 @@ pub(crate) async fn create_receipt_route(
let mut receipt_content = BTreeMap::new();
receipt_content.insert(body.event_id.clone(), receipts);
services.rooms.read_receipt.readreceipt_update(
sender_user,
&body.room_id,
&ruma::events::receipt::ReceiptEvent {
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
room_id: body.room_id.clone(),
},
)?;
services
.rooms
.read_receipt
.readreceipt_update(
sender_user,
&body.room_id,
&ruma::events::receipt::ReceiptEvent {
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
room_id: body.room_id.clone(),
},
)
.await;
},
create_receipt::v3::ReceiptType::ReadPrivate => {
let count = services
.rooms
.timeline
.get_pdu_count(&body.event_id)?
.ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Event does not exist."))?;
.get_pdu_count(&body.event_id)
.await
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event does not exist."))?;
let count = match count {
PduCount::Backfilled(_) => {
return Err(Error::BadRequest(
@ -170,7 +188,7 @@ pub(crate) async fn create_receipt_route(
services
.rooms
.read_receipt
.private_read_set(&body.room_id, sender_user, count)?;
.private_read_set(&body.room_id, sender_user, count);
},
_ => return Err(Error::bad_database("Unsupported receipt type")),
}
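
Both receipt paths above fetch the event's `PduCount` and then refuse `PduCount::Backfilled` before storing a private read position. The guard in isolation, assuming the two-variant count type suggested by the match arms:

```rust
// Assumed shape: Normal counts forward through the live timeline,
// Backfilled counts through history pulled in after the fact.
enum PduCount {
    Normal(u64),
    Backfilled(u64),
}

fn private_read_position(count: PduCount) -> Result<u64, &'static str> {
    match count {
        PduCount::Normal(c) => Ok(c),
        // A receipt on a backfilled event would move the marker backwards.
        PduCount::Backfilled(_) => Err("read receipt is in backfilled timeline sections"),
    }
}

fn main() {
    assert_eq!(private_read_position(PduCount::Normal(7)), Ok(7));
    assert!(private_read_position(PduCount::Backfilled(3)).is_err());
}
```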


@ -1,9 +1,5 @@
use axum::extract::State;
use ruma::{
api::client::redact::redact_event,
events::{room::redaction::RoomRedactionEventContent, TimelineEventType},
};
use serde_json::value::to_raw_value;
use ruma::{api::client::redact::redact_event, events::room::redaction::RoomRedactionEventContent};
use crate::{service::pdu::PduBuilder, Result, Ruma};
@ -25,16 +21,11 @@ pub(crate) async fn redact_event_route(
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomRedaction,
content: to_raw_value(&RoomRedactionEventContent {
redacts: Some(body.event_id.clone().into()),
..PduBuilder::timeline(&RoomRedactionEventContent {
redacts: Some(body.event_id.clone()),
reason: body.reason.clone(),
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: None,
redacts: Some(body.event_id.into()),
timestamp: None,
},
sender_user,
&body.room_id,
@ -44,8 +35,7 @@ pub(crate) async fn redact_event_route(
drop(state_lock);
let event_id = (*event_id).to_owned();
Ok(redact_event::v3::Response {
event_id,
event_id: event_id.into(),
})
}


@ -9,20 +9,24 @@ use crate::{Result, Ruma};
pub(crate) async fn get_relating_events_with_rel_type_and_event_type_route(
State(services): State<crate::State>, body: Ruma<get_relating_events_with_rel_type_and_event_type::v1::Request>,
) -> Result<get_relating_events_with_rel_type_and_event_type::v1::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_user = body.sender_user.as_deref().expect("user is authenticated");
let res = services.rooms.pdu_metadata.paginate_relations_with_filter(
sender_user,
&body.room_id,
&body.event_id,
&Some(body.event_type.clone()),
&Some(body.rel_type.clone()),
&body.from,
&body.to,
&body.limit,
body.recurse,
body.dir,
)?;
let res = services
.rooms
.pdu_metadata
.paginate_relations_with_filter(
sender_user,
&body.room_id,
&body.event_id,
body.event_type.clone().into(),
body.rel_type.clone().into(),
body.from.as_ref(),
body.to.as_ref(),
body.limit,
body.recurse,
body.dir,
)
.await?;
Ok(get_relating_events_with_rel_type_and_event_type::v1::Response {
chunk: res.chunk,
@ -36,20 +40,24 @@ pub(crate) async fn get_relating_events_with_rel_type_and_event_type_route(
pub(crate) async fn get_relating_events_with_rel_type_route(
State(services): State<crate::State>, body: Ruma<get_relating_events_with_rel_type::v1::Request>,
) -> Result<get_relating_events_with_rel_type::v1::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_user = body.sender_user.as_deref().expect("user is authenticated");
let res = services.rooms.pdu_metadata.paginate_relations_with_filter(
sender_user,
&body.room_id,
&body.event_id,
&None,
&Some(body.rel_type.clone()),
&body.from,
&body.to,
&body.limit,
body.recurse,
body.dir,
)?;
let res = services
.rooms
.pdu_metadata
.paginate_relations_with_filter(
sender_user,
&body.room_id,
&body.event_id,
None,
body.rel_type.clone().into(),
body.from.as_ref(),
body.to.as_ref(),
body.limit,
body.recurse,
body.dir,
)
.await?;
Ok(get_relating_events_with_rel_type::v1::Response {
chunk: res.chunk,
@ -63,18 +71,22 @@ pub(crate) async fn get_relating_events_with_rel_type_route(
pub(crate) async fn get_relating_events_route(
State(services): State<crate::State>, body: Ruma<get_relating_events::v1::Request>,
) -> Result<get_relating_events::v1::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_user = body.sender_user.as_deref().expect("user is authenticated");
services.rooms.pdu_metadata.paginate_relations_with_filter(
sender_user,
&body.room_id,
&body.event_id,
&None,
&None,
&body.from,
&body.to,
&body.limit,
body.recurse,
body.dir,
)
services
.rooms
.pdu_metadata
.paginate_relations_with_filter(
sender_user,
&body.room_id,
&body.event_id,
None,
None,
body.from.as_ref(),
body.to.as_ref(),
body.limit,
body.recurse,
body.dir,
)
.await
}
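
The `paginate_relations_with_filter` signature change is worth noting: `&Option<T>` parameters become plain `Option<T>`/`Option<&T>`, so callers write `body.rel_type.clone().into()` and `body.from.as_ref()` instead of `&Some(..)` and `&None`. A small illustration of why `Option<&T>` is the friendlier parameter type:

```rust
// Option<&T> lets callers pass borrowed, owned-then-wrapped, or absent values
// without materializing a &Option<T> at every call site.
fn paginate(from: Option<&str>, limit: Option<u32>) -> String {
    format!("from={from:?} limit={}", limit.unwrap_or(10))
}

fn main() {
    let from = Some(String::from("t42"));
    println!("{}", paginate(from.as_deref(), 20u32.into())); // borrow + wrap
    println!("{}", paginate(None, None));                    // nothing to build
}
```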


@ -1,9 +1,14 @@
use std::time::Duration;
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduit::{utils::ReadyExt, Err};
use rand::Rng;
use ruma::{
api::client::{error::ErrorKind, room::report_content},
api::client::{
error::ErrorKind,
room::{report_content, report_room},
},
events::room::message,
int, EventId, RoomId, UserId,
};
@ -13,35 +18,85 @@ use tracing::info;
use crate::{
debug_info,
service::{pdu::PduEvent, Services},
utils::HtmlEscape,
Error, Result, Ruma,
};
/// # `POST /_matrix/client/v3/rooms/{roomId}/report`
///
/// Reports an abusive room to homeserver admins
#[tracing::instrument(skip_all, fields(%client), name = "report_room")]
pub(crate) async fn report_room_route(
State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp,
body: Ruma<report_room::v3::Request>,
) -> Result<report_room::v3::Response> {
// user authentication
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
info!(
"Received room report by user {sender_user} for room {} with reason: {:?}",
body.room_id, body.reason
);
delay_response().await;
if !services
.rooms
.state_cache
.server_in_room(&services.globals.config.server_name, &body.room_id)
.await
{
return Err!(Request(NotFound(
"Room does not exist to us, no local users have joined at all"
)));
}
if body.reason.as_ref().is_some_and(|s| s.len() > 750) {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Reason too long, should be 750 characters or fewer",
));
};
// send admin room message that we received the report with an @room ping for
// urgency
services
.admin
.send_message(message::RoomMessageEventContent::text_markdown(format!(
"@room Room report received from {} -\n\nRoom ID: {}\n\nReport Reason: {}",
sender_user.to_owned(),
body.room_id,
body.reason.as_deref().unwrap_or("")
)))
.await
.ok();
Ok(report_room::v3::Response {})
}
/// # `POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`
///
/// Reports an inappropriate event to homeserver admins
#[tracing::instrument(skip_all, fields(%client), name = "report_event")]
pub(crate) async fn report_event_route(
State(services): State<crate::State>, body: Ruma<report_content::v3::Request>,
State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp,
body: Ruma<report_content::v3::Request>,
) -> Result<report_content::v3::Response> {
// user authentication
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
info!(
"Received /report request by user {sender_user} for room {} and event ID {}",
body.room_id, body.event_id
"Received event report by user {sender_user} for room {} and event ID {}, with reason: {:?}",
body.room_id, body.event_id, body.reason
);
delay_response().await;
// check if we know about the reported event ID or if it's invalid
let Some(pdu) = services.rooms.timeline.get_pdu(&body.event_id)? else {
return Err(Error::BadRequest(
ErrorKind::NotFound,
"Event ID is not known to us or Event ID is invalid",
));
let Ok(pdu) = services.rooms.timeline.get_pdu(&body.event_id).await else {
return Err!(Request(NotFound("Event ID is not known to us or Event ID is invalid")));
};
is_report_valid(
is_event_report_valid(
&services,
&pdu.event_id,
&body.room_id,
@ -49,39 +104,25 @@ pub(crate) async fn report_event_route(
&body.reason,
body.score,
&pdu,
)?;
)
.await?;
// send admin room message that we received the report with an @room ping for
// urgency
services
.admin
.send_message(message::RoomMessageEventContent::text_html(
format!(
"@room Report received from: {}\n\nEvent ID: {}\nRoom ID: {}\nSent By: {}\n\nReport Score: {}\nReport \
Reason: {}",
sender_user.to_owned(),
pdu.event_id,
pdu.room_id,
pdu.sender.clone(),
body.score.unwrap_or_else(|| ruma::Int::from(0)),
body.reason.as_deref().unwrap_or("")
),
format!(
"<details><summary>@room Report received from: <a href=\"https://matrix.to/#/{0}\">{0}\
</a></summary><ul><li>Event Info<ul><li>Event ID: <code>{1}</code>\
<a href=\"https://matrix.to/#/{2}/{1}\">🔗</a></li><li>Room ID: <code>{2}</code>\
</li><li>Sent By: <a href=\"https://matrix.to/#/{3}\">{3}</a></li></ul></li><li>\
Report Info<ul><li>Report Score: {4}</li><li>Report Reason: {5}</li></ul></li>\
</ul></details>",
sender_user.to_owned(),
pdu.event_id.clone(),
pdu.room_id.clone(),
pdu.sender.clone(),
body.score.unwrap_or_else(|| ruma::Int::from(0)),
HtmlEscape(body.reason.as_deref().unwrap_or(""))
),
))
.await;
.send_message(message::RoomMessageEventContent::text_markdown(format!(
"@room Event report received from {} -\n\nEvent ID: {}\nRoom ID: {}\nSent By: {}\n\nReport Score: \
{}\nReport Reason: {}",
sender_user.to_owned(),
pdu.event_id,
pdu.room_id,
pdu.sender,
body.score.unwrap_or_else(|| ruma::Int::from(0)),
body.reason.as_deref().unwrap_or("")
)))
.await
.ok();
Ok(report_content::v3::Response {})
}
@ -92,7 +133,7 @@ pub(crate) async fn report_event_route(
/// check if score is in valid range
/// check if report reasoning is less than or equal to 750 characters
/// check if reporting user is in the reporting room
fn is_report_valid(
async fn is_event_report_valid(
services: &Services, event_id: &EventId, room_id: &RoomId, sender_user: &UserId, reason: &Option<String>,
score: Option<ruma::Int>, pdu: &std::sync::Arc<PduEvent>,
) -> Result<()> {
@ -123,8 +164,8 @@ fn is_report_valid(
.rooms
.state_cache
.room_members(room_id)
.filter_map(Result::ok)
.any(|user_id| user_id == *sender_user)
.ready_any(|user_id| user_id == sender_user)
.await
{
return Err(Error::BadRequest(
ErrorKind::NotFound,

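`is_event_report_valid` now checks room membership with `.ready_any(..)` on the member stream instead of `.filter_map(Result::ok).any(..)` on an iterator. With stock `futures`, the equivalent is `any` with an async closure; `ready_any` is the non-async shortcut for it:

```rust
use futures::{stream, StreamExt};

#[tokio::main]
async fn main() {
    let sender = "@b:example.org";

    // `.ready_any(|m| m == sender)` in the crate is shorthand for this:
    let in_room = stream::iter(["@a:example.org", "@b:example.org"])
        .any(|m| async move { m == sender })
        .await;

    assert!(in_room);
}
```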

@ -1,7 +1,8 @@
use std::{cmp::max, collections::BTreeMap};
use axum::extract::State;
use conduit::{debug_info, debug_warn, err};
use conduit::{debug_info, debug_warn, err, Err};
use futures::{FutureExt, StreamExt};
use ruma::{
api::client::{
error::ErrorKind,
@ -73,7 +74,7 @@ pub(crate) async fn create_room_route(
if !services.globals.allow_room_creation()
&& body.appservice_info.is_none()
&& !services.users.is_admin(sender_user)?
&& !services.users.is_admin(sender_user).await
{
return Err(Error::BadRequest(ErrorKind::forbidden(), "Room creation has been disabled."));
}
@ -85,14 +86,18 @@ pub(crate) async fn create_room_route(
};
// check if room ID doesn't already exist instead of erroring on auth check
if services.rooms.short.get_shortroomid(&room_id)?.is_some() {
if services.rooms.short.get_shortroomid(&room_id).await.is_ok() {
return Err(Error::BadRequest(
ErrorKind::RoomInUse,
"Room with that custom room ID already exists",
));
}
let _short_id = services.rooms.short.get_or_create_shortroomid(&room_id)?;
let _short_id = services
.rooms
.short
.get_or_create_shortroomid(&room_id)
.await;
let state_lock = services.rooms.state.mutex.lock(&room_id).await;
let alias: Option<OwnedRoomAliasId> = if let Some(alias) = &body.room_alias_name {
@ -119,7 +124,7 @@ pub(crate) async fn create_room_route(
None => services.globals.default_room_version(),
};
let content = match &body.creation_content {
let create_content = match &body.creation_content {
Some(content) => {
use RoomVersionId::*;
@ -181,16 +186,15 @@ pub(crate) async fn create_room_route(
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomCreate,
content: to_raw_value(&content).expect("event is valid, we just created it"),
unsigned: None,
content: to_raw_value(&create_content).expect("create event content serialization"),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
..Default::default()
},
sender_user,
&room_id,
&state_lock,
)
.boxed()
.await?;
// 2. Let the room creator join
@ -198,28 +202,21 @@ pub(crate) async fn create_room_route(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Join,
displayname: services.users.displayname(sender_user)?,
avatar_url: services.users.avatar_url(sender_user)?,
PduBuilder::state(
sender_user.to_string(),
&RoomMemberEventContent {
displayname: services.users.displayname(sender_user).await.ok(),
avatar_url: services.users.avatar_url(sender_user).await.ok(),
blurhash: services.users.blurhash(sender_user).await.ok(),
is_direct: Some(body.is_direct),
third_party_invite: None,
blurhash: services.users.blurhash(sender_user)?,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(sender_user.to_string()),
redacts: None,
timestamp: None,
},
..RoomMemberEventContent::new(MembershipState::Join)
},
),
sender_user,
&room_id,
&state_lock,
)
.boxed()
.await?;
// 3. Power levels
@ -233,8 +230,16 @@ pub(crate) async fn create_room_route(
let mut users = BTreeMap::from_iter([(sender_user.clone(), int!(100))]);
if preset == RoomPreset::TrustedPrivateChat {
for invite_ in &body.invite {
users.insert(invite_.clone(), int!(100));
for invite in &body.invite {
if services.users.user_is_ignored(sender_user, invite).await {
return Err!(Request(Forbidden("You cannot invite users you have ignored to rooms.")));
} else if services.users.user_is_ignored(invite, sender_user).await {
// silently drop the invite to the recipient if they've been ignored by the
// sender, pretend it worked
continue;
}
users.insert(invite.clone(), int!(100));
}
}
@ -247,16 +252,15 @@ pub(crate) async fn create_room_route(
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomPowerLevels,
content: to_raw_value(&power_levels_content).expect("to_raw_value always works on serde_json::Value"),
unsigned: None,
content: to_raw_value(&power_levels_content).expect("serialized power_levels event content"),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
..Default::default()
},
sender_user,
&room_id,
&state_lock,
)
.boxed()
.await?;
// 4. Canonical room alias
@ -265,22 +269,18 @@ pub(crate) async fn create_room_route(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomCanonicalAlias,
content: to_raw_value(&RoomCanonicalAliasEventContent {
PduBuilder::state(
String::new(),
&RoomCanonicalAliasEventContent {
alias: Some(room_alias_id.to_owned()),
alt_aliases: vec![],
})
.expect("We checked that alias earlier, it must be fine"),
unsigned: None,
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
},
),
sender_user,
&room_id,
&state_lock,
)
.boxed()
.await?;
}
@ -291,23 +291,19 @@ pub(crate) async fn create_room_route(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomJoinRules,
content: to_raw_value(&RoomJoinRulesEventContent::new(match preset {
PduBuilder::state(
String::new(),
&RoomJoinRulesEventContent::new(match preset {
RoomPreset::PublicChat => JoinRule::Public,
// according to spec "invite" is the default
_ => JoinRule::Invite,
}))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
}),
),
sender_user,
&room_id,
&state_lock,
)
.boxed()
.await?;
// 5.2 History Visibility
@ -315,19 +311,15 @@ pub(crate) async fn create_room_route(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomHistoryVisibility,
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
PduBuilder::state(
String::new(),
&RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared),
),
sender_user,
&room_id,
&state_lock,
)
.boxed()
.await?;
// 5.3 Guest Access
@ -335,22 +327,18 @@ pub(crate) async fn create_room_route(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomGuestAccess,
content: to_raw_value(&RoomGuestAccessEventContent::new(match preset {
PduBuilder::state(
String::new(),
&RoomGuestAccessEventContent::new(match preset {
RoomPreset::PublicChat => GuestAccess::Forbidden,
_ => GuestAccess::CanJoin,
}))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
}),
),
sender_user,
&room_id,
&state_lock,
)
.boxed()
.await?;
// 6. Events listed in initial_state
@ -383,6 +371,7 @@ pub(crate) async fn create_room_route(
.rooms
.timeline
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
.boxed()
.await?;
}
@ -392,19 +381,12 @@ pub(crate) async fn create_room_route(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomName,
content: to_raw_value(&RoomNameEventContent::new(name.clone()))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
PduBuilder::state(String::new(), &RoomNameEventContent::new(name.clone())),
sender_user,
&room_id,
&state_lock,
)
.boxed()
.await?;
}
@ -413,28 +395,35 @@ pub(crate) async fn create_room_route(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomTopic,
content: to_raw_value(&RoomTopicEventContent {
PduBuilder::state(
String::new(),
&RoomTopicEventContent {
topic: topic.clone(),
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
},
),
sender_user,
&room_id,
&state_lock,
)
.boxed()
.await?;
}
// 8. Events implied by invite (and TODO: invite_3pid)
drop(state_lock);
for user_id in &body.invite {
if let Err(e) = invite_helper(&services, sender_user, user_id, &room_id, None, body.is_direct).await {
if services.users.user_is_ignored(sender_user, user_id).await {
return Err!(Request(Forbidden("You cannot invite users you have ignored to rooms.")));
} else if services.users.user_is_ignored(user_id, sender_user).await {
// silently drop the invite to the recipient if they've been ignored by the
// sender, pretend it worked
continue;
}
if let Err(e) = invite_helper(&services, sender_user, user_id, &room_id, None, body.is_direct)
.boxed()
.await
{
warn!(%e, "Failed to send invite");
}
}
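
The two invite loops in `create_room_route` now apply the same ignore policy, shown here in isolation with a toy lookup in place of the account-data check:

```rust
// Toy stand-in for the ignore-list lookup: does `user` ignore `other`?
async fn user_is_ignored(user: &str, other: &str) -> bool {
    user == "@grump:example.org" && other == "@noisy:example.org"
}

// Inviting someone you ignore is a hard error; someone who ignores you gets
// the invite silently dropped so the sender cannot probe ignore lists.
async fn screen_invite(sender: &str, invitee: &str) -> Result<bool, &'static str> {
    if user_is_ignored(sender, invitee).await {
        return Err("You cannot invite users you have ignored to rooms.");
    }
    if user_is_ignored(invitee, sender).await {
        return Ok(false); // pretend it worked; the recipient never sees it
    }
    Ok(true) // proceed with the invite
}

#[tokio::main]
async fn main() {
    assert!(screen_invite("@grump:example.org", "@noisy:example.org").await.is_err());
    assert_eq!(screen_invite("@noisy:example.org", "@grump:example.org").await, Ok(false));
    assert_eq!(screen_invite("@x:example.org", "@y:example.org").await, Ok(true));
}
```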
@ -448,7 +437,7 @@ pub(crate) async fn create_room_route(
}
if body.visibility == room::Visibility::Public {
services.rooms.directory.set_public(&room_id)?;
services.rooms.directory.set_public(&room_id);
}
info!("{sender_user} created a room with room ID {room_id}");
@ -470,13 +459,15 @@ pub(crate) async fn get_room_event_route(
let event = services
.rooms
.timeline
.get_pdu(&body.event_id)?
.ok_or_else(|| err!(Request(NotFound("Event {} not found.", &body.event_id))))?;
.get_pdu(&body.event_id)
.await
.map_err(|_| err!(Request(NotFound("Event {} not found.", &body.event_id))))?;
if !services
.rooms
.state_accessor
.user_can_see_event(sender_user, &event.room_id, &body.event_id)?
.user_can_see_event(sender_user, &event.room_id, &body.event_id)
.await
{
return Err(Error::BadRequest(
ErrorKind::forbidden(),
@ -506,7 +497,8 @@ pub(crate) async fn get_room_aliases_route(
if !services
.rooms
.state_accessor
.user_can_see_state_events(sender_user, &body.room_id)?
.user_can_see_state_events(sender_user, &body.room_id)
.await
{
return Err(Error::BadRequest(
ErrorKind::forbidden(),
@ -519,8 +511,9 @@ pub(crate) async fn get_room_aliases_route(
.rooms
.alias
.local_aliases_for_room(&body.room_id)
.filter_map(Result::ok)
.collect(),
.map(ToOwned::to_owned)
.collect()
.await,
})
}
@ -556,7 +549,8 @@ pub(crate) async fn upgrade_room_route(
let _short_id = services
.rooms
.short
.get_or_create_shortroomid(&replacement_room)?;
.get_or_create_shortroomid(&replacement_room)
.await;
let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
@ -567,18 +561,13 @@ pub(crate) async fn upgrade_room_route(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomTombstone,
content: to_raw_value(&RoomTombstoneEventContent {
PduBuilder::state(
String::new(),
&RoomTombstoneEventContent {
body: "This room has been replaced".to_owned(),
replacement_room: replacement_room.clone(),
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
},
),
sender_user,
&body.room_id,
&state_lock,
@ -590,16 +579,12 @@ pub(crate) async fn upgrade_room_route(
let state_lock = services.rooms.state.mutex.lock(&replacement_room).await;
// Get the old room creation event
let mut create_event_content = serde_json::from_str::<CanonicalJsonObject>(
services
.rooms
.state_accessor
.room_state_get(&body.room_id, &StateEventType::RoomCreate, "")?
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
.content
.get(),
)
.map_err(|_| Error::bad_database("Invalid room event in database."))?;
let mut create_event_content: CanonicalJsonObject = services
.rooms
.state_accessor
.room_state_get_content(&body.room_id, &StateEventType::RoomCreate, "")
.await
.map_err(|_| err!(Database("Found room without m.room.create event.")))?;
// Use the m.room.tombstone event as the predecessor
let predecessor = Some(ruma::events::room::create::PreviousRoom::new(
@ -679,11 +664,11 @@ pub(crate) async fn upgrade_room_route(
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Join,
displayname: services.users.displayname(sender_user)?,
avatar_url: services.users.avatar_url(sender_user)?,
displayname: services.users.displayname(sender_user).await.ok(),
avatar_url: services.users.avatar_url(sender_user).await.ok(),
is_direct: None,
third_party_invite: None,
blurhash: services.users.blurhash(sender_user)?,
blurhash: services.users.blurhash(sender_user).await.ok(),
reason: None,
join_authorized_via_users_server: None,
})
@ -704,10 +689,11 @@ pub(crate) async fn upgrade_room_route(
let event_content = match services
.rooms
.state_accessor
.room_state_get(&body.room_id, event_type, "")?
.room_state_get(&body.room_id, event_type, "")
.await
{
Some(v) => v.content.clone(),
None => continue, // Skipping missing events.
Ok(v) => v.content.clone(),
Err(_) => continue, // Skipping missing events.
};
services
@ -717,10 +703,8 @@ pub(crate) async fn upgrade_room_route(
PduBuilder {
event_type: event_type.to_string().into(),
content: event_content,
unsigned: None,
state_key: Some(String::new()),
redacts: None,
timestamp: None,
..Default::default()
},
sender_user,
&replacement_room,
@ -730,34 +714,32 @@ pub(crate) async fn upgrade_room_route(
}
// Moves any local aliases to the new room
for alias in services
let mut local_aliases = services
.rooms
.alias
.local_aliases_for_room(&body.room_id)
.filter_map(Result::ok)
{
.boxed();
while let Some(alias) = local_aliases.next().await {
services
.rooms
.alias
.remove_alias(&alias, sender_user)
.remove_alias(alias, sender_user)
.await?;
services
.rooms
.alias
.set_alias(&alias, &replacement_room, sender_user)?;
.set_alias(alias, &replacement_room, sender_user)?;
}
// Get the old room power levels
let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str(
services
.rooms
.state_accessor
.room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "")?
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
.content
.get(),
)
.map_err(|_| Error::bad_database("Invalid room event in database."))?;
let power_levels_event_content: RoomPowerLevelsEventContent = services
.rooms
.state_accessor
.room_state_get_content(&body.room_id, &StateEventType::RoomPowerLevels, "")
.await
.map_err(|_| err!(Database("Found room without m.room.power_levels event.")))?;
// Setting events_default and invite to the greater of 50 and users_default + 1
let new_level = max(
@ -765,12 +747,8 @@ pub(crate) async fn upgrade_room_route(
power_levels_event_content
.users_default
.checked_add(int!(1))
.ok_or_else(|| {
Error::BadRequest(ErrorKind::BadJson, "users_default power levels event content is not valid")
})?,
.ok_or_else(|| err!(Request(BadJson("users_default power levels event content is not valid"))))?,
);
power_levels_event_content.events_default = new_level;
power_levels_event_content.invite = new_level;
// Modify the power levels in the old room to prevent sending of events and
// inviting new users
@ -778,14 +756,14 @@ pub(crate) async fn upgrade_room_route(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomPowerLevels,
content: to_raw_value(&power_levels_event_content).expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
PduBuilder::state(
String::new(),
&RoomPowerLevelsEventContent {
events_default: new_level,
invite: new_level,
..power_levels_event_content
},
),
sender_user,
&body.room_id,
&state_lock,
@ -820,10 +798,18 @@ fn default_power_levels_content(
power_levels_content["events"]["m.room.history_visibility"] =
serde_json::to_value(100).expect("100 is valid Value");
// always allow users to respond (not post new) to polls. this is primarily
// useful in read-only announcement rooms that post a public poll.
power_levels_content["events"]["org.matrix.msc3381.poll.response"] =
serde_json::to_value(0).expect("0 is valid Value");
power_levels_content["events"]["m.poll.response"] = serde_json::to_value(0).expect("0 is valid Value");
// synapse does this too. clients do not expose these permissions. it prevents
// default users from calling public rooms, for obvious reasons.
if *visibility == room::Visibility::Public {
power_levels_content["events"]["m.call.invite"] = serde_json::to_value(50).expect("50 is valid Value");
power_levels_content["events"]["m.call"] = serde_json::to_value(50).expect("50 is valid Value");
power_levels_content["events"]["m.call.member"] = serde_json::to_value(50).expect("50 is valid Value");
power_levels_content["events"]["org.matrix.msc3401.call"] =
serde_json::to_value(50).expect("50 is valid Value");
power_levels_content["events"]["org.matrix.msc3401.call.member"] =
@ -878,8 +864,9 @@ async fn room_alias_check(
if services
.rooms
.alias
.resolve_local_alias(&full_room_alias)?
.is_some()
.resolve_local_alias(&full_room_alias)
.await
.is_ok()
{
return Err(Error::BadRequest(ErrorKind::RoomInUse, "Room alias already exists."));
}

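The alias move in `upgrade_room_route` is another loop-to-stream conversion, but here the stream is drained manually with `while let` because each iteration runs further service calls. The skeleton of that shape:

```rust
use futures::{stream, StreamExt};

#[tokio::main]
async fn main() {
    // Boxing mirrors the hunk's `.boxed()`: it erases the stream type so the
    // binding has a nameable type across the loop.
    let mut local_aliases = stream::iter(["#old:example.org", "#older:example.org"]).boxed();

    while let Some(alias) = local_aliases.next().await {
        // In the real code: remove_alias(alias, sender_user).await? followed by
        // set_alias(alias, &replacement_room, sender_user)?.
        println!("moving {alias}");
    }
}
```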

@ -1,6 +1,12 @@
use std::collections::BTreeMap;
use axum::extract::State;
use conduit::{
debug,
utils::{IterStream, ReadyExt},
Err,
};
use futures::{FutureExt, StreamExt};
use ruma::{
api::client::{
error::ErrorKind,
@ -13,7 +19,6 @@ use ruma::{
serde::Raw,
uint, OwnedRoomId,
};
use tracing::debug;
use crate::{Error, Result, Ruma};
@ -32,14 +37,17 @@ pub(crate) async fn search_events_route(
let filter = &search_criteria.filter;
let include_state = &search_criteria.include_state;
let room_ids = filter.rooms.clone().unwrap_or_else(|| {
let room_ids = if let Some(room_ids) = &filter.rooms {
room_ids.clone()
} else {
services
.rooms
.state_cache
.rooms_joined(sender_user)
.filter_map(Result::ok)
.map(ToOwned::to_owned)
.collect()
});
.await
};
// Use limit or else 10, with maximum 100
let limit: usize = filter
@ -53,27 +61,30 @@ pub(crate) async fn search_events_route(
if include_state.is_some_and(|include_state| include_state) {
for room_id in &room_ids {
if !services.rooms.state_cache.is_joined(sender_user, room_id)? {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"You don't have permission to view this room.",
));
if !services
.rooms
.state_cache
.is_joined(sender_user, room_id)
.await
{
return Err!(Request(Forbidden("You don't have permission to view this room.")));
}
// check if sender_user can see state events
if services
.rooms
.state_accessor
.user_can_see_state_events(sender_user, room_id)?
.user_can_see_state_events(sender_user, room_id)
.await
{
let room_state = services
let room_state: Vec<_> = services
.rooms
.state_accessor
.room_state_full(room_id)
.await?
.values()
.map(|pdu| pdu.to_state_event())
.collect::<Vec<_>>();
.collect();
debug!("Room state: {:?}", room_state);
@ -87,10 +98,15 @@ pub(crate) async fn search_events_route(
}
}
let mut searches = Vec::new();
let mut search_vecs = Vec::new();
for room_id in &room_ids {
if !services.rooms.state_cache.is_joined(sender_user, room_id)? {
if !services
.rooms
.state_cache
.is_joined(sender_user, room_id)
.await
{
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"You don't have permission to view this room.",
@ -100,69 +116,71 @@ pub(crate) async fn search_events_route(
if let Some(search) = services
.rooms
.search
.search_pdus(room_id, &search_criteria.search_term)?
.search_pdus(room_id, &search_criteria.search_term)
.await
{
searches.push(search.0.peekable());
search_vecs.push(search.0);
}
}
let mut searches: Vec<_> = search_vecs
.iter()
.map(|vec| vec.iter().peekable())
.collect();
let skip: usize = match body.next_batch.as_ref().map(|s| s.parse()) {
Some(Ok(s)) => s,
Some(Err(_)) => return Err(Error::BadRequest(ErrorKind::InvalidParam, "Invalid next_batch token.")),
None => 0, // Default to the start
};
let mut results = Vec::new();
let mut results = Vec::with_capacity(searches.len());
let next_batch = skip.saturating_add(limit);
for _ in 0..next_batch {
if let Some(s) = searches
.iter_mut()
.map(|s| (s.peek().cloned(), s))
.max_by_key(|(peek, _)| peek.clone())
.map(|s| (s.peek().copied(), s))
.max_by_key(|(peek, _)| *peek)
.and_then(|(_, i)| i.next())
{
results.push(s);
}
}
let results: Vec<_> = results
let final_results: Vec<_> = results
.iter()
.skip(skip)
.filter_map(|result| {
.stream()
.filter_map(|id| services.rooms.timeline.get_pdu_from_id(id).map(Result::ok))
.ready_filter(|pdu| !pdu.is_redacted())
.filter_map(|pdu| async move {
services
.rooms
.timeline
.get_pdu_from_id(result)
.ok()?
.filter(|pdu| {
!pdu.is_redacted()
&& services
.rooms
.state_accessor
.user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id)
.unwrap_or(false)
})
.map(|pdu| pdu.to_room_event())
.state_accessor
.user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id)
.await
.then_some(pdu)
})
.map(|result| {
Ok::<_, Error>(SearchResult {
context: EventContextResult {
end: None,
events_after: Vec::new(),
events_before: Vec::new(),
profile_info: BTreeMap::new(),
start: None,
},
rank: None,
result: Some(result),
})
})
.filter_map(Result::ok)
.take(limit)
.collect();
.map(|pdu| pdu.to_room_event())
.map(|result| SearchResult {
context: EventContextResult {
end: None,
events_after: Vec::new(),
events_before: Vec::new(),
profile_info: BTreeMap::new(),
start: None,
},
rank: None,
result: Some(result),
})
.collect()
.boxed()
.await;
let more_unloaded_results = searches.iter_mut().any(|s| s.peek().is_some());
let next_batch = more_unloaded_results.then(|| next_batch.to_string());
Ok(search_events::v3::Response::new(ResultCategories {
@ -170,7 +188,7 @@ pub(crate) async fn search_events_route(
count: Some(results.len().try_into().unwrap_or_else(|_| uint!(0))),
groups: BTreeMap::new(), // TODO
next_batch,
results,
results: final_results,
state: room_states,
highlights: search_criteria
.search_term

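The search pagination above is a k-way merge: each room contributes a list of result IDs sorted in descending order, and the loop repeatedly takes from whichever peekable iterator currently has the greatest head. The same algorithm over plain integers:

```rust
// Merge n descending-sorted lists by always taking the greatest head,
// stopping after `take` items; mirrors the peekable merge in the hunk.
fn merge_desc(lists: &[Vec<u64>], take: usize) -> Vec<u64> {
    let mut iters: Vec<_> = lists.iter().map(|v| v.iter().peekable()).collect();
    let mut out = Vec::with_capacity(take);

    for _ in 0..take {
        let next = iters
            .iter_mut()
            .map(|it| (it.peek().copied(), it))
            .max_by_key(|(peek, _)| *peek)
            .and_then(|(_, it)| it.next());
        match next {
            Some(v) => out.push(*v),
            None => break, // every list is exhausted
        }
    }

    out
}

fn main() {
    let lists = vec![vec![9, 4, 1], vec![8, 7, 2]];
    assert_eq!(merge_desc(&lists, 4), vec![9, 8, 7, 4]);
}
```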

@ -1,5 +1,7 @@
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduit::{debug, err, info, utils::ReadyExt, warn, Err};
use futures::StreamExt;
use ruma::{
api::client::{
error::ErrorKind,
@ -19,7 +21,6 @@ use ruma::{
UserId,
};
use serde::Deserialize;
use tracing::{debug, info, warn};
use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH};
use crate::{utils, utils::hash, Error, Result, Ruma};
@ -79,21 +80,22 @@ pub(crate) async fn login_route(
UserId::parse(user)
} else {
warn!("Bad login type: {:?}", &body.login_info);
return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type."));
return Err!(Request(Forbidden("Bad login type.")));
}
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
let hash = services
.users
.password_hash(&user_id)?
.ok_or(Error::BadRequest(ErrorKind::forbidden(), "Wrong username or password."))?;
.password_hash(&user_id)
.await
.map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?;
if hash.is_empty() {
return Err(Error::BadRequest(ErrorKind::UserDeactivated, "The user has been deactivated"));
return Err!(Request(UserDeactivated("The user has been deactivated")));
}
if hash::verify_password(password, &hash).is_err() {
return Err(Error::BadRequest(ErrorKind::forbidden(), "Wrong username or password."));
return Err!(Request(Forbidden("Wrong username or password.")));
}
user_id
@ -112,15 +114,12 @@ pub(crate) async fn login_route(
let username = token.claims.sub.to_lowercase();
UserId::parse_with_server_name(username, services.globals.server_name()).map_err(|e| {
warn!("Failed to parse username from user logging in: {e}");
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
})?
UserId::parse_with_server_name(username, services.globals.server_name())
.map_err(|e| err!(Request(InvalidUsername(debug_error!(?e, "Failed to parse login username")))))?
} else {
return Err(Error::BadRequest(
ErrorKind::Unknown,
"Token login is not supported (server has no jwt decoding key).",
));
return Err!(Request(Unknown(
"Token login is not supported (server has no jwt decoding key)."
)));
}
},
#[allow(deprecated)]
@ -169,23 +168,32 @@ pub(crate) async fn login_route(
let token = utils::random_string(TOKEN_LENGTH);
// Determine if device_id was provided and exists in the db for this user
let device_exists = body.device_id.as_ref().map_or(false, |device_id| {
let device_exists = if body.device_id.is_some() {
services
.users
.all_device_ids(&user_id)
.any(|x| x.as_ref().map_or(false, |v| v == device_id))
});
.ready_any(|v| v == device_id)
.await
} else {
false
};
if device_exists {
services.users.set_token(&user_id, &device_id, &token)?;
services
.users
.set_token(&user_id, &device_id, &token)
.await?;
} else {
services.users.create_device(
&user_id,
&device_id,
&token,
body.initial_device_display_name.clone(),
Some(client.to_string()),
)?;
services
.users
.create_device(
&user_id,
&device_id,
&token,
body.initial_device_display_name.clone(),
Some(client.to_string()),
)
.await?;
}
// send client well-known if specified so the client knows to reconfigure itself
@@ -228,10 +236,13 @@ pub(crate) async fn logout_route(
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
services.users.remove_device(sender_user, sender_device)?;
services
.users
.remove_device(sender_user, sender_device)
.await;
// send device list update for user after logout
services.users.mark_device_key_update(sender_user)?;
services.users.mark_device_key_update(sender_user).await;
Ok(logout::v3::Response::new())
}
@@ -256,12 +267,14 @@ pub(crate) async fn logout_all_route(
) -> Result<logout_all::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
for device_id in services.users.all_device_ids(sender_user).flatten() {
services.users.remove_device(sender_user, &device_id)?;
}
services
.users
.all_device_ids(sender_user)
.for_each(|device_id| services.users.remove_device(sender_user, device_id))
.await;
// send device list update for user after logout
services.users.mark_device_key_update(sender_user)?;
services.users.mark_device_key_update(sender_user).await;
Ok(logout_all::v3::Response::new())
}
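The logout_all change above swaps a collect-then-loop with `?` for a fully streamed walk over the user's device ids. A hedged sketch of that pattern with the futures crate, where remove_device is a stand-in for the service call, not the real signature:

use futures::{stream, StreamExt};

async fn remove_device(device_id: &str) {
    println!("removing device {device_id}");
}

async fn logout_all(device_ids: Vec<String>) {
    // Drive one removal per device id; nothing is collected up front
    stream::iter(device_ids)
        .for_each(|device_id| async move { remove_device(&device_id).await })
        .await;
}

fn main() {
    futures::executor::block_on(logout_all(vec!["AAA".into(), "BBB".into()]));
}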


@@ -1,7 +1,7 @@
use std::sync::Arc;
use axum::extract::State;
use conduit::{debug_info, error, pdu::PduBuilder, Error, Result};
use conduit::{err, pdu::PduBuilder, utils::BoolExt, Err, Error, Result};
use ruma::{
api::client::{
error::ErrorKind,
@@ -84,12 +84,10 @@ pub(crate) async fn get_state_events_route(
if !services
.rooms
.state_accessor
.user_can_see_state_events(sender_user, &body.room_id)?
.user_can_see_state_events(sender_user, &body.room_id)
.await
{
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"You don't have permission to view the room state.",
));
return Err!(Request(Forbidden("You don't have permission to view the room state.")));
}
Ok(get_state_events::v3::Response {
@@ -120,43 +118,34 @@ pub(crate) async fn get_state_events_for_key_route(
if !services
.rooms
.state_accessor
.user_can_see_state_events(sender_user, &body.room_id)?
.user_can_see_state_events(sender_user, &body.room_id)
.await
{
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"You don't have permission to view the room state.",
));
return Err!(Request(Forbidden("You don't have permission to view the room state.")));
}
let event = services
.rooms
.state_accessor
.room_state_get(&body.room_id, &body.event_type, &body.state_key)?
.ok_or_else(|| {
debug_info!("State event {:?} not found in room {:?}", &body.event_type, &body.room_id);
Error::BadRequest(ErrorKind::NotFound, "State event not found.")
.room_state_get(&body.room_id, &body.event_type, &body.state_key)
.await
.map_err(|_| {
err!(Request(NotFound(debug_warn!(
room_id = ?body.room_id,
event_type = ?body.event_type,
"State event not found in room.",
))))
})?;
if body
let event_format = body
.format
.as_ref()
.is_some_and(|f| f.to_lowercase().eq("event"))
{
Ok(get_state_events_for_key::v3::Response {
content: None,
event: serde_json::from_str(event.to_state_event().json().get()).map_err(|e| {
error!("Invalid room state event in database: {}", e);
Error::bad_database("Invalid room state event in database")
})?,
})
} else {
Ok(get_state_events_for_key::v3::Response {
content: Some(serde_json::from_str(event.content.get()).map_err(|e| {
error!("Invalid room state event content in database: {}", e);
Error::bad_database("Invalid room state event content in database")
})?),
event: None,
})
}
.is_some_and(|f| f.to_lowercase().eq("event"));
Ok(get_state_events_for_key::v3::Response {
content: event_format.or(|| event.get_content_as_value()),
event: event_format.then(|| event.to_state_event_value()),
})
}
/// # `GET /_matrix/client/v3/rooms/{roomid}/state/{eventType}`
@@ -187,11 +176,10 @@ async fn send_state_event_for_key_helper(
.build_and_append_pdu(
PduBuilder {
event_type: event_type.to_string().into(),
content: serde_json::from_str(json.json().get()).expect("content is valid json"),
unsigned: None,
content: serde_json::from_str(json.json().get())?,
state_key: Some(state_key),
redacts: None,
timestamp,
..Default::default()
},
sender,
room_id,
@@ -204,7 +192,7 @@ async fn send_state_event_for_key_helper(
async fn allowed_to_send_state_event(
services: &Services, room_id: &RoomId, event_type: &StateEventType, json: &Raw<AnyStateEventContent>,
) -> Result<()> {
) -> Result {
match event_type {
// Forbid m.room.encryption if encryption is disabled
StateEventType::RoomEncryption => {
@@ -214,7 +202,7 @@ async fn allowed_to_send_state_event(
},
// admin room is a sensitive room, it should not ever be made public
StateEventType::RoomJoinRules => {
if let Some(admin_room_id) = services.admin.get_admin_room()? {
if let Ok(admin_room_id) = services.admin.get_admin_room().await {
if admin_room_id == room_id {
if let Ok(join_rule) = serde_json::from_str::<RoomJoinRulesEventContent>(json.json().get()) {
if join_rule.join_rule == JoinRule::Public {
@@ -229,7 +217,7 @@ async fn allowed_to_send_state_event(
},
// admin room is a sensitive room, it should not ever be made world readable
StateEventType::RoomHistoryVisibility => {
if let Some(admin_room_id) = services.admin.get_admin_room()? {
if let Ok(admin_room_id) = services.admin.get_admin_room().await {
if admin_room_id == room_id {
if let Ok(visibility_content) =
serde_json::from_str::<RoomHistoryVisibilityEventContent>(json.json().get())
@@ -254,23 +242,27 @@ async fn allowed_to_send_state_event(
}
for alias in aliases {
if !services.globals.server_is_ours(alias.server_name())
|| services
.rooms
.alias
.resolve_local_alias(&alias)?
.filter(|room| room == room_id) // Make sure it's the right room
.is_none()
if !services.globals.server_is_ours(alias.server_name()) {
return Err!(Request(Forbidden("canonical_alias must be for this server")));
}
if !services
.rooms
.alias
.resolve_local_alias(&alias)
.await
.is_ok_and(|room| room == room_id)
// Make sure it's the right room
{
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"You are only allowed to send canonical_alias events when its aliases already exist",
));
return Err!(Request(Forbidden(
"You are only allowed to send canonical_alias events when its aliases already exist"
)));
}
}
}
},
_ => (),
}
Ok(())
}
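The `format=event` branch above collapses into a pair of complementary options. A sketch using only std's bool helpers (conduwuit's BoolExt::or is approximated here with an inverted bool::then; the String stands in for the serialized event):

fn split_response(event_format: bool, value: String) -> (Option<String>, Option<String>) {
    // Exactly one of the two response fields is populated
    let content = (!event_format).then(|| value.clone());
    let event = event_format.then_some(value);
    (content, event)
}

fn main() {
    assert_eq!(split_response(true, "ev".into()), (None, Some("ev".into())));
    assert_eq!(split_response(false, "ct".into()), (Some("ct".into()), None));
}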

File diff suppressed because it is too large


@@ -0,0 +1,67 @@
mod v3;
mod v4;
use conduit::{
utils::{math::usize_from_u64_truncated, ReadyExt},
PduCount,
};
use futures::StreamExt;
use ruma::{RoomId, UserId};
pub(crate) use self::{v3::sync_events_route, v4::sync_events_v4_route};
use crate::{service::Services, Error, PduEvent, Result};
async fn load_timeline(
services: &Services, sender_user: &UserId, room_id: &RoomId, roomsincecount: PduCount, limit: u64,
) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> {
let timeline_pdus;
let limited = if services
.rooms
.timeline
.last_timeline_count(sender_user, room_id)
.await?
> roomsincecount
{
let mut non_timeline_pdus = services
.rooms
.timeline
.pdus_until(sender_user, room_id, PduCount::max())
.await?
.ready_take_while(|(pducount, _)| pducount > &roomsincecount);
// Take the last events for the timeline
timeline_pdus = non_timeline_pdus
.by_ref()
.take(usize_from_u64_truncated(limit))
.collect::<Vec<_>>()
.await
.into_iter()
.rev()
.collect::<Vec<_>>();
// The /sync response doesn't always return all messages, so we say the
// output is limited if there are events left over in non_timeline_pdus
non_timeline_pdus.next().await.is_some()
} else {
timeline_pdus = Vec::new();
false
};
Ok((timeline_pdus, limited))
}
async fn share_encrypted_room(
services: &Services, sender_user: &UserId, user_id: &UserId, ignore_room: Option<&RoomId>,
) -> bool {
services
.rooms
.user
.get_shared_rooms(sender_user, user_id)
.ready_filter(|&room_id| Some(room_id) != ignore_room)
.any(|other_room_id| {
services
.rooms
.state_accessor
.is_encrypted_room(other_room_id)
})
.await
}
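load_timeline above decides `limited` by checking whether anything remains after taking `limit` events. The same shape with a plain iterator in place of the PDU stream:

fn take_timeline(events: Vec<u64>, limit: usize) -> (Vec<u64>, bool) {
    let mut rest = events.into_iter(); // newest first, like pdus_until
    let mut timeline: Vec<u64> = rest.by_ref().take(limit).collect();
    timeline.reverse(); // chronological order for the /sync timeline
    let limited = rest.next().is_some(); // anything left over means truncation
    (timeline, limited)
}

fn main() {
    assert_eq!(take_timeline(vec![3, 2, 1], 2), (vec![2, 3], true));
    assert_eq!(take_timeline(vec![1], 2), (vec![1], false));
}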

src/api/client/sync/v3.rs (new file, 1067 lines)

File diff suppressed because it is too large

src/api/client/sync/v4.rs (new file, 784 lines)

@@ -0,0 +1,784 @@
use std::{
cmp::{self, Ordering},
collections::{BTreeMap, BTreeSet, HashSet},
time::Duration,
};
use axum::extract::State;
use conduit::{
debug, error, extract_variant,
utils::{
math::{ruma_from_usize, usize_from_ruma},
BoolExt, IterStream, ReadyExt, TryFutureExtExt,
},
warn, Error, PduCount, Result,
};
use futures::{FutureExt, StreamExt, TryFutureExt};
use ruma::{
api::client::{
error::ErrorKind,
sync::sync_events::{
self,
v4::{SlidingOp, SlidingSyncRoomHero},
DeviceLists, UnreadNotificationsCount,
},
},
directory::RoomTypeFilter,
events::{
room::member::{MembershipState, RoomMemberEventContent},
AnyRawAccountDataEvent, StateEventType,
TimelineEventType::{self, *},
},
state_res::Event,
uint, MilliSecondsSinceUnixEpoch, OwnedRoomId, UInt, UserId,
};
use service::{rooms::read_receipt::pack_receipts, Services};
use super::{load_timeline, share_encrypted_room};
use crate::Ruma;
const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync";
const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] =
&[RoomMessage, RoomEncrypted, Sticker, CallInvite, PollStart, Beacon];
/// POST `/_matrix/client/unstable/org.matrix.msc3575/sync`
///
/// Sliding Sync endpoint (future endpoint: `/_matrix/client/v4/sync`)
pub(crate) async fn sync_events_v4_route(
State(services): State<crate::State>, body: Ruma<sync_events::v4::Request>,
) -> Result<sync_events::v4::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.expect("user is authenticated");
let mut body = body.body;
// Setup watchers, so if there's no response, we can wait for them
let watcher = services.globals.watch(sender_user, &sender_device);
let next_batch = services.globals.next_count()?;
let conn_id = body
.conn_id
.clone()
.unwrap_or_else(|| SINGLE_CONNECTION_SYNC.to_owned());
let globalsince = body
.pos
.as_ref()
.and_then(|string| string.parse().ok())
.unwrap_or(0);
if globalsince != 0
&& !services
.sync
.remembered(sender_user.clone(), sender_device.clone(), conn_id.clone())
{
debug!("Restarting sync stream because it was gone from the database");
return Err(Error::Request(
ErrorKind::UnknownPos,
"Connection data lost since last time".into(),
http::StatusCode::BAD_REQUEST,
));
}
if globalsince == 0 {
services
.sync
.forget_sync_request_connection(sender_user.clone(), sender_device.clone(), conn_id.clone());
}
// Get sticky parameters from cache
let known_rooms =
services
.sync
.update_sync_request_with_cache(sender_user.clone(), sender_device.clone(), &mut body);
let all_joined_rooms: Vec<_> = services
.rooms
.state_cache
.rooms_joined(sender_user)
.map(ToOwned::to_owned)
.collect()
.await;
let all_invited_rooms: Vec<_> = services
.rooms
.state_cache
.rooms_invited(sender_user)
.map(|r| r.0)
.collect()
.await;
let all_rooms = all_joined_rooms
.iter()
.chain(all_invited_rooms.iter())
.map(Clone::clone)
.collect();
if body.extensions.to_device.enabled.unwrap_or(false) {
services
.users
.remove_to_device_events(sender_user, &sender_device, globalsince)
.await;
}
let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in
let mut device_list_changes = HashSet::new();
let mut device_list_left = HashSet::new();
let mut receipts = sync_events::v4::Receipts {
rooms: BTreeMap::new(),
};
let mut account_data = sync_events::v4::AccountData {
global: Vec::new(),
rooms: BTreeMap::new(),
};
if body.extensions.account_data.enabled.unwrap_or(false) {
account_data.global = services
.account_data
.changes_since(None, sender_user, globalsince)
.await?
.into_iter()
.filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global))
.collect();
if let Some(rooms) = body.extensions.account_data.rooms {
for room in rooms {
account_data.rooms.insert(
room.clone(),
services
.account_data
.changes_since(Some(&room), sender_user, globalsince)
.await?
.into_iter()
.filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room))
.collect(),
);
}
}
}
if body.extensions.e2ee.enabled.unwrap_or(false) {
// Look for device list updates of this account
device_list_changes.extend(
services
.users
.keys_changed(sender_user.as_ref(), globalsince, None)
.map(ToOwned::to_owned)
.collect::<Vec<_>>()
.await,
);
for room_id in &all_joined_rooms {
let Ok(current_shortstatehash) = services.rooms.state.get_room_shortstatehash(room_id).await else {
error!("Room {room_id} has no state");
continue;
};
let since_shortstatehash = services
.rooms
.user
.get_token_shortstatehash(room_id, globalsince)
.await
.ok();
let encrypted_room = services
.rooms
.state_accessor
.state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")
.await
.is_ok();
if let Some(since_shortstatehash) = since_shortstatehash {
// Skip if there are only timeline changes
if since_shortstatehash == current_shortstatehash {
continue;
}
let since_encryption = services
.rooms
.state_accessor
.state_get(since_shortstatehash, &StateEventType::RoomEncryption, "")
.await;
let since_sender_member: Option<RoomMemberEventContent> = services
.rooms
.state_accessor
.state_get_content(since_shortstatehash, &StateEventType::RoomMember, sender_user.as_str())
.ok()
.await;
let joined_since_last_sync =
since_sender_member.map_or(true, |member| member.membership != MembershipState::Join);
let new_encrypted_room = encrypted_room && since_encryption.is_err();
if encrypted_room {
let current_state_ids = services
.rooms
.state_accessor
.state_full_ids(current_shortstatehash)
.await?;
let since_state_ids = services
.rooms
.state_accessor
.state_full_ids(since_shortstatehash)
.await?;
for (key, id) in current_state_ids {
if since_state_ids.get(&key) != Some(&id) {
let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else {
error!("Pdu in state not found: {id}");
continue;
};
if pdu.kind == RoomMember {
if let Some(state_key) = &pdu.state_key {
let user_id = UserId::parse(state_key.clone())
.map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;
if user_id == *sender_user {
continue;
}
let content: RoomMemberEventContent = pdu.get_content()?;
match content.membership {
MembershipState::Join => {
// A new user joined an encrypted room
if !share_encrypted_room(&services, sender_user, &user_id, Some(room_id))
.await
{
device_list_changes.insert(user_id);
}
},
MembershipState::Leave => {
// Write down users that have left encrypted rooms we are in
left_encrypted_users.insert(user_id);
},
_ => {},
}
}
}
}
}
if joined_since_last_sync || new_encrypted_room {
// If the user is in a new encrypted room, give them all joined users
device_list_changes.extend(
services
.rooms
.state_cache
.room_members(room_id)
// Don't send key updates from the sender to the sender
.ready_filter(|user_id| sender_user != user_id)
// Only send keys if the sender doesn't share an encrypted room with the target
// already
.filter_map(|user_id| {
share_encrypted_room(&services, sender_user, user_id, Some(room_id))
.map(|res| res.or_some(user_id.to_owned()))
})
.collect::<Vec<_>>()
.await,
);
}
}
}
// Look for device list updates in this room
device_list_changes.extend(
services
.users
.keys_changed(room_id.as_ref(), globalsince, None)
.map(ToOwned::to_owned)
.collect::<Vec<_>>()
.await,
);
}
for user_id in left_encrypted_users {
let dont_share_encrypted_room = !share_encrypted_room(&services, sender_user, &user_id, None).await;
// If the user doesn't share an encrypted room with the target anymore, we need
// to tell them
if dont_share_encrypted_room {
device_list_left.insert(user_id);
}
}
}
let mut lists = BTreeMap::new();
let mut todo_rooms = BTreeMap::new(); // and required state
for (list_id, list) in &body.lists {
let active_rooms = match list.filters.clone().and_then(|f| f.is_invite) {
Some(true) => &all_invited_rooms,
Some(false) => &all_joined_rooms,
None => &all_rooms,
};
let active_rooms = match list.filters.clone().map(|f| f.not_room_types) {
Some(filter) if filter.is_empty() => active_rooms.clone(),
Some(value) => filter_rooms(&services, active_rooms, &value, true).await,
None => active_rooms.clone(),
};
let active_rooms = match list.filters.clone().map(|f| f.room_types) {
Some(filter) if filter.is_empty() => active_rooms.clone(),
Some(value) => filter_rooms(&services, &active_rooms, &value, false).await,
None => active_rooms,
};
let mut new_known_rooms = BTreeSet::new();
let ranges = list.ranges.clone();
lists.insert(
list_id.clone(),
sync_events::v4::SyncList {
ops: ranges
.into_iter()
.map(|mut r| {
r.0 = r.0.clamp(
uint!(0),
UInt::try_from(active_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX),
);
r.1 =
r.1.clamp(r.0, UInt::try_from(active_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX));
let room_ids = if !active_rooms.is_empty() {
active_rooms[usize_from_ruma(r.0)..=usize_from_ruma(r.1)].to_vec()
} else {
Vec::new()
};
new_known_rooms.extend(room_ids.iter().cloned());
for room_id in &room_ids {
let todo_room = todo_rooms
.entry(room_id.clone())
.or_insert((BTreeSet::new(), 0, u64::MAX));
let limit = list
.room_details
.timeline_limit
.map_or(10, u64::from)
.min(100);
todo_room
.0
.extend(list.room_details.required_state.iter().cloned());
todo_room.1 = todo_room.1.max(limit);
// 0 means unknown because it got out of date
todo_room.2 = todo_room.2.min(
known_rooms
.get(list_id.as_str())
.and_then(|k| k.get(room_id))
.copied()
.unwrap_or(0),
);
}
sync_events::v4::SyncOp {
op: SlidingOp::Sync,
range: Some(r),
index: None,
room_ids,
room_id: None,
}
})
.collect(),
count: ruma_from_usize(active_rooms.len()),
},
);
if let Some(conn_id) = &body.conn_id {
services.sync.update_sync_known_rooms(
sender_user.clone(),
sender_device.clone(),
conn_id.clone(),
list_id.clone(),
new_known_rooms,
globalsince,
);
}
}
let mut known_subscription_rooms = BTreeSet::new();
for (room_id, room) in &body.room_subscriptions {
if !services.rooms.metadata.exists(room_id).await {
continue;
}
let todo_room = todo_rooms
.entry(room_id.clone())
.or_insert((BTreeSet::new(), 0, u64::MAX));
let limit = room.timeline_limit.map_or(10, u64::from).min(100);
todo_room.0.extend(room.required_state.iter().cloned());
todo_room.1 = todo_room.1.max(limit);
// 0 means unknown because it got out of date
todo_room.2 = todo_room.2.min(
known_rooms
.get("subscriptions")
.and_then(|k| k.get(room_id))
.copied()
.unwrap_or(0),
);
known_subscription_rooms.insert(room_id.clone());
}
for r in body.unsubscribe_rooms {
known_subscription_rooms.remove(&r);
body.room_subscriptions.remove(&r);
}
if let Some(conn_id) = &body.conn_id {
services.sync.update_sync_known_rooms(
sender_user.clone(),
sender_device.clone(),
conn_id.clone(),
"subscriptions".to_owned(),
known_subscription_rooms,
globalsince,
);
}
if let Some(conn_id) = &body.conn_id {
services.sync.update_sync_subscriptions(
sender_user.clone(),
sender_device.clone(),
conn_id.clone(),
body.room_subscriptions,
);
}
let mut rooms = BTreeMap::new();
for (room_id, (required_state_request, timeline_limit, roomsince)) in &todo_rooms {
let roomsincecount = PduCount::Normal(*roomsince);
let mut timestamp: Option<_> = None;
let mut invite_state = None;
let (timeline_pdus, limited);
if all_invited_rooms.contains(room_id) {
// TODO: figure out a timestamp we can use for remote invites
invite_state = services
.rooms
.state_cache
.invite_state(sender_user, room_id)
.await
.ok();
(timeline_pdus, limited) = (Vec::new(), true);
} else {
(timeline_pdus, limited) =
match load_timeline(&services, sender_user, room_id, roomsincecount, *timeline_limit).await {
Ok(value) => value,
Err(err) => {
warn!("Encountered missing timeline in {}, error {}", room_id, err);
continue;
},
};
}
account_data.rooms.insert(
room_id.clone(),
services
.account_data
.changes_since(Some(room_id), sender_user, *roomsince)
.await?
.into_iter()
.filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room))
.collect(),
);
let vector: Vec<_> = services
.rooms
.read_receipt
.readreceipts_since(room_id, *roomsince)
.filter_map(|(read_user, ts, v)| async move {
(!services
.users
.user_is_ignored(&read_user, sender_user)
.await)
.then_some((read_user, ts, v))
})
.collect()
.await;
let receipt_size = vector.len();
receipts
.rooms
.insert(room_id.clone(), pack_receipts(Box::new(vector.into_iter())));
if roomsince != &0
&& timeline_pdus.is_empty()
&& account_data.rooms.get(room_id).is_some_and(Vec::is_empty)
&& receipt_size == 0
{
continue;
}
let prev_batch = timeline_pdus
.first()
.map_or(Ok::<_, Error>(None), |(pdu_count, _)| {
Ok(Some(match pdu_count {
PduCount::Backfilled(_) => {
error!("timeline in backfill state?!");
"0".to_owned()
},
PduCount::Normal(c) => c.to_string(),
}))
})?
.or_else(|| {
if roomsince != &0 {
Some(roomsince.to_string())
} else {
None
}
});
let room_events: Vec<_> = timeline_pdus
.iter()
.stream()
.filter_map(|(_, pdu)| async move {
// common non-state events that are safe to skip when the sender is ignored
if matches!(
&pdu.kind,
RoomMessage
| Sticker | CallInvite
| CallNotify | RoomEncrypted
| Image | File | Audio
| Voice | Video | UnstablePollStart
| PollStart | KeyVerificationStart
| Reaction | Emote | Location
) && services
.users
.user_is_ignored(&pdu.sender, sender_user)
.await
{
return None;
}
Some(pdu.to_sync_room_event())
})
.collect()
.await;
for (_, pdu) in timeline_pdus {
let ts = MilliSecondsSinceUnixEpoch(pdu.origin_server_ts);
if DEFAULT_BUMP_TYPES.contains(pdu.event_type()) && !timestamp.is_some_and(|time| time > ts) {
timestamp = Some(ts);
}
}
let required_state = required_state_request
.iter()
.stream()
.filter_map(|state| async move {
services
.rooms
.state_accessor
.room_state_get(room_id, &state.0, &state.1)
.await
.map(|s| s.to_sync_state_event())
.ok()
})
.collect()
.await;
// Heroes
let heroes: Vec<_> = services
.rooms
.state_cache
.room_members(room_id)
.ready_filter(|member| member != sender_user)
.filter_map(|user_id| {
services
.rooms
.state_accessor
.get_member(room_id, user_id)
.map_ok(|memberevent| SlidingSyncRoomHero {
user_id: user_id.into(),
name: memberevent.displayname,
avatar: memberevent.avatar_url,
})
.ok()
})
.take(5)
.collect()
.await;
let name = match heroes.len().cmp(&(1_usize)) {
Ordering::Greater => {
let firsts = heroes[1..]
.iter()
.map(|h| h.name.clone().unwrap_or_else(|| h.user_id.to_string()))
.collect::<Vec<_>>()
.join(", ");
let last = heroes[0]
.name
.clone()
.unwrap_or_else(|| heroes[0].user_id.to_string());
Some(format!("{firsts} and {last}"))
},
Ordering::Equal => Some(
heroes[0]
.name
.clone()
.unwrap_or_else(|| heroes[0].user_id.to_string()),
),
Ordering::Less => None,
};
let heroes_avatar = if heroes.len() == 1 {
heroes[0].avatar.clone()
} else {
None
};
rooms.insert(
room_id.clone(),
sync_events::v4::SlidingSyncRoom {
name: services
.rooms
.state_accessor
.get_name(room_id)
.await
.ok()
.or(name),
avatar: if let Some(heroes_avatar) = heroes_avatar {
ruma::JsOption::Some(heroes_avatar)
} else {
match services.rooms.state_accessor.get_avatar(room_id).await {
ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url),
ruma::JsOption::Null => ruma::JsOption::Null,
ruma::JsOption::Undefined => ruma::JsOption::Undefined,
}
},
initial: Some(roomsince == &0),
is_dm: None,
invite_state,
unread_notifications: UnreadNotificationsCount {
highlight_count: Some(
services
.rooms
.user
.highlight_count(sender_user, room_id)
.await
.try_into()
.expect("notification count can't go that high"),
),
notification_count: Some(
services
.rooms
.user
.notification_count(sender_user, room_id)
.await
.try_into()
.expect("notification count can't go that high"),
),
},
timeline: room_events,
required_state,
prev_batch,
limited,
joined_count: Some(
services
.rooms
.state_cache
.room_joined_count(room_id)
.await
.unwrap_or(0)
.try_into()
.unwrap_or_else(|_| uint!(0)),
),
invited_count: Some(
services
.rooms
.state_cache
.room_invited_count(room_id)
.await
.unwrap_or(0)
.try_into()
.unwrap_or_else(|_| uint!(0)),
),
num_live: None, // Count events in timeline greater than global sync counter
timestamp,
heroes: Some(heroes),
},
);
}
if rooms
.iter()
.all(|(_, r)| r.timeline.is_empty() && r.required_state.is_empty())
{
// Hang a few seconds so requests are not spammed
// Stop hanging if new info arrives
let default = Duration::from_secs(30);
let duration = cmp::min(body.timeout.unwrap_or(default), default);
_ = tokio::time::timeout(duration, watcher).await;
}
Ok(sync_events::v4::Response {
initial: globalsince == 0,
txn_id: body.txn_id.clone(),
pos: next_batch.to_string(),
lists,
rooms,
extensions: sync_events::v4::Extensions {
to_device: if body.extensions.to_device.enabled.unwrap_or(false) {
Some(sync_events::v4::ToDevice {
events: services
.users
.get_to_device_events(sender_user, &sender_device)
.collect()
.await,
next_batch: next_batch.to_string(),
})
} else {
None
},
e2ee: sync_events::v4::E2EE {
device_lists: DeviceLists {
changed: device_list_changes.into_iter().collect(),
left: device_list_left.into_iter().collect(),
},
device_one_time_keys_count: services
.users
.count_one_time_keys(sender_user, &sender_device)
.await,
// Fallback keys are not yet supported
device_unused_fallback_key_types: None,
},
account_data,
receipts,
typing: sync_events::v4::Typing {
rooms: BTreeMap::new(),
},
},
delta_token: None,
})
}
async fn filter_rooms(
services: &Services, rooms: &[OwnedRoomId], filter: &[RoomTypeFilter], negate: bool,
) -> Vec<OwnedRoomId> {
rooms
.iter()
.stream()
.filter_map(|r| async move {
let room_type = services.rooms.state_accessor.get_room_type(r).await;
if room_type.as_ref().is_err_and(|e| !e.is_not_found()) {
return None;
}
let room_type_filter = RoomTypeFilter::from(room_type.ok());
let include = if negate {
!filter.contains(&room_type_filter)
} else {
filter.is_empty() || filter.contains(&room_type_filter)
};
include.then_some(r.to_owned())
})
.collect()
.await
}
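The list ops in sync_events_v4_route clamp each requested range into the bounds of the active room list before slicing. A sketch with usize standing in for ruma's UInt:

fn clamp_range(range: (usize, usize), len: usize) -> Option<(usize, usize)> {
    if len == 0 {
        return None; // no rooms, no window to slice
    }
    let start = range.0.clamp(0, len - 1);
    let end = range.1.clamp(start, len - 1); // end can never precede start
    Some((start, end))
}

fn main() {
    assert_eq!(clamp_range((5, 100), 10), Some((5, 9)));
    assert_eq!(clamp_range((0, 3), 0), None);
}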


@@ -9,7 +9,7 @@ use ruma::{
},
};
use crate::{Error, Result, Ruma};
use crate::{Result, Ruma};
/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
///
@@ -21,32 +21,30 @@ pub(crate) async fn update_tag_route(
) -> Result<create_tag::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let event = services
let mut tags_event = services
.account_data
.get(Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag)?;
let mut tags_event = event.map_or_else(
|| {
Ok(TagEvent {
content: TagEventContent {
tags: BTreeMap::new(),
},
})
},
|e| serde_json::from_str(e.get()).map_err(|_| Error::bad_database("Invalid account data event in db.")),
)?;
.get_room(&body.room_id, sender_user, RoomAccountDataEventType::Tag)
.await
.unwrap_or(TagEvent {
content: TagEventContent {
tags: BTreeMap::new(),
},
});
tags_event
.content
.tags
.insert(body.tag.clone().into(), body.tag_info.clone());
services.account_data.update(
Some(&body.room_id),
sender_user,
RoomAccountDataEventType::Tag,
&serde_json::to_value(tags_event).expect("to json value always works"),
)?;
services
.account_data
.update(
Some(&body.room_id),
sender_user,
RoomAccountDataEventType::Tag,
&serde_json::to_value(tags_event).expect("to json value always works"),
)
.await?;
Ok(create_tag::v3::Response {})
}
@@ -61,29 +59,27 @@ pub(crate) async fn delete_tag_route(
) -> Result<delete_tag::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let event = services
let mut tags_event = services
.account_data
.get(Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag)?;
let mut tags_event = event.map_or_else(
|| {
Ok(TagEvent {
content: TagEventContent {
tags: BTreeMap::new(),
},
})
},
|e| serde_json::from_str(e.get()).map_err(|_| Error::bad_database("Invalid account data event in db.")),
)?;
.get_room(&body.room_id, sender_user, RoomAccountDataEventType::Tag)
.await
.unwrap_or(TagEvent {
content: TagEventContent {
tags: BTreeMap::new(),
},
});
tags_event.content.tags.remove(&body.tag.clone().into());
services.account_data.update(
Some(&body.room_id),
sender_user,
RoomAccountDataEventType::Tag,
&serde_json::to_value(tags_event).expect("to json value always works"),
)?;
services
.account_data
.update(
Some(&body.room_id),
sender_user,
RoomAccountDataEventType::Tag,
&serde_json::to_value(tags_event).expect("to json value always works"),
)
.await?;
Ok(delete_tag::v3::Response {})
}
@@ -98,20 +94,15 @@ pub(crate) async fn get_tags_route(
) -> Result<get_tags::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let event = services
let tags_event = services
.account_data
.get(Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag)?;
let tags_event = event.map_or_else(
|| {
Ok(TagEvent {
content: TagEventContent {
tags: BTreeMap::new(),
},
})
},
|e| serde_json::from_str(e.get()).map_err(|_| Error::bad_database("Invalid account data event in db.")),
)?;
.get_room(&body.room_id, sender_user, RoomAccountDataEventType::Tag)
.await
.unwrap_or(TagEvent {
content: TagEventContent {
tags: BTreeMap::new(),
},
});
Ok(get_tags::v3::Response {
tags: tags_event.content.tags,
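All three tag handlers above share the same fetch-or-default shape: a failed account-data lookup falls back to an empty TagEvent instead of threading JSON errors through `?`. A sketch under that assumption, where fetch_tags is a hypothetical stand-in for account_data.get_room(..):

use std::collections::BTreeMap;

struct TagEvent {
    tags: BTreeMap<String, String>,
}

// Hypothetical lookup: errors stand in for missing or unparsable data
fn fetch_tags(found: bool) -> Result<TagEvent, ()> {
    if found {
        Ok(TagEvent { tags: BTreeMap::new() })
    } else {
        Err(())
    }
}

fn main() {
    // A failed lookup degrades to an empty tag map rather than an error
    let mut event = fetch_tags(false).unwrap_or(TagEvent { tags: BTreeMap::new() });
    event.tags.insert("m.favourite".into(), "{}".into());
    assert_eq!(event.tags.len(), 1);
}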


@@ -1,4 +1,6 @@
use axum::extract::State;
use conduit::PduEvent;
use futures::StreamExt;
use ruma::{
api::client::{error::ErrorKind, threads::get_threads},
uint,
@@ -27,20 +29,23 @@ pub(crate) async fn get_threads_route(
u64::MAX
};
let threads = services
let room_id = &body.room_id;
let threads: Vec<(u64, PduEvent)> = services
.rooms
.threads
.threads_until(sender_user, &body.room_id, from, &body.include)?
.threads_until(sender_user, &body.room_id, from, &body.include)
.await?
.take(limit)
.filter_map(Result::ok)
.filter(|(_, pdu)| {
.filter_map(|(count, pdu)| async move {
services
.rooms
.state_accessor
.user_can_see_event(sender_user, &body.room_id, &pdu.event_id)
.unwrap_or(false)
.user_can_see_event(sender_user, room_id, &pdu.event_id)
.await
.then_some((count, pdu))
})
.collect::<Vec<_>>();
.collect()
.await;
let next_batch = threads.last().map(|(count, _)| count.to_string());
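Thread pagination above derives its token from the last item that survived the visibility filter. A minimal sketch:

fn next_batch(threads: &[(u64, &str)]) -> Option<String> {
    // The PDU count of the last returned thread becomes the resume point
    threads.last().map(|(count, _)| count.to_string())
}

fn main() {
    assert_eq!(next_batch(&[(7, "root a"), (3, "root b")]), Some("3".to_owned()));
    assert_eq!(next_batch(&[]), None);
}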


@@ -2,6 +2,7 @@ use std::collections::BTreeMap;
use axum::extract::State;
use conduit::{Error, Result};
use futures::StreamExt;
use ruma::{
api::{
client::{error::ErrorKind, to_device::send_event_to_device},
@@ -24,8 +25,9 @@ pub(crate) async fn send_event_to_device_route(
// Check if this is a new transaction id
if services
.transaction_ids
.existing_txnid(sender_user, sender_device, &body.txn_id)?
.is_some()
.existing_txnid(sender_user, sender_device, &body.txn_id)
.await
.is_ok()
{
return Ok(send_event_to_device::v3::Response {});
}
@@ -53,31 +55,35 @@ pub(crate) async fn send_event_to_device_route(
continue;
}
let event_type = &body.event_type.to_string();
let event = event
.deserialize_as()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid"))?;
match target_device_id_maybe {
DeviceIdOrAllDevices::DeviceId(target_device_id) => {
services.users.add_to_device_event(
sender_user,
target_user_id,
target_device_id,
&body.event_type.to_string(),
event
.deserialize_as()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid"))?,
)?;
services
.users
.add_to_device_event(sender_user, target_user_id, target_device_id, event_type, event)
.await;
},
DeviceIdOrAllDevices::AllDevices => {
for target_device_id in services.users.all_device_ids(target_user_id) {
services.users.add_to_device_event(
sender_user,
target_user_id,
&target_device_id?,
&body.event_type.to_string(),
event
.deserialize_as()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid"))?,
)?;
}
let (event_type, event) = (&event_type, &event);
services
.users
.all_device_ids(target_user_id)
.for_each(|target_device_id| {
services.users.add_to_device_event(
sender_user,
target_user_id,
target_device_id,
event_type,
event.clone(),
)
})
.await;
},
}
}
@@ -86,7 +92,7 @@ pub(crate) async fn send_event_to_device_route(
// Save transaction id with empty data
services
.transaction_ids
.add_txnid(sender_user, sender_device, &body.txn_id, &[])?;
.add_txnid(sender_user, sender_device, &body.txn_id, &[]);
Ok(send_event_to_device::v3::Response {})
}
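The transaction-id check above makes the endpoint idempotent: a replayed txn_id returns the cached empty response instead of re-delivering. A sketch with a HashSet standing in for the transaction_ids service:

use std::collections::HashSet;

fn send_to_device(seen: &mut HashSet<String>, txn_id: &str) -> &'static str {
    if seen.contains(txn_id) {
        return "already sent"; // replay: short-circuit, deliver nothing
    }
    // ... deliver the to-device events here ...
    seen.insert(txn_id.to_owned()); // record the txn_id with empty data, as above
    "sent"
}

fn main() {
    let mut seen = HashSet::new();
    assert_eq!(send_to_device(&mut seen, "txn1"), "sent");
    assert_eq!(send_to_device(&mut seen, "txn1"), "already sent");
}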


@@ -16,7 +16,8 @@ pub(crate) async fn create_typing_event_route(
if !services
.rooms
.state_cache
.is_joined(sender_user, &body.room_id)?
.is_joined(sender_user, &body.room_id)
.await
{
return Err(Error::BadRequest(ErrorKind::forbidden(), "You are not in this room."));
}


@@ -2,7 +2,8 @@ use std::collections::BTreeMap;
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduit::{warn, Err};
use conduit::Err;
use futures::StreamExt;
use ruma::{
api::{
client::{
@@ -45,7 +46,7 @@ pub(crate) async fn get_mutual_rooms_route(
));
}
if !services.users.exists(&body.user_id)? {
if !services.users.exists(&body.user_id).await {
return Ok(mutual_rooms::unstable::Response {
joined: vec![],
next_batch_token: None,
@@ -55,9 +56,10 @@ pub(crate) async fn get_mutual_rooms_route(
let mutual_rooms: Vec<OwnedRoomId> = services
.rooms
.user
.get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])?
.filter_map(Result::ok)
.collect();
.get_shared_rooms(sender_user, &body.user_id)
.map(ToOwned::to_owned)
.collect()
.await;
Ok(mutual_rooms::unstable::Response {
joined: mutual_rooms,
@@ -99,7 +101,7 @@ pub(crate) async fn get_room_summary(
let room_id = services.rooms.alias.resolve(&body.room_id_or_alias).await?;
if !services.rooms.metadata.exists(&room_id)? {
if !services.rooms.metadata.exists(&room_id).await {
return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server"));
}
@@ -108,7 +110,7 @@ pub(crate) async fn get_room_summary(
.rooms
.state_accessor
.is_world_readable(&room_id)
.unwrap_or(false)
.await
{
return Err(Error::BadRequest(
ErrorKind::forbidden(),
@@ -122,50 +124,58 @@ pub(crate) async fn get_room_summary(
.rooms
.state_accessor
.get_canonical_alias(&room_id)
.unwrap_or(None),
.await
.ok(),
avatar_url: services
.rooms
.state_accessor
.get_avatar(&room_id)?
.get_avatar(&room_id)
.await
.into_option()
.unwrap_or_default()
.url,
guest_can_join: services.rooms.state_accessor.guest_can_join(&room_id)?,
name: services
.rooms
.state_accessor
.get_name(&room_id)
.unwrap_or(None),
guest_can_join: services.rooms.state_accessor.guest_can_join(&room_id).await,
name: services.rooms.state_accessor.get_name(&room_id).await.ok(),
num_joined_members: services
.rooms
.state_cache
.room_joined_count(&room_id)
.unwrap_or_default()
.unwrap_or_else(|| {
warn!("Room {room_id} has no member count");
0
})
.try_into()
.expect("user count should not be that big"),
.await
.unwrap_or(0)
.try_into()?,
topic: services
.rooms
.state_accessor
.get_room_topic(&room_id)
.unwrap_or(None),
.await
.ok(),
world_readable: services
.rooms
.state_accessor
.is_world_readable(&room_id)
.unwrap_or(false),
join_rule: services.rooms.state_accessor.get_join_rule(&room_id)?.0,
room_type: services.rooms.state_accessor.get_room_type(&room_id)?,
room_version: Some(services.rooms.state.get_room_version(&room_id)?),
.await,
join_rule: services
.rooms
.state_accessor
.get_join_rule(&room_id)
.await
.unwrap_or_default()
.0,
room_type: services
.rooms
.state_accessor
.get_room_type(&room_id)
.await
.ok(),
room_version: services.rooms.state.get_room_version(&room_id).await.ok(),
membership: if let Some(sender_user) = sender_user {
services
.rooms
.state_accessor
.get_member(&room_id, sender_user)?
.map_or_else(|| Some(MembershipState::Leave), |content| Some(content.membership))
.get_member(&room_id, sender_user)
.await
.map_or_else(|_| MembershipState::Leave, |content| content.membership)
.into()
} else {
None
},
@@ -173,7 +183,8 @@ pub(crate) async fn get_room_summary(
.rooms
.state_accessor
.get_room_encryption(&room_id)
.unwrap_or_else(|_e| None),
.await
.ok(),
})
}
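get_room_summary above reflects the service-layer migration from Result<Option<_>> to plain Result: optional response fields are now built with .ok() and counts fall back with .unwrap_or(0). A sketch of that conversion with illustrative types:

fn get_name(exists: bool) -> Result<String, ()> {
    if exists {
        Ok("Room".to_owned())
    } else {
        Err(()) // "not found" is now an error, not Ok(None)
    }
}

fn main() {
    let name: Option<String> = get_name(false).ok(); // error becomes None
    assert_eq!(name, None);
    let joined: u64 = Err::<u64, ()>(()).unwrap_or(0); // counts default to 0
    assert_eq!(joined, 0);
}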
@@ -191,13 +202,14 @@ pub(crate) async fn delete_timezone_key_route(
return Err!(Request(Forbidden("You cannot update the profile of another user")));
}
services.users.set_timezone(&body.user_id, None).await?;
services.users.set_timezone(&body.user_id, None);
if services.globals.allow_local_presence() {
// Presence update
services
.presence
.ping_presence(&body.user_id, &PresenceState::Online)?;
.ping_presence(&body.user_id, &PresenceState::Online)
.await?;
}
Ok(delete_timezone_key::unstable::Response {})
@@ -217,16 +229,14 @@ pub(crate) async fn set_timezone_key_route(
return Err!(Request(Forbidden("You cannot update the profile of another user")));
}
services
.users
.set_timezone(&body.user_id, body.tz.clone())
.await?;
services.users.set_timezone(&body.user_id, body.tz.clone());
if services.globals.allow_local_presence() {
// Presence update
services
.presence
.ping_presence(&body.user_id, &PresenceState::Online)?;
.ping_presence(&body.user_id, &PresenceState::Online)
.await?;
}
Ok(set_timezone_key::unstable::Response {})
@@ -280,10 +290,11 @@ pub(crate) async fn set_profile_key_route(
.rooms
.state_cache
.rooms_joined(&body.user_id)
.filter_map(Result::ok)
.collect();
.map(Into::into)
.collect()
.await;
update_displayname(&services, &body.user_id, Some(profile_key_value.to_string()), all_joined_rooms).await?;
update_displayname(&services, &body.user_id, Some(profile_key_value.to_string()), &all_joined_rooms).await?;
} else if body.key == "avatar_url" {
let mxc = ruma::OwnedMxcUri::from(profile_key_value.to_string());
@@ -291,21 +302,23 @@ pub(crate) async fn set_profile_key_route(
.rooms
.state_cache
.rooms_joined(&body.user_id)
.filter_map(Result::ok)
.collect();
.map(Into::into)
.collect()
.await;
update_avatar_url(&services, &body.user_id, Some(mxc), None, all_joined_rooms).await?;
update_avatar_url(&services, &body.user_id, Some(mxc), None, &all_joined_rooms).await?;
} else {
services
.users
.set_profile_key(&body.user_id, &body.key, Some(profile_key_value.clone()))?;
.set_profile_key(&body.user_id, &body.key, Some(profile_key_value.clone()));
}
if services.globals.allow_local_presence() {
// Presence update
services
.presence
.ping_presence(&body.user_id, &PresenceState::Online)?;
.ping_presence(&body.user_id, &PresenceState::Online)
.await?;
}
Ok(set_profile_key::unstable::Response {})
@@ -335,30 +348,33 @@ pub(crate) async fn delete_profile_key_route(
.rooms
.state_cache
.rooms_joined(&body.user_id)
.filter_map(Result::ok)
.collect();
.map(Into::into)
.collect()
.await;
update_displayname(&services, &body.user_id, None, all_joined_rooms).await?;
update_displayname(&services, &body.user_id, None, &all_joined_rooms).await?;
} else if body.key == "avatar_url" {
let all_joined_rooms: Vec<OwnedRoomId> = services
.rooms
.state_cache
.rooms_joined(&body.user_id)
.filter_map(Result::ok)
.collect();
.map(Into::into)
.collect()
.await;
update_avatar_url(&services, &body.user_id, None, None, all_joined_rooms).await?;
update_avatar_url(&services, &body.user_id, None, None, &all_joined_rooms).await?;
} else {
services
.users
.set_profile_key(&body.user_id, &body.key, None)?;
.set_profile_key(&body.user_id, &body.key, None);
}
if services.globals.allow_local_presence() {
// Presence update
services
.presence
.ping_presence(&body.user_id, &PresenceState::Online)?;
.ping_presence(&body.user_id, &PresenceState::Online)
.await?;
}
Ok(delete_profile_key::unstable::Response {})
@@ -386,26 +402,25 @@ pub(crate) async fn get_timezone_key_route(
)
.await
{
if !services.users.exists(&body.user_id)? {
if !services.users.exists(&body.user_id).await {
services.users.create(&body.user_id, None)?;
}
services
.users
.set_displayname(&body.user_id, response.displayname.clone())
.await?;
.set_displayname(&body.user_id, response.displayname.clone());
services
.users
.set_avatar_url(&body.user_id, response.avatar_url.clone())
.await?;
.set_avatar_url(&body.user_id, response.avatar_url.clone());
services
.users
.set_blurhash(&body.user_id, response.blurhash.clone())
.await?;
.set_blurhash(&body.user_id, response.blurhash.clone());
services
.users
.set_timezone(&body.user_id, response.tz.clone())
.await?;
.set_timezone(&body.user_id, response.tz.clone());
return Ok(get_timezone_key::unstable::Response {
tz: response.tz,
@@ -413,14 +428,14 @@ pub(crate) async fn get_timezone_key_route(
}
}
if !services.users.exists(&body.user_id)? {
if !services.users.exists(&body.user_id).await {
// Return 404 if this user doesn't exist and we couldn't fetch it over
// federation
return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found."));
}
Ok(get_timezone_key::unstable::Response {
tz: services.users.timezone(&body.user_id)?,
tz: services.users.timezone(&body.user_id).await.ok(),
})
}
@@ -448,32 +463,31 @@ pub(crate) async fn get_profile_key_route(
)
.await
{
if !services.users.exists(&body.user_id)? {
if !services.users.exists(&body.user_id).await {
services.users.create(&body.user_id, None)?;
}
services
.users
.set_displayname(&body.user_id, response.displayname.clone())
.await?;
.set_displayname(&body.user_id, response.displayname.clone());
services
.users
.set_avatar_url(&body.user_id, response.avatar_url.clone())
.await?;
.set_avatar_url(&body.user_id, response.avatar_url.clone());
services
.users
.set_blurhash(&body.user_id, response.blurhash.clone())
.await?;
.set_blurhash(&body.user_id, response.blurhash.clone());
services
.users
.set_timezone(&body.user_id, response.tz.clone())
.await?;
.set_timezone(&body.user_id, response.tz.clone());
if let Some(value) = response.custom_profile_fields.get(&body.key) {
profile_key_value.insert(body.key.clone(), value.clone());
services
.users
.set_profile_key(&body.user_id, &body.key, Some(value.clone()))?;
.set_profile_key(&body.user_id, &body.key, Some(value.clone()));
} else {
return Err!(Request(NotFound("The requested profile key does not exist.")));
}
@@ -484,13 +498,13 @@ pub(crate) async fn get_profile_key_route(
}
}
if !services.users.exists(&body.user_id)? {
if !services.users.exists(&body.user_id).await {
// Return 404 if this user doesn't exist and we couldn't fetch it over
// federation
return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found."));
return Err!(Request(NotFound("Profile was not found.")));
}
if let Some(value) = services.users.profile_key(&body.user_id, &body.key)? {
if let Ok(value) = services.users.profile_key(&body.user_id, &body.key).await {
profile_key_value.insert(body.key.clone(), value);
} else {
return Err!(Request(NotFound("The requested profile key does not exist.")));


@@ -1,6 +1,7 @@
use std::collections::BTreeMap;
use axum::{extract::State, response::IntoResponse, Json};
use futures::StreamExt;
use ruma::api::client::{
discovery::{
discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo},
@@ -173,7 +174,7 @@ pub(crate) async fn conduwuit_server_version() -> Result<impl IntoResponse> {
/// homeserver. Endpoint is disabled if federation is disabled for privacy. This
/// only includes active users (not deactivated, no guests, etc)
pub(crate) async fn conduwuit_local_user_count(State(services): State<crate::State>) -> Result<impl IntoResponse> {
let user_count = services.users.list_local_users()?.len();
let user_count = services.users.list_local_users().count().await;
Ok(Json(serde_json::json!({
"count": user_count


@@ -1,4 +1,6 @@
use axum::extract::State;
use conduit::utils::TryFutureExtExt;
use futures::{pin_mut, StreamExt};
use ruma::{
api::client::user_directory::search_users,
events::{
@@ -21,14 +23,12 @@ pub(crate) async fn search_users_route(
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let limit = usize::try_from(body.limit).unwrap_or(10); // default limit is 10
let mut users = services.users.iter().filter_map(|user_id| {
let users = services.users.stream().filter_map(|user_id| async {
// Filter out buggy users (they should not exist, but you never know...)
let user_id = user_id.ok()?;
let user = search_users::v3::User {
user_id: user_id.clone(),
display_name: services.users.displayname(&user_id).ok()?,
avatar_url: services.users.avatar_url(&user_id).ok()?,
user_id: user_id.to_owned(),
display_name: services.users.displayname(user_id).await.ok(),
avatar_url: services.users.avatar_url(user_id).await.ok(),
};
let user_id_matches = user
@@ -56,20 +56,15 @@ pub(crate) async fn search_users_route(
let user_is_in_public_rooms = services
.rooms
.state_cache
.rooms_joined(&user_id)
.filter_map(Result::ok)
.rooms_joined(&user.user_id)
.any(|room| {
services
.rooms
.state_accessor
.room_state_get(&room, &StateEventType::RoomJoinRules, "")
.map_or(false, |event| {
event.map_or(false, |event| {
serde_json::from_str(event.content.get())
.map_or(false, |r: RoomJoinRulesEventContent| r.join_rule == JoinRule::Public)
})
})
});
.room_state_get_content::<RoomJoinRulesEventContent>(room, &StateEventType::RoomJoinRules, "")
.map_ok_or(false, |content| content.join_rule == JoinRule::Public)
})
.await;
if user_is_in_public_rooms {
user_visible = true;
@@ -77,25 +72,22 @@ pub(crate) async fn search_users_route(
let user_is_in_shared_rooms = services
.rooms
.user
.get_shared_rooms(vec![sender_user.clone(), user_id])
.ok()?
.next()
.is_some();
.has_shared_rooms(sender_user, &user.user_id)
.await;
if user_is_in_shared_rooms {
user_visible = true;
}
}
if !user_visible {
return None;
}
Some(user)
user_visible.then_some(user)
});
let results = users.by_ref().take(limit).collect();
let limited = users.next().is_some();
pin_mut!(users);
let limited = users.by_ref().next().await.is_some();
let results = users.take(limit).collect().await;
Ok(search_users::v3::Response {
results,
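The limited/results split above probes one item off the pinned stream before take(limit) collects the rest; note the probed item is consumed, exactly as in the code. A sketch on plain integers with the futures crate:

use futures::{pin_mut, stream, StreamExt};

async fn split(users: Vec<u32>, limit: usize) -> (bool, Vec<u32>) {
    let users = stream::iter(users);
    pin_mut!(users);
    // limited is true if the stream yielded anything at all here
    let limited = users.by_ref().next().await.is_some();
    let results = users.take(limit).collect().await;
    (limited, results)
}

fn main() {
    let (limited, results) = futures::executor::block_on(split(vec![1, 2, 3], 10));
    assert!(limited);
    assert_eq!(results, vec![2, 3]); // the first user went to the probe
}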


@@ -24,7 +24,7 @@ pub(crate) async fn turn_server_route(
return Err!(Request(NotFound("Not Found")));
}
let turn_secret = services.globals.turn_secret().clone();
let turn_secret = services.globals.turn_secret.clone();
let (username, password) = if !turn_secret.is_empty() {
let expiry = SecondsSinceUnixEpoch::from_system_time(


@@ -1,5 +1,3 @@
#![recursion_limit = "192"]
pub mod client;
pub mod router;
pub mod server;


@@ -22,101 +22,102 @@ use crate::{client, server};
pub fn build(router: Router<State>, server: &Server) -> Router<State> {
let config = &server.config;
let mut router = router
.ruma_route(client::get_timezone_key_route)
.ruma_route(client::get_profile_key_route)
.ruma_route(client::set_profile_key_route)
.ruma_route(client::delete_profile_key_route)
.ruma_route(client::set_timezone_key_route)
.ruma_route(client::delete_timezone_key_route)
.ruma_route(client::appservice_ping)
.ruma_route(client::get_supported_versions_route)
.ruma_route(client::get_register_available_route)
.ruma_route(client::register_route)
.ruma_route(client::get_login_types_route)
.ruma_route(client::login_route)
.ruma_route(client::whoami_route)
.ruma_route(client::logout_route)
.ruma_route(client::logout_all_route)
.ruma_route(client::change_password_route)
.ruma_route(client::deactivate_route)
.ruma_route(client::third_party_route)
.ruma_route(client::request_3pid_management_token_via_email_route)
.ruma_route(client::request_3pid_management_token_via_msisdn_route)
.ruma_route(client::check_registration_token_validity)
.ruma_route(client::get_capabilities_route)
.ruma_route(client::get_pushrules_all_route)
.ruma_route(client::set_pushrule_route)
.ruma_route(client::get_pushrule_route)
.ruma_route(client::set_pushrule_enabled_route)
.ruma_route(client::get_pushrule_enabled_route)
.ruma_route(client::get_pushrule_actions_route)
.ruma_route(client::set_pushrule_actions_route)
.ruma_route(client::delete_pushrule_route)
.ruma_route(client::get_room_event_route)
.ruma_route(client::get_room_aliases_route)
.ruma_route(client::get_filter_route)
.ruma_route(client::create_filter_route)
.ruma_route(client::create_openid_token_route)
.ruma_route(client::set_global_account_data_route)
.ruma_route(client::set_room_account_data_route)
.ruma_route(client::get_global_account_data_route)
.ruma_route(client::get_room_account_data_route)
.ruma_route(client::set_displayname_route)
.ruma_route(client::get_displayname_route)
.ruma_route(client::set_avatar_url_route)
.ruma_route(client::get_avatar_url_route)
.ruma_route(client::get_profile_route)
.ruma_route(client::set_presence_route)
.ruma_route(client::get_presence_route)
.ruma_route(client::upload_keys_route)
.ruma_route(client::get_keys_route)
.ruma_route(client::claim_keys_route)
.ruma_route(client::create_backup_version_route)
.ruma_route(client::update_backup_version_route)
.ruma_route(client::delete_backup_version_route)
.ruma_route(client::get_latest_backup_info_route)
.ruma_route(client::get_backup_info_route)
.ruma_route(client::add_backup_keys_route)
.ruma_route(client::add_backup_keys_for_room_route)
.ruma_route(client::add_backup_keys_for_session_route)
.ruma_route(client::delete_backup_keys_for_room_route)
.ruma_route(client::delete_backup_keys_for_session_route)
.ruma_route(client::delete_backup_keys_route)
.ruma_route(client::get_backup_keys_for_room_route)
.ruma_route(client::get_backup_keys_for_session_route)
.ruma_route(client::get_backup_keys_route)
.ruma_route(client::set_read_marker_route)
.ruma_route(client::create_receipt_route)
.ruma_route(client::create_typing_event_route)
.ruma_route(client::create_room_route)
.ruma_route(client::redact_event_route)
.ruma_route(client::report_event_route)
.ruma_route(client::create_alias_route)
.ruma_route(client::delete_alias_route)
.ruma_route(client::get_alias_route)
.ruma_route(client::join_room_by_id_route)
.ruma_route(client::join_room_by_id_or_alias_route)
.ruma_route(client::joined_members_route)
.ruma_route(client::leave_room_route)
.ruma_route(client::forget_room_route)
.ruma_route(client::joined_rooms_route)
.ruma_route(client::kick_user_route)
.ruma_route(client::ban_user_route)
.ruma_route(client::unban_user_route)
.ruma_route(client::invite_user_route)
.ruma_route(client::set_room_visibility_route)
.ruma_route(client::get_room_visibility_route)
.ruma_route(client::get_public_rooms_route)
.ruma_route(client::get_public_rooms_filtered_route)
.ruma_route(client::search_users_route)
.ruma_route(client::get_member_events_route)
.ruma_route(client::get_protocols_route)
.ruma_route(&client::get_timezone_key_route)
.ruma_route(&client::get_profile_key_route)
.ruma_route(&client::set_profile_key_route)
.ruma_route(&client::delete_profile_key_route)
.ruma_route(&client::set_timezone_key_route)
.ruma_route(&client::delete_timezone_key_route)
.ruma_route(&client::appservice_ping)
.ruma_route(&client::get_supported_versions_route)
.ruma_route(&client::get_register_available_route)
.ruma_route(&client::register_route)
.ruma_route(&client::get_login_types_route)
.ruma_route(&client::login_route)
.ruma_route(&client::whoami_route)
.ruma_route(&client::logout_route)
.ruma_route(&client::logout_all_route)
.ruma_route(&client::change_password_route)
.ruma_route(&client::deactivate_route)
.ruma_route(&client::third_party_route)
.ruma_route(&client::request_3pid_management_token_via_email_route)
.ruma_route(&client::request_3pid_management_token_via_msisdn_route)
.ruma_route(&client::check_registration_token_validity)
.ruma_route(&client::get_capabilities_route)
.ruma_route(&client::get_pushrules_all_route)
.ruma_route(&client::set_pushrule_route)
.ruma_route(&client::get_pushrule_route)
.ruma_route(&client::set_pushrule_enabled_route)
.ruma_route(&client::get_pushrule_enabled_route)
.ruma_route(&client::get_pushrule_actions_route)
.ruma_route(&client::set_pushrule_actions_route)
.ruma_route(&client::delete_pushrule_route)
.ruma_route(&client::get_room_event_route)
.ruma_route(&client::get_room_aliases_route)
.ruma_route(&client::get_filter_route)
.ruma_route(&client::create_filter_route)
.ruma_route(&client::create_openid_token_route)
.ruma_route(&client::set_global_account_data_route)
.ruma_route(&client::set_room_account_data_route)
.ruma_route(&client::get_global_account_data_route)
.ruma_route(&client::get_room_account_data_route)
.ruma_route(&client::set_displayname_route)
.ruma_route(&client::get_displayname_route)
.ruma_route(&client::set_avatar_url_route)
.ruma_route(&client::get_avatar_url_route)
.ruma_route(&client::get_profile_route)
.ruma_route(&client::set_presence_route)
.ruma_route(&client::get_presence_route)
.ruma_route(&client::upload_keys_route)
.ruma_route(&client::get_keys_route)
.ruma_route(&client::claim_keys_route)
.ruma_route(&client::create_backup_version_route)
.ruma_route(&client::update_backup_version_route)
.ruma_route(&client::delete_backup_version_route)
.ruma_route(&client::get_latest_backup_info_route)
.ruma_route(&client::get_backup_info_route)
.ruma_route(&client::add_backup_keys_route)
.ruma_route(&client::add_backup_keys_for_room_route)
.ruma_route(&client::add_backup_keys_for_session_route)
.ruma_route(&client::delete_backup_keys_for_room_route)
.ruma_route(&client::delete_backup_keys_for_session_route)
.ruma_route(&client::delete_backup_keys_route)
.ruma_route(&client::get_backup_keys_for_room_route)
.ruma_route(&client::get_backup_keys_for_session_route)
.ruma_route(&client::get_backup_keys_route)
.ruma_route(&client::set_read_marker_route)
.ruma_route(&client::create_receipt_route)
.ruma_route(&client::create_typing_event_route)
.ruma_route(&client::create_room_route)
.ruma_route(&client::redact_event_route)
.ruma_route(&client::report_event_route)
.ruma_route(&client::report_room_route)
.ruma_route(&client::create_alias_route)
.ruma_route(&client::delete_alias_route)
.ruma_route(&client::get_alias_route)
.ruma_route(&client::join_room_by_id_route)
.ruma_route(&client::join_room_by_id_or_alias_route)
.ruma_route(&client::joined_members_route)
.ruma_route(&client::leave_room_route)
.ruma_route(&client::forget_room_route)
.ruma_route(&client::joined_rooms_route)
.ruma_route(&client::kick_user_route)
.ruma_route(&client::ban_user_route)
.ruma_route(&client::unban_user_route)
.ruma_route(&client::invite_user_route)
.ruma_route(&client::set_room_visibility_route)
.ruma_route(&client::get_room_visibility_route)
.ruma_route(&client::get_public_rooms_route)
.ruma_route(&client::get_public_rooms_filtered_route)
.ruma_route(&client::search_users_route)
.ruma_route(&client::get_member_events_route)
.ruma_route(&client::get_protocols_route)
.route("/_matrix/client/unstable/thirdparty/protocols",
get(client::get_protocols_route_unstable))
.ruma_route(client::send_message_event_route)
.ruma_route(client::send_state_event_for_key_route)
.ruma_route(client::get_state_events_route)
.ruma_route(client::get_state_events_for_key_route)
.ruma_route(&client::send_message_event_route)
.ruma_route(&client::send_state_event_for_key_route)
.ruma_route(&client::get_state_events_route)
.ruma_route(&client::get_state_events_for_key_route)
// Ruma doesn't have support for multiple paths for a single endpoint yet, and these routes
// share one Ruma request / response type pair with {get,send}_state_event_for_key_route
.route(
@@ -140,46 +141,46 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
get(client::get_state_events_for_empty_key_route)
.put(client::send_state_event_for_empty_key_route),
)
.ruma_route(client::sync_events_route)
.ruma_route(client::sync_events_v4_route)
.ruma_route(client::get_context_route)
.ruma_route(client::get_message_events_route)
.ruma_route(client::search_events_route)
.ruma_route(client::turn_server_route)
.ruma_route(client::send_event_to_device_route)
.ruma_route(client::create_content_route)
.ruma_route(client::get_content_thumbnail_route)
.ruma_route(client::get_content_route)
.ruma_route(client::get_content_as_filename_route)
.ruma_route(client::get_media_preview_route)
.ruma_route(client::get_media_config_route)
.ruma_route(client::get_devices_route)
.ruma_route(client::get_device_route)
.ruma_route(client::update_device_route)
.ruma_route(client::delete_device_route)
.ruma_route(client::delete_devices_route)
.ruma_route(client::get_tags_route)
.ruma_route(client::update_tag_route)
.ruma_route(client::delete_tag_route)
.ruma_route(client::upload_signing_keys_route)
.ruma_route(client::upload_signatures_route)
.ruma_route(client::get_key_changes_route)
.ruma_route(client::get_pushers_route)
.ruma_route(client::set_pushers_route)
.ruma_route(client::upgrade_room_route)
.ruma_route(client::get_threads_route)
.ruma_route(client::get_relating_events_with_rel_type_and_event_type_route)
.ruma_route(client::get_relating_events_with_rel_type_route)
.ruma_route(client::get_relating_events_route)
.ruma_route(client::get_hierarchy_route)
.ruma_route(client::get_mutual_rooms_route)
.ruma_route(client::get_room_summary)
.ruma_route(&client::sync_events_route)
.ruma_route(&client::sync_events_v4_route)
.ruma_route(&client::get_context_route)
.ruma_route(&client::get_message_events_route)
.ruma_route(&client::search_events_route)
.ruma_route(&client::turn_server_route)
.ruma_route(&client::send_event_to_device_route)
.ruma_route(&client::create_content_route)
.ruma_route(&client::get_content_thumbnail_route)
.ruma_route(&client::get_content_route)
.ruma_route(&client::get_content_as_filename_route)
.ruma_route(&client::get_media_preview_route)
.ruma_route(&client::get_media_config_route)
.ruma_route(&client::get_devices_route)
.ruma_route(&client::get_device_route)
.ruma_route(&client::update_device_route)
.ruma_route(&client::delete_device_route)
.ruma_route(&client::delete_devices_route)
.ruma_route(&client::get_tags_route)
.ruma_route(&client::update_tag_route)
.ruma_route(&client::delete_tag_route)
.ruma_route(&client::upload_signing_keys_route)
.ruma_route(&client::upload_signatures_route)
.ruma_route(&client::get_key_changes_route)
.ruma_route(&client::get_pushers_route)
.ruma_route(&client::set_pushers_route)
.ruma_route(&client::upgrade_room_route)
.ruma_route(&client::get_threads_route)
.ruma_route(&client::get_relating_events_with_rel_type_and_event_type_route)
.ruma_route(&client::get_relating_events_with_rel_type_route)
.ruma_route(&client::get_relating_events_route)
.ruma_route(&client::get_hierarchy_route)
.ruma_route(&client::get_mutual_rooms_route)
.ruma_route(&client::get_room_summary)
.route(
"/_matrix/client/unstable/im.nheko.summary/rooms/:room_id_or_alias/summary",
get(client::get_room_summary_legacy)
)
.ruma_route(client::well_known_support)
.ruma_route(client::well_known_client)
.ruma_route(&client::well_known_support)
.ruma_route(&client::well_known_client)
.route("/_conduwuit/server_version", get(client::conduwuit_server_version))
.route("/_matrix/client/r0/rooms/:room_id/initialSync", get(initial_sync))
.route("/_matrix/client/v3/rooms/:room_id/initialSync", get(initial_sync))
@ -187,35 +188,35 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
if config.allow_federation {
router = router
.ruma_route(server::get_server_version_route)
.ruma_route(&server::get_server_version_route)
.route("/_matrix/key/v2/server", get(server::get_server_keys_route))
.route("/_matrix/key/v2/server/:key_id", get(server::get_server_keys_deprecated_route))
.ruma_route(server::get_public_rooms_route)
.ruma_route(server::get_public_rooms_filtered_route)
.ruma_route(server::send_transaction_message_route)
.ruma_route(server::get_event_route)
.ruma_route(server::get_backfill_route)
.ruma_route(server::get_missing_events_route)
.ruma_route(server::get_event_authorization_route)
.ruma_route(server::get_room_state_route)
.ruma_route(server::get_room_state_ids_route)
.ruma_route(server::create_leave_event_template_route)
.ruma_route(server::create_leave_event_v1_route)
.ruma_route(server::create_leave_event_v2_route)
.ruma_route(server::create_join_event_template_route)
.ruma_route(server::create_join_event_v1_route)
.ruma_route(server::create_join_event_v2_route)
.ruma_route(server::create_invite_route)
.ruma_route(server::get_devices_route)
.ruma_route(server::get_room_information_route)
.ruma_route(server::get_profile_information_route)
.ruma_route(server::get_keys_route)
.ruma_route(server::claim_keys_route)
.ruma_route(server::get_openid_userinfo_route)
.ruma_route(server::get_hierarchy_route)
.ruma_route(server::well_known_server)
.ruma_route(server::get_content_route)
.ruma_route(server::get_content_thumbnail_route)
.ruma_route(&server::get_public_rooms_route)
.ruma_route(&server::get_public_rooms_filtered_route)
.ruma_route(&server::send_transaction_message_route)
.ruma_route(&server::get_event_route)
.ruma_route(&server::get_backfill_route)
.ruma_route(&server::get_missing_events_route)
.ruma_route(&server::get_event_authorization_route)
.ruma_route(&server::get_room_state_route)
.ruma_route(&server::get_room_state_ids_route)
.ruma_route(&server::create_leave_event_template_route)
.ruma_route(&server::create_leave_event_v1_route)
.ruma_route(&server::create_leave_event_v2_route)
.ruma_route(&server::create_join_event_template_route)
.ruma_route(&server::create_join_event_v1_route)
.ruma_route(&server::create_join_event_v2_route)
.ruma_route(&server::create_invite_route)
.ruma_route(&server::get_devices_route)
.ruma_route(&server::get_room_information_route)
.ruma_route(&server::get_profile_information_route)
.ruma_route(&server::get_keys_route)
.ruma_route(&server::claim_keys_route)
.ruma_route(&server::get_openid_userinfo_route)
.ruma_route(&server::get_hierarchy_route)
.ruma_route(&server::well_known_server)
.ruma_route(&server::get_content_route)
.ruma_route(&server::get_content_thumbnail_route)
.route("/_conduwuit/local_user_count", get(client::conduwuit_local_user_count));
} else {
router = router
@ -227,11 +228,11 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
if config.allow_legacy_media {
router = router
.ruma_route(client::get_media_config_legacy_route)
.ruma_route(client::get_media_preview_legacy_route)
.ruma_route(client::get_content_legacy_route)
.ruma_route(client::get_content_as_filename_legacy_route)
.ruma_route(client::get_content_thumbnail_legacy_route)
.ruma_route(&client::get_media_config_legacy_route)
.ruma_route(&client::get_media_preview_legacy_route)
.ruma_route(&client::get_content_legacy_route)
.ruma_route(&client::get_content_as_filename_legacy_route)
.ruma_route(&client::get_content_thumbnail_legacy_route)
.route("/_matrix/media/v1/config", get(client::get_media_config_legacy_legacy_route))
.route("/_matrix/media/v1/upload", post(client::create_content_legacy_route))
.route(


@ -10,7 +10,10 @@ use super::{auth, auth::Auth, request, request::Request};
use crate::{service::appservice::RegistrationInfo, State};
/// Extractor for Ruma request structs
pub(crate) struct Args<T> {
pub(crate) struct Args<T>
where
T: IncomingRequest + Send + Sync + 'static,
{
/// Request struct body
pub(crate) body: T,
@ -38,14 +41,14 @@ pub(crate) struct Args<T> {
#[async_trait]
impl<T> FromRequest<State, Body> for Args<T>
where
T: IncomingRequest,
T: IncomingRequest + Send + Sync + 'static,
{
type Rejection = Error;
async fn from_request(request: hyper::Request<Body>, services: &State) -> Result<Self, Self::Rejection> {
let mut request = request::from(services, request).await?;
let mut json_body = serde_json::from_slice::<CanonicalJsonValue>(&request.body).ok();
let auth = auth::auth(services, &mut request, &json_body, &T::METADATA).await?;
let auth = auth::auth(services, &mut request, json_body.as_ref(), &T::METADATA).await?;
Ok(Self {
body: make_body::<T>(services, &mut request, &mut json_body, &auth)?,
origin: auth.origin,
@ -57,7 +60,10 @@ where
}
}
impl<T> Deref for Args<T> {
impl<T> Deref for Args<T>
where
T: IncomingRequest + Send + Sync + 'static,
{
type Target = T;
fn deref(&self) -> &Self::Target { &self.body }
@ -67,7 +73,7 @@ fn make_body<T>(
services: &Services, request: &mut Request, json_body: &mut Option<CanonicalJsonValue>, auth: &Auth,
) -> Result<T>
where
T: IncomingRequest,
T: IncomingRequest + Send + Sync + 'static,
{
let body = if let Some(CanonicalJsonValue::Object(json_body)) = json_body {
let user_id = auth.sender_user.clone().unwrap_or_else(|| {
@ -77,15 +83,13 @@ where
let uiaa_request = json_body
.get("auth")
.and_then(|auth| auth.as_object())
.and_then(CanonicalJsonValue::as_object)
.and_then(|auth| auth.get("session"))
.and_then(|session| session.as_str())
.and_then(CanonicalJsonValue::as_str)
.and_then(|session| {
services.uiaa.get_uiaa_request(
&user_id,
&auth.sender_device.clone().unwrap_or_else(|| EMPTY.into()),
session,
)
services
.uiaa
.get_uiaa_request(&user_id, auth.sender_device.as_deref(), session)
});
if let Some(CanonicalJsonValue::Object(initial_request)) = uiaa_request {
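
The UIAA lookup above now walks the body with CanonicalJsonValue's own accessors (as_object, as_str) and passes the device ID along as Option<&str>. A sketch of the same navigation, with serde_json::Value standing in for CanonicalJsonValue:

use serde_json::{json, Value};

// pull auth.session out of a request body, mirroring the chain above
fn uiaa_session(body: &Value) -> Option<&str> {
    body.get("auth")
        .and_then(Value::as_object)
        .and_then(|auth| auth.get("session"))
        .and_then(Value::as_str)
}

fn main() {
    let body = json!({"auth": {"session": "abc123", "type": "m.login.password"}});
    assert_eq!(uiaa_session(&body), Some("abc123"));
    assert_eq!(uiaa_session(&json!({})), None);
}
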


@ -1,19 +1,20 @@
use std::collections::BTreeMap;
use axum::RequestPartsExt;
use axum_extra::{
headers::{authorization::Bearer, Authorization},
typed_header::TypedHeaderRejectionReason,
TypedHeader,
};
use conduit::{debug_info, warn, Err, Error, Result};
use conduit::{debug_error, err, warn, Err, Error, Result};
use http::uri::PathAndQuery;
use ruma::{
api::{client::error::ErrorKind, AuthScheme, Metadata},
server_util::authorization::XMatrix,
CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId,
CanonicalJsonObject, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId,
};
use service::{
server_keys::{PubKeyMap, PubKeys},
Services,
};
use service::Services;
use super::request::Request;
use crate::service::appservice::RegistrationInfo;
@ -33,7 +34,7 @@ pub(super) struct Auth {
}
pub(super) async fn auth(
services: &Services, request: &mut Request, json_body: &Option<CanonicalJsonValue>, metadata: &Metadata,
services: &Services, request: &mut Request, json_body: Option<&CanonicalJsonValue>, metadata: &Metadata,
) -> Result<Auth> {
let bearer: Option<TypedHeader<Authorization<Bearer>>> = request.parts.extract().await?;
let token = match &bearer {
@ -44,8 +45,8 @@ pub(super) async fn auth(
let token = if let Some(token) = token {
if let Some(reg_info) = services.appservice.find_from_token(token).await {
Token::Appservice(Box::new(reg_info))
} else if let Some((user_id, device_id)) = services.users.find_from_token(token)? {
Token::User((user_id, OwnedDeviceId::from(device_id)))
} else if let Ok((user_id, device_id)) = services.users.find_from_token(token).await {
Token::User((user_id, device_id))
} else {
Token::Invalid
}
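
Token classification keeps its priority order, appservice registration first, then user token, else invalid, but the user lookup is now async and yields an owned device ID directly. A runnable sketch of that order, with stand-in lookups rather than the real service signatures:

#[derive(Debug, PartialEq)]
enum Token {
    Appservice(String),
    User((String, String)),
    Invalid,
    None,
}

// stand-in for services.appservice.find_from_token
fn find_appservice(token: &str) -> Option<String> {
    (token == "as_secret").then(|| "bridge".to_owned())
}

// stand-in for services.users.find_from_token
fn find_user(token: &str) -> Result<(String, String), ()> {
    (token == "user_secret")
        .then(|| ("@alice:example.org".to_owned(), "DEVICE0".to_owned()))
        .ok_or(())
}

fn classify(token: Option<&str>) -> Token {
    let Some(token) = token else { return Token::None };
    if let Some(registration) = find_appservice(token) {
        Token::Appservice(registration) // appservices win over user tokens
    } else if let Ok(user_device) = find_user(token) {
        Token::User(user_device)
    } else {
        Token::Invalid
    }
}

fn main() {
    assert_eq!(classify(Some("as_secret")), Token::Appservice("bridge".into()));
    assert!(matches!(classify(Some("user_secret")), Token::User(_)));
    assert_eq!(classify(Some("bogus")), Token::Invalid);
    assert_eq!(classify(None), Token::None);
}
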
@ -98,7 +99,7 @@ pub(super) async fn auth(
))
}
},
(AuthScheme::AccessToken, Token::Appservice(info)) => Ok(auth_appservice(services, request, info)?),
(AuthScheme::AccessToken, Token::Appservice(info)) => Ok(auth_appservice(services, request, info).await?),
(AuthScheme::None | AuthScheme::AccessTokenOptional | AuthScheme::AppserviceToken, Token::Appservice(info)) => {
Ok(Auth {
origin: None,
@ -150,28 +151,25 @@ pub(super) async fn auth(
}
}
fn auth_appservice(services: &Services, request: &Request, info: Box<RegistrationInfo>) -> Result<Auth> {
let user_id = request
async fn auth_appservice(services: &Services, request: &Request, info: Box<RegistrationInfo>) -> Result<Auth> {
let user_id_default =
|| UserId::parse_with_server_name(info.registration.sender_localpart.as_str(), services.globals.server_name());
let Ok(user_id) = request
.query
.user_id
.clone()
.map_or_else(
|| {
UserId::parse_with_server_name(
info.registration.sender_localpart.as_str(),
services.globals.server_name(),
)
},
UserId::parse,
)
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
.map_or_else(user_id_default, UserId::parse)
else {
return Err!(Request(InvalidUsername("Username is invalid.")));
};
if !info.is_user_match(&user_id) {
return Err(Error::BadRequest(ErrorKind::Exclusive, "User is not in namespace."));
return Err!(Request(Exclusive("User is not in namespace.")));
}
if !services.users.exists(&user_id)? {
return Err(Error::BadRequest(ErrorKind::forbidden(), "User does not exist."));
if !services.users.exists(&user_id).await {
return Err!(Request(Forbidden("User does not exist.")));
}
Ok(Auth {
@ -182,117 +180,104 @@ fn auth_appservice(services: &Services, request: &Request, info: Box<RegistrationInfo>) -> Result<Auth> {
})
}
async fn auth_server(
services: &Services, request: &mut Request, json_body: &Option<CanonicalJsonValue>,
) -> Result<Auth> {
async fn auth_server(services: &Services, request: &mut Request, body: Option<&CanonicalJsonValue>) -> Result<Auth> {
type Member = (String, CanonicalJsonValue);
type Object = CanonicalJsonObject;
type Value = CanonicalJsonValue;
let x_matrix = parse_x_matrix(request).await?;
auth_server_checks(services, &x_matrix)?;
let destination = services.globals.server_name();
let origin = &x_matrix.origin;
#[allow(clippy::or_fun_call)]
let signature_uri = request
.parts
.uri
.path_and_query()
.unwrap_or(&PathAndQuery::from_static("/"))
.to_string();
let signature: [Member; 1] = [(x_matrix.key.to_string(), Value::String(x_matrix.sig.to_string()))];
let signatures: [Member; 1] = [(origin.to_string(), Value::Object(signature.into()))];
let authorization: [Member; 5] = [
("destination".into(), Value::String(destination.into())),
("method".into(), Value::String(request.parts.method.to_string())),
("origin".into(), Value::String(origin.to_string())),
("signatures".into(), Value::Object(signatures.into())),
("uri".into(), Value::String(signature_uri)),
];
let mut authorization: Object = authorization.into();
if let Some(body) = body {
authorization.insert("content".to_owned(), body.clone());
}
let key = services
.server_keys
.get_verify_key(origin, &x_matrix.key)
.await
.map_err(|e| err!(Request(Forbidden(warn!("Failed to fetch signing keys: {e}")))))?;
let keys: PubKeys = [(x_matrix.key.to_string(), key.key)].into();
let keys: PubKeyMap = [(origin.to_string(), keys)].into();
if let Err(e) = ruma::signatures::verify_json(&keys, authorization) {
debug_error!("Failed to verify federation request from {origin}: {e}");
if request.parts.uri.to_string().contains('@') {
warn!(
"Request uri contained '@' character. Make sure your reverse proxy gives Conduit the raw uri (apache: \
use nocanon)"
);
}
return Err!(Request(Forbidden("Failed to verify X-Matrix signatures.")));
}
Ok(Auth {
origin: origin.to_owned().into(),
sender_user: None,
sender_device: None,
appservice_info: None,
})
}
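
The rewritten auth_server assembles the signed-request object inline (destination, method, origin, signatures, uri, plus content when a body is present) and hands it to ruma::signatures::verify_json. A sketch of assembling that object, with serde_json standing in for the canonical JSON types and illustrative inputs throughout:

use serde_json::{json, Map, Value};

// build the object an X-Matrix signature covers, mirroring the members above
fn signed_request_object(
    destination: &str,
    method: &str,
    origin: &str,
    key_id: &str,
    sig: &str,
    uri: &str,
    body: Option<&Value>,
) -> Value {
    let mut signature = Map::new();
    signature.insert(key_id.to_owned(), Value::String(sig.to_owned()));

    let mut signatures = Map::new();
    signatures.insert(origin.to_owned(), Value::Object(signature));

    let mut object = Map::new();
    object.insert("destination".to_owned(), Value::String(destination.to_owned()));
    object.insert("method".to_owned(), Value::String(method.to_owned()));
    object.insert("origin".to_owned(), Value::String(origin.to_owned()));
    object.insert("signatures".to_owned(), Value::Object(signatures));
    object.insert("uri".to_owned(), Value::String(uri.to_owned()));
    if let Some(body) = body {
        // the request body is part of the signed material when present
        object.insert("content".to_owned(), body.clone());
    }
    Value::Object(object)
}

fn main() {
    let object = signed_request_object(
        "destination.example",
        "PUT",
        "origin.example",
        "ed25519:1",
        "c2lnbmF0dXJl",
        "/_matrix/federation/v1/send/1",
        Some(&json!({"pdus": []})),
    );
    println!("{object}");
}
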
fn auth_server_checks(services: &Services, x_matrix: &XMatrix) -> Result<()> {
if !services.server.config.allow_federation {
return Err!(Config("allow_federation", "Federation is disabled."));
}
let TypedHeader(Authorization(x_matrix)) = request
.parts
.extract::<TypedHeader<Authorization<XMatrix>>>()
.await
.map_err(|e| {
warn!("Missing or invalid Authorization header: {e}");
let msg = match e.reason() {
TypedHeaderRejectionReason::Missing => "Missing Authorization header.",
TypedHeaderRejectionReason::Error(_) => "Invalid X-Matrix signatures.",
_ => "Unknown header-related error",
};
Error::BadRequest(ErrorKind::forbidden(), msg)
})?;
let destination = services.globals.server_name();
if x_matrix.destination.as_deref() != Some(destination) {
return Err!(Request(Forbidden("Invalid destination.")));
}
let origin = &x_matrix.origin;
if services
.server
.config
.forbidden_remote_server_names
.contains(origin)
{
debug_info!("Refusing to accept inbound federation request to {origin}");
return Err!(Request(Forbidden("Federation with this homeserver is not allowed.")));
return Err!(Request(Forbidden(debug_warn!("Federation requests from {origin} denied."))));
}
let signatures =
BTreeMap::from_iter([(x_matrix.key.clone(), CanonicalJsonValue::String(x_matrix.sig.to_string()))]);
let signatures = BTreeMap::from_iter([(
origin.as_str().to_owned(),
CanonicalJsonValue::Object(
signatures
.into_iter()
.map(|(k, v)| (k.to_string(), v))
.collect(),
),
)]);
let server_destination = services.globals.server_name().as_str().to_owned();
if let Some(destination) = x_matrix.destination.as_ref() {
if destination != &server_destination {
return Err(Error::BadRequest(ErrorKind::forbidden(), "Invalid authorization."));
}
}
let signature_uri = CanonicalJsonValue::String(
request
.parts
.uri
.path_and_query()
.unwrap_or(&PathAndQuery::from_static("/"))
.to_string(),
);
let mut request_map = BTreeMap::from_iter([
(
"method".to_owned(),
CanonicalJsonValue::String(request.parts.method.to_string()),
),
("uri".to_owned(), signature_uri),
("origin".to_owned(), CanonicalJsonValue::String(origin.as_str().to_owned())),
("destination".to_owned(), CanonicalJsonValue::String(server_destination)),
("signatures".to_owned(), CanonicalJsonValue::Object(signatures)),
]);
if let Some(json_body) = json_body {
request_map.insert("content".to_owned(), json_body.clone());
};
let keys_result = services
.server_keys
.fetch_signing_keys_for_server(origin, vec![x_matrix.key.to_string()])
.await;
let keys = keys_result.map_err(|e| {
warn!("Failed to fetch signing keys: {e}");
Error::BadRequest(ErrorKind::forbidden(), "Failed to fetch signing keys.")
})?;
let pub_key_map = BTreeMap::from_iter([(origin.as_str().to_owned(), keys)]);
match ruma::signatures::verify_json(&pub_key_map, &request_map) {
Ok(()) => Ok(Auth {
origin: Some(origin.clone()),
sender_user: None,
sender_device: None,
appservice_info: None,
}),
Err(e) => {
warn!("Failed to verify json request from {origin}: {e}\n{request_map:?}");
if request.parts.uri.to_string().contains('@') {
warn!(
"Request uri contained '@' character. Make sure your reverse proxy gives Conduit the raw uri \
(apache: use nocanon)"
);
}
Err(Error::BadRequest(
ErrorKind::forbidden(),
"Failed to verify X-Matrix signatures.",
))
},
}
Ok(())
}
async fn parse_x_matrix(request: &mut Request) -> Result<XMatrix> {
let TypedHeader(Authorization(x_matrix)) = request
.parts
.extract::<TypedHeader<Authorization<XMatrix>>>()
.await
.map_err(|e| {
let msg = match e.reason() {
TypedHeaderRejectionReason::Missing => "Missing Authorization header.",
TypedHeaderRejectionReason::Error(_) => "Invalid X-Matrix signatures.",
_ => "Unknown header-related error",
};
err!(Request(Forbidden(warn!("{msg}: {e}"))))
})?;
Ok(x_matrix)
}
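
parse_x_matrix folds the header extraction and its error mapping into one place: the rejection reason picks the message. A tiny stand-in for that mapping, with a plain enum in place of axum_extra's TypedHeaderRejectionReason:

// map a header rejection to the message reported to the caller
enum Rejection {
    Missing,
    Error(String),
    Other,
}

fn rejection_message(rejection: &Rejection) -> &'static str {
    match rejection {
        Rejection::Missing => "Missing Authorization header.",
        Rejection::Error(_) => "Invalid X-Matrix signatures.",
        Rejection::Other => "Unknown header-related error",
    }
}

fn main() {
    assert_eq!(rejection_message(&Rejection::Missing), "Missing Authorization header.");
    assert_eq!(
        rejection_message(&Rejection::Error("bad sig".into())),
        "Invalid X-Matrix signatures."
    );
}
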


@ -1,5 +1,3 @@
use std::future::Future;
use axum::{
extract::FromRequestParts,
response::IntoResponse,
@ -7,19 +5,25 @@ use axum::{
Router,
};
use conduit::Result;
use futures::{Future, TryFutureExt};
use http::Method;
use ruma::api::IncomingRequest;
use super::{Ruma, RumaResponse, State};
pub(in super::super) trait RumaHandler<T> {
fn add_route(&'static self, router: Router<State>, path: &str) -> Router<State>;
fn add_routes(&'static self, router: Router<State>) -> Router<State>;
}
pub(in super::super) trait RouterExt {
fn ruma_route<H, T>(self, handler: H) -> Self
fn ruma_route<H, T>(self, handler: &'static H) -> Self
where
H: RumaHandler<T>;
}
impl RouterExt for Router<State> {
fn ruma_route<H, T>(self, handler: H) -> Self
fn ruma_route<H, T>(self, handler: &'static H) -> Self
where
H: RumaHandler<T>,
{
@ -27,34 +31,28 @@ impl RouterExt for Router<State> {
}
}
pub(in super::super) trait RumaHandler<T> {
fn add_routes(&self, router: Router<State>) -> Router<State>;
fn add_route(&self, router: Router<State>, path: &str) -> Router<State>;
}
macro_rules! ruma_handler {
( $($tx:ident),* $(,)? ) => {
#[allow(non_snake_case)]
impl<Req, Ret, Fut, Fun, $($tx,)*> RumaHandler<($($tx,)* Ruma<Req>,)> for Fun
impl<Err, Req, Fut, Fun, $($tx,)*> RumaHandler<($($tx,)* Ruma<Req>,)> for Fun
where
Req: IncomingRequest + Send + 'static,
Ret: IntoResponse,
Fut: Future<Output = Result<Req::OutgoingResponse, Ret>> + Send,
Fun: FnOnce($($tx,)* Ruma<Req>,) -> Fut + Clone + Send + Sync + 'static,
$( $tx: FromRequestParts<State> + Send + 'static, )*
Fun: Fn($($tx,)* Ruma<Req>,) -> Fut + Send + Sync + 'static,
Fut: Future<Output = Result<Req::OutgoingResponse, Err>> + Send,
Req: IncomingRequest + Send + Sync,
Err: IntoResponse + Send,
<Req as IncomingRequest>::OutgoingResponse: Send,
$( $tx: FromRequestParts<State> + Send + Sync + 'static, )*
{
fn add_routes(&self, router: Router<State>) -> Router<State> {
fn add_routes(&'static self, router: Router<State>) -> Router<State> {
Req::METADATA
.history
.all_paths()
.fold(router, |router, path| self.add_route(router, path))
}
fn add_route(&self, router: Router<State>, path: &str) -> Router<State> {
let handle = self.clone();
fn add_route(&'static self, router: Router<State>, path: &str) -> Router<State> {
let action = |$($tx,)* req| self($($tx,)* req).map_ok(RumaResponse);
let method = method_to_filter(&Req::METADATA.method);
let action = |$($tx,)* req| async { handle($($tx,)* req).await.map(RumaResponse) };
router.route(path, on(method, action))
}
}
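
ruma_handler! generates one RumaHandler impl per extractor-tuple arity, the same expansion trick axum uses for its own handlers, now over Fn instead of FnOnce + Clone so a &'static reference suffices. A reduced, self-contained sketch of the arity-expansion pattern, with the trait and bounds simplified rather than the conduwuit definitions:

trait Handler<Args> {
    fn invoke(&self, args: Args) -> String;
}

macro_rules! impl_handler {
    ( $($tx:ident),* $(,)? ) => {
        #[allow(non_snake_case)]
        impl<Fun, $($tx,)*> Handler<($($tx,)*)> for Fun
        where
            Fun: Fn($($tx),*) -> String,
        {
            fn invoke(&self, ($($tx,)*): ($($tx,)*)) -> String {
                self($($tx),*)
            }
        }
    };
}

impl_handler!();
impl_handler!(T1);
impl_handler!(T1, T2);

fn main() {
    let greet = |name: &str, times: usize| name.repeat(times);
    // the two-argument impl is selected from the tuple's arity
    assert_eq!(greet.invoke(("hi", 2)), "hihi");
}
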


@ -5,13 +5,18 @@ use http::StatusCode;
use http_body_util::Full;
use ruma::api::{client::uiaa::UiaaResponse, OutgoingResponse};
pub(crate) struct RumaResponse<T>(pub(crate) T);
pub(crate) struct RumaResponse<T>(pub(crate) T)
where
T: OutgoingResponse;
impl From<Error> for RumaResponse<UiaaResponse> {
fn from(t: Error) -> Self { Self(t.into()) }
}
impl<T: OutgoingResponse> IntoResponse for RumaResponse<T> {
impl<T> IntoResponse for RumaResponse<T>
where
T: OutgoingResponse,
{
fn into_response(self) -> Response {
self.0
.try_into_http_response::<BytesMut>()


@ -1,9 +1,13 @@
use std::cmp;
use axum::extract::State;
use conduit::{Error, Result};
use ruma::{
api::{client::error::ErrorKind, federation::backfill::get_backfill},
uint, user_id, MilliSecondsSinceUnixEpoch,
use conduit::{
is_equal_to,
utils::{IterStream, ReadyExt},
Err, PduCount, Result,
};
use futures::{FutureExt, StreamExt};
use ruma::{api::federation::backfill::get_backfill, uint, user_id, MilliSecondsSinceUnixEpoch};
use crate::Ruma;
@ -19,27 +23,35 @@ pub(crate) async fn get_backfill_route(
services
.rooms
.event_handler
.acl_check(origin, &body.room_id)?;
.acl_check(origin, &body.room_id)
.await?;
if !services
.rooms
.state_accessor
.is_world_readable(&body.room_id)?
&& !services
.rooms
.state_cache
.server_in_room(origin, &body.room_id)?
.is_world_readable(&body.room_id)
.await && !services
.rooms
.state_cache
.server_in_room(origin, &body.room_id)
.await
{
return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not in room."));
return Err!(Request(Forbidden("Server is not in room.")));
}
let until = body
.v
.iter()
.map(|event_id| services.rooms.timeline.get_pdu_count(event_id))
.filter_map(|r| r.ok().flatten())
.max()
.ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event not found."))?;
.stream()
.filter_map(|event_id| {
services
.rooms
.timeline
.get_pdu_count(event_id)
.map(Result::ok)
})
.ready_fold(PduCount::Backfilled(0), cmp::max)
.await;
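
The until cursor is now computed by streaming body.v's event IDs through get_pdu_count and folding to the maximum, instead of collecting and calling max(). A minimal sketch of the same shape with futures; ready_fold is a conduit utility, so a plain fold stands in here, and integers stand in for PduCount:

use futures::{executor::block_on, stream, StreamExt};

fn main() {
    // stand-ins for the counts resolved per event ID; failed lookups drop out
    let lookups = [Some(3_u64), None, Some(7), Some(5)];
    let until = block_on(
        stream::iter(lookups)
            .filter_map(|count| async move { count })
            .fold(0, |acc, count| async move { acc.max(count) }),
    );
    assert_eq!(until, 7);
}
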
let limit = body
.limit
@ -47,31 +59,37 @@ pub(crate) async fn get_backfill_route(
.try_into()
.expect("UInt could not be converted to usize");
let all_events = services
let pdus = services
.rooms
.timeline
.pdus_until(user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)?
.take(limit);
.pdus_until(user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)
.await?
.take(limit)
.filter_map(|(_, pdu)| async move {
if !services
.rooms
.state_accessor
.server_can_see_event(origin, &pdu.room_id, &pdu.event_id)
.await
.is_ok_and(is_equal_to!(true))
{
return None;
}
let events = all_events
.filter_map(Result::ok)
.filter(|(_, e)| {
matches!(
services
.rooms
.state_accessor
.server_can_see_event(origin, &e.room_id, &e.event_id,),
Ok(true),
)
services
.rooms
.timeline
.get_pdu_json(&pdu.event_id)
.await
.ok()
})
.map(|(_, pdu)| services.rooms.timeline.get_pdu_json(&pdu.event_id))
.filter_map(|r| r.ok().flatten())
.map(|pdu| services.sending.convert_to_outgoing_federation_event(pdu))
.collect();
.then(|pdu| services.sending.convert_to_outgoing_federation_event(pdu))
.collect()
.await;
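
The PDU pipeline is likewise one stream now: take up to the limit, drop events the origin can't see, fetch each event's JSON, then convert it for federation. A compact stand-in for that shape, with closures replacing the service calls:

use futures::{executor::block_on, stream, StreamExt};

fn main() {
    let pdus: Vec<u64> = block_on(
        stream::iter(1_u64..=10)
            .take(5) // body.limit stand-in
            .filter_map(|n| async move { (n % 2 == 1).then_some(n) }) // server_can_see_event stand-in
            .then(|n| async move { n * 100 }) // convert_to_outgoing_federation_event stand-in
            .collect(),
    );
    assert_eq!(pdus, vec![100, 300, 500]);
}
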
Ok(get_backfill::v1::Response {
origin: services.globals.server_name().to_owned(),
origin_server_ts: MilliSecondsSinceUnixEpoch::now(),
pdus: events,
pdus,
})
}


@ -1,9 +1,6 @@
use axum::extract::State;
use conduit::{Error, Result};
use ruma::{
api::{client::error::ErrorKind, federation::event::get_event},
MilliSecondsSinceUnixEpoch, RoomId,
};
use conduit::{err, Err, Result};
use ruma::{api::federation::event::get_event, MilliSecondsSinceUnixEpoch, RoomId};
use crate::Ruma;
@ -21,34 +18,46 @@ pub(crate) async fn get_event_route(
let event = services
.rooms
.timeline
.get_pdu_json(&body.event_id)?
.ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Event not found."))?;
.get_pdu_json(&body.event_id)
.await
.map_err(|_| err!(Request(NotFound("Event not found."))))?;
let room_id_str = event
.get("room_id")
.and_then(|val| val.as_str())
.ok_or_else(|| Error::bad_database("Invalid event in database."))?;
.ok_or_else(|| err!(Database("Invalid event in database.")))?;
let room_id =
<&RoomId>::try_from(room_id_str).map_err(|_| Error::bad_database("Invalid room_id in event in database."))?;
<&RoomId>::try_from(room_id_str).map_err(|_| err!(Database("Invalid room_id in event in database.")))?;
if !services.rooms.state_accessor.is_world_readable(room_id)?
&& !services.rooms.state_cache.server_in_room(origin, room_id)?
if !services
.rooms
.state_accessor
.is_world_readable(room_id)
.await && !services
.rooms
.state_cache
.server_in_room(origin, room_id)
.await
{
return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not in room."));
return Err!(Request(Forbidden("Server is not in room.")));
}
if !services
.rooms
.state_accessor
.server_can_see_event(origin, room_id, &body.event_id)?
.server_can_see_event(origin, room_id, &body.event_id)
.await?
{
return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not allowed to see event."));
return Err!(Request(Forbidden("Server is not allowed to see event.")));
}
Ok(get_event::v1::Response {
origin: services.globals.server_name().to_owned(),
origin_server_ts: MilliSecondsSinceUnixEpoch::now(),
pdu: services.sending.convert_to_outgoing_federation_event(event),
pdu: services
.sending
.convert_to_outgoing_federation_event(event)
.await,
})
}
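
get_event's room-ID extraction now routes failures through err!, but the navigation itself is unchanged: read room_id from the stored JSON and parse it. A sketch of that lookup, with serde_json standing in for the canonical JSON event:

use serde_json::{json, Value};

// pull room_id out of a stored event, with the same failure message as above
fn room_id_of(event: &Value) -> Result<&str, &'static str> {
    event
        .get("room_id")
        .and_then(Value::as_str)
        .ok_or("Invalid event in database.")
}

fn main() {
    let event = json!({"room_id": "!room:example.org", "type": "m.room.message"});
    assert_eq!(room_id_of(&event), Ok("!room:example.org"));
    assert!(room_id_of(&json!({"type": "m.room.message"})).is_err());
}
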


@ -1,7 +1,8 @@
use std::sync::Arc;
use std::borrow::Borrow;
use axum::extract::State;
use conduit::{Error, Result};
use futures::StreamExt;
use ruma::{
api::{client::error::ErrorKind, federation::authorization::get_event_authorization},
RoomId,
@ -22,16 +23,18 @@ pub(crate) async fn get_event_authorization_route(
services
.rooms
.event_handler
.acl_check(origin, &body.room_id)?;
.acl_check(origin, &body.room_id)
.await?;
if !services
.rooms
.state_accessor
.is_world_readable(&body.room_id)?
&& !services
.rooms
.state_cache
.server_in_room(origin, &body.room_id)?
.is_world_readable(&body.room_id)
.await && !services
.rooms
.state_cache
.server_in_room(origin, &body.room_id)
.await
{
return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not in room."));
}
@ -39,8 +42,9 @@ pub(crate) async fn get_event_authorization_route(
let event = services
.rooms
.timeline
.get_pdu_json(&body.event_id)?
.ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Event not found."))?;
.get_pdu_json(&body.event_id)
.await
.map_err(|_| Error::BadRequest(ErrorKind::NotFound, "Event not found."))?;
let room_id_str = event
.get("room_id")
@ -50,16 +54,17 @@ pub(crate) async fn get_event_authorization_route(
let room_id =
<&RoomId>::try_from(room_id_str).map_err(|_| Error::bad_database("Invalid room_id in event in database."))?;
let auth_chain_ids = services
let auth_chain = services
.rooms
.auth_chain
.event_ids_iter(room_id, vec![Arc::from(&*body.event_id)])
.await?;
.event_ids_iter(room_id, &[body.event_id.borrow()])
.await?
.filter_map(|id| async move { services.rooms.timeline.get_pdu_json(&id).await.ok() })
.then(|pdu| services.sending.convert_to_outgoing_federation_event(pdu))
.collect()
.await;
Ok(get_event_authorization::v1::Response {
auth_chain: auth_chain_ids
.filter_map(|id| services.rooms.timeline.get_pdu_json(&id).ok()?)
.map(|pdu| services.sending.convert_to_outgoing_federation_event(pdu))
.collect(),
auth_chain,
})
}
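
event_ids_iter now takes a slice of borrowed event IDs instead of a Vec of Arcs, which is what the Borrow import above is for: any owner of the ID type can be lent into the slice. A small sketch of that calling convention, with &str standing in for the event-ID type:

use std::{borrow::Borrow, sync::Arc};

// an API taking borrowed IDs accepts Arc-wrapped and owned IDs alike
fn event_ids_iter(starting_events: &[&str]) -> usize {
    starting_events.len()
}

fn main() {
    let from_cache: Arc<str> = Arc::from("$event:example.org");
    let from_request: String = "$other:example.org".to_owned();
    let n = event_ids_iter(&[from_cache.borrow(), from_request.borrow()]);
    assert_eq!(n, 2);
}
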

Some files were not shown because too many files have changed in this diff.