Compare commits
Comparing strawberry...main: 328 commits
Commits in this comparison (SHA1 only; the author, date, and avatar columns were empty in this view):

```
d8311a5ff6 47f8345457 99868b1661 d5ad973464 ff276a42a3 5f8c68ab84
6578b83bce 3cc92b32ec 9678948daf 500faa8d7f d6cc447add e28ae8fb4d
c7246662f4 a212bf7cfc 58b8c7516a bb8320a691 532dfd004d 4e5b87d0cd
00f7745ec4 d036394ec7 6a073b4fa4 b7109131e2 94b107b42b 29d55b8036
45fd3875c8 f9529937ce 0b56204f89 58adb6fead 5d1404e9df f14756fb76
24be579477 0e0b8cc403 1036f8dfa8 74012c5289 ea246d91d9 1b71b99c51
0f81c1e1cc bee1f89624 5768ca8442 3f0f89cddb d3b65af616 d60920c728
db99d3a001 bee4c6255a dc6e9e74d9 5bf5afaec8 095734a8e7 a93cb34dd6
b03c493bf9 d0132706cd 0e2009dbf5 3e57b7d35d 75b6daa67f 6365f1a887
b2bf35cfab 7f448d88a4 c99f5770a0 dfe058a244 07ba00f74e 9d0ce3965e
d1b82ea225 23e3f6526f 8010505853 9ce95a7030 d8ea8b378c 17003ba773
a57336ec13 7294368015 aa4d2e2363 07ec9d6d85 33c5afe050 7bf92c8a37
658c19d55e 4518f55408 ee3c585555 6c29792b3d 258b399de9 5dea52f0f8
1d1ccec532 0877f29439 e920c44cb4 ae818d5b25 7f95eef9ab 3104586884
c4b05e77f3 1366a3092f 1e23c95ec6 56dba8acb7 889fb3cf26 e704bbaf11
5ba0c02d52 df1edcf498 0e2ca7d719 0e342aab7f 47ff91243d d0c767c23c
06f2039eee 0b012b529f 5efe804a20 ef96e7afac c8a730c29e bb0b57efb8
5a3264980a 90fee4f50e 51d29bc1cb 298b58c069 6052c0c8a2 8b3f629198
4f882c3bd8 2c58a6efda fe65648296 5ad1100e0f 20dd1d148d fa71162c7d
f34e0b21a3 d80e61cbee c92678ecbe ecea0cff69 931fd4c802 657e91fd42
f4c51cd405 17b625a85b c10500f8ae 2c1ec3fb02 408f5bd30c 97208d6081
35981d5aef 7c17163730 1ecd027389 df72384c16 0d741bbd46 af714d5778
00cc23b649 de53ad83b2 17e0384eeb dca7bf9635 a67ab75417 cbf207bd1f
4bdd0d77db 045e8a2937 a1e1f40ded e97952b7f6 bec19df275 8085a1c064
1061f68f0e 01155fa649 c614d5bf44 f47677c995 6113803038 4de0dafdf1
f2ca670c3b 0a9a9b3c92 b872f8e593 ecc9099127 e123a5b660 59c073d0d8
5428526120 d8e94ee965 31ab84e928 565837ad75 2d71d5590a 3ec43be959
e3b81f7b64 b6e9dc3d98 cfcd6eb1a6 88e7e50daf 8345ea2cd3 add2e0e9ee
43e6c27bb7 c7c9f0e4a6 ef2d307c15 f761d4d5c9 16b07ae3ec 62d80b97e6
fda8b36809 f6dfc9538f f80d85e107 9158edfb7c 04656a7886 442bb9889c
62180897c0 80277f6aa2 d32534164c b3271e0d65 106bcd30b7 da4b94d80d
32f990fc72 5e59ce37c4 a774afe837 ffe3b0faf2 bd6d4bc58f b4d22bd05e
7ce782ddf4 4add39d0fe ea49b60273 2fa9621f3a 09bc71caab 6983798487
a4ef04cd14 4e0cedbe51 4ff1155bf0 e161e5dd61 f698254c41 69837671bb
ff8bbd4cfa 1a8482b3b4 31c2968bb2 3c8376d897 50acfe7832 eb7d893c86
936161d89e 329925c661 af399fd517 ad0b0af955 2c5af902a3 2f449ba47d
a567e314e9 ed3cd99781 99fe88c21e ffd0fd4242 b2a565b0b4 c516a8df3e
94d786ac12 677316631a 2b730a30ad 98f9570547 13335042b7 6db8df5e23
d0b4a619af 4a2d0d35bc 3e0ff2dc84 71a3855af6 db7d23e780 1c585ab1b6
24e6086f12 ee63f720c9 4b3c54bbfa 68856645ee 9ad4f20da4 186c459584
29a19ba437 3b0195e6b3 4b331fe50e c323894497 5b5ccba64e 9dcf289c7a
d86061084c 1d26eec82d 9514064c1c 2abf15b9e9 cd5d4f48be eed3291625
6a7fe3ab7c 72daf7ea68 94f2384fb0 d59f68a51a b1b6dc0479 184a3b0f0c
b5c167de12 5be07ebc0f 7c6b8b132a 1351d07735 6e7c73336c 52adae7553
a5520e8b1b 265802d546 da9f1ae5d7 607e338ac2 f75d9fa79e 7c0c029a4a
49023aa295 0c96891008 1f31e74024 9ab381e4eb dda27ffcb1 8ab825b12c
19f6d9d0e1 277b4951e8 610129d162 4c0ae8c2f7 ea25dc04b2 388730d6dd
ac944496c1 3dae02b886 3eed408b29 4fbbfe5d30 df3eb95d4f 7045481fae
c6ae6adc80 afdf5a07b5 f9e76d6239 8141ca3444 abf33013e3 96e85adc32
fc1170e12a 819e35f81f bab40a3747 aad42bdaa0 3759d1be6c 77d8e26efe
7a8ca8842a 80832cb0bb 98d8e5c63c 5167e1f06d e56d3c6cb3 afcd0bfeef
5b8464252c 2cc6ad8df3 afe9e5536b 9ebb39ca4f f59e3d8850 6cb3275be0
be16f84410 9dd058de60 5a1c41e66b fabd3cf567 5e21b43f25 9bda5a43e5
8c18481d1d fde1b94e26 b71201cf19 8451ea3bc3 6f15c9b3f4 0074f903d8
1852eeebf2 5b6279b1c5 4c2999ccd1 53d03bbb1f
```
432 changed files with 21931 additions and 12870 deletions

.cargo/audit.toml (new file, 27 lines)

@@ -0,0 +1,27 @@
```toml
[advisories]
ignore = ["RUSTSEC-2024-0436", "RUSTSEC-2025-0014"] # advisory IDs to ignore e.g. ["RUSTSEC-2019-0001", ...]
informational_warnings = [] # warn for categories of informational advisories
severity_threshold = "none" # CVSS severity ("none", "low", "medium", "high", "critical")

# Advisory Database Configuration
[database]
path = "~/.cargo/advisory-db" # Path where advisory git repo will be cloned
url = "https://github.com/RustSec/advisory-db.git" # URL to git repo
fetch = true # Perform a `git fetch` before auditing (default: true)
stale = false # Allow stale advisory DB (i.e. no commits for 90 days, default: false)

# Output Configuration
[output]
deny = ["warnings", "unmaintained", "unsound", "yanked"] # exit on error if unmaintained dependencies are found
format = "terminal" # "terminal" (human readable report) or "json"
quiet = false # Only print information on error
show_tree = true # Show inverse dependency trees along with advisories (default: true)

# Target Configuration
[target]
arch = ["x86_64", "aarch64"] # Ignore advisories for CPU architectures other than these
os = ["linux", "windows", "macos"] # Ignore advisories for operating systems other than these

[yanked]
enabled = true # Warn for yanked crates in Cargo.lock (default: true)
update_index = true # Auto-update the crates.io index (default: true)
```
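A note on usage: cargo-audit reads `.cargo/audit.toml` automatically when run from the repository root, so no flags are strictly needed. A minimal sketch (flags assumed from the cargo-audit docs, mirroring the `[output] deny` list above):

```sh
# Install cargo-audit and scan Cargo.lock; .cargo/audit.toml is picked up automatically.
cargo install cargo-audit
cargo audit
# The same policy as the [output] deny list, spelled out on the command line:
cargo audit --deny warnings --deny unmaintained --deny unsound --deny yanked
```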
.gitattributes (new file, vendored, 87 lines)

@@ -0,0 +1,87 @@
```
# taken from https://github.com/gitattributes/gitattributes/blob/46a8961ad73f5bd4d8d193708840fbc9e851d702/Rust.gitattributes
# Auto detect text files and perform normalization
* text=auto

*.rs text diff=rust
*.toml text diff=toml
Cargo.lock text

# taken from https://github.com/gitattributes/gitattributes/blob/46a8961ad73f5bd4d8d193708840fbc9e851d702/Common.gitattributes
# Documents
*.bibtex text diff=bibtex
*.doc diff=astextplain
*.DOC diff=astextplain
*.docx diff=astextplain
*.DOCX diff=astextplain
*.dot diff=astextplain
*.DOT diff=astextplain
*.pdf diff=astextplain
*.PDF diff=astextplain
*.rtf diff=astextplain
*.RTF diff=astextplain
*.md text diff=markdown
*.mdx text diff=markdown
*.tex text diff=tex
*.adoc text
*.textile text
*.mustache text
*.csv text eol=crlf
*.tab text
*.tsv text
*.txt text
*.sql text
*.epub diff=astextplain

# Graphics
*.png binary
*.jpg binary
*.jpeg binary
*.gif binary
*.tif binary
*.tiff binary
*.ico binary
# SVG treated as text by default.
*.svg text
*.eps binary

# Scripts
*.bash text eol=lf
*.fish text eol=lf
*.ksh text eol=lf
*.sh text eol=lf
*.zsh text eol=lf
# These are explicitly windows files and should use crlf
*.bat text eol=crlf
*.cmd text eol=crlf
*.ps1 text eol=crlf

# Serialisation
*.json text
*.toml text
*.xml text
*.yaml text
*.yml text

# Archives
*.7z binary
*.bz binary
*.bz2 binary
*.bzip2 binary
*.gz binary
*.lz binary
*.lzma binary
*.rar binary
*.tar binary
*.taz binary
*.tbz binary
*.tbz2 binary
*.tgz binary
*.tlz binary
*.txz binary
*.xz binary
*.Z binary
*.zip binary
*.zst binary

# Text files where line endings should be preserved
*.patch -text
```
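Once these attributes are committed, their effect on concrete paths can be verified; a quick sketch (the example paths are illustrative, not taken from this diff):

```sh
# Show which attributes Git applies to example paths.
git check-attr text eol diff -- src/main.rs Cargo.lock docs/intro.md assets/logo.png
# Renormalize already-tracked files after introducing .gitattributes.
git add --renormalize .
```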
Deleted file (8 lines; the file path was not captured in this view):

@@ -1,8 +0,0 @@
```markdown

<!-- Please describe your changes here -->

-----------------------------------------------------------------------------

- [ ] I ran `cargo fmt`, `cargo clippy`, and `cargo test`
- [ ] I agree to release my code and all other changes of this MR under the Apache-2.0 license

```
Deleted file (264 lines; the file path was not captured in this view):

@@ -1,264 +0,0 @@
````yaml
name: CI and Artifacts

on:
  pull_request:
  push:
    # documentation workflow deals with this or is not relevant for this workflow
    paths-ignore:
      - '*.md'
      - 'conduwuit-example.toml'
      - 'book.toml'
      - '.gitlab-ci.yml'
      - '.gitignore'
      - 'renovate.json'
      - 'docs/**'
      - 'debian/**'
      - 'docker/**'
    branches:
      - main
    tags:
      - '*'
  # Allows you to run this workflow manually from the Actions tab
  #workflow_dispatch:

#concurrency:
#  group: ${{ gitea.head_ref || gitea.ref_name }}
#  cancel-in-progress: true

env:
  # Required to make some things output color
  TERM: ansi
  # Publishing to my nix binary cache
  ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }}
  # conduwuit.cachix.org
  CACHIX_AUTH_TOKEN: ${{ secrets.CACHIX_AUTH_TOKEN }}
  # Just in case incremental is still being set to true, speeds up CI
  CARGO_INCREMENTAL: 0
  # Custom nix binary cache if fork is being used
  ATTIC_ENDPOINT: ${{ vars.ATTIC_ENDPOINT }}
  ATTIC_PUBLIC_KEY: ${{ vars.ATTIC_PUBLIC_KEY }}
  # Get error output from nix that we can actually use
  NIX_CONFIG: show-trace = true

#permissions:
#  packages: write
#  contents: read

jobs:
  tests:
    name: Test
    runs-on: ubuntu-latest
    steps:
      - name: Sync repository
        uses: https://github.com/actions/checkout@v4

      - name: Tag comparison check
        if: startsWith(gitea.ref, 'refs/tags/v')
        run: |
          # Tag mismatch with latest repo tag check to prevent potential downgrades
          LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`)

          if [ $LATEST_TAG != ${{ gitea.ref_name }} ]; then
            echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.'
            echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' >> $GITHUB_STEP_SUMMARY
            exit 1
          fi

      - name: Install Nix
        uses: https://github.com/DeterminateSystems/nix-installer-action@main
        with:
          diagnostic-endpoint: ""
          extra-conf: |
            experimental-features = nix-command flakes
            accept-flake-config = true

      - name: Enable Cachix binary cache
        run: |
          nix profile install nixpkgs#cachix
          cachix use crane
          cachix use nix-community

      - name: Configure Magic Nix Cache
        uses: https://github.com/DeterminateSystems/magic-nix-cache-action@main
        with:
          diagnostic-endpoint: ""
          upstream-cache: "https://attic.kennel.juneis.dog/conduwuit"

      - name: Apply Nix binary cache configuration
        run: |
          sudo tee -a /etc/nix/nix.conf > /dev/null <<EOF
          extra-substituters = https://attic.kennel.juneis.dog/conduit https://attic.kennel.juneis.dog/conduwuit https://cache.lix.systems https://conduwuit.cachix.org
          extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
          EOF

      - name: Use alternative Nix binary caches if specified
        if: ${{ (env.ATTIC_ENDPOINT != '') && (env.ATTIC_PUBLIC_KEY != '') }}
        run: |
          sudo tee -a /etc/nix/nix.conf > /dev/null <<EOF
          extra-substituters = ${{ env.ATTIC_ENDPOINT }}
          extra-trusted-public-keys = ${{ env.ATTIC_PUBLIC_KEY }}
          EOF

      - name: Prepare build environment
        run: |
          echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc"
          nix profile install --impure --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv
          direnv allow
          nix develop .#all-features --command true

      - name: Cache CI dependencies
        run: |
          bin/nix-build-and-cache ci

      - name: Run CI tests
        run: |
          direnv exec . engage > >(tee -a test_output.log)

      - name: Sync Complement repository
        uses: https://github.com/actions/checkout@v4
        with:
          repository: 'matrix-org/complement'
          path: complement_src

      - name: Run Complement tests
        run: |
          direnv exec . bin/complement 'complement_src' 'complement_test_logs.jsonl' 'complement_test_results.jsonl'
          cp -v -f result complement_oci_image.tar.gz

      - name: Upload Complement OCI image
        uses: https://github.com/actions/upload-artifact@v4
        with:
          name: complement_oci_image.tar.gz
          path: complement_oci_image.tar.gz
          if-no-files-found: error

      - name: Upload Complement logs
        uses: https://github.com/actions/upload-artifact@v4
        with:
          name: complement_test_logs.jsonl
          path: complement_test_logs.jsonl
          if-no-files-found: error

      - name: Upload Complement results
        uses: https://github.com/actions/upload-artifact@v4
        with:
          name: complement_test_results.jsonl
          path: complement_test_results.jsonl
          if-no-files-found: error

      - name: Diff Complement results with checked-in repo results
        run: |
          diff -u --color=always tests/test_results/complement/test_results.jsonl complement_test_results.jsonl > >(tee -a complement_test_output.log)
          echo '# Complement diff results' >> $GITHUB_STEP_SUMMARY
          echo '```diff' >> $GITHUB_STEP_SUMMARY
          tail -n 100 complement_test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY
          echo '```' >> $GITHUB_STEP_SUMMARY

      - name: Update Job Summary
        if: success() || failure()
        run: |
          if [ ${{ job.status }} == 'success' ]; then
            echo '# ✅ completed suwuccessfully' >> $GITHUB_STEP_SUMMARY
          else
            echo '```' >> $GITHUB_STEP_SUMMARY
            tail -n 40 test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY
            echo '```' >> $GITHUB_STEP_SUMMARY
          fi

  build:
    name: Build
    runs-on: ubuntu-latest
    needs: tests
    strategy:
      matrix:
        include:
          - target: aarch64-unknown-linux-musl
          - target: x86_64-unknown-linux-musl
    steps:
      - name: Sync repository
        uses: https://github.com/actions/checkout@v4

      - name: Install Nix
        uses: https://github.com/DeterminateSystems/nix-installer-action@main
        with:
          diagnostic-endpoint: ""
          extra-conf: |
            experimental-features = nix-command flakes
            accept-flake-config = true

      - name: Install and enable Cachix binary cache
        run: |
          nix profile install nixpkgs#cachix
          cachix use crane
          cachix use nix-community

      - name: Configure Magic Nix Cache
        uses: https://github.com/DeterminateSystems/magic-nix-cache-action@main
        with:
          diagnostic-endpoint: ""
          upstream-cache: "https://attic.kennel.juneis.dog/conduwuit"

      - name: Apply Nix binary cache configuration
        run: |
          sudo tee -a /etc/nix/nix.conf > /dev/null <<EOF
          extra-substituters = https://attic.kennel.juneis.dog/conduit https://attic.kennel.juneis.dog/conduwuit https://cache.lix.systems https://conduwuit.cachix.org
          extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
          EOF

      - name: Use alternative Nix binary caches if specified
        if: ${{ (env.ATTIC_ENDPOINT != '') && (env.ATTIC_PUBLIC_KEY != '') }}
        run: |
          sudo tee -a /etc/nix/nix.conf > /dev/null <<EOF
          extra-substituters = ${{ env.ATTIC_ENDPOINT }}
          extra-trusted-public-keys = ${{ env.ATTIC_PUBLIC_KEY }}
          EOF

      - name: Prepare build environment
        run: |
          echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc"
          nix profile install --impure --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv
          direnv allow
          nix develop .#all-features --command true

      - name: Build static ${{ matrix.target }}
        run: |
          CARGO_DEB_TARGET_TUPLE=$(echo ${{ matrix.target }} | grep -o -E '^([^-]*-){3}[^-]*')
          SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct)

          bin/nix-build-and-cache just .#static-${{ matrix.target }}
          mkdir -v -p target/release/
          mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/
          cp -v -f result/bin/conduit target/release/conduwuit
          cp -v -f result/bin/conduit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit
          # -p conduit is the main crate name
          direnv exec . cargo deb --verbose --no-build --no-strip -p conduit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/${{ matrix.target }}.deb
          mv -v target/release/conduwuit static-${{ matrix.target }}
          mv -v target/release/${{ matrix.target }}.deb ${{ matrix.target }}.deb

      - name: Upload static-${{ matrix.target }}
        uses: https://github.com/actions/upload-artifact@v4
        with:
          name: static-${{ matrix.target }}
          path: static-${{ matrix.target }}
          if-no-files-found: error

      - name: Upload deb ${{ matrix.target }}
        uses: https://github.com/actions/upload-artifact@v4
        with:
          name: deb-${{ matrix.target }}
          path: ${{ matrix.target }}.deb
          if-no-files-found: error
          compression-level: 0

      - name: Build OCI image ${{ matrix.target }}
        run: |
          bin/nix-build-and-cache just .#oci-image-${{ matrix.target }}
          cp -v -f result oci-image-${{ matrix.target }}.tar.gz

      - name: Upload OCI image ${{ matrix.target }}
        uses: https://github.com/actions/upload-artifact@v4
        with:
          name: oci-image-${{ matrix.target }}
          path: oci-image-${{ matrix.target }}.tar.gz
          if-no-files-found: error
          compression-level: 0
````
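The tag-guard step in the workflow above (carried over into the new ci.yml below) reduces to a small shell check; a standalone sketch, assuming the clone has its tags fetched:

```sh
# Abort release automation when the triggering tag is not the newest repo tag,
# preventing an accidental downgrade of published artifacts.
LATEST_TAG=$(git describe --tags "$(git rev-list --tags --max-count=1)")
CURRENT_TAG="$GITHUB_REF_NAME"   # provided by the runner for tag builds, e.g. v1.2.3

if [ "$LATEST_TAG" != "$CURRENT_TAG" ]; then
    echo "WARNING: $CURRENT_TAG is not the latest tag ($LATEST_TAG). Aborting." >&2
    exit 1
fi
```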
.github/workflows/ci.yml (vendored, 521 lines changed)
@@ -21,16 +21,6 @@ concurrency:
```yaml
  cancel-in-progress: true

env:
  # sccache only on main repo
  SCCACHE_GHA_ENABLED: "${{ (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'true' || 'false' }}"
  RUSTC_WRAPPER: "${{ (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'sccache' || '' }}"
  SCCACHE_BUCKET: "${{ (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'sccache' || '' }}"
  SCCACHE_S3_USE_SSL: ${{ vars.SCCACHE_S3_USE_SSL }}
  SCCACHE_REGION: ${{ vars.SCCACHE_REGION }}
  SCCACHE_ENDPOINT: ${{ vars.SCCACHE_ENDPOINT }}
  SCCACHE_CACHE_MULTIARCH: ${{ vars.SCCACHE_CACHE_MULTIARCH }}
  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
  # Required to make some things output color
  TERM: ansi
  # Publishing to my nix binary cache
```
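When that gating resolves to 'true', cargo routes every rustc invocation through sccache via RUSTC_WRAPPER; the same wiring sketched locally (the install method is an assumption):

```sh
# Compile through sccache and check the hit rate afterwards.
cargo install sccache
export RUSTC_WRAPPER=sccache
cargo build --release
sccache --show-stats   # compile requests, cache hits, cache misses
```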
@@ -45,23 +35,21 @@ env:
```yaml
  # Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps
  NIX_CONFIG: |
    show-trace = true
    extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://cache.lix.systems https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net
    extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
    extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net https://nix-community.cachix.org https://crane.cachix.org
    extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs= crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=
    experimental-features = nix-command flakes
    extra-experimental-features = nix-command flakes
    accept-flake-config = true
  # complement uses libolm
  NIXPKGS_ALLOW_INSECURE: 1
  WEB_UPLOAD_SSH_USERNAME: ${{ secrets.WEB_UPLOAD_SSH_USERNAME }}
  GH_SHA: ${{ github.sha }}
  GH_REF_NAME: ${{ github.ref_name }}
  WEBSERVER_DIR_NAME: ${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}

permissions: {}

jobs:
  tests:
    name: Test
    runs-on: ubuntu-24.04
    runs-on: self-hosted
    steps:
      - name: Setup SSH web publish
        env:
```
@@ -87,24 +75,13 @@ jobs:
```yaml
          END

          echo "Checking connection"
          ssh -q website "echo test"
          ssh -q website "echo test" || ssh -q website "echo test"

          echo "Creating commit rev directory on web server"
          ssh -q website "rm -rf /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/$GITHUB_SHA/"
          ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/$GITHUB_SHA/"
          ssh -q website "rm -rf /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" || ssh -q website "rm -rf /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/"
          ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" || ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/"

      - name: Install liburing
        run: |
          sudo apt install liburing-dev -y

      - name: Free up a bit of runner space
        run: |
          set +o pipefail
          sudo docker image prune --all --force || true
          sudo apt purge -y 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true
          sudo apt clean
          sudo rm -rf /usr/local/lib/android /usr/local/julia* /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/local/lib/heroku /usr/lib/heroku /usr/local/share/boost /usr/share/dotnet /usr/local/bin/cmake* /usr/local/bin/stack /usr/local/bin/terraform /opt/microsoft/powershell /opt/hostedtoolcache/CodeQL /opt/hostedtoolcache/go /opt/hostedtoolcache/PyPy /usr/local/bin/sam || true
          set -o pipefail
          echo "SSH_WEBSITE=1" >> "$GITHUB_ENV"

      - name: Sync repository
        uses: actions/checkout@v4
```
@@ -123,57 +100,9 @@
```yaml
            exit 1
          fi

      - uses: nixbuild/nix-quick-install-action@master

      - name: Restore and cache Nix store
        uses: nix-community/cache-nix-action@v5.1.0
        with:
          # restore and save a cache using this key
          primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/.lock') }}
          # if there's no cache hit, restore a cache by this prefix
          restore-prefixes-first-match: nix-${{ runner.os }}-
          # collect garbage until Nix store size (in bytes) is at most this number
          # before trying to save a new cache
          gc-max-store-size-linux: 2073741824
          # do purge caches
          purge: true
          # purge all versions of the cache
          purge-prefixes: nix-${{ runner.os }}-
          # created more than this number of seconds ago relative to the start of the `Post Restore` phase
          purge-last-accessed: 86400
          # except the version with the `primary-key`, if it exists
          purge-primary-key: never
          # always save the cache
          save-always: true

      - name: Enable Cachix binary cache
        run: |
          nix profile install nixpkgs#cachix
          cachix use crane
          cachix use nix-community

      - name: Apply Nix binary cache configuration
        run: |
          sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null <<EOF
          extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://cache.lix.systems https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net
          extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
          experimental-features = nix-command flakes
          extra-experimental-features = nix-command flakes
          accept-flake-config = true
          EOF

      - name: Use alternative Nix binary caches if specified
        if: ${{ (env.ATTIC_ENDPOINT != '') && (env.ATTIC_PUBLIC_KEY != '') }}
        run: |
          sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null <<EOF
          extra-substituters = ${ATTIC_ENDPOINT}
          extra-trusted-public-keys = ${ATTIC_PUBLIC_KEY}
          EOF

      - name: Prepare build environment
        run: |
          echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc"
          nix profile install --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv
          direnv allow
          nix develop .#all-features --command true
```
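After the tee appends above, the effective cache settings can be checked before any build; a quick sketch (`nix show-config` is the older spelling of `nix config show`):

```sh
# Confirm the extra substituters and keys were actually merged into the Nix config.
nix show-config | grep -E '^(substituters|trusted-public-keys)'
```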
@@ -184,13 +113,11 @@
```yaml
          bin/nix-build-and-cache just '.#devShells.x86_64-linux.all-features'
          bin/nix-build-and-cache just '.#devShells.x86_64-linux.dynamic'

      # use sccache for Rust
      - name: Run sccache-cache
        if: (env.SCCACHE_GHA_ENABLED == 'true')
        uses: mozilla-actions/sccache-action@main

      # use rust-cache
      - uses: Swatinem/rust-cache@v2
        # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting
        # releases and tags
        #if: ${{ !startsWith(github.ref, 'refs/tags/') }}
        with:
          cache-all-crates: "true"
          cache-on-failure: "true"
```
@@ -242,41 +169,28 @@
````yaml
        if: success() || failure()
        run: |
          if [ ${GH_JOB_STATUS} == 'success' ]; then
            echo '# ✅ completed suwuccessfully' >> $GITHUB_STEP_SUMMARY
            echo '# ✅ CI completed suwuccessfully' >> $GITHUB_STEP_SUMMARY
          else
            echo '# CI failure' >> $GITHUB_STEP_SUMMARY
            echo '# ❌ CI failed (last 100 lines of output)' >> $GITHUB_STEP_SUMMARY
            echo '```' >> $GITHUB_STEP_SUMMARY
            tail -n 40 test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY
            tail -n 100 test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY
            echo '```' >> $GITHUB_STEP_SUMMARY

            echo '# Complement diff results' >> $GITHUB_STEP_SUMMARY
            echo '# Complement diff results (last 100 lines)' >> $GITHUB_STEP_SUMMARY
            echo '```diff' >> $GITHUB_STEP_SUMMARY
            tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY
            echo '```' >> $GITHUB_STEP_SUMMARY
          fi

      - name: Run cargo clean test artifacts to free up space
        run: |
          cargo clean --profile test

  build:
    name: Build
    runs-on: ubuntu-24.04
    runs-on: self-hosted
    strategy:
      matrix:
        include:
          - target: aarch64-linux-musl
          - target: x86_64-linux-musl
    steps:
      - name: Free up a bit of runner space
        run: |
          set +o pipefail
          sudo docker image prune --all --force || true
          sudo apt purge -y 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true
          sudo apt clean
          sudo rm -rf /usr/local/lib/android /usr/local/julia* /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/local/lib/heroku /usr/lib/heroku /usr/local/share/boost /usr/share/dotnet /usr/local/bin/cmake* /usr/local/bin/stack /usr/local/bin/terraform /opt/microsoft/powershell /opt/hostedtoolcache/CodeQL /opt/hostedtoolcache/go /opt/hostedtoolcache/PyPy /usr/local/bin/sam || true
          set -o pipefail

      - name: Sync repository
        uses: actions/checkout@v4
        with:
````
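The summary steps repeatedly strip ANSI color codes before appending to $GITHUB_STEP_SUMMARY; the same one-liner, lifted out, works on any captured log:

```sh
# Remove ANSI escape sequences so colored CLI output reads as plain text.
tail -n 100 test_output.log | sed 's/\x1b\[[0-9;]*m//g'
```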
@@ -306,69 +220,21 @@
```yaml
          END

          echo "Checking connection"
          ssh -q website "echo test"
          ssh -q website "echo test" || ssh -q website "echo test"

      - uses: nixbuild/nix-quick-install-action@master

      - name: Restore and cache Nix store
        uses: nix-community/cache-nix-action@v5.1.0
        with:
          # restore and save a cache using this key
          primary-key: nix-${{ runner.os }}-${{ matrix.target }}-${{ hashFiles('**/*.nix', '**/.lock') }}
          # if there's no cache hit, restore a cache by this prefix
          restore-prefixes-first-match: nix-${{ runner.os }}-
          # collect garbage until Nix store size (in bytes) is at most this number
          # before trying to save a new cache
          gc-max-store-size-linux: 2073741824
          # do purge caches
          purge: true
          # purge all versions of the cache
          purge-prefixes: nix-${{ runner.os }}-
          # created more than this number of seconds ago relative to the start of the `Post Restore` phase
          purge-last-accessed: 86400
          # except the version with the `primary-key`, if it exists
          purge-primary-key: never
          # always save the cache
          save-always: true

      - name: Enable Cachix binary cache
        run: |
          nix profile install nixpkgs#cachix
          cachix use crane
          cachix use nix-community

      - name: Apply Nix binary cache configuration
        run: |
          sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null <<EOF
          extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://cache.lix.systems https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net
          extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
          experimental-features = nix-command flakes
          extra-experimental-features = nix-command flakes
          accept-flake-config = true
          EOF

      - name: Use alternative Nix binary caches if specified
        if: ${{ (env.ATTIC_ENDPOINT != '') && (env.ATTIC_PUBLIC_KEY != '') }}
        run: |
          sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null <<EOF
          extra-substituters = ${ATTIC_ENDPOINT}
          extra-trusted-public-keys = ${ATTIC_PUBLIC_KEY}
          EOF
          echo "SSH_WEBSITE=1" >> "$GITHUB_ENV"

      - name: Prepare build environment
        run: |
          echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc"
          nix profile install --impure --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv
          direnv allow
          nix develop .#all-features --command true --impure

      # use sccache for Rust
      - name: Run sccache-cache
        if: (env.SCCACHE_GHA_ENABLED == 'true')
        uses: mozilla-actions/sccache-action@main

      # use rust-cache
      - uses: Swatinem/rust-cache@v2
        # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting
        # releases and tags
        #if: ${{ !startsWith(github.ref, 'refs/tags/') }}
        with:
          cache-all-crates: "true"
          cache-on-failure: "true"
```
@@ -491,29 +357,29 @@
```yaml
      - name: Upload static-x86_64-linux-musl-all-features-x86_64-haswell-optimised to webserver
        if: ${{ matrix.target == 'x86_64-linux-musl' }}
        run: |
          if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then
            scp static-x86_64-linux-musl-x86_64-haswell-optimised website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/static-x86_64-linux-musl-x86_64-haswell-optimised
          if [ ! -z $SSH_WEBSITE ]; then
            chmod +x static-x86_64-linux-musl-x86_64-haswell-optimised
            scp static-x86_64-linux-musl-x86_64-haswell-optimised website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-x86_64-linux-musl-x86_64-haswell-optimised
          fi

      - name: Upload static-${{ matrix.target }}-all-features to webserver
        if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]'
        run: |
          if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then
            scp static-${{ matrix.target }} website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/static-${{ matrix.target }}
          if [ ! -z $SSH_WEBSITE ]; then
            chmod +x static-${{ matrix.target }}
            scp static-${{ matrix.target }} website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-${{ matrix.target }}
          fi

      - name: Upload static deb x86_64-linux-musl-all-features-x86_64-haswell-optimised to webserver
        if: ${{ matrix.target == 'x86_64-linux-musl' }}
        run: |
          if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then
            scp x86_64-linux-musl-x86_64-haswell-optimised.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/x86_64-linux-musl-x86_64-haswell-optimised.deb
          if [ ! -z $SSH_WEBSITE ]; then
            scp x86_64-linux-musl-x86_64-haswell-optimised.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/x86_64-linux-musl-x86_64-haswell-optimised.deb
          fi

      - name: Upload static deb ${{ matrix.target }}-all-features to webserver
        if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]'
        run: |
          if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then
            scp ${{ matrix.target }}.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/${{ matrix.target }}.deb
          if [ ! -z $SSH_WEBSITE ]; then
            scp ${{ matrix.target }}.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/${{ matrix.target }}.deb
          fi

      - name: Upload static-${{ matrix.target }}-debug-all-features to GitHub
```
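Both the old and new guards use unquoted `[ ! -z $VAR ]`, which breaks under `set -u` and word-splits on unusual values; a slightly more defensive sketch of the same pattern (the artifact name and destination are placeholders):

```sh
# Same intent as `if [ ! -z $SSH_WEBSITE ]`, but robust to unset or empty values.
if [ -n "${SSH_WEBSITE:-}" ]; then
    scp some-artifact website:/var/www/example/path/   # placeholder artifact and path
fi
```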
@@ -532,17 +398,15 @@
```yaml
          compression-level: 0

      - name: Upload static-${{ matrix.target }}-debug-all-features to webserver
        if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]'
        run: |
          if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then
            scp static-${{ matrix.target }}-debug website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/static-${{ matrix.target }}-debug
          if [ ! -z $SSH_WEBSITE ]; then
            scp static-${{ matrix.target }}-debug website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-${{ matrix.target }}-debug
          fi

      - name: Upload static deb ${{ matrix.target }}-debug-all-features to webserver
        if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]'
        run: |
          if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then
            scp ${{ matrix.target }}-debug.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/${{ matrix.target }}-debug.deb
          if [ ! -z $SSH_WEBSITE ]; then
            scp ${{ matrix.target }}-debug.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/${{ matrix.target }}-debug.deb
          fi

      - name: Build OCI image ${{ matrix.target }}-all-features
```
@@ -564,6 +428,14 @@
```yaml

          cp -v -f result oci-image-${{ matrix.target }}-debug.tar.gz

      - name: Upload OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised to GitHub
        if: ${{ matrix.target == 'x86_64-linux-musl' }}
        uses: actions/upload-artifact@v4
        with:
          name: oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised
          path: oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz
          if-no-files-found: error
          compression-level: 0
      - name: Upload OCI image ${{ matrix.target }}-all-features to GitHub
        uses: actions/upload-artifact@v4
        with:
```
@@ -583,146 +455,26 @@
```yaml
      - name: Upload OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz to webserver
        if: ${{ matrix.target == 'x86_64-linux-musl' }}
        run: |
          if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then
            scp oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz
          if [ ! -z $SSH_WEBSITE ]; then
            scp oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz
          fi

      - name: Upload OCI image ${{ matrix.target }}-all-features to webserver
        if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]'
        run: |
          if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then
            scp oci-image-${{ matrix.target }}.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/oci-image-${{ matrix.target }}.tar.gz
          if [ ! -z $SSH_WEBSITE ]; then
            scp oci-image-${{ matrix.target }}.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-${{ matrix.target }}.tar.gz
          fi

      - name: Upload OCI image ${{ matrix.target }}-debug-all-features to webserver
        if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]'
        run: |
          if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then
            scp oci-image-${{ matrix.target }}-debug.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/oci-image-${{ matrix.target }}-debug.tar.gz
          if [ ! -z $SSH_WEBSITE ]; then
            scp oci-image-${{ matrix.target }}-debug.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-${{ matrix.target }}-debug.tar.gz
          fi

  build_mac_binaries:
    name: Build MacOS Binaries
    strategy:
      matrix:
        os: [macos-latest, macos-13]
    runs-on: ${{ matrix.os }}
    steps:
      - name: Sync repository
        uses: actions/checkout@v4
        with:
          persist-credentials: false

      - name: Setup SSH web publish
        env:
          web_upload_ssh_private_key: ${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}
        if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]'
        run: |
          mkdir -p -v ~/.ssh

          echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts
          echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519

          chmod 600 ~/.ssh/id_ed25519

          cat >>~/.ssh/config <<END
          Host website
              HostName ${{ secrets.WEB_UPLOAD_SSH_HOSTNAME }}
              User ${{ secrets.WEB_UPLOAD_SSH_USERNAME }}
              IdentityFile ~/.ssh/id_ed25519
              StrictHostKeyChecking yes
              AddKeysToAgent no
              ForwardX11 no
              BatchMode yes
          END

          echo "Checking connection"
          ssh -q website "echo test"

      - name: Tag comparison check
        if: ${{ startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') }}
        run: |
          # Tag mismatch with latest repo tag check to prevent potential downgrades
          LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`)

          if [ ${LATEST_TAG} != ${GH_REF_NAME} ]; then
            echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.'
            echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' >> $GITHUB_STEP_SUMMARY
            exit 1
          fi

      # use sccache for Rust
      - name: Run sccache-cache
        if: (env.SCCACHE_GHA_ENABLED == 'true')
        uses: mozilla-actions/sccache-action@main

      # use rust-cache
      - uses: Swatinem/rust-cache@v2
        with:
          cache-all-crates: "true"
          cache-on-failure: "true"
          cache-targets: "true"

      # Nix can't do portable macOS builds yet
      - name: Build macOS x86_64 binary
        if: ${{ matrix.os == 'macos-13' }}
        run: |
          CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short ${{ github.sha }})" cargo build --release
          cp -v -f target/release/conduwuit conduwuit-macos-x86_64
          otool -L conduwuit-macos-x86_64

      # quick smoke test of the x86_64 macOS binary
      - name: Run x86_64 macOS release binary
        if: ${{ matrix.os == 'macos-13' }}
        run: |
          ./conduwuit-macos-x86_64 --version

      - name: Build macOS arm64 binary
        if: ${{ matrix.os == 'macos-latest' }}
        run: |
          CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short ${{ github.sha }})" cargo build --release
          cp -v -f target/release/conduwuit conduwuit-macos-arm64
          otool -L conduwuit-macos-arm64

      # quick smoke test of the arm64 macOS binary
      - name: Run arm64 macOS release binary
        if: ${{ matrix.os == 'macos-latest' }}
        run: |
          ./conduwuit-macos-arm64 --version

      - name: Upload macOS x86_64 binary to webserver
        if: ${{ matrix.os == 'macos-13' }}
        run: |
          if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then
            scp conduwuit-macos-x86_64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/conduwuit-macos-x86_64
          fi

      - name: Upload macOS arm64 binary to webserver
        if: ${{ matrix.os == 'macos-latest' }}
        run: |
          if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then
            scp conduwuit-macos-arm64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/conduwuit-macos-arm64
          fi

      - name: Upload macOS x86_64 binary
        if: ${{ matrix.os == 'macos-13' }}
        uses: actions/upload-artifact@v4
        with:
          name: conduwuit-macos-x86_64
          path: conduwuit-macos-x86_64
          if-no-files-found: error

      - name: Upload macOS arm64 binary
        if: ${{ matrix.os == 'macos-latest' }}
        uses: actions/upload-artifact@v4
        with:
          name: conduwuit-macos-arm64
          path: conduwuit-macos-arm64
          if-no-files-found: error
  variables:
    outputs:
      github_repository: ${{ steps.var.outputs.github_repository }}
    runs-on: "ubuntu-latest"
    runs-on: self-hosted
    steps:
      - name: Setting global variables
        uses: actions/github-script@v7
```
@@ -732,25 +484,18 @@
```yaml
          core.setOutput('github_repository', '${{ github.repository }}'.toLowerCase())
  docker:
    name: Docker publish
    runs-on: ubuntu-24.04
    runs-on: self-hosted
    needs: [build, variables, tests]
    permissions:
      packages: write
      contents: read
    if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && github.event.pull_request.user.login != 'renovate[bot]'
    env:
      DOCKER_ARM64: docker.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-arm64v8
      DOCKER_AMD64: docker.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-amd64
      DOCKER_TAG: docker.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}
      DOCKER_BRANCH: docker.io/${{ needs.variables.outputs.github_repository }}:${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}
      GHCR_ARM64: ghcr.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-arm64v8
      GHCR_AMD64: ghcr.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-amd64
      GHCR_TAG: ghcr.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}
      GHCR_BRANCH: ghcr.io/${{ needs.variables.outputs.github_repository }}:${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}
      GLCR_ARM64: registry.gitlab.com/conduwuit/conduwuit:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-arm64v8
      GLCR_AMD64: registry.gitlab.com/conduwuit/conduwuit:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-amd64
      GLCR_TAG: registry.gitlab.com/conduwuit/conduwuit:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}
      GLCR_BRANCH: registry.gitlab.com/conduwuit/conduwuit:${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}
      DOCKER_HUB_REPO: docker.io/${{ needs.variables.outputs.github_repository }}
      GHCR_REPO: ghcr.io/${{ needs.variables.outputs.github_repository }}
      GLCR_REPO: registry.gitlab.com/conduwuit/conduwuit
      UNIQUE_TAG: ${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}
      BRANCH_TAG: ${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}

      DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
      GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }}
```
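The UNIQUE_TAG/BRANCH_TAG expressions encode a fallback chain: 'latest' for non-rc version tags, a merge tag for pull requests, otherwise the ref name. The same selection mirrored in shell, for reasoning only (PR_NUMBER and PR_AUTHOR are hypothetical stand-ins for the event fields):

```sh
# Mirror of the BRANCH_TAG Actions expression; not part of the workflow itself.
if [[ "$GITHUB_REF" == refs/tags/v* && "$GITHUB_REF" != *-rc ]]; then
    BRANCH_TAG="latest"
elif [[ -n "${GITHUB_HEAD_REF:-}" ]]; then
    BRANCH_TAG="merge-${PR_NUMBER}-${PR_AUTHOR}"   # hypothetical stand-ins for event fields
else
    BRANCH_TAG="$GITHUB_REF_NAME"
fi
```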
@ -781,146 +526,192 @@ jobs:
|
|||
|
||||
- name: Download artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
pattern: "oci*"
|
||||
|
||||
- name: Move OCI images into position
|
||||
run: |
|
||||
mv -v oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised/*.tar.gz oci-image-amd64-haswell-optimised.tar.gz
|
||||
mv -v oci-image-x86_64-linux-musl/*.tar.gz oci-image-amd64.tar.gz
|
||||
mv -v oci-image-aarch64-linux-musl/*.tar.gz oci-image-arm64v8.tar.gz
|
||||
mv -v oci-image-x86_64-linux-musl-debug/*.tar.gz oci-image-amd64-debug.tar.gz
|
||||
mv -v oci-image-aarch64-linux-musl-debug/*.tar.gz oci-image-arm64v8-debug.tar.gz
|
||||
|
||||
- name: Load and push amd64 haswell image
  run: |
    docker load -i oci-image-amd64-haswell-optimised.tar.gz
    if [ ! -z $DOCKERHUB_TOKEN ]; then
      docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell
      docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell
    fi
    if [ $GHCR_ENABLED = "true" ]; then
      docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-haswell
      docker push ${GHCR_REPO}:${UNIQUE_TAG}-haswell
    fi
    if [ ! -z $GITLAB_TOKEN ]; then
      docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-haswell
      docker push ${GLCR_REPO}:${UNIQUE_TAG}-haswell
    fi

- name: Load and push amd64 image
  run: |
    docker load -i oci-image-amd64.tar.gz
    if [ ! -z $DOCKERHUB_TOKEN ]; then
      docker tag $(docker images -q conduwuit:main) ${DOCKER_AMD64}
      docker push ${DOCKER_AMD64}
      docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64
      docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64
    fi
    if [ $GHCR_ENABLED = "true" ]; then
      docker tag $(docker images -q conduwuit:main) ${GHCR_AMD64}
      docker push ${GHCR_AMD64}
      docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-amd64
      docker push ${GHCR_REPO}:${UNIQUE_TAG}-amd64
    fi
    if [ ! -z $GITLAB_TOKEN ]; then
      docker tag $(docker images -q conduwuit:main) ${GLCR_AMD64}
      docker push ${GLCR_AMD64}
      docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-amd64
      docker push ${GLCR_REPO}:${UNIQUE_TAG}-amd64
    fi

- name: Load and push arm64 image
  run: |
    docker load -i oci-image-arm64v8.tar.gz
    if [ ! -z $DOCKERHUB_TOKEN ]; then
      docker tag $(docker images -q conduwuit:main) ${DOCKER_ARM64}
      docker push ${DOCKER_ARM64}
      docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8
      docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8
    fi
    if [ $GHCR_ENABLED = "true" ]; then
      docker tag $(docker images -q conduwuit:main) ${GHCR_ARM64}
      docker push ${GHCR_ARM64}
      docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8
      docker push ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8
    fi
    if [ ! -z $GITLAB_TOKEN ]; then
      docker tag $(docker images -q conduwuit:main) ${GLCR_ARM64}
      docker push ${GLCR_ARM64}
      docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8
      docker push ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8
    fi

- name: Load and push amd64 debug image
  run: |
    docker load -i oci-image-amd64-debug.tar.gz
    if [ ! -z $DOCKERHUB_TOKEN ]; then
      docker tag $(docker images -q conduwuit:main) ${DOCKER_AMD64}-debug
      docker push ${DOCKER_AMD64}-debug
      docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug
      docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug
    fi
    if [ $GHCR_ENABLED = "true" ]; then
      docker tag $(docker images -q conduwuit:main) ${GHCR_AMD64}-debug
      docker push ${GHCR_AMD64}-debug
      docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug
      docker push ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug
    fi
    if [ ! -z $GITLAB_TOKEN ]; then
      docker tag $(docker images -q conduwuit:main) ${GLCR_AMD64}-debug
      docker push ${GLCR_AMD64}-debug
      docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug
      docker push ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug
    fi

- name: Load and push arm64 debug image
  run: |
    docker load -i oci-image-arm64v8-debug.tar.gz
    if [ ! -z $DOCKERHUB_TOKEN ]; then
      docker tag $(docker images -q conduwuit:main) ${DOCKER_ARM64}-debug
      docker push ${DOCKER_ARM64}-debug
      docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug
      docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug
    fi
    if [ $GHCR_ENABLED = "true" ]; then
      docker tag $(docker images -q conduwuit:main) ${GHCR_ARM64}-debug
      docker push ${GHCR_ARM64}-debug
      docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug
      docker push ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug
    fi
    if [ ! -z $GITLAB_TOKEN ]; then
      docker tag $(docker images -q conduwuit:main) ${GLCR_ARM64}-debug
      docker push ${GLCR_ARM64}-debug
      docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug
      docker push ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug
    fi
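Each of the five steps above repeats the same per-registry pattern: load the OCI tarball, tag the loaded image, push it, with each registry gated on its own credential or flag. A condensed sketch of that pattern (the helper name is illustrative, not part of the workflow):

    # Sketch only: IMAGE_ID is assumed to hold the output of
    # `docker images -q conduwuit:main` after `docker load`.
    push_to() {
        docker tag "$IMAGE_ID" "$1"
        docker push "$1"
    }
    # e.g. push_to "${GHCR_REPO}:${UNIQUE_TAG}-amd64"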
- name: Create Docker haswell manifests
  run: |
    # Dockerhub Container Registry
    if [ ! -z $DOCKERHUB_TOKEN ]; then
      docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell
      docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell
    fi
    # GitHub Container Registry
    if [ $GHCR_ENABLED = "true" ]; then
      docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell
      docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell
    fi
    # GitLab Container Registry
    if [ ! -z $GITLAB_TOKEN ]; then
      docker manifest create ${GLCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell
      docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell
    fi

- name: Create Docker combined manifests
  run: |
    # Dockerhub Container Registry
    if [ ! -z $DOCKERHUB_TOKEN ]; then
      docker manifest create ${DOCKER_TAG} --amend ${DOCKER_ARM64} --amend ${DOCKER_AMD64}
      docker manifest create ${DOCKER_BRANCH} --amend ${DOCKER_ARM64} --amend ${DOCKER_AMD64}
      docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG} --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64
      docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG} --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64
    fi
    # GitHub Container Registry
    if [ $GHCR_ENABLED = "true" ]; then
      docker manifest create ${GHCR_TAG} --amend ${GHCR_ARM64} --amend ${GHCR_AMD64}
      docker manifest create ${GHCR_BRANCH} --amend ${GHCR_ARM64} --amend ${GHCR_AMD64}
      docker manifest create ${GHCR_REPO}:${UNIQUE_TAG} --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64
      docker manifest create ${GHCR_REPO}:${BRANCH_TAG} --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64
    fi
    # GitLab Container Registry
    if [ ! -z $GITLAB_TOKEN ]; then
      docker manifest create ${GLCR_TAG} --amend ${GLCR_ARM64} --amend ${GLCR_AMD64}
      docker manifest create ${GLCR_BRANCH} --amend ${GLCR_ARM64} --amend ${GLCR_AMD64}
      docker manifest create ${GLCR_REPO}:${UNIQUE_TAG} --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64
      docker manifest create ${GLCR_REPO}:${BRANCH_TAG} --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64
    fi

- name: Create Docker combined debug manifests
  run: |
    # Dockerhub Container Registry
    if [ ! -z $DOCKERHUB_TOKEN ]; then
      docker manifest create ${DOCKER_TAG}-debug --amend ${DOCKER_ARM64}-debug --amend ${DOCKER_AMD64}-debug
      docker manifest create ${DOCKER_BRANCH}-debug --amend ${DOCKER_ARM64}-debug --amend ${DOCKER_AMD64}-debug
      docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug
      docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug
    fi
    # GitHub Container Registry
    if [ $GHCR_ENABLED = "true" ]; then
      docker manifest create ${GHCR_TAG}-debug --amend ${GHCR_ARM64}-debug --amend ${GHCR_AMD64}-debug
      docker manifest create ${GHCR_BRANCH}-debug --amend ${GHCR_ARM64}-debug --amend ${GHCR_AMD64}-debug
      docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug
      docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug
    fi
    # GitLab Container Registry
    if [ ! -z $GITLAB_TOKEN ]; then
      docker manifest create ${GLCR_TAG}-debug --amend ${GLCR_ARM64}-debug --amend ${GLCR_AMD64}-debug
      docker manifest create ${GLCR_BRANCH}-debug --amend ${GLCR_ARM64}-debug --amend ${GLCR_AMD64}-debug
      docker manifest create ${GLCR_REPO}:${UNIQUE_TAG}-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug
      docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug
    fi
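`docker manifest create` assembles a multi-architecture manifest list from per-architecture tags that have already been pushed; each `--amend` adds one entry, and the list only becomes visible once it is itself pushed. The haswell manifests above wrap a single amd64 image, so they amend only one tag. An illustrative two-architecture example (the repository name here is made up):

    docker manifest create example.io/conduwuit:v1 \
      --amend example.io/conduwuit:v1-amd64 \
      --amend example.io/conduwuit:v1-arm64v8
    docker manifest push example.io/conduwuit:v1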
- name: Push manifests to Docker registries
  run: |
    if [ ! -z $DOCKERHUB_TOKEN ]; then
      docker manifest push ${DOCKER_TAG}
      docker manifest push ${DOCKER_BRANCH}
      docker manifest push ${DOCKER_TAG}-debug
      docker manifest push ${DOCKER_BRANCH}-debug
      docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}
      docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG}
      docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug
      docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG}-debug
      docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell
      docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell
    fi
    if [ $GHCR_ENABLED = "true" ]; then
      docker manifest push ${GHCR_TAG}
      docker manifest push ${GHCR_BRANCH}
      docker manifest push ${GHCR_TAG}-debug
      docker manifest push ${GHCR_BRANCH}-debug
      docker manifest push ${GHCR_REPO}:${UNIQUE_TAG}
      docker manifest push ${GHCR_REPO}:${BRANCH_TAG}
      docker manifest push ${GHCR_REPO}:${UNIQUE_TAG}-debug
      docker manifest push ${GHCR_REPO}:${BRANCH_TAG}-debug
      docker manifest push ${GHCR_REPO}:${UNIQUE_TAG}-haswell
      docker manifest push ${GHCR_REPO}:${BRANCH_TAG}-haswell
    fi
    if [ ! -z $GITLAB_TOKEN ]; then
      docker manifest push ${GLCR_TAG}
      docker manifest push ${GLCR_BRANCH}
      docker manifest push ${GLCR_TAG}-debug
      docker manifest push ${GLCR_BRANCH}-debug
      docker manifest push ${GLCR_REPO}:${UNIQUE_TAG}
      docker manifest push ${GLCR_REPO}:${BRANCH_TAG}
      docker manifest push ${GLCR_REPO}:${UNIQUE_TAG}-debug
      docker manifest push ${GLCR_REPO}:${BRANCH_TAG}-debug
      docker manifest push ${GLCR_REPO}:${UNIQUE_TAG}-haswell
      docker manifest push ${GLCR_REPO}:${BRANCH_TAG}-haswell
    fi

- name: Add Image Links to Job Summary
  run: |
    if [ ! -z $DOCKERHUB_TOKEN ]; then
      echo "- \`docker pull ${DOCKER_TAG}\`" >> $GITHUB_STEP_SUMMARY
      echo "- \`docker pull ${DOCKER_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY
      echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY
      echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY
      echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY
    fi
    if [ $GHCR_ENABLED = "true" ]; then
      echo "- \`docker pull ${GHCR_TAG}\`" >> $GITHUB_STEP_SUMMARY
      echo "- \`docker pull ${GHCR_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY
      echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY
      echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY
      echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY
    fi
    if [ ! -z $GITLAB_TOKEN ]; then
      echo "- \`docker pull ${GLCR_TAG}\`" >> $GITHUB_STEP_SUMMARY
      echo "- \`docker pull ${GLCR_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY
      echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY
      echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY
      echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY
    fi
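The summary step works because $GITHUB_STEP_SUMMARY names a file provided by the runner; any markdown appended to it is rendered on the workflow run page. A minimal illustration (image name made up):

    echo '- `docker pull example.io/conduwuit:latest`' >> "$GITHUB_STEP_SUMMARY"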
.github/workflows/docker-hub-description.yml (new file, 41 lines)

@@ -0,0 +1,41 @@
name: Update Docker Hub Description

on:
  push:
    branches:
      - main
    paths:
      - README.md
      - .github/workflows/docker-hub-description.yml

  workflow_dispatch:

jobs:
  dockerHubDescription:
    runs-on: ubuntu-latest
    if: ${{ (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && github.event.pull_request.user.login != 'renovate[bot]' && (vars.DOCKER_USERNAME != '') }}
    steps:
      - uses: actions/checkout@v4
        with:
          persist-credentials: false

      - name: Setting variables
        uses: actions/github-script@v7
        id: var
        with:
          script: |
            const githubRepo = '${{ github.repository }}'.toLowerCase()
            const repoId = githubRepo.split('/')[1]

            core.setOutput('github_repository', githubRepo)
            const dockerRepo = '${{ vars.DOCKER_USERNAME }}'.toLowerCase() + '/' + repoId
            core.setOutput('docker_repo', dockerRepo)

      - name: Docker Hub Description
        uses: peter-evans/dockerhub-description@v4
        with:
          username: ${{ vars.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
          repository: ${{ steps.var.outputs.docker_repo }}
          short-description: ${{ github.event.repository.description }}
          enable-url-completion: true
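Docker Hub repository names must be lowercase, which is why the github-script step lowercases both the repository slug and the username before composing the target repository. Roughly, in shell terms (values illustrative):

    # e.g. GITHUB_REPOSITORY="GirlBossCeo/Conduwuit", DOCKER_USERNAME="GirlBossCeo"
    # yields docker_repo="girlbossceo/conduwuit"
    repo_id="$(echo "$GITHUB_REPOSITORY" | cut -d/ -f2 | tr '[:upper:]' '[:lower:]')"
    docker_repo="$(echo "$DOCKER_USERNAME" | tr '[:upper:]' '[:lower:]')/$repo_id"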
.github/workflows/documentation.yml (64 lines changed)

@@ -24,8 +24,8 @@ env:
# Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps
NIX_CONFIG: |
  show-trace = true
  extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://cache.lix.systems https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net
  extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
  extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net https://nix-community.cachix.org https://crane.cachix.org
  extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs= crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=
  experimental-features = nix-command flakes
  extra-experimental-features = nix-command flakes
  accept-flake-config = true

@@ -41,7 +41,7 @@ permissions: {}
jobs:
  docs:
    name: Documentation and GitHub Pages
    runs-on: ubuntu-24.04
    runs-on: self-hosted

    permissions:
      pages: write

@@ -52,15 +52,6 @@ jobs:
      url: ${{ steps.deployment.outputs.page_url }}

    steps:
      - name: Free up a bit of runner space
        run: |
          set +o pipefail
          sudo docker image prune --all --force || true
          sudo apt purge -y 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true
          sudo apt clean
          sudo rm -v -rf /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/lib/heroku
          set -o pipefail

      - name: Sync repository
        uses: actions/checkout@v4
        with:

@@ -70,57 +61,9 @@ jobs:
        if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request')
        uses: actions/configure-pages@v5

      - uses: nixbuild/nix-quick-install-action@master

      - name: Restore and cache Nix store
        uses: nix-community/cache-nix-action@v5.1.0
        with:
          # restore and save a cache using this key
          primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/.lock') }}
          # if there's no cache hit, restore a cache by this prefix
          restore-prefixes-first-match: nix-${{ runner.os }}-
          # collect garbage until Nix store size (in bytes) is at most this number
          # before trying to save a new cache
          gc-max-store-size-linux: 2073741824
          # do purge caches
          purge: true
          # purge all versions of the cache
          purge-prefixes: nix-${{ runner.os }}-
          # created more than this number of seconds ago relative to the start of the `Post Restore` phase
          purge-last-accessed: 86400
          # except the version with the `primary-key`, if it exists
          purge-primary-key: never
          # always save the cache
          save-always: true

      - name: Enable Cachix binary cache
        run: |
          nix profile install nixpkgs#cachix
          cachix use crane
          cachix use nix-community

      - name: Apply Nix binary cache configuration
        run: |
          sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null <<EOF
          extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://cache.lix.systems https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net
          extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
          experimental-features = nix-command flakes
          extra-experimental-features = nix-command flakes
          accept-flake-config = true
          EOF

      - name: Use alternative Nix binary caches if specified
        if: ${{ (env.ATTIC_ENDPOINT != '') && (env.ATTIC_PUBLIC_KEY != '') }}
        run: |
          sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null <<EOF
          extra-substituters = ${ATTIC_ENDPOINT}
          extra-trusted-public-keys = ${ATTIC_PUBLIC_KEY}
          EOF

      - name: Prepare build environment
        run: |
          echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc"
          nix profile install --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv
          direnv allow
          nix develop --command true

@@ -138,6 +81,7 @@ jobs:
          bin/nix-build-and-cache just .#book

          cp -r --dereference result public
          chmod u+w -R public

      - name: Upload generated documentation (book) as normal artifact
        uses: actions/upload-artifact@v4
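Both CI configurations pair every extra-substituters entry with a matching extra-trusted-public-keys entry; Nix ignores signed paths from a cache whose signing key it does not trust. Appending one cache pair by hand looks like this (the endpoint and key below are placeholders):

    sudo tee -a /etc/nix/nix.conf > /dev/null <<'EOF'
    extra-substituters = https://cache.example.org
    extra-trusted-public-keys = cache.example.org-1:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
    EOF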
@@ -12,8 +12,8 @@ variables:
TRANSFER_METER_FREQUENCY: 5s
NIX_CONFIG: |
  show-trace = true
  extra-substituters = https://attic.kennel.juneis.dog/conduit https://attic.kennel.juneis.dog/conduwuit https://cache.lix.systems https://conduwuit.cachix.org
  extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
  extra-substituters = https://attic.kennel.juneis.dog/conduit https://attic.kennel.juneis.dog/conduwuit https://conduwuit.cachix.org
  extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
  experimental-features = nix-command flakes
  extra-experimental-features = nix-command flakes
  accept-flake-config = true

@@ -45,10 +45,6 @@ before_script:
- if command -v nix > /dev/null && [ -n "$ATTIC_ENDPOINT" ]; then echo "extra-substituters = $ATTIC_ENDPOINT" >> /etc/nix/nix.conf; fi
- if command -v nix > /dev/null && [ -n "$ATTIC_PUBLIC_KEY" ]; then echo "extra-trusted-public-keys = $ATTIC_PUBLIC_KEY" >> /etc/nix/nix.conf; fi

# Add Lix binary cache
- if command -v nix > /dev/null; then echo "extra-substituters = https://cache.lix.systems" >> /etc/nix/nix.conf; fi
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o=" >> /etc/nix/nix.conf; fi

# Add crane binary cache
- if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi
Cargo.lock (generated, 1928 lines changed; diff suppressed because it is too large)
Cargo.toml (202 lines changed)

@@ -7,30 +7,31 @@ default-members = ["src/*"]

[workspace.package]
authors = [
"strawberry <strawberry@puppygock.gay>",
"timokoesters <timo@koesters.xyz>",
"June Clementine Strawberry <june@girlboss.ceo>",
"strawberry <strawberry@puppygock.gay>", # woof
"Jason Volk <jason@zemos.net>",
]
categories = ["network-programming"]
description = "a very cool fork of Conduit, a Matrix homeserver written in Rust"
edition = "2021"
description = "a very cool Matrix chat homeserver written in Rust"
edition = "2024"
homepage = "https://conduwuit.puppyirl.gay/"
keywords = ["chat", "matrix", "server", "uwu"]
keywords = ["chat", "matrix", "networking", "server", "uwu"]
license = "Apache-2.0"
# See also `rust-toolchain.toml`
readme = "README.md"
repository = "https://github.com/girlbossceo/conduwuit"
rust-version = "1.83.0"
rust-version = "1.86.0"
version = "0.5.0"

[workspace.metadata.crane]
name = "conduwuit"

[workspace.dependencies.arrayvec]
version = "0.7.4"
version = "0.7.6"
features = ["serde"]

[workspace.dependencies.smallvec]
version = "1.13.2"
version = "1.14.0"
features = [
"const_generics",
"const_new",

@@ -39,8 +40,12 @@ features = [
"write",
]

[workspace.dependencies.smallstr]
version = "0.3"
features = ["ffi", "std", "union"]

[workspace.dependencies.const-str]
version = "0.5.7"
version = "0.6.2"

[workspace.dependencies.ctor]
version = "0.2.9"

@@ -58,10 +63,6 @@ features = ["parse"]
[workspace.dependencies.sanitize-filename]
version = "0.6.0"

[workspace.dependencies.jsonwebtoken]
version = "9.3.0"
default-features = false

[workspace.dependencies.base64]
version = "0.22.1"
default-features = false

@@ -80,13 +81,13 @@ version = "0.8.5"

# Used for the http request / response body type for Ruma endpoints used with reqwest
[workspace.dependencies.bytes]
version = "1.9.0"
version = "1.10.1"

[workspace.dependencies.http-body-util]
version = "0.1.2"
version = "0.1.3"

[workspace.dependencies.http]
version = "1.2.0"
version = "1.3.1"

[workspace.dependencies.regex]
version = "1.11.1"

@@ -110,7 +111,7 @@ default-features = false
features = ["typed-header", "tracing"]

[workspace.dependencies.axum-server]
version = "0.7.1"
version = "0.7.2"
default-features = false

# to listen on both HTTP and HTTPS if listening on TLS directly from conduwuit for complement or sytest

@@ -121,7 +122,7 @@ version = "0.7"
version = "0.6.1"

[workspace.dependencies.tower]
version = "0.5.1"
version = "0.5.2"
default-features = false
features = ["util"]

@@ -130,21 +131,22 @@ version = "0.6.2"
default-features = false
features = [
"add-extension",
"catch-panic",
"cors",
"sensitive-headers",
"set-header",
"timeout",
"trace",
"util",
"catch-panic",
]

[workspace.dependencies.rustls]
version = "0.23.19"
version = "0.23.25"
default-features = false
features = ["aws_lc_rs"]

[workspace.dependencies.reqwest]
version = "0.12.9"
version = "0.12.15"
default-features = false
features = [
"rustls-tls-native-roots",

@@ -154,12 +156,12 @@ features = [
]

[workspace.dependencies.serde]
version = "1.0.216"
version = "1.0.219"
default-features = false
features = ["rc"]

[workspace.dependencies.serde_json]
version = "1.0.133"
version = "1.0.140"
default-features = false
features = ["raw_value"]

@@ -181,7 +183,7 @@ version = "0.5.3"
features = ["alloc", "rand"]
default-features = false

# Used to generate thumbnails for images
# Used to generate thumbnails for images & blurhashes
[workspace.dependencies.image]
version = "0.25.5"
default-features = false

@@ -192,15 +194,23 @@ features = [
"webp",
]

[workspace.dependencies.blurhash]
version = "0.2.3"
default-features = false
features = [
"fast-linear-to-srgb",
"image",
]

# logging
[workspace.dependencies.log]
version = "0.4.22"
version = "0.4.27"
default-features = false
[workspace.dependencies.tracing]
version = "0.1.41"
default-features = false
[workspace.dependencies.tracing-subscriber]
version = "=0.3.18"
version = "0.3.19"
default-features = false
features = ["env-filter", "std", "tracing", "tracing-log", "ansi", "fmt"]
[workspace.dependencies.tracing-core]

@@ -214,7 +224,7 @@ default-features = false

# used for conduwuit's CLI and admin room command parsing
[workspace.dependencies.clap]
version = "4.5.23"
version = "4.5.35"
default-features = false
features = [
"derive",

@@ -227,12 +237,12 @@ features = [
]

[workspace.dependencies.futures]
version = "0.3.30"
version = "0.3.31"
default-features = false
features = ["std", "async-await"]

[workspace.dependencies.tokio]
version = "1.42.0"
version = "1.44.2"
default-features = false
features = [
"fs",

@@ -265,7 +275,7 @@ features = ["alloc", "std"]
default-features = false

[workspace.dependencies.hyper]
version = "1.5.1"
version = "1.6.0"
default-features = false
features = [
"server",

@@ -274,8 +284,7 @@ features = [
]

[workspace.dependencies.hyper-util]
# hyper-util >=0.1.9 seems to have DNS issues
version = "=0.1.8"
version = "0.1.11"
default-features = false
features = [
"server-auto",

@@ -285,7 +294,7 @@ features = [

# to support multiple variations of setting a config option
[workspace.dependencies.either]
version = "1.13.0"
version = "1.15.0"
default-features = false
features = ["serde"]

@@ -296,22 +305,27 @@ default-features = false
features = ["env", "toml"]

[workspace.dependencies.hickory-resolver]
version = "0.24.2"
version = "0.25.1"
default-features = false
features = [
"serde",
"system-config",
"tokio",
]

# Used for conduwuit::Error type
[workspace.dependencies.thiserror]
version = "2.0.7"
version = "2.0.12"
default-features = false

# Used when hashing the state
[workspace.dependencies.ring]
version = "0.17.8"
version = "0.17.14"
default-features = false

# Used to make working with iterators easier, was already a transitive dependency
[workspace.dependencies.itertools]
version = "0.13.0"
version = "0.14.0"

# to parse user-friendly time durations in admin commands
#TODO: overlaps chrono?

@@ -327,7 +341,7 @@ version = "0.4.0"
version = "2.3.1"

[workspace.dependencies.async-trait]
version = "0.1.83"
version = "0.1.88"

[workspace.dependencies.lru-cache]
version = "0.1.2"

@@ -336,7 +350,7 @@ version = "0.1.2"
[workspace.dependencies.ruma]
git = "https://github.com/girlbossceo/ruwuma"
#branch = "conduwuit-changes"
rev = "c4f55b39900b33b2d443dd12a6a2dab50961fdfb"
rev = "920148dca1076454ca0ca5d43b5ce1aa708381d4"
features = [
"compat",
"rand",

@@ -345,8 +359,6 @@ features = [
"federation-api",
"markdown",
"push-gateway-api-c",
"state-res",
"server-util",
"unstable-exhaustive-types",
"ring-compat",
"compat-upload-signatures",

@@ -363,24 +375,27 @@ features = [
"unstable-msc3381", # polls
"unstable-msc3489", # beacon / live location
"unstable-msc3575",
"unstable-msc3930", # polls push rules
"unstable-msc4075",
"unstable-msc4095",
"unstable-msc4121",
"unstable-msc4125",
"unstable-msc4186",
"unstable-msc4203", # sending to-device events to appservices
"unstable-msc4210", # remove legacy mentions
"unstable-extensible-events",
"unstable-pdu",
]

[workspace.dependencies.rust-rocksdb]
path = "deps/rust-rocksdb"
package = "rust-rocksdb-uwu"
git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1"
rev = "1c267e0bf0cc7b7702e9a329deccd89de79ef4c3"
default-features = false
features = [
"multi-threaded-cf",
"mt_static",
"lz4",
"zstd",
"zlib",
"bzip2",
]

@@ -412,7 +427,7 @@ features = ["rt-tokio"]

# optional sentry metrics for crash/panic reporting
[workspace.dependencies.sentry]
version = "0.35.0"
version = "0.37.0"
default-features = false
features = [
"backtrace",

@@ -428,24 +443,30 @@ features = [
]

[workspace.dependencies.sentry-tracing]
version = "0.35.0"
version = "0.37.0"
[workspace.dependencies.sentry-tower]
version = "0.35.0"
version = "0.37.0"

# jemalloc usage
[workspace.dependencies.tikv-jemalloc-sys]
git = "https://github.com/girlbossceo/jemallocator"
rev = "d87938bfddc26377dd7fdf14bbcd345f3ab19442"
rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8"
default-features = false
features = ["unprefixed_malloc_on_supported_platforms"]
features = [
"background_threads_runtime_support",
"unprefixed_malloc_on_supported_platforms",
]
[workspace.dependencies.tikv-jemallocator]
git = "https://github.com/girlbossceo/jemallocator"
rev = "d87938bfddc26377dd7fdf14bbcd345f3ab19442"
rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8"
default-features = false
features = ["unprefixed_malloc_on_supported_platforms"]
features = [
"background_threads_runtime_support",
"unprefixed_malloc_on_supported_platforms",
]
[workspace.dependencies.tikv-jemalloc-ctl]
git = "https://github.com/girlbossceo/jemallocator"
rev = "d87938bfddc26377dd7fdf14bbcd345f3ab19442"
rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8"
default-features = false
features = ["use_std"]

@@ -458,7 +479,7 @@ default-features = false
features = ["resource"]

[workspace.dependencies.sd-notify]
version = "0.4.3"
version = "0.4.5"
default-features = false

[workspace.dependencies.hardened_malloc-rs]

@@ -475,25 +496,25 @@ version = "0.4.3"
default-features = false

[workspace.dependencies.termimad]
version = "0.31.1"
version = "0.31.2"
default-features = false

[workspace.dependencies.checked_ops]
version = "0.1"

[workspace.dependencies.syn]
version = "2.0.90"
version = "2.0"
default-features = false
features = ["full", "extra-traits"]

[workspace.dependencies.quote]
version = "1.0.37"
version = "1.0"

[workspace.dependencies.proc-macro2]
version = "1.0.89"
version = "1.0"

[workspace.dependencies.bytesize]
version = "1.3.0"
version = "2.0"

[workspace.dependencies.core_affinity]
version = "0.8.1"

@@ -504,6 +525,17 @@ version = "0.2"
[workspace.dependencies.num-traits]
version = "0.2"

[workspace.dependencies.minicbor]
version = "0.26.3"
features = ["std"]

[workspace.dependencies.minicbor-serde]
version = "0.4.1"
features = ["std"]

[workspace.dependencies.maplit]
version = "1.0.2"

#
# Patches
#

@@ -513,16 +545,16 @@ version = "0.2"
# https://github.com/girlbossceo/tracing/commit/b348dca742af641c47bc390261f60711c2af573c
[patch.crates-io.tracing-subscriber]
git = "https://github.com/girlbossceo/tracing"
rev = "ccc4fbd8238c2d5ba354e61ec17ac610af11401d"
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
[patch.crates-io.tracing]
git = "https://github.com/girlbossceo/tracing"
rev = "ccc4fbd8238c2d5ba354e61ec17ac610af11401d"
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
[patch.crates-io.tracing-core]
git = "https://github.com/girlbossceo/tracing"
rev = "ccc4fbd8238c2d5ba354e61ec17ac610af11401d"
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
[patch.crates-io.tracing-log]
git = "https://github.com/girlbossceo/tracing"
rev = "ccc4fbd8238c2d5ba354e61ec17ac610af11401d"
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"

# adds a tab completion callback: https://github.com/girlbossceo/rustyline-async/commit/de26100b0db03e419a3d8e1dd26895d170d1fe50
# adds event for CTRL+\: https://github.com/girlbossceo/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b

@@ -538,10 +570,23 @@ rev = "fe4aebeeaae435af60087ddd56b573a2e0be671d"
git = "https://github.com/girlbossceo/async-channel"
rev = "92e5e74063bf2a3b10414bcc8a0d68b235644280"

# adds affinity masks for selecting more than one core at a time
[patch.crates-io.core_affinity]
git = "https://github.com/girlbossceo/core_affinity_rs"
rev = "9c8e51510c35077df888ee72a36b4b05637147da"

# reverts hyperium#148 conflicting with our delicate federation resolver hooks
[patch.crates-io.hyper-util]
git = "https://github.com/girlbossceo/hyper-util"
rev = "e4ae7628fe4fcdacef9788c4c8415317a4489941"

# allows no-aaaa option in resolv.conf
# bumps rust edition and toolchain to 1.86.0 and 2024
# use sat_add on line number errors
[patch.crates-io.resolv-conf]
git = "https://github.com/girlbossceo/resolv-conf"
rev = "200e958941d522a70c5877e3d846f55b5586c68d"

#
# Our crates
#

@@ -658,7 +703,7 @@ inherits = "release"

# To enable hot-reloading:
# 1. Uncomment all of the rustflags here.
# 2. Uncomment crate-type=dylib in src/*/Cargo.toml and deps/rust-rocksdb/Cargo.toml
# 2. Uncomment crate-type=dylib in src/*/Cargo.toml
#
# opt-level, mir-opt-level, validate-mir are not known to interfere with reloading
# and can be raised if build times are tolerable.

@@ -726,27 +771,6 @@ inherits = "dev"
# '-Clink-arg=-Wl,-z,lazy',
#]

[profile.dev.package.rust-rocksdb-uwu]
inherits = "dev"
debug = 'limited'
incremental = false
codegen-units = 1
opt-level = 'z'
#rustflags = [
# '--cfg', 'conduwuit_mods',
# '-Ztls-model=initial-exec',
# '-Cprefer-dynamic=true',
# '-Zstaticlib-prefer-dynamic=true',
# '-Zstaticlib-allow-rdylib-deps=true',
# '-Zpacked-bundled-libs=true',
# '-Zplt=true',
# '-Clink-arg=-Wl,--no-as-needed',
# '-Clink-arg=-Wl,--allow-shlib-undefined',
# '-Clink-arg=-Wl,-z,lazy',
# '-Clink-arg=-Wl,-z,nodlopen',
# '-Clink-arg=-Wl,-z,nodelete',
#]

[profile.dev.package.'*']
inherits = "dev"
debug = 'limited'

@@ -834,6 +858,9 @@ unused_crate_dependencies = "allow"
unsafe_code = "allow"
variant_size_differences = "allow"

# we check nightly clippy lints
unknown_lints = "allow"

#######################################
#
# Clippy lints

@@ -873,13 +900,16 @@ enum_glob_use = { level = "allow", priority = 1 }
if_not_else = { level = "allow", priority = 1 }
if_then_some_else_none = { level = "allow", priority = 1 }
inline_always = { level = "allow", priority = 1 }
match_bool = { level = "allow", priority = 1 }
missing_docs_in_private_items = { level = "allow", priority = 1 }
missing_errors_doc = { level = "allow", priority = 1 }
missing_panics_doc = { level = "allow", priority = 1 }
module_name_repetitions = { level = "allow", priority = 1 }
needless_continue = { level = "allow", priority = 1 }
no_effect_underscore_binding = { level = "allow", priority = 1 }
similar_names = { level = "allow", priority = 1 }
single_match_else = { level = "allow", priority = 1 }
struct_excessive_bools = { level = "allow", priority = 1 }
struct_field_names = { level = "allow", priority = 1 }
unnecessary_wraps = { level = "allow", priority = 1 }
unused_async = { level = "allow", priority = 1 }

@@ -941,9 +971,13 @@ style = { level = "warn", priority = -1 }
# trivial assertions are quite alright
assertions_on_constants = { level = "allow", priority = 1 }
module_inception = { level = "allow", priority = 1 }
obfuscated_if_else = { level = "allow", priority = 1 }

###################
suspicious = { level = "warn", priority = -1 }

## some sadness
let_underscore_future = { level = "allow", priority = 1 }

# rust doesn't understand conduwuit's custom log macros
literal_string_with_formatting_args = { level = "allow", priority = 1 }
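The [patch.crates-io.*] tables in the Cargo.toml diff above globally redirect a crates.io dependency to a pinned git fork for every crate in the workspace. One way to confirm a patch actually took effect, assuming a local checkout of this workspace, is to ask Cargo where the crate resolves from:

    # The listed source should be the girlbossceo/tracing fork, not crates.io.
    cargo tree --invert tracing | head -n 3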
README.md (125 lines changed)

@@ -1,10 +1,20 @@
# conduwuit

[](https://matrix.to/#/#conduwuit:puppygock.gay) [](https://matrix.to/#/#conduwuit-space:puppygock.gay) [](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml)
[](https://matrix.to/#/#conduwuit:puppygock.gay) [](https://matrix.to/#/#conduwuit-space:puppygock.gay)

[](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml)

(badge images: GitHub stars, commit activity, and top language)

(badge images: Docker Hub image version for the latest and main tags)

<!-- ANCHOR: catchphrase -->

### a very cool, featureful fork of [Conduit](https://conduit.rs/)
### a very cool [Matrix](https://matrix.org/) chat homeserver written in Rust

<!-- ANCHOR_END: catchphrase -->

@@ -15,16 +25,15 @@ information and how to deploy/setup conduwuit.

#### What is Matrix?

[Matrix](https://matrix.org) is an open network for secure and decentralized
communication. Users from every Matrix homeserver can chat with users from all
other Matrix servers. You can even use bridges (also called Matrix Appservices)
to communicate with users outside of Matrix, like a community on Discord.
[Matrix](https://matrix.org) is an open, federated, and extensible network for
decentralised communication. Users from any Matrix homeserver can chat with users from all
other homeservers over federation. Matrix is designed to be extensible and built on top of.
You can even use bridges such as Matrix Appservices to communicate with users outside of Matrix, like a community on Discord.

#### What is the goal?

A high-performance and efficient Matrix homeserver that's easy to set up and
just works. You can install it on a mini-computer like the Raspberry Pi to
host Matrix for your family, friends or company.
A high-performance, efficient, low-cost, and featureful Matrix homeserver that's
easy to set up and just works with minimal configuration needed.

#### Can I try it out?

@@ -37,17 +46,35 @@ homeserver". This means there are rules, so please read the rules:
[https://transfem.dev/homeserver_rules.txt](https://transfem.dev/homeserver_rules.txt)

transfem.dev is also listed at
[servers.joinmatrix.org](https://servers.joinmatrix.org/)
[servers.joinmatrix.org](https://servers.joinmatrix.org/), which is a list of
popular public Matrix homeservers, including some others that run conduwuit.

#### What is the current status?

conduwuit is technically a hard fork of Conduit, which is in Beta. The Beta status
initially was inherited from Conduit, however over time this Beta status is rapidly
becoming less and less relevant as our codebase significantly diverges more and more.
conduwuit is technically a hard fork of [Conduit](https://conduit.rs/), which is in beta.
The beta status initially was inherited from Conduit, however the huge amount of
codebase divergence, changes, fixes, and improvements have effectively made this
beta status not entirely applicable to us anymore.

conduwuit is quite stable and very usable as a daily driver and for a low-medium
sized homeserver. There is still a lot more work to be done, but it is in a far
better place than the project was in early 2024.
conduwuit is very stable based on our rapidly growing userbase, has lots of features that users
expect, and is very usable as a daily driver for small, medium, and upper-end medium sized homeservers.

A lot of critical stability and performance issues have been fixed, and a lot of
necessary groundwork has been finished, making this project way better than it was
back at the start in ~early 2024.

#### Where is the differences page?

conduwuit historically had a "differences" page that listed each and every single
different thing about conduwuit from Conduit, as a way to promote and advertise
conduwuit by showing the significant amount of work done. While this was feasible to
maintain back when the project was new in early 2024, it became impossible
very quickly and has unfortunately become heavily outdated, missing tons of things, etc.

It's difficult to list out what we do differently, what our notable features are, etc.,
when there are so many things, features, bug fixes, and performance optimisations;
the list goes on. We simply recommend folks to just try out conduwuit, or ask us
what features you are looking for and whether they're implemented in conduwuit.

#### How is conduwuit funded? Is conduwuit sustainable?

@@ -60,17 +87,26 @@ and we have no plans in stopping or slowing down any time soon!

#### Can I migrate or switch from Conduit?

conduwuit is a complete drop-in replacement for Conduit. As long as you are using RocksDB,
the only "migration" you need to do is replace the binary or container image. There
is no harm or additional steps required for using conduwuit. See the
[Migrating from Conduit](https://conduwuit.puppyirl.gay/deploying/generic.html#migrating-from-conduit) section
on the generic deploying guide.
conduwuit had drop-in migration/replacement support for Conduit for about 12 months before
bugs somewhere along the line broke it. Maintaining this has been difficult and
the majority of Conduit users have already migrated; additionally, debugging Conduit
is not one of our interests, and so Conduit migration no longer works. We also
feel that 12 months has been plenty of time for people to seamlessly migrate.

If you are a Conduit user looking to migrate, you will have to wipe and reset
your database. We may fix seamless migration support at some point, but it's not an interest
of ours.

#### Can I migrate from Synapse or Dendrite?

Currently there is no known way to seamlessly migrate all user data from the old
homeserver to conduwuit. However it is perfectly acceptable to replace the old
homeserver software with conduwuit using the same server name and there will not
be any issues with federation.

There is an interest in developing a built-in seamless user data migration
method into conduwuit, however there is no concrete ETA or timeline for this.

Note that as of conduwuit version 0.5.0, backwards compatibility with Conduit is
no longer supported. We only support migrating *from* Conduit, not back to
Conduit like before. If you are truly finding yourself wanting to migrate back
to Conduit, we would appreciate all your feedback and if we can assist with
any issues or concerns.

<!-- ANCHOR_END: body -->

@@ -78,10 +114,23 @@ any issues or concerns.

#### Contact

If you run into any question, feel free to
[`#conduwuit:puppygock.gay`](https://matrix.to/#/#conduwuit:puppygock.gay)
is the official project Matrix room. You can get support here, ask questions or
raise concerns, and get assistance setting up conduwuit.

- Ask us in `#conduwuit:puppygock.gay` on Matrix
- [Open an issue on GitHub](https://github.com/girlbossceo/conduwuit/issues/new)
This room should stay relevant and focused on conduwuit. An offtopic general
chatter room can be found in the room topic there as well.

Please keep the issue trackers focused on *actual* bug reports and enhancement requests.

General support is extremely difficult to offer over an issue tracker, and
simple questions should be asked directly in an interactive platform like our
Matrix room above, as they can turn into a relevant discussion and/or may not be
simple to answer. If you're not sure, just ask in the Matrix room.

If you have a bug or feature to request: [Open an issue on GitHub](https://github.com/girlbossceo/conduwuit/issues/new)

If you need to contact the primary maintainer, my contact methods are on my website: https://girlboss.ceo

#### Donate

@@ -90,8 +139,16 @@ not get paid to work on this, and I work on it in my free time. Donations are
heavily appreciated! 💜🥺

- Liberapay: <https://liberapay.com/girlbossceo>
- Ko-fi (note they take a fee): <https://ko-fi.com/puppygock>
- GitHub Sponsors: <https://github.com/sponsors/girlbossceo>
- Ko-fi: <https://ko-fi.com/puppygock>

I do not and will not accept cryptocurrency donations, including anything related to them.

Note that donations will NOT guarantee you or give you any kind of tangible product,
feature prioritisation, etc. By donating, you are agreeing that conduwuit is NOT
going to provide you any goods or services as part of your donation, and this
donation is purely a generous donation. We will not provide things like paid
personal/direct support, feature request priority, merchandise, etc.

#### Logo

@@ -99,17 +156,23 @@ Original repo and Matrix room picture was from bran (<3). Current banner image
and logo is directly from [this cohost
post](https://web.archive.org/web/20241126004041/https://cohost.org/RatBaby/post/1028290-finally-a-flag-for).

An SVG logo made by [@nktnet1](https://github.com/nktnet1) is available here: <https://github.com/girlbossceo/conduwuit/blob/main/docs/assets/>

#### Is it conduwuit or Conduwuit?

Both, but I prefer conduwuit.

#### Mirrors of conduwuit

If GitHub is unavailable in your country, or has poor connectivity, conduwuit's
source code is mirrored onto the following additional platforms I maintain:

- GitHub: <https://github.com/girlbossceo/conduwuit>
- GitLab: <https://gitlab.com/conduwuit/conduwuit>
- git.girlcock.ceo: <https://git.girlcock.ceo/strawberry/conduwuit>
- git.gay: <https://git.gay/june/conduwuit>
- Codeberg: <https://codeberg.org/girlbossceo/conduwuit>
- mau.dev: <https://mau.dev/june/conduwuit>
- Codeberg: <https://codeberg.org/arf/conduwuit>
- sourcehut: <https://git.sr.ht/~girlbossceo/conduwuit>

<!-- ANCHOR_END: footer -->
|
@ -4,10 +4,24 @@ Wants=network-online.target
|
|||
After=network-online.target
|
||||
Documentation=https://conduwuit.puppyirl.gay/
|
||||
RequiresMountsFor=/var/lib/private/conduwuit
|
||||
Alias=matrix-conduwuit.service
|
||||
|
||||
[Service]
|
||||
DynamicUser=yes
|
||||
Type=notify
|
||||
Type=notify-reload
|
||||
ReloadSignal=SIGUSR1
|
||||
|
||||
TTYPath=/dev/tty25
|
||||
DeviceAllow=char-tty
|
||||
StandardInput=tty-force
|
||||
StandardOutput=tty
|
||||
StandardError=journal+console
|
||||
TTYReset=yes
|
||||
# uncomment to allow buffer to be cleared every restart
|
||||
TTYVTDisallocate=no
|
||||
|
||||
TTYColumns=120
|
||||
TTYRows=40
|
||||
|
||||
AmbientCapabilities=
|
||||
CapabilityBoundingSet=
|
||||
|
|
|
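The switch from Type=notify to Type=notify-reload (available since systemd 253) means systemd delivers the configured ReloadSignal (SIGUSR1 here) on reload and then waits for the service to report readiness again, with no custom ExecReload= line needed:

    # Sends SIGUSR1 to conduwuit and waits for the reload to complete.
    systemctl reload conduwuit.service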
@@ -10,15 +10,15 @@ set -euo pipefail
COMPLEMENT_SRC="${COMPLEMENT_SRC:-$1}"

# A `.jsonl` file to write test logs to
LOG_FILE="$2"
LOG_FILE="${2:-complement_test_logs.jsonl}"

# A `.jsonl` file to write test results to
RESULTS_FILE="$3"
RESULTS_FILE="${3:-complement_test_results.jsonl}"

OCI_IMAGE="complement-conduwuit:main"
COMPLEMENT_BASE_IMAGE="${COMPLEMENT_BASE_IMAGE:-complement-conduwuit:main}"

# Complement tests that are skipped due to flakiness/reliability issues
SKIPPED_COMPLEMENT_TESTS='-skip=TestClientSpacesSummary.*|TestJoinFederatedRoomFromApplicationServiceBridgeUser.*|TestJumpToDateEndpoint.*|TestUnbanViaInvite.*'
# Complement tests that are skipped due to flakiness/reliability issues or we don't implement such features and won't for a long time
SKIPPED_COMPLEMENT_TESTS='TestPartialStateJoin.*|TestRoomDeleteAlias/Parallel/Regular_users_can_add_and_delete_aliases_when_m.*|TestRoomDeleteAlias/Parallel/Can_delete_canonical_alias|TestUnbanViaInvite.*|TestRoomState/Parallel/GET_/publicRooms_lists.*"|TestRoomDeleteAlias/Parallel/Users_with_sufficient_power-level_can_delete_other.*'

# $COMPLEMENT_SRC needs to be a directory to Complement source code
if [ -f "$COMPLEMENT_SRC" ]; then

@@ -34,17 +34,41 @@ toplevel="$(git rev-parse --show-toplevel)"

pushd "$toplevel" > /dev/null

bin/nix-build-and-cache just .#linux-complement
if [ ! -f "complement_oci_image.tar.gz" ]; then
    echo "building complement conduwuit image"

    # if using macOS, use linux-complement
    #bin/nix-build-and-cache just .#linux-complement
    bin/nix-build-and-cache just .#complement
    #nix build -L .#complement

    echo "complement conduwuit image tar.gz built at \"result\""

    echo "loading into docker"
    docker load < result
    popd > /dev/null
else
    echo "skipping building a complement conduwuit image as complement_oci_image.tar.gz was already found, loading this"

    docker load < complement_oci_image.tar.gz
    popd > /dev/null
fi

echo ""
echo "running go test with:"
echo "\$COMPLEMENT_SRC: $COMPLEMENT_SRC"
echo "\$COMPLEMENT_BASE_IMAGE: $COMPLEMENT_BASE_IMAGE"
echo "\$RESULTS_FILE: $RESULTS_FILE"
echo "\$LOG_FILE: $LOG_FILE"
echo ""

# It's okay (likely, even) that `go test` exits nonzero
# `COMPLEMENT_ENABLE_DIRTY_RUNS=1` reuses the same complement container for faster complement, at the possible expense of test environment pollution
set +o pipefail
env \
    -C "$COMPLEMENT_SRC" \
    COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \
    go test -tags="conduwuit_blacklist" "$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests | tee "$LOG_FILE"
    COMPLEMENT_BASE_IMAGE="$COMPLEMENT_BASE_IMAGE" \
    go test -tags="conduwuit_blacklist" -skip="$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests/... | tee "$LOG_FILE"
set -o pipefail

# Post-process the results into an easy-to-compare format, sorted by Test name for reproducible results

@@ -54,3 +78,18 @@ cat "$LOG_FILE" | jq -s -c 'sort_by(.Test)[]' | jq -c '
    and .Test != null
) | {Action: .Action, Test: .Test}
' > "$RESULTS_FILE"

#if command -v gotestfmt &> /dev/null; then
#    echo "using gotestfmt on $LOG_FILE"
#    grep '{"Time":' "$LOG_FILE" | gotestfmt > "complement_test_logs_gotestfmt.log"
#fi

echo ""
echo ""
echo "complement logs saved at $LOG_FILE"
echo "complement results saved at $RESULTS_FILE"
#if command -v gotestfmt &> /dev/null; then
#    echo "complement logs in gotestfmt pretty format outputted at complement_test_logs_gotestfmt.log (use an editor/terminal/pager that interprets ANSI colours and UTF-8 emojis)"
#fi
echo ""
echo ""
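With the log and results paths now defaulting sensibly and the base image overridable, a minimal invocation of this script could look like the following (the bin/complement path and the sibling Complement checkout are assumptions for illustration):

    # LOG_FILE and RESULTS_FILE fall back to complement_test_logs.jsonl and
    # complement_test_results.jsonl in the current directory.
    COMPLEMENT_SRC=../complement bin/complement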
@@ -13,12 +13,15 @@ create-missing = true
extra-watch-dirs = ["debian", "docs"]

[rust]
edition = "2021"
edition = "2024"

[output.html]
git-repository-url = "https://github.com/girlbossceo/conduwuit"
edit-url-template = "https://github.com/girlbossceo/conduwuit/edit/main/{path}"
git-repository-icon = "fa-github-square"

[output.html.redirect]
"/differences.html" = "https://conduwuit.puppyirl.gay/#where-is-the-differences-page"

[output.html.search]
limit-results = 15
@@ -2,9 +2,10 @@ array-size-threshold = 4096
cognitive-complexity-threshold = 94 # TODO reduce me ALARA
excessive-nesting-threshold = 11 # TODO reduce me to 4 or 5
future-size-threshold = 7745 # TODO reduce me ALARA
stack-size-threshold = 196608 # reduce me ALARA
stack-size-threshold = 196608 # TODO reduce me ALARA
too-many-lines-threshold = 780 # TODO reduce me to <= 100
type-complexity-threshold = 250 # reduce me to ~200
large-error-threshold = 256 # TODO reduce me ALARA

disallowed-macros = [
{ path = "log::error", reason = "use conduwuit_core::error" },
@ -195,14 +195,6 @@
|
|||
#
|
||||
#servernameevent_data_cache_capacity = varies by system
|
||||
|
||||
# This item is undocumented. Please contribute documentation for it.
|
||||
#
|
||||
#server_visibility_cache_capacity = varies by system
|
||||
|
||||
# This item is undocumented. Please contribute documentation for it.
|
||||
#
|
||||
#user_visibility_cache_capacity = varies by system
|
||||
|
||||
# This item is undocumented. Please contribute documentation for it.
|
||||
#
|
||||
#stateinfo_cache_capacity = varies by system
|
||||
|
@ -377,6 +369,26 @@
|
|||
#
|
||||
#pusher_idle_timeout = 15
|
||||
|
||||
# Maximum time to receive a request from a client (seconds).
|
||||
#
|
||||
#client_receive_timeout = 75
|
||||
|
||||
# Maximum time to process a request received from a client (seconds).
|
||||
#
|
||||
#client_request_timeout = 180
|
||||
|
||||
# Maximum time to transmit a response to a client (seconds)
|
||||
#
|
||||
#client_response_timeout = 120
|
||||
|
||||
# Grace period for clean shutdown of client requests (seconds).
|
||||
#
|
||||
#client_shutdown_timeout = 10
|
||||
|
||||
# Grace period for clean shutdown of federation requests (seconds).
|
||||
#
|
||||
#sender_shutdown_timeout = 5
|
||||
|
||||
# Enables registration. If set to false, no users can register on this
|
||||
# server.
|
||||
#
|
||||
|
@ -389,13 +401,16 @@
#
#allow_registration = false

# This item is undocumented. Please contribute documentation for it.
# Enabling this setting opens registration to anyone without restrictions.
# This makes your server vulnerable to abuse.
#
#yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = false

# A static registration token that new users will have to provide when
# creating an account. If unset and `allow_registration` is true,
# registration is open without any condition.
# you must set
# `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse`
# to true to allow open registration without any conditions.
#
# YOU NEED TO EDIT THIS OR USE registration_token_file.
#
@ -403,8 +418,9 @@
#
#registration_token =

# Path to a file on the system that gets read for the registration token.
# This config option takes precedence/priority over "registration_token".
# Path to a file on the system that gets read for additional registration
# tokens. Multiple tokens can be added if you separate them with
# whitespace.
#
# conduwuit must be able to access the file, and it must not be empty
#
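
A minimal sketch of token-gated registration using the options above; the
file path is an assumption for illustration, and the file may hold several
whitespace-separated tokens:

```toml
[global]
allow_registration = true
# hypothetical path; point this at your own secret file
registration_token_file = "/etc/conduwuit/registration_tokens"
```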
@ -421,10 +437,19 @@
#
#allow_federation = true

# This item is undocumented. Please contribute documentation for it.
# Allows federation requests to be made by this server to itself.
#
# This isn't intended and is very likely a bug if federation requests are
# being sent to yourself. This currently mainly exists for development
# purposes.
#
#federation_loopback = false

# Always calls /forget on behalf of the user if leaving a room. This is a
# part of MSC4267 "Automatically forgetting rooms on leave"
#
#forget_forced_upon_leave = false

# Set this to true to require authentication on the normally
# unauthenticated profile retrieval endpoints (GET)
# "/_matrix/client/v3/profile/{userId}".
@ -502,9 +527,9 @@

# Default room version conduwuit will create rooms with.
#
# Per spec, room version 10 is the default.
# Per spec, room version 11 is the default.
#
#default_room_version = 10
#default_room_version = 11
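
To pin new rooms to a specific version rather than tracking the default, a
one-line sketch:

```toml
[global]
# room version 11 is the spec default; setting it explicitly pins it
default_room_version = 11
```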

# This item is undocumented. Please contribute documentation for it.
#
@ -563,17 +588,13 @@
#
#proxy = "none"

# This item is undocumented. Please contribute documentation for it.
#
#jwt_secret =

# Servers listed here will be used to gather public keys of other servers
# (notary trusted key servers).
#
# Currently, conduwuit doesn't support inbound batched key requests, so
# this list should only contain other Synapse servers.
#
# example: ["matrix.org", "envs.net", "constellatory.net", "tchncs.de"]
# example: ["matrix.org", "tchncs.de"]
#
#trusted_servers = ["matrix.org"]
@ -649,6 +670,22 @@
#
#openid_token_ttl = 3600

# Allow an existing session to mint a login token for another client.
# This requires interactive authentication, but has security ramifications
# as a malicious client could use the mechanism to spawn more than one
# session.
# Enabled by default.
#
#login_via_existing_session = true

# Login token expiration/TTL in milliseconds.
#
# These are short-lived tokens for the m.login.token endpoint.
# This is used to allow existing sessions to create new sessions.
# See `login_via_existing_session`.
#
#login_token_ttl = 120000
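
A sketch combining the two login options above; turning
`login_via_existing_session` off removes the security trade-off noted in
the comment, while the TTL bounds how long a minted token stays valid:

```toml
[global]
login_via_existing_session = true  # documented default
login_token_ttl = 120000           # m.login.token lifetime in milliseconds (2 minutes)
```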

# Static TURN username to provide the client if not using a shared secret
# ("turn_secret"). It is recommended to use a shared secret over static
# credentials.
@ -785,7 +822,7 @@

# Type of RocksDB database compression to use.
#
# Available options are "zstd", "zlib", "bz2", "lz4", or "none".
# Available options are "zstd", "bz2", "lz4", or "none".
#
# It is best to use ZSTD as an overall good balance between
# speed/performance, storage, IO amplification, and CPU usage. For more
@ -806,6 +843,9 @@
# magic number and translated to the library's default compression level
# as they all differ. See their `kDefaultCompressionLevel`.
#
# Note when using the default value we may override it with a setting
# tailored specifically to conduwuit.
#
#rocksdb_compression_level = 32767

# Level of compression of the specified compression algorithm for the
@ -819,6 +859,9 @@
# less likely for this data to be used. Research your chosen compression
# algorithm.
#
# Note when using the default value we may override it with a setting
# tailored specifically to conduwuit.
#
#rocksdb_bottommost_compression_level = 32767

# Whether to enable RocksDB's "bottommost_compression".
@ -830,7 +873,7 @@
#
# See https://github.com/facebook/rocksdb/wiki/Compression for more details.
#
#rocksdb_bottommost_compression = false
#rocksdb_bottommost_compression = true
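
Combining the compression options above into one sketch. The
`rocksdb_compression_algo` key name is an assumption here (the hunks show
only the comments, not the key), and 32767 is the magic number meaning
"use the library's default level":

```toml
[global]
# key name assumed for illustration; verify against your example config
rocksdb_compression_algo = "zstd"            # "zstd", "bz2", "lz4", or "none"
rocksdb_compression_level = 32767            # 32767 = library default level
rocksdb_bottommost_compression = true        # now defaults to true
rocksdb_bottommost_compression_level = 32767
```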

# Database recovery mode (for RocksDB WAL corruption).
#
@ -876,6 +919,20 @@
#
#rocksdb_paranoid_file_checks = false

# Enables or disables checksum verification in rocksdb at runtime.
# Checksums are usually hardware accelerated with low overhead; they are
# enabled in rocksdb by default. Older or slower platforms may see gains
# from disabling.
#
#rocksdb_checksums = true

# Enables the "atomic flush" mode in rocksdb. This option is not intended
# for users. It may be removed or ignored in future versions. Atomic flush
# may be enabled by the paranoid to possibly improve database integrity at
# the cost of performance.
#
#rocksdb_atomic_flush = false
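
The two integrity toggles above, shown with their documented defaults;
only older or slower platforms are likely to benefit from flipping
`rocksdb_checksums`:

```toml
[global]
rocksdb_checksums = true      # runtime checksum verification, on by default
rocksdb_atomic_flush = false  # not intended for users; default off
```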

# Database repair mode (for RocksDB SST corruption).
#
# Use this option when the server reports corruption while running or
@ -1129,13 +1186,16 @@
#
#prune_missing_media = false

# Vector list of servers that conduwuit will refuse to download remote
# media from.
# Vector list of regex patterns of server names that conduwuit will refuse
# to download remote media from.
#
# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"]
#
#prevent_media_downloads_from = []

# List of forbidden server names that we will block incoming AND outgoing
# federation with, and block client room joins / remote user invites.
# List of forbidden server names via regex patterns that we will block
# incoming AND outgoing federation with, and block client room joins /
# remote user invites.
#
# This check is applied on the room ID, room alias, sender server name,
# sender user's server name, inbound federation X-Matrix origin, and
@ -1143,11 +1203,15 @@
#
# Basically "global" ACLs.
#
# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"]
#
#forbidden_remote_server_names = []

# List of forbidden server names that we will block all outgoing federated
# room directory requests for. Useful for preventing our users from
# wandering into bad servers or spaces.
# List of forbidden server names via regex patterns that we will block all
# outgoing federated room directory requests for. Useful for preventing
# our users from wandering into bad servers or spaces.
#
# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"]
#
#forbidden_remote_room_directory_server_names = []
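
Since all three options above now take regex patterns rather than literal
server names, a combined sketch using the example config's own placeholder
patterns (TOML literal strings avoid escaping the backslash in `\.`):

```toml
[global]
# refuse to download remote media from matching servers
prevent_media_downloads_from = ['badserver\.tld$', '19dollarfortnitecards']

# "global ACLs": block federation, joins, and invites for matches
forbidden_remote_server_names = ['badserver\.tld$', 'badphrase']

# block outgoing federated room directory requests for matches
forbidden_remote_room_directory_server_names = ['badserver\.tld$']
```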
@ -1258,7 +1322,7 @@
# used, and startup as warnings if any room aliases in your database have
# a forbidden room alias/ID.
#
# example: ["19dollarfortnitecards", "b[4a]droom"]
# example: ["19dollarfortnitecards", "b[4a]droom", "badphrase"]
#
#forbidden_alias_names = []
@ -1271,7 +1335,7 @@
# startup as warnings if any local users in your database have a forbidden
# username.
#
# example: ["administrator", "b[a4]dusernam[3e]"]
# example: ["administrator", "b[a4]dusernam[3e]", "badphrase"]
#
#forbidden_usernames = []
@ -1334,6 +1398,13 @@
#
#admin_execute_errors_ignore = false

# List of admin commands to execute on SIGUSR2.
#
# Similar to admin_execute, but these commands are executed when the
# server receives SIGUSR2 on supporting platforms.
#
#admin_signal_execute = []
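
A hedged sketch of wiring a command to SIGUSR2; the command string is a
placeholder rather than a verified admin-console command, so substitute
one your build actually provides:

```toml
[global]
# run these admin-console commands when the process receives SIGUSR2
admin_signal_execute = ["memory-usage"]  # placeholder command text
```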

# Controls the max log level for admin command log captures (logs
# generated from running admin commands). Defaults to "info" on release
# builds, else "debug" on debug builds.
@ -1491,6 +1562,16 @@
#
#sender_workers = 0

# Enables listener sockets; can be set to false to disable listening. This
# option is intended for developer/diagnostic purposes only.
#
#listening = true

# Enables configuration reload when the server receives SIGUSR1 on
# supporting platforms.
#
#config_reload_signal = true
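
For developer or diagnostic runs, the two toggles above can be combined as
below; this is a sketch, not a production configuration:

```toml
[global]
listening = false            # run without opening listener sockets
config_reload_signal = true  # SIGUSR1 reloads the config on supporting platforms
```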

[global.tls]

# Path to a valid TLS certificate file.
@ -1541,3 +1622,21 @@
# This item is undocumented. Please contribute documentation for it.
#
#support_mxid =

[global.blurhashing]

# blurhashing x component, 4 is recommended by https://blurha.sh/
#
#components_x = 4

# blurhashing y component, 3 is recommended by https://blurha.sh/
#
#components_y = 3

# Max raw size that the server will blurhash. This is the size of the
# image after converting it to raw data; it should be higher than the
# upload limit, but not too high. The higher it is, the higher the
# potential load will be for clients requesting blurhashes. The default
# is 33.55 MB. Setting it to 0 disables blurhashing.
#
#blurhash_max_raw_size = 33554432
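
Putting the new blurhashing section together, using the recommended
component counts and the documented default size cap:

```toml
[global.blurhashing]
components_x = 4                  # recommended by https://blurha.sh/
components_y = 3
blurhash_max_raw_size = 33554432  # ~33.55 MB of raw image data; 0 disables
```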
1 debian/conduwuit.service vendored

@ -2,6 +2,7 @@
Description=conduwuit Matrix homeserver
Wants=network-online.target
After=network-online.target
Alias=matrix-conduwuit.service
Documentation=https://conduwuit.puppyirl.gay/

[Service]
18 debian/postrm vendored

@ -10,21 +10,33 @@ CONDUWUIT_DATABASE_PATH_SYMLINK=/var/lib/matrix-conduit
case $1 in
	purge)
		# Remove debconf changes from the db
		db_purge
		#db_purge

		# Per https://www.debian.org/doc/debian-policy/ch-files.html#behavior
		# "configuration files must be preserved when the package is removed, and
		# only deleted when the package is purged."
		#

		if [ -d "$CONDUWUIT_CONFIG_PATH" ]; then
		if test -L "$CONDUWUIT_CONFIG_PATH"; then
			echo "Deleting conduwuit configuration files"
			rm -v -r "$CONDUWUIT_CONFIG_PATH"
		fi
		fi

		if [ -d "$CONDUWUIT_DATABASE_PATH" ]; then
			rm -v -r "$CONDUWUIT_DATABASE_PATH"
		if test -L "$CONDUWUIT_DATABASE_PATH"; then
			echo "Deleting conduwuit database directory"
			rm -r "$CONDUWUIT_DATABASE_PATH"
		fi
		fi

		if [ -d "$CONDUWUIT_DATABASE_PATH_SYMLINK" ]; then
			rm -v -r "$CONDUWUIT_DATABASE_PATH_SYMLINK"
		if test -L "$CONDUWUIT_DATABASE_PATH_SYMLINK"; then
			echo "Removing matrix-conduit symlink"
			rm -r "$CONDUWUIT_DATABASE_PATH_SYMLINK"
		fi
		fi
		;;
esac
42 deps/rust-rocksdb/Cargo.toml vendored

@ -1,42 +0,0 @@
[package]
name = "rust-rocksdb-uwu"
categories.workspace = true
description = "dylib wrapper for rust-rocksdb"
edition = "2021"
keywords.workspace = true
license.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.0.1"

[features]
default = ["lz4", "zstd", "zlib", "bzip2"]
jemalloc = ["rust-rocksdb/jemalloc"]
io-uring = ["rust-rocksdb/io-uring"]
valgrind = ["rust-rocksdb/valgrind"]
snappy = ["rust-rocksdb/snappy"]
lz4 = ["rust-rocksdb/lz4"]
zstd = ["rust-rocksdb/zstd"]
zlib = ["rust-rocksdb/zlib"]
bzip2 = ["rust-rocksdb/bzip2"]
rtti = ["rust-rocksdb/rtti"]
mt_static = ["rust-rocksdb/mt_static"]
multi-threaded-cf = ["rust-rocksdb/multi-threaded-cf"]
serde1 = ["rust-rocksdb/serde1"]
malloc-usable-size = ["rust-rocksdb/malloc-usable-size"]

[dependencies.rust-rocksdb]
git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1"
rev = "123d6302fed23fc706344becb2f19623265a83f8"
#branch = "master"
default-features = false

[lib]
path = "lib.rs"
crate-type = [
	"rlib",
#	"dylib"
]

[lints]
workspace = true
62 deps/rust-rocksdb/lib.rs vendored

@ -1,62 +0,0 @@
pub use rust_rocksdb::*;

#[cfg_attr(not(conduwuit_mods), link(name = "rocksdb"))]
#[cfg_attr(conduwuit_mods, link(name = "rocksdb", kind = "static"))]
unsafe extern "C" {
	pub unsafe fn rocksdb_list_column_families();
	pub unsafe fn rocksdb_logger_create_stderr_logger();
	pub unsafe fn rocksdb_logger_create_callback_logger();
	pub unsafe fn rocksdb_options_set_info_log();
	pub unsafe fn rocksdb_get_options_from_string();
	pub unsafe fn rocksdb_writebatch_create();
	pub unsafe fn rocksdb_writebatch_destroy();
	pub unsafe fn rocksdb_writebatch_put_cf();
	pub unsafe fn rocksdb_writebatch_delete_cf();
	pub unsafe fn rocksdb_iter_value();
	pub unsafe fn rocksdb_iter_seek_to_last();
	pub unsafe fn rocksdb_iter_seek_for_prev();
	pub unsafe fn rocksdb_iter_seek_to_first();
	pub unsafe fn rocksdb_iter_next();
	pub unsafe fn rocksdb_iter_prev();
	pub unsafe fn rocksdb_iter_seek();
	pub unsafe fn rocksdb_iter_valid();
	pub unsafe fn rocksdb_iter_get_error();
	pub unsafe fn rocksdb_iter_key();
	pub unsafe fn rocksdb_iter_destroy();
	pub unsafe fn rocksdb_livefiles();
	pub unsafe fn rocksdb_livefiles_count();
	pub unsafe fn rocksdb_livefiles_destroy();
	pub unsafe fn rocksdb_livefiles_column_family_name();
	pub unsafe fn rocksdb_livefiles_name();
	pub unsafe fn rocksdb_livefiles_size();
	pub unsafe fn rocksdb_livefiles_level();
	pub unsafe fn rocksdb_livefiles_smallestkey();
	pub unsafe fn rocksdb_livefiles_largestkey();
	pub unsafe fn rocksdb_livefiles_entries();
	pub unsafe fn rocksdb_livefiles_deletions();
	pub unsafe fn rocksdb_put_cf();
	pub unsafe fn rocksdb_delete_cf();
	pub unsafe fn rocksdb_get_pinned_cf();
	pub unsafe fn rocksdb_create_column_family();
	pub unsafe fn rocksdb_get_latest_sequence_number();
	pub unsafe fn rocksdb_batched_multi_get_cf();
	pub unsafe fn rocksdb_cancel_all_background_work();
	pub unsafe fn rocksdb_repair_db();
	pub unsafe fn rocksdb_list_column_families_destroy();
	pub unsafe fn rocksdb_flush();
	pub unsafe fn rocksdb_flush_wal();
	pub unsafe fn rocksdb_open_column_families();
	pub unsafe fn rocksdb_open_for_read_only_column_families();
	pub unsafe fn rocksdb_open_as_secondary_column_families();
	pub unsafe fn rocksdb_open_column_families_with_ttl();
	pub unsafe fn rocksdb_open();
	pub unsafe fn rocksdb_open_for_read_only();
	pub unsafe fn rocksdb_open_with_ttl();
	pub unsafe fn rocksdb_open_as_secondary();
	pub unsafe fn rocksdb_write();
	pub unsafe fn rocksdb_create_iterator_cf();
	pub unsafe fn rocksdb_backup_engine_create_new_backup_flush();
	pub unsafe fn rocksdb_backup_engine_options_create();
	pub unsafe fn rocksdb_write_buffer_manager_destroy();
	pub unsafe fn rocksdb_options_set_ttl();
}
@ -1,7 +1,6 @@
# Summary

- [Introduction](introduction.md)
- [Differences from upstream Conduit](differences.md)
- [Configuration](configuration.md)
  - [Examples](configuration/examples.md)
- [Deploying](deploying.md)
36 docs/assets/conduwuit_logo.svg Normal file

@ -0,0 +1,36 @@
<svg
  version="1.1"
  id="Layer_1"
  xmlns="http://www.w3.org/2000/svg"
  x="0px"
  y="0px"
  width="100%"
  viewBox="0 0 864 864"
  enableBackground="new 0 0 864 864"
  xmlSpace="preserve"
>
  <path
    fill="#EC008C"
    opacity="1.000000"
    stroke="none"
    d="M0.999997,649.000000 C1.000000,433.052795 1.000000,217.105591 1.000000,1.079198 C288.876801,1.079198 576.753601,1.079198 865.000000,1.079198 C865.000000,73.025414 865.000000,145.051453 864.634888,217.500671 C852.362488,223.837280 840.447632,229.735275 828.549438,235.666794 C782.143677,258.801056 735.743225,281.945923 688.998657,304.980469 C688.122009,304.476532 687.580750,304.087708 687.053894,303.680206 C639.556946,266.944733 573.006775,291.446869 560.804199,350.179443 C560.141357,353.369446 559.717590,356.609131 559.195374,359.748962 C474.522705,359.748962 390.283478,359.748962 306.088135,359.748962 C298.804138,318.894806 265.253357,295.206024 231.834442,293.306793 C201.003021,291.554596 169.912033,310.230042 156.935104,338.792725 C149.905151,354.265930 147.884064,370.379944 151.151794,387.034515 C155.204453,407.689667 166.300507,423.954224 183.344437,436.516663 C181.938263,437.607025 180.887405,438.409576 179.849426,439.228516 C147.141953,465.032562 139.918045,510.888947 163.388611,545.322632 C167.274551,551.023804 172.285187,555.958313 176.587341,561.495728 C125.846893,587.012817 75.302292,612.295532 24.735992,637.534790 C16.874903,641.458496 8.914484,645.183228 0.999997,649.000000 z"
  />
  <path
    fill="#000000"
    opacity="1.000000"
    stroke="none"
    d="M689.340759,305.086823 C735.743225,281.945923 782.143677,258.801056 828.549438,235.666794 C840.447632,229.735275 852.362488,223.837280 864.634888,217.961929 C865.000000,433.613190 865.000000,649.226379 865.000000,864.919800 C577.000000,864.919800 289.000000,864.919800 1.000000,864.919800 C1.000000,793.225708 1.000000,721.576721 0.999997,649.463867 C8.914484,645.183228 16.874903,641.458496 24.735992,637.534790 C75.302292,612.295532 125.846893,587.012817 176.939667,561.513062 C178.543060,562.085083 179.606812,562.886414 180.667526,563.691833 C225.656799,597.853394 291.232574,574.487244 304.462524,519.579773 C304.989105,517.394409 305.501068,515.205505 305.984619,513.166748 C391.466370,513.166748 476.422729,513.166748 561.331177,513.166748 C573.857727,555.764343 608.978149,572.880920 638.519897,572.672791 C671.048340,572.443665 700.623230,551.730408 711.658752,520.910583 C722.546875,490.502106 715.037842,453.265564 682.776733,429.447052 C683.966064,428.506866 685.119507,427.602356 686.265320,426.688232 C712.934143,405.412262 723.011475,370.684631 711.897339,338.686676 C707.312805,325.487671 699.185303,314.725128 689.340759,305.086823 z"
  />
  <path
    fill="#FEFBFC"
    opacity="1.000000"
    stroke="none"
    d="M688.998657,304.980469 C699.185303,314.725128 707.312805,325.487671 711.897339,338.686676 C723.011475,370.684631 712.934143,405.412262 686.265320,426.688232 C685.119507,427.602356 683.966064,428.506866 682.776733,429.447052 C715.037842,453.265564 722.546875,490.502106 711.658752,520.910583 C700.623230,551.730408 671.048340,572.443665 638.519897,572.672791 C608.978149,572.880920 573.857727,555.764343 561.331177,513.166748 C476.422729,513.166748 391.466370,513.166748 305.984619,513.166748 C305.501068,515.205505 304.989105,517.394409 304.462524,519.579773 C291.232574,574.487244 225.656799,597.853394 180.667526,563.691833 C179.606812,562.886414 178.543060,562.085083 177.128418,561.264465 C172.285187,555.958313 167.274551,551.023804 163.388611,545.322632 C139.918045,510.888947 147.141953,465.032562 179.849426,439.228516 C180.887405,438.409576 181.938263,437.607025 183.344437,436.516663 C166.300507,423.954224 155.204453,407.689667 151.151794,387.034515 C147.884064,370.379944 149.905151,354.265930 156.935104,338.792725 C169.912033,310.230042 201.003021,291.554596 231.834442,293.306793 C251.082092,307.205322 265.392609,314.611176 276.940491,327.118439 C288.485260,339.622314 293.993011,354.662048 294.582672,371.750854 C387.082886,371.750854 479.166595,371.750854 571.838501,371.750854 C570.185486,349.121399 577.473999,329.848511 594.588501,314.780121 C605.538086,305.139557 618.467957,299.725372 633.103516,298.838257 C664.183716,296.954437 691.688965,315.548492 700.947998,344.809204 C706.487793,362.316345 704.862061,379.280853 696.456482,395.642365 C688.014709,412.074219 673.953552,421.977875 656.413696,429.482330 C681.951416,440.229645 698.954468,457.053253 703.311279,484.370789 C698.954468,457.053253 681.951416,440.229645 656.413696,429.482330 C673.953552,421.977875 688.014709,412.074219 696.456482,395.642365 C704.862061,379.280853 706.487793,362.316345 700.947998,344.809204 C691.688965,315.548492 664.183716,296.954437 633.103516,298.838257 C618.467957,299.725372 605.538086,305.139557 594.588501,314.780121 C577.473999,329.848511 570.185486,349.121399 571.838501,371.750854 C479.166595,371.750854 387.082886,371.750854 294.582672,371.750854 C293.993011,354.662048 288.485260,339.622314 276.940491,327.118439 C265.392609,314.611176 251.082092,307.205322 234.093262,305.960541 C203.355347,303.708374 176.337585,320.898438 166.089890,348.816620 C159.557541,366.613007 160.527206,384.117401 168.756042,401.172516 C177.054779,418.372589 191.471954,428.832886 207.526581,435.632172 C198.407059,442.272583 188.815598,448.302246 180.383728,455.660675 C171.685028,463.251984 166.849655,473.658661 163.940216,484.838684 C161.021744,496.053375 161.212982,507.259705 164.178833,518.426208 C171.577927,546.284302 197.338104,566.588867 226.001465,567.336853 C240.828415,567.723816 254.357819,563.819092 266.385468,555.199646 C284.811554,541.994751 293.631104,523.530579 294.687347,501.238312 C387.354828,501.238312 479.461304,501.238312 571.531799,501.238312 C577.616638,543.189026 615.312866,566.342102 651.310059,559.044739 C684.973938,552.220398 708.263306,519.393127 703.311279,484.370789 z"
  />
  <path
    fill="#EC008C"
    opacity="1.000000"
    stroke="none"
    d="M703.401855,484.804718 C708.263306,519.393127 684.973938,552.220398 651.310059,559.044739 C615.312866,566.342102 577.616638,543.189026 571.531799,501.238312 C479.461304,501.238312 387.354828,501.238312 294.687347,501.238312 C293.631104,523.530579 284.811554,541.994751 266.385468,555.199646 C254.357819,563.819092 240.828415,567.723816 226.001465,567.336853 C197.338104,566.588867 171.577927,546.284302 164.178833,518.426208 C161.212982,507.259705 161.021744,496.053375 163.940216,484.838684 C166.849655,473.658661 171.685028,463.251984 180.383728,455.660675 C188.815598,448.302246 198.407059,442.272583 207.526581,435.632172 C191.471954,428.832886 177.054779,418.372589 168.756042,401.172516 C160.527206,384.117401 159.557541,366.613007 166.089890,348.816620 C176.337585,320.898438 203.355347,303.708374 234.093262,305.960541 C251.082092,307.205322 265.392609,314.611176 276.940491,327.118439 C288.485260,339.622314 293.993011,354.662048 294.582672,371.750854 C387.082886,371.750854 479.166595,371.750854 571.838501,371.750854 C570.185486,349.121399 577.473999,329.848511 594.588501,314.780121 C605.538086,305.139557 618.467957,299.725372 633.103516,298.838257 C664.183716,296.954437 691.688965,315.548492 700.947998,344.809204 C706.487793,362.316345 704.862061,379.280853 696.456482,395.642365 C688.014709,412.074219 673.953552,421.977875 656.413696,429.482330 C681.951416,440.229645 698.954468,457.053253 703.401855,484.804718 z"
  />
</svg>

BIN docs/assets/gay dog anarchists.png Normal file
Binary file not shown.
@ -17,6 +17,8 @@ services:
CONDUWUIT_PORT: 6167 # should match the loadbalancer traefik label
CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
CONDUWUIT_ALLOW_REGISTRATION: 'true'
CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed.
#CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true'
CONDUWUIT_ALLOW_FEDERATION: 'true'
CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]'

@ -33,6 +33,8 @@ services:
CONDUWUIT_PORT: 6167
CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
CONDUWUIT_ALLOW_REGISTRATION: 'true'
CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed.
#CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true'
CONDUWUIT_ALLOW_FEDERATION: 'true'
CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]'

@ -17,6 +17,8 @@ services:
CONDUWUIT_PORT: 6167
CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
CONDUWUIT_ALLOW_REGISTRATION: 'true'
CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed.
#CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true'
CONDUWUIT_ALLOW_FEDERATION: 'true'
CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]'
@ -53,28 +53,6 @@ If wanting to build using standard Rust toolchains, make sure you install:

You can build conduwuit using `cargo build --release --all-features`

## Migrating from Conduit

As mentioned in the README, there are little to no steps needed to migrate
from Conduit. As long as you are using the RocksDB database backend, just
replace the binary / container image / etc.

**WARNING**: As of conduwuit 0.5.0, all database and backwards compatibility
with Conduit is no longer supported. We only support migrating *from* Conduit,
not back to Conduit like before. If you are truly finding yourself wanting to
migrate back to Conduit, we would appreciate all your feedback and if we can
assist with any issues or concerns.

**Note**: If you are relying on Conduit's "automatic delegation" feature,
this will **NOT** work on conduwuit and you must configure delegation manually.
This is not a mistake and no support for this feature will be added.

If you are using SQLite, you **MUST** migrate to RocksDB. You can use this
tool to migrate from SQLite to RocksDB: <https://github.com/ShadowJonathan/conduit_toolbox/>

See the `[global.well_known]` config section, or configure your web server
appropriately to send the delegation responses.

## Adding a conduwuit user

While conduwuit can run as any user, it is better to use dedicated users for
@ -167,25 +145,32 @@ sudo chmod 700 /var/lib/conduwuit/

## Setting up the Reverse Proxy

Refer to the documentation or various guides online of your chosen reverse proxy
software. There are many examples of basic Apache/Nginx reverse proxy setups
out there.
We recommend Caddy as a reverse proxy, as it is trivial to use, handling TLS certificates, reverse proxy headers, etc transparently with proper defaults.
For other software, please refer to their respective documentation or online guides.

A [Caddy](https://caddyserver.com/) example will be provided as this
is the recommended reverse proxy for new users and is very trivial to use
(handles TLS, reverse proxy headers, etc transparently with proper defaults).
### Caddy

Lighttpd is not supported as it seems to mess with the `X-Matrix` Authorization
header, making federation non-functional. If a workaround is found, feel free to share to get it added to the documentation here.
After installing Caddy via your preferred method, create `/etc/caddy/conf.d/conduwuit_caddyfile`
and enter this (substitute for your server name).

If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent this (note that Apache isn't very good as a general reverse proxy and we discourage the usage of it if you can).
```caddyfile
your.server.name, your.server.name:8448 {
    # TCP reverse_proxy
    reverse_proxy 127.0.0.1:6167
    # UNIX socket
    #reverse_proxy unix//run/conduwuit/conduwuit.sock
}
```

If using Nginx, you need to give conduwuit the request URI using `$request_uri`, or like so:
- `proxy_pass http://127.0.0.1:6167$request_uri;`
- `proxy_pass http://127.0.0.1:6167;`
That's it! Just start and enable the service and you're set.

Nginx users need to increase `client_max_body_size` (default is 1M) to match
`max_request_size` defined in conduwuit.toml.
```bash
sudo systemctl enable --now caddy
```

### Other Reverse Proxies

As we would prefer our users to use Caddy, we will not provide configuration files for other proxies.

You will need to reverse proxy everything under the following routes:
- `/_matrix/` - core Matrix C-S and S-S APIs
@ -208,25 +193,19 @@ Examples of delegation:
- <https://puppygock.gay/.well-known/matrix/server>
- <https://puppygock.gay/.well-known/matrix/client>

### Caddy
For Apache and Nginx there are many examples available online.

Create `/etc/caddy/conf.d/conduwuit_caddyfile` and enter this (substitute for
your server name).
Lighttpd is not supported as it seems to mess with the `X-Matrix` Authorization
header, making federation non-functional. If a workaround is found, feel free to share to get it added to the documentation here.

```caddyfile
your.server.name, your.server.name:8448 {
    # TCP reverse_proxy
    127.0.0.1:6167
    # UNIX socket
    #reverse_proxy unix//run/conduwuit/conduwuit.sock
}
```
If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent httpd from messing with the `X-Matrix` header (note that Apache isn't very good as a general reverse proxy and we discourage the usage of it if you can).

That's it! Just start and enable the service and you're set.
If using Nginx, you need to give conduwuit the request URI using `$request_uri`, or like so:
- `proxy_pass http://127.0.0.1:6167$request_uri;`
- `proxy_pass http://127.0.0.1:6167;`

```bash
sudo systemctl enable --now caddy
```
Nginx users need to increase `client_max_body_size` (default is 1M) to match
`max_request_size` defined in conduwuit.toml.

## You're done
|
@ -1,5 +1,8 @@
|
|||
# Hot Reloading ("Live" Development)
|
||||
|
||||
Note that hot reloading has not been refactored in quite a while and is not
|
||||
guaranteed to work at this time.
|
||||
|
||||
### Summary
|
||||
|
||||
When developing in debug-builds with the nightly toolchain, conduwuit is modular
|
||||
|
|
|
@ -5,12 +5,11 @@
|
|||
Have a look at [Complement's repository][complement] for an explanation of what
|
||||
it is.
|
||||
|
||||
To test against Complement, with Nix (or [Lix](https://lix.systems) and direnv
|
||||
installed and set up, you can:
|
||||
To test against Complement, with Nix (or [Lix](https://lix.systems) and
|
||||
[direnv installed and set up][direnv] (run `direnv allow` after setting up the hook), you can:
|
||||
|
||||
* Run `./bin/complement "$COMPLEMENT_SRC" ./path/to/logs.jsonl
|
||||
./path/to/results.jsonl` to build a Complement image, run the tests, and output
|
||||
the logs and results to the specified paths. This will also output the OCI image
|
||||
* Run `./bin/complement "$COMPLEMENT_SRC"` to build a Complement image, run
|
||||
the tests, and output the logs and results to the specified paths. This will also output the OCI image
|
||||
at `result`
|
||||
* Run `nix build .#complement` from the root of the repository to just build a
|
||||
Complement OCI image outputted to `result` (it's a `.tar.gz` file)
|
||||
|
@ -18,5 +17,15 @@ Complement OCI image outputted to `result` (it's a `.tar.gz` file)
|
|||
output from the commit/revision you want to test (e.g. from main)
|
||||
[here][ci-workflows]
|
||||
|
||||
If you want to use your own prebuilt OCI image (such as from our CI) without needing
|
||||
Nix installed, put the image at `complement_oci_image.tar.gz` in the root of the repo
|
||||
and run the script.
|
||||
|
||||
If you're on macOS and need to build an image, run `nix build .#linux-complement`.
|
||||
|
||||
We have a Complement fork as some tests have needed to be fixed. This can be found
|
||||
at: <https://github.com/girlbossceo/complement>
|
||||
|
||||
[ci-workflows]: https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml?query=event%3Apush+is%3Asuccess+actor%3Agirlbossceo
|
||||
[complement]: https://github.com/matrix-org/complement
|
||||
[direnv]: https://direnv.net/docs/hook.html
|
||||
|
|
|
@ -1,379 +0,0 @@
|
|||
#### **Note: This list may not up to date. There are rapidly more and more
|
||||
improvements, fixes, changes, etc being made that it is becoming more difficult
|
||||
to maintain this list. I recommend that you give conduwuit a try and see the
|
||||
differences for yourself. If you have any concerns, feel free to join the
|
||||
conduwuit Matrix room and ask any pre-usage questions.**
|
||||
|
||||
### list of features, bug fixes, etc that conduwuit does that Conduit does not
|
||||
|
||||
Outgoing typing indicators, outgoing read receipts, **and** outgoing presence!
|
||||
|
||||
## Performance
|
||||
|
||||
- Concurrency support for individual homeserver key fetching for faster remote
|
||||
room joins and room joins that will error less frequently
|
||||
- Send `Cache-Control` response header with `immutable` and 1 year cache length
|
||||
for all media requests (download and thumbnail) to instruct clients to cache
|
||||
media, and reduce server load from media requests that could be otherwise cached
|
||||
- Add feature flags and config options to enable/build with zstd, brotli, and/or
|
||||
gzip HTTP body compression (response and request)
|
||||
- Eliminate all usage of the thread-blocking `getaddrinfo(3)` call upon DNS
|
||||
queries, significantly improving federation latency/ping and cache DNS results
|
||||
(NXDOMAINs, successful queries, etc) using hickory-dns / hickory-resolver
|
||||
- Enable HTTP/2 support on all requests
|
||||
- Vastly improve RocksDB default settings to use new features that help with
|
||||
performance significantly, uses settings tailored to SSDs, various ways to tweak
|
||||
RocksDB, and a conduwuit setting to tell RocksDB to use settings that are
|
||||
tailored to HDDs or slow spinning rust storage or buggy filesystems.
|
||||
- Implement database flush and cleanup conduwuit operations when using RocksDB
|
||||
- Implement RocksDB write buffer corking and coalescing in database write-heavy
|
||||
areas
|
||||
- Perform connection pooling and keepalives where necessary to significantly
|
||||
improve federation performance and latency
|
||||
- Various config options to tweak connection pooling, request timeouts,
|
||||
connection timeouts, DNS timeouts and settings, etc with good defaults which
|
||||
also help huge with performance via reusing connections and retrying where
|
||||
needed
|
||||
- Properly get and use the amount of parallelism / tokio workers
|
||||
- Implement building conduwuit with jemalloc (which extends to the RocksDB
|
||||
jemalloc feature for maximum gains) or hardened_malloc light variant, and
|
||||
io_uring support, and produce CI builds with jemalloc and io_uring by default
|
||||
for performance (Nix doesn't seem to build
|
||||
[hardened_malloc-rs](https://github.com/girlbossceo/hardened_malloc-rs)
|
||||
properly)
|
||||
- Add support for caching DNS results with hickory-dns / hickory-resolver in
|
||||
conduwuit (not a replacement for a proper resolver cache, but still far better
|
||||
than nothing), also properly falls back on TCP for UDP errors or if a SRV
|
||||
response is too large
|
||||
- Add config option for using DNS over TCP, and config option for controlling
|
||||
A/AAAA record lookup strategy (e.g. don't query AAAA records if you don't have
|
||||
IPv6 connectivity)
|
||||
- Overall significant database, Client-Server, and federation performance and
|
||||
latency improvements (check out the ping room leaderboards if you don't believe
|
||||
me :>)
|
||||
- Add config options for RocksDB compression and bottommost compression,
|
||||
including choosing the algorithm and compression level
|
||||
- Use [loole](https://github.com/mahdi-shojaee/loole) MPSC channels instead of
|
||||
tokio MPSC channels for huge performance boosts in sending channels (mainly
|
||||
relevant for federation) and presence channels
|
||||
- Use `tracing`/`log`'s `release_max_level_info` feature to improve performance,
|
||||
build speeds, binary size, and CPU usage in release builds by avoid compiling
|
||||
debug/trace log level macros that users will generally never use (can be
|
||||
disabled with a build-time feature flag)
|
||||
- Remove some unnecessary checks on EDU handling for incoming transactions,
|
||||
effectively speeding them up
|
||||
- Simplify, dedupe, etc huge chunks of the codebase, including some that were
|
||||
unnecessary overhead, binary bloats, or preventing compiler/linker optimisations
|
||||
- Implement zero-copy RocksDB database accessors, substantially improving
|
||||
performance caused by unnecessary memory allocations
|
||||
|
||||
## General Fixes/Features
|
||||
|
||||
- Add legacy Element client hack fixing password changes and deactivations on
|
||||
legacy Element Android/iOS due to usage of an unspecced `user` field for UIAA
|
||||
- Raise and improve all the various request timeouts making some things like
|
||||
room joins and client bugs error less or none at all than they should, and make
|
||||
them all user configurable
|
||||
- Add missing `reason` field to user ban events (`/ban`)
|
||||
- Safer and cleaner shutdowns across incoming/outgoing requests (graceful
|
||||
shutdown) and the database
|
||||
- Stop sending `make_join` requests on room joins if 15 servers respond with
|
||||
`M_UNSUPPORTED_ROOM_VERSION` or `M_INVALID_ROOM_VERSION`
|
||||
- Stop sending `make_join` requests if 50 servers cannot provide `make_join` for
|
||||
us
|
||||
- Respect *most* client parameters for `/media/` requests (`allow_redirect`
|
||||
still needs work)
|
||||
- Return joined member count of rooms for push rules/conditions instead of a
|
||||
hardcoded value of 10
|
||||
- Make `CONDUIT_CONFIG` optional, relevant for container users that configure
|
||||
only by environment variables and no longer need to set `CONDUIT_CONFIG` to an
|
||||
empty string.
|
||||
- Allow HEAD and PATCH (MSC4138) HTTP requests in CORS for clients (despite not
|
||||
being explicity mentioned in Matrix spec, HTTP spec says all HEAD requests need
|
||||
to behave the same as GET requests, Synapse supports HEAD requests)
|
||||
- Fix using conduwuit with flake-compat on NixOS
|
||||
- Resolve and remove some "features" from upstream that result in concurrency
|
||||
hazards, exponential backoff issues, or arbitrary performance limiters
|
||||
- Find more servers for outbound federation `/hierarchy` requests instead of
|
||||
just the room ID server name
|
||||
- Support for suggesting servers to join through at
|
||||
`/_matrix/client/v3/directory/room/{roomAlias}`
|
||||
- Support for suggesting servers to join through us at
|
||||
`/_matrix/federation/v1/query/directory`
|
||||
- Misc edge-case search fixes (e.g. potentially missing some events)
|
||||
- Misc `/sync` fixes (e.g. returning unnecessary data or incorrect/invalid
|
||||
responses)
|
||||
- Add `replaces_state` and `prev_sender` in `unsigned` for state event changes
|
||||
which primarily makes Element's "See history" button on a state event functional
|
||||
- Fix Conduit not allowing incoming federation requests for various world
|
||||
readable rooms
|
||||
- Fix Conduit not respecting the client-requested file name on media requests
|
||||
- Prevent sending junk / non-membership events to `/send_join` and `/send_leave`
|
||||
endpoints
|
||||
- Only allow the requested membership type on `/send_join` and `/send_leave`
|
||||
endpoints (e.g. don't allow leave memberships on join endpoints)
|
||||
- Prevent state key impersonation on `/send_join` and `/send_leave` endpoints
|
||||
- Validate `X-Matrix` origin and request body `"origin"` field on incoming
|
||||
transactions
|
||||
- Add `GET /_matrix/client/v1/register/m.login.registration_token/validity`
|
||||
endpoint
|
||||
- Explicitly define support for sliding sync at `/_matrix/client/versions`
|
||||
(`org.matrix.msc3575`)
|
||||
- Fix seeing empty status messages on user presences
|
||||
|
||||
## Moderation
|
||||
|
||||
- (Also see [Admin Room](#admin-room) for all the admin commands pertaining to
|
||||
moderation, there's a lot!)
|
||||
- Add support for room banning/blocking by ID using admin command
|
||||
- Add support for serving `support` well-known from `[global.well_known]`
|
||||
(MSC1929) (`/.well-known/matrix/support`)
|
||||
- Config option to forbid publishing rooms to the room directory
|
||||
(`lockdown_public_room_directory`) except for admins
|
||||
- Admin commands to delete room aliases and unpublish rooms from our room
|
||||
directory
|
||||
- For all
|
||||
[`/report`](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3roomsroomidreporteventid)
|
||||
requests: check if the reported event ID belongs to the reported room ID, raise
|
||||
report reasoning character limit to 750, fix broken formatting, make a small
|
||||
delayed random response per spec suggestion on privacy, and check if the sender
|
||||
user is in the reported room.
|
||||
- Support blocking servers from downloading remote media from, returning a 404
|
||||
- Don't allow `m.call.invite` events to be sent in public rooms (prevents
|
||||
calling the entire room)
|
||||
- On new public room creations, only allow moderators to send `m.call.invite`,
|
||||
`org.matrix.msc3401.call`, and `org.matrix.msc3401.call.member` events to
|
||||
prevent unprivileged users from calling the entire room
|
||||
- Add support for a "global ACLs" feature (`forbidden_remote_server_names`) that
|
||||
blocks inbound remote room invites, room joins by room ID on server name, room
|
||||
joins by room alias on server name, incoming federated joins, and incoming
|
||||
federated room directory requests. This is very helpful for blocking servers
|
||||
that are purely toxic/bad and serve no value in allowing our users to suffer
|
||||
from things like room invite spam or such. Please note that this is not a
|
||||
substitute for room ACLs.
|
||||
- Add support for a config option to forbid our local users from sending
|
||||
federated room directory requests for
|
||||
(`forbidden_remote_room_directory_server_names`). Similar to above, useful for
|
||||
blocking servers that help prevent our users from wandering into bad areas of
|
||||
Matrix via room directories of those malicious servers.
|
||||
- Add config option for auto remediating/deactivating local non-admin users who
|
||||
attempt to join bad/forbidden rooms (`auto_deactivate_banned_room_attempts`)
|
||||
- Deactivating users will remove their profile picture, blurhash, display name,
|
||||
and leave all rooms by default just like Synapse and for additional privacy
|
||||
- Reject some EDUs from ACL'd users such as read receipts and typing indicators
|
||||
|
||||
## Privacy/Security
|
||||
|
||||
- Add config option for device name federation with a privacy-friendly default
|
||||
(disabled)
|
||||
- Add config option for requiring authentication to the `/publicRooms` endpoint
|
||||
(room directory) with a default enabled for privacy
|
||||
- Add config option for federating `/publicRooms` endpoint (room directory) to
|
||||
other servers with a default disabled for privacy
|
||||
- Uses proper `argon2` crate by RustCrypto instead of questionable `rust-argon2`
|
||||
crate
|
||||
- Generate passwords with 25 characters instead of 15
|
||||
- Config option `ip_range_denylist` to support refusing to send requests
|
||||
(typically federation) to specific IP ranges, typically RFC 1918, non-routable,
|
||||
testnet, etc addresses like Synapse for security (note: this is not a guaranteed
|
||||
protection, and you should be using a firewall with zones if you want guaranteed
|
||||
protection as doing this on the application level is prone to bypasses).
|
||||
- Config option to block non-admin users from sending room invites or receiving
|
||||
remote room invites. Admin users are still allowed.
|
||||
- Config option to disable incoming and/or outgoing remote read receipts
|
||||
- Config option to disable incoming and/or outgoing remote typing indicators
|
||||
- Config option to disable incoming, outgoing, and/or local presence and for
|
||||
timing out remote users
|
||||
- Sanitise file names for the `Content-Disposition` header for all media
|
||||
requests (thumbnails, downloads, uploads)
|
||||
- Media repository on handling `Content-Disposition` and `Content-Type` is fully
|
||||
spec compliant and secured
|
||||
- Send secure default HTTP headers such as a strong restrictive CSP (see
|
||||
MSC4149), deny iframes, disable `X-XSS-Protection`, disable interest cohort in
|
||||
`Permission-Policy`, etc to mitigate any potential attack surface such as from
|
||||
untrusted media
|
||||
|
||||
## Administration/Logging
|
||||
|
||||
- Commandline argument to specify the path to a config file instead of relying
|
||||
on `CONDUIT_CONFIG`
|
||||
- Revamped admin room infrastructure and commands
|
||||
- Substantially clean up, improve, and fix logging (less noisy dead server
|
||||
logging, registration attempts, more useful troubleshooting logging, proper
|
||||
error propagation, etc)
|
||||
- Configurable RocksDB logging (`LOG` files) with proper defaults (rotate, max
|
||||
size, verbosity, etc) to stop LOG files from accumulating so much
|
||||
- Explicit startup error if your configuration allows open registration without
|
||||
a token or such like Synapse with a way to bypass it if needed
|
||||
- Replace the lightning bolt emoji option with support for setting any arbitrary
|
||||
text (e.g. another emoji) to suffix to all new user registrations, with a
|
||||
conduwuit default of "🏳️⚧️"
|
||||
- Implement config option to auto join rooms upon registration
|
||||
- Warn on unknown config options specified
|
||||
- Add `/_conduwuit/server_version` route to return the version of conduwuit
|
||||
without relying on the federation API `/_matrix/federation/v1/version`
|
||||
- Add `/_conduwuit/local_user_count` route to return the amount of registered
|
||||
active local users on your homeserver *if federation is enabled*
|
||||
- Add configurable RocksDB recovery modes to aid in recovering corrupted RocksDB
|
||||
databases
|
||||
- Support config options via `CONDUWUIT_` prefix and accessing non-global struct
|
||||
config options with the `__` split (e.g. `CONDUWUIT_WELL_KNOWN__SERVER`)
|
||||
- Add support for listening on multiple TCP ports and multiple addresses
|
||||
- **Opt-in** Sentry.io telemetry and metrics, mainly used for crash reporting
|
||||
- Log the client IP on various requests such as registrations, banned room join
|
||||
attempts, logins, deactivations, federation transactions, etc
|
||||
- Fix Conduit dropping some remote server federation response errors
|
||||
|
||||
## Maintenance/Stability
|
||||
|
||||
- GitLab CI ported to GitHub Actions
|
||||
- Add support for the Matrix spec compliance test suite
|
||||
[Complement](https://github.com/matrix-org/complement/) via the Nix flake and
|
||||
various other fixes for it
|
||||
- Implement running and diff'ing Complement results in CI and error if any
|
||||
mismatch occurs to prevent large cases of conduwuit regressions
|
||||
- Repo is (officially) mirrored to GitHub, GitLab, git.gay, git.girlcock.ceo,
|
||||
sourcehut, and Codeberg (see README.md for their links)
|
||||
- Docker container images published to GitLab Container Registry, GitHub
|
||||
Container Registry, and Dockerhub
|
||||
- Extensively revamp the example config to be extremely helpful and useful to
|
||||
both new users and power users
|
||||
- Fixed every single clippy (default lints) and rustc warnings, including some
|
||||
that were performance related or potential safety issues / unsoundness
|
||||
- Add a **lot** of other clippy and rustc lints and a rustfmt.toml file
|
||||
- Repo uses [Renovate](https://docs.renovatebot.com/) and keeps ALL
|
||||
dependencies as up to date as possible
|
||||
- Purge unmaintained/irrelevant/broken database backends (heed, sled, persy) and
|
||||
other unnecessary code or overhead
|
||||
- webp support for images
|
||||
- Add cargo audit support to CI
|
||||
- Add documentation lints via lychee and markdownlint-cli to CI
|
||||
- CI tests for all sorts of feature matrixes (jemalloc, non-defaullt, all
|
||||
features, etc)
|
||||
- Add static and dynamic linking smoke tests in CI to prevent any potential
|
||||
linking regressions for Complement, static binaries, Nix devshells, etc
|
||||
- Add timestamp by commit date when building OCI images for keeping image build
|
||||
reproducibility and still have a meaningful "last modified date" for OCI image
|
||||
- Add timestamp by commit date via `SOURCE_DATE_EPOCH` for Debian packages
|
||||
- Startup check if conduwuit running in a container and is listening on
|
||||
127.0.0.1 (generally containers are using NAT networking and 0.0.0.0 is the
|
||||
intended listening address)
|
||||
- Add a panic catcher layer to return panic messages in HTTP responses if a
|
||||
panic occurs
|
||||
- Add full compatibility support for SHA256 media file names instead of base64
|
||||
file names to overcome filesystem file name length limitations (OS error file
|
||||
name too long) while still retaining upstream database compatibility
|
||||
- Remove SQLite support due to being very poor performance, difficult to
|
||||
maintain against RocksDB, and is a blocker to significantly improved database
|
||||
code
|
||||
|
||||
## Admin Room
|
||||
|
||||
- Add support for a console CLI interface that can issue admin commands and
|
||||
output them in your terminal
|
||||
- Add support for an admin-user-only commandline admin room interface that can
|
||||
be issued in any room with the `\\!admin` or `\!admin` prefix and returns the
|
||||
response as yourself in the same room
|
||||
- Add admin commands for uptime, server startup, server shutdown, and server
|
||||
restart
|
||||
- Fix admin room handler to not panic/crash if the admin room command response
|
||||
fails (e.g. too large message)
|
||||
- Add command to dynamically change conduwuit's tracing log level filter on the
|
||||
fly
|
||||
- Add admin command to fetch a server's `/.well-known/matrix/support` file
|
||||
- Add debug admin command to force update user device lists (could potentially
|
||||
resolve some E2EE flukes)
|
||||
- Implement **RocksDB online backups**, listing RocksDB backups, and listing
|
||||
database file counts all via admin commands
|
||||
- Add various database visibility commands such as being able to query the
|
||||
getters and iterators used in conduwuit, a very helpful online debugging utility
|
||||
- Forbid the admin room from being made public or world readable history
|
||||
- Add `!admin` as a way to call the admin bot
|
||||
- Extend clear cache admin command to support clearing more caches such as DNS
|
||||
and TLS name overrides
|
||||
- Admin debug command to send a federation request/ping to a server's
|
||||
`/_matrix/federation/v1/version` endpoint and measures the latency it took
|
||||
- Add admin command to bulk delete media via a codeblock list of MXC URLs.
|
||||
- Add admin command to delete both the thumbnail and media MXC URLs from an
|
||||
event ID (e.g. from an abuse report)
|
||||
- Add admin command to list all the rooms a local user is joined in
|
||||
- Add admin command to list joined members in a room
|
||||
- Add admin command to view the room topic of a room
|
||||
- Add admin command to delete all remote media in the past X minutes as a form
|
||||
of deleting media that you don't want on your server that a remote user posted
|
in a room, a `--force` flag to ignore errors, and support for reading `last
modified time` instead of `creation time` for filesystems that don't support
file created metadata
- Add admin command to return a room's full/complete state
- Add admin debug command to fetch a PDU from a remote server and insert it
into our database/timeline as backfill
- Add admin command to delete media via a specific MXC. This deletes the MXC
from our database, and the file locally.
- Add admin commands for banning (blocking) room IDs from our local users
joining (admins are always allowed), evicting all our local users from that
room, bulk room banning support, and blocking room invites (remote and local)
to the banned room, as a moderation feature
- Add admin commands to output jemalloc memory stats and memory usage
- Add admin command to get rooms a *remote* user shares with us
- Add debug admin commands to get the earliest and latest PDU in a room
- Add debug admin command to echo a message
- Add admin command to insert room tags for a user, most useful for inserting
the `m.server_notice` tag on your admin room to make it "persistent" in the
"System Alerts" section of Element
- Add experimental admin debug command for Dendrite's `AdminDownloadState`
(`/admin/downloadState/{serverName}/{roomID}`) admin API endpoint to download
and use a remote server's room state in the room
- Disable URL previews by default in the admin room due to various command
outputs having "URLs" in them that clients may needlessly render/request
- Extend memory usage admin server command to support showing memory allocator
stats such as jemalloc's
- Add admin debug command to see the memory allocator's full extended debug
statistics such as jemalloc's

## Misc

- Add guest support for accessing TURN servers via `turn_allow_guests` like
Synapse
- Support for creating rooms with custom room IDs like Maunium Synapse
(`room_id` request body field to `/createRoom`)
- Query parameter `?format=event|content` for returning either the room state
event's content (default) or the full room state event on
`/_matrix/client/v3/rooms/{roomId}/state/{eventType}[/{stateKey}]` requests
(see <https://github.com/matrix-org/matrix-spec/issues/1047> and the example
after this list)
- Send a User-Agent on all of our requests
- Send `avatar_url` on invite room membership events/changes
- Support sending [`well_known` response in client login
responses](https://spec.matrix.org/v1.10/client-server-api/#post_matrixclientv3login)
if using config option `[well_known.client]`
- Implement `include_state` search criteria support for `/search` requests
(the response can now include room states)
- Declare various missing Matrix versions and features at
`/_matrix/client/versions`
- Implement legacy Matrix `/v1/` media endpoints that some clients and servers
may still call
- Config option to change Conduit's behaviour of homeserver key fetching
(`query_trusted_key_servers_first`). This option sets whether conduwuit will
query trusted notary key servers first before the individual homeserver(s), or
vice versa, which may help in joining certain rooms.
- Implement unstable MSC2666 support for querying mutual rooms with a user
- Implement unstable MSC3266 room summary API support
- Implement unstable MSC4125 support for specifying servers to join via on
federated invites
- Make conduwuit build and be functional under Nix + macOS
- Log out all sessions after unsetting the emergency password
- Assume well-knowns are broken if they exceed 12288 characters
- Add support for listening on both HTTP and HTTPS if using direct TLS with
conduwuit, for use cases such as Complement
- Add config option for disabling RocksDB Direct IO if needed
- Add various documentation on maintaining conduwuit, using RocksDB online
backups, some troubleshooting, using admin commands, moderation documentation,
etc
- (Developers): Add support for [hot reloadable/"live" modular
development](development/hot_reload.md)
- (Developers): Add support for tokio-console
- (Developers): Add support for tracing flame graphs
- No cryptocurrency donations allowed; conduwuit is fully maintained by
independent queer maintainers, with a strong priority on inclusivity and
comfort for protected groups 🏳️‍⚧️
- [Add a community Code of Conduct for all conduwuit community spaces, primarily
the Matrix space](https://conduwuit.puppyirl.gay/conduwuit_coc.html)
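As a quick illustration of the `?format` query parameter above, here is a
minimal client-side sketch. The homeserver URL, room ID, and access token are
placeholders, and it assumes the `reqwest` crate with its `blocking` feature;
the endpoint itself is the standard C2S room-state route:

```rust
use std::error::Error;

fn main() -> Result<(), Box<dyn Error>> {
    let base = "https://conduwuit.example";  // assumed homeserver URL
    let room = "!abc123:conduwuit.example";  // assumed room ID
    let url = format!(
        "{base}/_matrix/client/v3/rooms/{room}/state/m.room.topic?format=event"
    );

    let body: String = reqwest::blocking::Client::new()
        .get(url)
        .bearer_auth("ACCESS_TOKEN") // placeholder token
        .send()?
        .text()?;

    // With `format=event` the response is the full state event (event_id,
    // sender, origin_server_ts, ...) rather than just its `content` object.
    println!("{body}");
    Ok(())
}
```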
@@ -4,10 +4,6 @@
 {{#include ../README.md:body}}
 
-#### What's different about your fork than upstream Conduit?
-
-See the [differences](differences.md) page
-
 #### How can I deploy my own?
 
 - [Deployment options](deploying.md)
engage.toml (91 changed lines)
@@ -18,12 +18,12 @@ script = "direnv --version"
 [[task]]
 name = "rustc"
 group = "versions"
-script = "rustc --version"
+script = "rustc --version -v"
 
 [[task]]
 name = "cargo"
 group = "versions"
-script = "cargo --version"
+script = "cargo --version -v"
 
 [[task]]
 name = "cargo-fmt"

@@ -60,15 +60,10 @@ name = "markdownlint"
 group = "versions"
 script = "markdownlint --version"
 
-[[task]]
-name = "dpkg"
-group = "versions"
-script = "dpkg --version"
-
 [[task]]
 name = "cargo-audit"
 group = "security"
-script = "cargo audit -D warnings -D unmaintained -D unsound -D yanked"
+script = "cargo audit --color=always -D warnings -D unmaintained -D unsound -D yanked"
 
 [[task]]
 name = "cargo-fmt"

@@ -86,6 +81,7 @@ env DIRENV_DEVSHELL=all-features \
 direnv exec . \
 cargo doc \
 --workspace \
+--locked \
 --profile test \
 --all-features \
 --no-deps \

@@ -97,10 +93,11 @@
 name = "clippy/default"
 group = "lints"
 script = """
 direnv exec . \
 cargo clippy \
 --workspace \
+--locked \
 --profile test \
 --all-targets \
 --color=always \
 -- \
 -D warnings

@@ -114,8 +111,8 @@ env DIRENV_DEVSHELL=all-features \
 direnv exec . \
 cargo clippy \
 --workspace \
 --locked \
 --profile test \
 --all-targets \
 --all-features \
 --color=always \
 -- \

@@ -123,31 +120,36 @@ env DIRENV_DEVSHELL=all-features \
 """
 
 [[task]]
-name = "clippy/jemalloc"
+name = "clippy/no-features"
 group = "lints"
 script = """
+env DIRENV_DEVSHELL=no-features \
 direnv exec . \
 cargo clippy \
 --workspace \
 --locked \
 --profile test \
---features jemalloc \
 --all-targets \
+--no-default-features \
 --color=always \
 -- \
 -D warnings
 """
 
-#[[task]]
-#name = "clippy/hardened_malloc"
-#group = "lints"
-#script = """
-#cargo clippy \
-# --workspace \
-# --features hardened_malloc \
-# --all-targets \
-# --color=always \
-# -- \
-# -D warnings
-#"""
+[[task]]
+name = "clippy/other-features"
+group = "lints"
+script = """
+direnv exec . \
+cargo clippy \
+--workspace \
+--locked \
+--profile test \
+--no-default-features \
+--features=console,systemd,element_hacks,direct_tls,perf_measurements,brotli_compression,blurhashing \
+--color=always \
+-- \
+-D warnings
 """
 
 [[task]]
 name = "lychee"

@@ -159,30 +161,18 @@ name = "markdownlint"
 group = "lints"
 script = "markdownlint docs *.md || true" # TODO: fix the ton of markdown lints so we can drop `|| true`
 
 [[task]]
 name = "cargo/all"
 group = "tests"
 script = """
 env DIRENV_DEVSHELL=all-features \
 direnv exec . \
 cargo test \
 --workspace \
 --profile test \
 --all-targets \
 --all-features \
 --color=always \
 -- \
 --color=always
 """
 
 [[task]]
 name = "cargo/default"
 group = "tests"
 script = """
 env DIRENV_DEVSHELL=default \
 direnv exec . \
 cargo test \
 --workspace \
 --locked \
 --profile test \
 --all-targets \
 --no-fail-fast \
 --color=always \
 -- \
 --color=always

@@ -197,22 +187,3 @@ depends = ["cargo/default"]
 script = """
 git diff --exit-code conduwuit-example.toml
 """
 
-# Ensure that the flake's default output can build and run without crashing
-#
-# This is a dynamically-linked jemalloc build, which is a case not covered by
-# our other tests. We've had linking problems in the past with dynamic
-# jemalloc builds that usually show up as an immediate segfault or "invalid free"
-[[task]]
-name = "nix-default"
-group = "tests"
-script = """
-env DIRENV_DEVSHELL=dynamic \
-CARGO_PROFILE="test" \
-direnv exec . \
-bin/nix-build-and-cache just .#default-test
-env DIRENV_DEVSHELL=dynamic \
-CARGO_PROFILE="test" \
-direnv exec . \
-nix run -L .#default-test -- --help && nix run -L .#default-test -- --version
-"""
flake.lock (56 changed lines, generated)
@@ -10,11 +10,11 @@
 "nixpkgs-stable": "nixpkgs-stable"
 },
 "locked": {
-"lastModified": 1731270564,
-"narHash": "sha256-6KMC/NH/VWP5Eb+hA56hz0urel3jP6Y6cF2PX6xaTkk=",
+"lastModified": 1738524606,
+"narHash": "sha256-hPYEJ4juK3ph7kbjbvv7PlU1D9pAkkhl+pwx8fZY53U=",
 "owner": "zhaofengli",
 "repo": "attic",
-"rev": "47752427561f1c34debb16728a210d378f0ece36",
+"rev": "ff8a897d1f4408ebbf4d45fa9049c06b3e1e3f4e",
 "type": "github"
 },
 "original": {

@@ -32,11 +32,11 @@
 "nixpkgs": "nixpkgs_4"
 },
 "locked": {
-"lastModified": 1733424942,
-"narHash": "sha256-5t7Sl6EkOaoP4FvzLmH7HFDbdl9SizmLh53RjDQCbWQ=",
+"lastModified": 1737621947,
+"narHash": "sha256-8HFvG7fvIFbgtaYAY2628Tb89fA55nPm2jSiNs0/Cws=",
 "owner": "cachix",
 "repo": "cachix",
-"rev": "8b6b0e4694b9aa78b2ea4c93bff6e1a222dc7e4a",
+"rev": "f65a3cd5e339c223471e64c051434616e18cc4f5",
 "type": "github"
 },
 "original": {

@@ -80,11 +80,11 @@
 "complement": {
 "flake": false,
 "locked": {
-"lastModified": 1734303596,
-"narHash": "sha256-HjDRyLR4MBqQ3IjfMM6eE+8ayztXlbz3gXdyDmFla68=",
+"lastModified": 1741891349,
+"narHash": "sha256-YvrzOWcX7DH1drp5SGa+E/fc7wN3hqFtPbqPjZpOu1Q=",
 "owner": "girlbossceo",
 "repo": "complement",
-"rev": "14cc5be797b774f1a2b9f826f38181066d4952b8",
+"rev": "e587b3df569cba411aeac7c20b6366d03c143745",
 "type": "github"
 },
 "original": {

@@ -117,11 +117,11 @@
 },
 "crane_2": {
 "locked": {
-"lastModified": 1734808813,
-"narHash": "sha256-3aH/0Y6ajIlfy7j52FGZ+s4icVX0oHhqBzRdlOeztqg=",
+"lastModified": 1739936662,
+"narHash": "sha256-x4syUjNUuRblR07nDPeLDP7DpphaBVbUaSoeZkFbGSk=",
 "owner": "ipetkov",
 "repo": "crane",
-"rev": "72e2d02dbac80c8c86bf6bf3e785536acf8ee926",
+"rev": "19de14aaeb869287647d9461cbd389187d8ecdb7",
 "type": "github"
 },
 "original": {

@@ -170,11 +170,11 @@
 "rust-analyzer-src": "rust-analyzer-src"
 },
 "locked": {
-"lastModified": 1735799625,
-"narHash": "sha256-lFadwWDvVIub11bwfZhsh2WUByf9LOi6yjsSUMmE0xk=",
+"lastModified": 1740724364,
+"narHash": "sha256-D1jLIueJx1dPrP09ZZwTrPf4cubV+TsFMYbpYYTVj6A=",
 "owner": "nix-community",
 "repo": "fenix",
-"rev": "a9d84a1545814910cb4ab0515ed6921e8b07ee95",
+"rev": "edf7d9e431cda8782e729253835f178a356d3aab",
 "type": "github"
 },
 "original": {

@@ -364,11 +364,11 @@
 "liburing": {
 "flake": false,
 "locked": {
-"lastModified": 1733603756,
-"narHash": "sha256-eTKnZDZ1Ex++v+BI0DBcUBmCXAO/tE8hxK9MiyztZkU=",
+"lastModified": 1740613216,
+"narHash": "sha256-NpPOBqNND3Qe9IwqYs0mJLGTmIx7e6FgUEBAnJ+1ZLA=",
 "owner": "axboe",
 "repo": "liburing",
-"rev": "c3d5d6270cd5ed48d817fc1e8e95f7c8b222f2ff",
+"rev": "e1003e496e66f9b0ae06674869795edf772d5500",
 "type": "github"
 },
 "original": {

@@ -550,11 +550,11 @@
 },
 "nixpkgs_5": {
 "locked": {
-"lastModified": 1735685343,
-"narHash": "sha256-h1CpBzdJDNtSUb5QMyfFHKHocTTky+4McgQEBQBM+xA=",
+"lastModified": 1740547748,
+"narHash": "sha256-Ly2fBL1LscV+KyCqPRufUBuiw+zmWrlJzpWOWbahplg=",
 "owner": "NixOS",
 "repo": "nixpkgs",
-"rev": "81934660d6e9ea54d2f0cdee821e8533b10c221a",
+"rev": "3a05eebede89661660945da1f151959900903b6a",
 "type": "github"
 },
 "original": {

@@ -567,16 +567,16 @@
 "rocksdb": {
 "flake": false,
 "locked": {
-"lastModified": 1734469478,
-"narHash": "sha256-IcQ4N8xADYal79K+ONmNq4RLlIwdgUqgrVzgNgiIaG8=",
+"lastModified": 1741308171,
+"narHash": "sha256-YdBvdQ75UJg5ffwNjxizpviCVwVDJnBkM8ZtGIduMgY=",
 "owner": "girlbossceo",
 "repo": "rocksdb",
-"rev": "8b4808e7de2fbb5d119d8d72cdca76d8ab84bc47",
+"rev": "3ce04794bcfbbb0d2e6f81ae35fc4acf688b6986",
 "type": "github"
 },
 "original": {
 "owner": "girlbossceo",
-"ref": "v9.9.3",
+"ref": "v9.11.1",
 "repo": "rocksdb",
 "type": "github"
 }

@@ -599,11 +599,11 @@
 "rust-analyzer-src": {
 "flake": false,
 "locked": {
-"lastModified": 1735742096,
-"narHash": "sha256-q3a80h8Jf8wfmPURUgRR46nQCB3I5fhZ+/swulTF5HY=",
+"lastModified": 1740691488,
+"narHash": "sha256-Fs6vBrByuiOf2WO77qeMDMTXcTGzrIMqLBv+lNeywwM=",
 "owner": "rust-lang",
 "repo": "rust-analyzer",
-"rev": "7e639ee3dda6ed9cecc79d41f6d38235121e483d",
+"rev": "fe3eda77d3a7ce212388bda7b6cec8bffcc077e5",
 "type": "github"
 },
 "original": {
flake.nix (32 changed lines)
@@ -9,7 +9,7 @@
 flake-utils.url = "github:numtide/flake-utils?ref=main";
 nix-filter.url = "github:numtide/nix-filter?ref=main";
 nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable";
-rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.9.3"; flake = false; };
+rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.11.1"; flake = false; };
 liburing = { url = "github:axboe/liburing?ref=master"; flake = false; };
 };
 

@@ -26,7 +26,7 @@
 file = ./rust-toolchain.toml;
 
 # See also `rust-toolchain.toml`
-sha256 = "sha256-s1RPtyvDGJaX/BisLT+ifVfuhDT1nZkZ1NcK8sbwELM=";
+sha256 = "sha256-X/4ZBHO3iW0fOenQ3foEvscgAPJYl2abspaBThDOukI=";
 };
 
 mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: {

@@ -64,8 +64,10 @@
 patches = [];
 cmakeFlags = pkgs.lib.subtractLists
 [
-# no real reason to have snappy, no one uses this
+# no real reason to have snappy or zlib, no one uses this
 "-DWITH_SNAPPY=1"
+"-DZLIB=1"
+"-DWITH_ZLIB=1"
 # we dont need to use ldb or sst_dump (core_tools)
 "-DWITH_CORE_TOOLS=1"
 # we dont need to build rocksdb tests

@@ -82,6 +84,8 @@
 ++ [
 # no real reason to have snappy, no one uses this
 "-DWITH_SNAPPY=0"
+"-DZLIB=0"
+"-DWITH_ZLIB=0"
 # we dont need to use ldb or sst_dump (core_tools)
 "-DWITH_CORE_TOOLS=0"
 # we dont need trace tools

@@ -140,23 +144,26 @@
 toolchain
 ]
 ++ (with pkgsHost.pkgs; [
-engage
-cargo-audit
 
 # Required by hardened-malloc.rs dep
 binutils
 
+cargo-audit
 cargo-auditable
 
 # Needed for producing Debian packages
 cargo-deb
 
+# Needed for CI to check validity of produced Debian packages (dpkg-deb)
+dpkg
+
+engage
+
 # Needed for Complement
 go
 
 # Needed for our script for Complement
 jq
+gotestfmt
 
 # Needed for finding broken markdown links
 lychee

@@ -169,21 +176,10 @@
 
 # used for rust caching in CI to speed it up
 sccache
+
+# needed so we can get rid of gcc and other unused deps that bloat OCI images
+removeReferencesTo
 ]
-# liburing is Linux-exclusive
-++ lib.optional stdenv.hostPlatform.isLinux liburing
-# needed to build Rust applications on macOS
-++ lib.optionals stdenv.hostPlatform.isDarwin [
-# https://github.com/NixOS/nixpkgs/issues/206242
-# ld: library not found for -liconv
-libiconv
-# https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell
-# https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612
-pkgsBuildHost.darwin.apple_sdk.frameworks.Security
-])
-++ lib.optional stdenv.hostPlatform.isLinux numactl)
+])
 ++ scope.main.buildInputs
 ++ scope.main.propagatedBuildInputs
 ++ scope.main.nativeBuildInputs;
nix/pkgs/complement/certificate.crt (new file, 21 lines)
@@ -0,0 +1,21 @@
-----BEGIN CERTIFICATE-----
MIIDfzCCAmegAwIBAgIUcrZdSPmCh33Evys/U6mTPpShqdcwDQYJKoZIhvcNAQEL
BQAwPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQKDAx3b29mZXJz
IGluYy4xDDAKBgNVBAMMA2hzMTAgFw0yNTAzMTMxMjU4NTFaGA8yMDUyMDcyODEy
NTg1MVowPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQKDAx3b29m
ZXJzIGluYy4xDDAKBgNVBAMMA2hzMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
AQoCggEBANL+h2ZmK/FqN5uLJPtIy6Feqcyb6EX7MQBEtxuJ56bTAbjHuCLZLpYt
/wOWJ91drHqZ7Xd5iTisGdMu8YS803HSnHkzngf4VXKhVrdzW2YDrpZRxmOhtp88
awOHmP7mqlJyBbCOQw8aDVrT0KmEIWzA7g+nFRQ5Ff85MaP+sQrHGKZbo61q8HBp
L0XuaqNckruUKtxnEqrm5xx5sYyYKg7rrSFE5JMFoWKB1FNWJxyWT42BhGtnJZsK
K5c+NDSOU4TatxoN6mpNSBpCz/a11PiQHMEfqRk6JA4g3911dqPTfZBevUdBh8gl
8maIzqeZGhvyeKTmull1Y0781yyuj98CAwEAAaNxMG8wCQYDVR0TBAIwADALBgNV
HQ8EBAMCBPAwNgYDVR0RBC8wLYIRKi5kb2NrZXIuaW50ZXJuYWyCA2hzMYIDaHMy
ggNoczOCA2hzNIcEfwAAATAdBgNVHQ4EFgQUr4VYrmW1d+vjBTJewvy7fJYhLDYw
DQYJKoZIhvcNAQELBQADggEBADkYqkjNYxjWX8hUUAmFHNdCwzT1CpYe/5qzLiyJ
irDSdMlC5g6QqMUSrpu7nZxo1lRe1dXGroFVfWpoDxyCjSQhplQZgtYqtyLfOIx+
HQ7cPE/tUU/KsTGc0aL61cETB6u8fj+rQKUGdfbSlm0Rpu4v0gC8RnDj06X/hZ7e
VkWU+dOBzxlqHuLlwFFtVDgCyyTatIROx5V+GpMHrVqBPO7HcHhwqZ30k2kMM8J3
y1CWaliQM85jqtSZV+yUHKQV8EksSowCFJuguf+Ahz0i0/koaI3i8m4MRN/1j13d
jbTaX5a11Ynm3A27jioZdtMRty6AJ88oCp18jxVzqTxNNO4=
-----END CERTIFICATE-----
@@ -6,7 +6,7 @@ allow_public_room_directory_over_federation = true
 allow_public_room_directory_without_auth = true
 allow_registration = true
 database_path = "/database"
-log = "trace,h2=warn,hyper=warn"
+log = "trace,h2=debug,hyper=debug"
 port = [8008, 8448]
 trusted_servers = []
 only_query_trusted_key_servers = false

@@ -17,21 +17,34 @@ ip_range_denylist = []
 url_preview_domain_contains_allowlist = ["*"]
 url_preview_domain_explicit_denylist = ["*"]
 media_compat_file_link = false
-media_startup_check = false
-prune_missing_media = false
-log_colors = false
+media_startup_check = true
+prune_missing_media = true
+log_colors = true
 admin_room_notices = false
 allow_check_for_updates = false
 allow_unstable_room_versions = true
-rocksdb_log_level = "debug"
+intentionally_unknown_config_option_for_testing = true
+rocksdb_log_level = "info"
 rocksdb_max_log_files = 1
 rocksdb_recovery_mode = 0
 rocksdb_paranoid_file_checks = true
 log_guest_registrations = false
 allow_legacy_media = true
-startup_netburst = false
+startup_netburst = true
 startup_netburst_keep = -1
 
+allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure = true
+
+# valgrind makes things so slow
+dns_timeout = 60
+dns_attempts = 20
+request_conn_timeout = 60
+request_timeout = 120
+well_known_conn_timeout = 60
+well_known_timeout = 60
+federation_idle_timeout = 300
+sender_timeout = 300
+sender_idle_timeout = 300
+sender_retry_backoff_limit = 300
 
 [global.tls]
-certs = "/certificate.crt"
 dual_protocol = true
-key = "/private_key.key"
@@ -3,10 +3,8 @@
 , buildEnv
 , coreutils
 , dockerTools
-, gawk
 , lib
 , main
-, openssl
 , stdenv
 , tini
 , writeShellScriptBin

@@ -18,18 +16,12 @@ let
 all_features = true;
 disable_release_max_log_level = true;
 disable_features = [
 # no reason to use jemalloc for complement, just has compatibility/build issues
 "jemalloc"
 "jemalloc_stats"
 "jemalloc_prof"
 # console/CLI stuff isn't used or relevant for complement
 "console"
 "tokio_console"
 # sentry telemetry isn't useful for complement, disabled by default anyways
 "sentry_telemetry"
 "perf_measurements"
 # the containers don't use or need systemd signal support
 "systemd"
 # this is non-functional on nix for some reason
 "hardened_malloc"
 # dont include experimental features

@@ -48,28 +40,6 @@ let
 start = writeShellScriptBin "start" ''
 set -euxo pipefail
 
-${lib.getExe openssl} genrsa -out private_key.key 2048
-${lib.getExe openssl} req \
--new \
--sha256 \
--key private_key.key \
--subj "/C=US/ST=CA/O=MyOrg, Inc./CN=$SERVER_NAME" \
--out signing_request.csr
-cp ${./v3.ext} v3.ext
-echo "DNS.1 = $SERVER_NAME" >> v3.ext
-echo "IP.1 = $(${lib.getExe gawk} 'END{print $1}' /etc/hosts)" \
->> v3.ext
-${lib.getExe openssl} x509 \
--req \
--extfile v3.ext \
--in signing_request.csr \
--CA /complement/ca/ca.crt \
--CAkey /complement/ca/ca.key \
--CAcreateserial \
--out certificate.crt \
--days 1 \
--sha256
-
 ${lib.getExe' coreutils "env"} \
 CONDUWUIT_SERVER_NAME="$SERVER_NAME" \
 ${lib.getExe main'}

@@ -105,7 +75,8 @@ dockerTools.buildImage {
 else [];
 
 Env = [
-"SSL_CERT_FILE=/complement/ca/ca.crt"
+"CONDUWUIT_TLS__KEY=${./private_key.key}"
+"CONDUWUIT_TLS__CERTS=${./certificate.crt}"
 "CONDUWUIT_CONFIG=${./config.toml}"
 "RUST_BACKTRACE=full"
 ];
nix/pkgs/complement/private_key.key (new file, 28 lines)

@@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDS/odmZivxajeb
iyT7SMuhXqnMm+hF+zEARLcbieem0wG4x7gi2S6WLf8DlifdXax6me13eYk4rBnT
LvGEvNNx0px5M54H+FVyoVa3c1tmA66WUcZjobafPGsDh5j+5qpScgWwjkMPGg1a
09CphCFswO4PpxUUORX/OTGj/rEKxximW6OtavBwaS9F7mqjXJK7lCrcZxKq5ucc
ebGMmCoO660hROSTBaFigdRTVicclk+NgYRrZyWbCiuXPjQ0jlOE2rcaDepqTUga
Qs/2tdT4kBzBH6kZOiQOIN/ddXaj032QXr1HQYfIJfJmiM6nmRob8nik5rpZdWNO
/Ncsro/fAgMBAAECggEAITCCkfv+a5I+vwvrPE/eIDso0JOxvNhfg+BLQVy3AMnu
WmeoMmshZeREWgcTrEGg8QQnk4Sdrjl8MnkO6sddJ2luza3t7OkGX+q7Hk5aETkB
DIo+f8ufU3sIhlydF3OnVSK0fGpUaBq8AQ6Soyeyrk3G5NVufmjgae5QPbDBnqUb
piOGyfcwagL4JtCbZsMk8AT7vQSynLm6zaWsVzWNd71jummLqtVV063K95J9PqVN
D8meEcP3WR5kQrvf+mgy9RVgWLRtVWN8OLZfJ9yrnl4Efj62elrldUj4jaCFezGQ
8f0W+d8jjt038qhmEdymw2MWQ+X/b0R79lJar1Up8QKBgQD1DtHxauhl+JUoI3y+
3eboqXl7YPJt1/GTnChb4b6D1Z1hvLsOKUa7hjGEfruYGbsWXBCRMICdfzp+iWcq
/lEOp7/YU9OaW4lQMoG4sXMoBWd9uLgg0E+aH6VDJOBvxsfafqM4ufmtspzwEm90
FU1cq6oImomFnPChSq4X+3+YpwKBgQDcalaK9llCcscWA8HAP8WVVNTjCOqiDp9q
td61E9IO/FIB/gW5y+JkaFRrA2CN1zY3s3K92uveLTNYTArecWlDcPNNFDuaYu2M
Roz4bC104HGh+zztJ0iPVzELL81Lgg6wHhLONN+eVi4gTftJxzJFXybyb+xVT25A
91ynKXB+CQKBgQC+Ub43MoI+/6pHvBfb3FbDByvz6D0flgBmVXb6tP3TQYmzKHJV
8zSd2wCGGC71V7Z3DRVIzVR1/SOetnPLbivhp+JUzfWfAcxI3pDksdvvjxLrDxTh
VycbWcxtsywjY0w/ou581eLVRcygnpC0pP6qJCAwAmUfwd0YRvmiYo6cLQKBgHIW
UIlJDdaJFmdctnLOD3VGHZMOUHRlYTqYvJe5lKbRD5mcZFZRI/OY1Ok3LEj+tj+K
kL+YizHK76KqaY3N4hBYbHbfHCLDRfWvptQHGlg+vFJ9eoG+LZ6UIPyLV5XX0cZz
KoS1dXG9Zc6uznzXsDucDsq6B/f4TzctUjXsCyARAoGAOKb4HtuNyYAW0jUlujR7
IMHwUesOGlhSXqFtP9aTvk6qJgvV0+3CKcWEb4y02g+uYftP8BLNbJbIt9qOqLYh
tOVyzCoamAi8araAhjA0w4dXvqDCDK7k/gZFkojmKQtRijoxTHnWcDc3vAjYCgaM
9MVtdgSkuh2gwkD/mMoAJXM=
-----END PRIVATE KEY-----
nix/pkgs/complement/signing_request.csr (new file, 16 lines)

@@ -0,0 +1,16 @@
-----BEGIN CERTIFICATE REQUEST-----
MIIChDCCAWwCAQAwPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQK
DAx3b29mZXJzIGluYy4xDDAKBgNVBAMMA2hzMTCCASIwDQYJKoZIhvcNAQEBBQAD
ggEPADCCAQoCggEBANL+h2ZmK/FqN5uLJPtIy6Feqcyb6EX7MQBEtxuJ56bTAbjH
uCLZLpYt/wOWJ91drHqZ7Xd5iTisGdMu8YS803HSnHkzngf4VXKhVrdzW2YDrpZR
xmOhtp88awOHmP7mqlJyBbCOQw8aDVrT0KmEIWzA7g+nFRQ5Ff85MaP+sQrHGKZb
o61q8HBpL0XuaqNckruUKtxnEqrm5xx5sYyYKg7rrSFE5JMFoWKB1FNWJxyWT42B
hGtnJZsKK5c+NDSOU4TatxoN6mpNSBpCz/a11PiQHMEfqRk6JA4g3911dqPTfZBe
vUdBh8gl8maIzqeZGhvyeKTmull1Y0781yyuj98CAwEAAaAAMA0GCSqGSIb3DQEB
CwUAA4IBAQDR/gjfxN0IID1MidyhZB4qpdWn3m6qZnEQqoTyHHdWalbfNXcALC79
ffS+Smx40N5hEPvqy6euR89N5YuYvt8Hs+j7aWNBn7Wus5Favixcm2JcfCTJn2R3
r8FefuSs2xGkoyGsPFFcXE13SP/9zrZiwvOgSIuTdz/Pbh6GtEx7aV4DqHJsrXnb
XuPxpQleoBqKvQgSlmaEBsJg13TQB+Fl2foBVUtqAFDQiv+RIuircf0yesMCKJaK
MPH4Oo+r3pR8lI8ewfJPreRhCoV+XrGYMubaakz003TJ1xlOW8M+N9a6eFyMVh76
U1nY/KP8Ua6Lgaj9PRz7JCRzNoshZID/
-----END CERTIFICATE REQUEST-----
@@ -4,3 +4,9 @@ keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
 subjectAltName = @alt_names
 
+[alt_names]
+DNS.1 = *.docker.internal
+DNS.2 = hs1
+DNS.3 = hs2
+DNS.4 = hs3
+DNS.5 = hs4
+IP.1 = 127.0.0.1
@@ -82,7 +82,7 @@ rust-jemalloc-sys' = (rust-jemalloc-sys.override {
 buildDepsOnlyEnv =
 let
 rocksdb' = (rocksdb.override {
-jemalloc = rust-jemalloc-sys';
+jemalloc = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys';
 # rocksdb fails to build with prefixed jemalloc, which is required on
 # darwin due to [1]. In this case, fall back to building rocksdb with
 # libc malloc. This should not cause conflicts, because all of the

@@ -103,6 +103,12 @@ buildDepsOnlyEnv =
 ++ [ "-DPORTABLE=haswell" ]) else ([ "-DPORTABLE=1" ])
 )
 ++ old.cmakeFlags;
+
+# outputs has "tools" which we dont need or use
+outputs = [ "out" ];
+
+# preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use
+preInstall = "";
 });
 in
 {

@@ -149,13 +155,20 @@ commonAttrs = {
 
 # Keep sorted
 include = [
 ".cargo"
 "Cargo.lock"
 "Cargo.toml"
 "deps"
 "src"
 ];
 };
 
+doCheck = true;
+
+cargoExtraArgs = "--no-default-features --locked "
++ lib.optionalString
+(features'' != [])
+"--features " + (builtins.concatStringsSep "," features'');
+
 dontStrip = profile == "dev" || profile == "test";
 dontPatchELF = profile == "dev" || profile == "test";

@@ -181,27 +194,7 @@ commonAttrs = {
 # differing values for `NIX_CFLAGS_COMPILE`, which contributes to spurious
 # rebuilds of bindgen and its depedents.
 jq
-
-# needed so we can get rid of gcc and other unused deps that bloat OCI images
-removeReferencesTo
 ]
-# needed to build Rust applications on macOS
-++ lib.optionals stdenv.hostPlatform.isDarwin [
-# https://github.com/NixOS/nixpkgs/issues/206242
-# ld: library not found for -liconv
-libiconv
-
-# https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell
-# https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612
-pkgsBuildHost.darwin.apple_sdk.frameworks.Security
-];
-
-# for some reason gcc and other weird deps are added to OCI images and bloats it up
-#
-# <https://github.com/input-output-hk/haskell.nix/issues/829>
-postInstall = with pkgsBuildHost; ''
-find "$out" -type f -exec remove-references-to -t ${stdenv.cc} -t ${gcc} -t ${llvm} -t ${rustc.unwrapped} -t ${rustc} '{}' +
-'';
 };
 in

@@ -210,16 +203,13 @@ craneLib.buildPackage ( commonAttrs // {
 env = buildDepsOnlyEnv;
 });
 
-cargoExtraArgs = "--no-default-features "
+doCheck = true;
+
+cargoExtraArgs = "--no-default-features --locked "
 + lib.optionalString
 (features'' != [])
 "--features " + (builtins.concatStringsSep "," features'');
 
-# This is redundant with CI
-cargoTestCommand = "";
-cargoCheckCommand = "";
-doCheck = false;
 
 env = buildPackageEnv;
 
 passthru = {
@@ -28,5 +28,19 @@ dockerTools.buildLayeredImage {
 Env = [
 "RUST_BACKTRACE=full"
 ];
+Labels = {
+"org.opencontainers.image.authors" = "June Clementine Strawberry <june@girlboss.ceo> and Jason Volk <jason@zemos.net>";
+"org.opencontainers.image.created" = "@${toString inputs.self.lastModified}";
+"org.opencontainers.image.description" = "a very cool Matrix chat homeserver written in Rust";
+"org.opencontainers.image.documentation" = "https://conduwuit.puppyirl.gay/";
+"org.opencontainers.image.licenses" = "Apache-2.0";
+"org.opencontainers.image.revision" = inputs.self.rev or inputs.self.dirtyRev or "";
+"org.opencontainers.image.source" = "https://github.com/girlbossceo/conduwuit";
+"org.opencontainers.image.title" = main.pname;
+"org.opencontainers.image.url" = "https://conduwuit.puppyirl.gay/";
+"org.opencontainers.image.vendor" = "girlbossceo";
+"org.opencontainers.image.version" = main.version;
+};
 };
 }
@@ -9,7 +9,7 @@
 # If you're having trouble making the relevant changes, bug a maintainer.
 
 [toolchain]
-channel = "1.83.0"
+channel = "1.86.0"
 profile = "minimal"
 components = [
 # For rust-analyzer

@@ -24,5 +24,6 @@ targets = [
 "x86_64-unknown-linux-gnu",
 "x86_64-unknown-linux-musl",
 "aarch64-unknown-linux-musl",
 "aarch64-unknown-linux-gnu",
+#"aarch64-apple-darwin",
 ]
@@ -2,7 +2,7 @@ array_width = 80
 chain_width = 60
 comment_width = 80
 condense_wildcard_suffixes = true
-edition = "2024"
+style_edition = "2024"
 fn_call_width = 80
 fn_single_line = true
 format_code_in_doc_comments = true
@@ -1,6 +1,5 @@
 use clap::Parser;
 use conduwuit::Result;
-use ruma::events::room::message::RoomMessageEventContent;
 
 use crate::{
 appservice, appservice::AppserviceCommand, check, check::CheckCommand, command::Command,

@@ -50,13 +49,10 @@ pub(super) enum AdminCommand {
 }
 
 #[tracing::instrument(skip_all, name = "command")]
-pub(super) async fn process(
-command: AdminCommand,
-context: &Command<'_>,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn process(command: AdminCommand, context: &Command<'_>) -> Result {
 use AdminCommand::*;
 
-Ok(match command {
+match command {
 | Appservices(command) => appservice::process(command, context).await?,
 | Media(command) => media::process(command, context).await?,
 | Users(command) => user::process(command, context).await?,

@@ -66,5 +62,7 @@ pub(super) async fn process(
 | Debug(command) => debug::process(command, context).await?,
 | Query(command) => query::process(command, context).await?,
 | Check(command) => check::process(command, context).await?,
-})
-}
+}
+
+Ok(())
+}
@@ -1,6 +1,6 @@
 use ruma::{api::appservice::Registration, events::room::message::RoomMessageEventContent};
 
-use crate::{admin_command, Result};
+use crate::{Result, admin_command};
 
 #[admin_command]
 pub(super) async fn register(&self) -> Result<RoomMessageEventContent> {
@@ -2,20 +2,11 @@ mod commands;
 
 use clap::Subcommand;
 use conduwuit::Result;
-use ruma::events::room::message::RoomMessageEventContent;
 
-use crate::Command;
+use crate::admin_command_dispatch;
 
+#[admin_command_dispatch]
 #[derive(Debug, Subcommand)]
 pub(super) enum CheckCommand {
-AllUsers,
-}
-
-pub(super) async fn process(
-command: CheckCommand,
-context: &Command<'_>,
-) -> Result<RoomMessageEventContent> {
-Ok(match command {
-| CheckCommand::AllUsers => context.check_all_users().await?,
-})
+CheckAllUsers,
 }
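For context on the hunk above: `#[admin_command_dispatch]` removes the
hand-written `process` function by generating the match that routes each enum
variant to its snake_case handler method. Roughly, as a hand-written
illustration (an assumption about the macro's effect, not its actual
expansion; requires `clap` with the `derive` feature and `tokio`):

```rust
use clap::Subcommand;

#[derive(Debug, Subcommand)]
enum CheckCommand {
    CheckAllUsers,
}

struct Command;

impl Command {
    // Stand-in for the real handler in commands.rs.
    async fn check_all_users(&self) -> Result<(), String> {
        Ok(())
    }
}

// Approximately what the dispatch macro writes for us: one match arm per
// variant, calling the snake_case method of the same name.
async fn process(command: CheckCommand, context: &Command) -> Result<(), String> {
    match command {
        CheckCommand::CheckAllUsers => context.check_all_users().await,
    }
}

#[tokio::main]
async fn main() -> Result<(), String> {
    process(CheckCommand::CheckAllUsers, &Command).await
}
```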
@@ -1,6 +1,12 @@
-use std::time::SystemTime;
+use std::{fmt, time::SystemTime};
 
 use conduwuit::Result;
 use conduwuit_service::Services;
+use futures::{
+Future, FutureExt,
+io::{AsyncWriteExt, BufWriter},
+lock::Mutex,
+};
 use ruma::EventId;
 
 pub(crate) struct Command<'a> {

@@ -8,4 +14,26 @@ pub(crate) struct Command<'a> {
 pub(crate) body: &'a [&'a str],
 pub(crate) timer: SystemTime,
 pub(crate) reply_id: Option<&'a EventId>,
+pub(crate) output: Mutex<BufWriter<Vec<u8>>>,
+}
+
+impl Command<'_> {
+pub(crate) fn write_fmt(
+&self,
+arguments: fmt::Arguments<'_>,
+) -> impl Future<Output = Result> + Send + '_ + use<'_> {
+let buf = format!("{arguments}");
+self.output.lock().then(|mut output| async move {
+output.write_all(buf.as_bytes()).await.map_err(Into::into)
+})
+}
+
+pub(crate) fn write_str<'a>(
+&'a self,
+s: &'a str,
+) -> impl Future<Output = Result> + Send + 'a {
+self.output.lock().then(move |mut output| async move {
+output.write_all(s.as_bytes()).await.map_err(Into::into)
+})
+}
 }
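The `write_fmt`/`write_str` pair above is what lets admin handlers write
`writeln!(self, ...).await`: `write!` and `writeln!` desugar to a call to an
inherent `write_fmt` method, and here that method returns a future instead of
writing synchronously. A self-contained sketch of the same pattern (a minimal
illustration assuming `tokio` and `futures` as dependencies; `Out` is a
stand-in type, not conduwuit's `Command`):

```rust
use std::future::Future;

struct Out {
    buf: futures::lock::Mutex<Vec<u8>>,
}

impl Out {
    // write!/writeln! expand to `out.write_fmt(format_args!(...))`, so an
    // inherent method with this name and shape is all the macros need.
    fn write_fmt(
        &self,
        args: std::fmt::Arguments<'_>,
    ) -> impl Future<Output = std::io::Result<()>> + '_ {
        // Format eagerly, before the async block, so the short-lived
        // `Arguments` value never has to live inside the future.
        let s = args.to_string();
        async move {
            self.buf.lock().await.extend_from_slice(s.as_bytes());
            Ok(())
        }
    }
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let out = Out { buf: futures::lock::Mutex::new(Vec::new()) };
    writeln!(out, "hello {}", 42).await?; // calls out.write_fmt(...)
    assert_eq!(&*out.buf.lock().await, b"hello 42\n");
    Ok(())
}
```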
@@ -6,16 +6,26 @@ use std::{
 };
 
 use conduwuit::{
-debug_error, err, info, trace, utils, utils::string::EMPTY, warn, Error, PduEvent, Result,
+Error, Result, debug_error, err, info,
+matrix::pdu::{PduEvent, PduId, RawPduId},
+trace, utils,
+utils::{
+stream::{IterStream, ReadyExt},
+string::EMPTY,
+},
+warn,
 };
-use futures::{FutureExt, StreamExt};
+use futures::{FutureExt, StreamExt, TryStreamExt};
 use ruma::{
-api::{client::error::ErrorKind, federation::event::get_room_state},
-events::room::message::RoomMessageEventContent,
 CanonicalJsonObject, EventId, OwnedEventId, OwnedRoomOrAliasId, RoomId, RoomVersionId,
 ServerName,
+api::{client::error::ErrorKind, federation::event::get_room_state},
+events::room::message::RoomMessageEventContent,
 };
 use service::rooms::{
 short::{ShortEventId, ShortRoomId},
 state_compressor::HashSetCompressStateEvent,
 };
 use service::rooms::state_compressor::HashSetCompressStateEvent;
 use tracing_subscriber::EnvFilter;
 
 use crate::admin_command;

@@ -50,7 +60,7 @@ pub(super) async fn get_auth_chain(
 .rooms
 .auth_chain
 .event_ids_iter(room_id, once(event_id.as_ref()))
-.await?
+.ready_filter_map(Result::ok)
 .count()
 .await;

@@ -131,13 +141,42 @@ pub(super) async fn get_pdu(&self, event_id: Box<EventId>) -> Result<RoomMessage
 }
 }
 
+#[admin_command]
+pub(super) async fn get_short_pdu(
+&self,
+shortroomid: ShortRoomId,
+shorteventid: ShortEventId,
+) -> Result<RoomMessageEventContent> {
+let pdu_id: RawPduId = PduId {
+shortroomid,
+shorteventid: shorteventid.into(),
+}
+.into();
+
+let pdu_json = self
+.services
+.rooms
+.timeline
+.get_pdu_json_from_id(&pdu_id)
+.await;
+
+match pdu_json {
+| Ok(json) => {
+let json_text =
+serde_json::to_string_pretty(&json).expect("canonical json is valid json");
+Ok(RoomMessageEventContent::notice_markdown(format!("```json\n{json_text}\n```",)))
+},
+| Err(_) => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")),
+}
+}
+
 #[admin_command]
 pub(super) async fn get_remote_pdu_list(
 &self,
 server: Box<ServerName>,
 force: bool,
 ) -> Result<RoomMessageEventContent> {
-if !self.services.globals.config.allow_federation {
+if !self.services.server.config.allow_federation {
 return Ok(RoomMessageEventContent::text_plain(
 "Federation is disabled on this homeserver.",
 ));

@@ -172,7 +211,8 @@ pub(super) async fn get_remote_pdu_list(
 
 for pdu in list {
 if force {
-if let Err(e) = self.get_remote_pdu(Box::from(pdu), server.clone()).await {
+match self.get_remote_pdu(Box::from(pdu), server.clone()).await {
+| Err(e) => {
 failed_count = failed_count.saturating_add(1);
 self.services
 .admin

@@ -182,8 +222,10 @@ pub(super) async fn get_remote_pdu_list(
 .await
 .ok();
 warn!("Failed to get remote PDU, ignoring error: {e}");
-} else {
+},
+| _ => {
 success_count = success_count.saturating_add(1);
+},
+}
 } else {
 self.get_remote_pdu(Box::from(pdu), server.clone()).await?;

@@ -202,7 +244,7 @@ pub(super) async fn get_remote_pdu(
 event_id: Box<EventId>,
 server: Box<ServerName>,
 ) -> Result<RoomMessageEventContent> {
-if !self.services.globals.config.allow_federation {
+if !self.services.server.config.allow_federation {
 return Ok(RoomMessageEventContent::text_plain(
 "Federation is disabled on this homeserver.",
 ));

@@ -294,11 +336,10 @@ pub(super) async fn get_room_state(
 .services
 .rooms
 .state_accessor
-.room_state_full(&room_id)
-.await?
-.values()
-.map(PduEvent::to_state_event)
-.collect();
+.room_state_full_pdus(&room_id)
+.map_ok(PduEvent::into_state_event)
+.try_collect()
+.await?;
 
 if room_state.is_empty() {
 return Ok(RoomMessageEventContent::text_plain(

@@ -386,7 +427,7 @@ pub(super) async fn change_log_level(
 let handles = &["console"];
 
 if reset {
-let old_filter_layer = match EnvFilter::try_new(&self.services.globals.config.log) {
+let old_filter_layer = match EnvFilter::try_new(&self.services.server.config.log) {
 | Ok(s) => s,
 | Err(e) => {
 return Ok(RoomMessageEventContent::text_plain(format!(

@@ -405,7 +446,7 @@ pub(super) async fn change_log_level(
 | Ok(()) => {
 return Ok(RoomMessageEventContent::text_plain(format!(
 "Successfully changed log level back to config value {}",
-self.services.globals.config.log
+self.services.server.config.log
 )));
 },
 | Err(e) => {

@@ -521,7 +562,7 @@ pub(super) async fn first_pdu_in_room(
 .services
 .rooms
 .state_cache
-.server_in_room(&self.services.globals.config.server_name, &room_id)
+.server_in_room(&self.services.server.name, &room_id)
 .await
 {
 return Ok(RoomMessageEventContent::text_plain(

@@ -550,7 +591,7 @@ pub(super) async fn latest_pdu_in_room(
 .services
 .rooms
 .state_cache
-.server_in_room(&self.services.globals.config.server_name, &room_id)
+.server_in_room(&self.services.server.name, &room_id)
 .await
 {
 return Ok(RoomMessageEventContent::text_plain(

@@ -580,7 +621,7 @@ pub(super) async fn force_set_room_state_from_server(
 .services
 .rooms
 .state_cache
-.server_in_room(&self.services.globals.config.server_name, &room_id)
+.server_in_room(&self.services.server.name, &room_id)
 .await
 {
 return Ok(RoomMessageEventContent::text_plain(

@@ -607,6 +648,7 @@ pub(super) async fn force_set_room_state_from_server(
 room_id: room_id.clone().into(),
 event_id: first_pdu.event_id.clone(),
 })
+.boxed()
 .await?;
 
 for pdu in remote_state_response.pdus.clone() {

@@ -615,6 +657,7 @@ pub(super) async fn force_set_room_state_from_server(
 .rooms
 .event_handler
 .parse_incoming_pdu(&pdu)
+.boxed()
 .await
 {
 | Ok(t) => t,

@@ -678,6 +721,7 @@ pub(super) async fn force_set_room_state_from_server(
 .rooms
 .event_handler
 .resolve_state(&room_id, &room_version, state)
+.boxed()
 .await?;
 
 info!("Forcing new room state");

@@ -692,7 +736,7 @@ pub(super) async fn force_set_room_state_from_server(
 .save_state(room_id.clone().as_ref(), new_room_state)
 .await?;
 
-let state_lock = self.services.rooms.state.mutex.lock(&room_id).await;
+let state_lock = self.services.rooms.state.mutex.lock(&*room_id).await;
 self.services
 .rooms
 .state

@@ -723,8 +767,7 @@ pub(super) async fn get_signing_keys(
 notary: Option<Box<ServerName>>,
 query: bool,
 ) -> Result<RoomMessageEventContent> {
-let server_name =
-server_name.unwrap_or_else(|| self.services.server.config.server_name.clone().into());
+let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into());
 
 if let Some(notary) = notary {
 let signing_keys = self

@@ -760,8 +803,7 @@ pub(super) async fn get_verify_keys(
 &self,
 server_name: Option<Box<ServerName>>,
 ) -> Result<RoomMessageEventContent> {
-let server_name =
-server_name.unwrap_or_else(|| self.services.server.config.server_name.clone().into());
+let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into());
 
 let keys = self
 .services

@@ -785,13 +827,13 @@ pub(super) async fn resolve_true_destination(
 server_name: Box<ServerName>,
 no_cache: bool,
 ) -> Result<RoomMessageEventContent> {
-if !self.services.globals.config.allow_federation {
+if !self.services.server.config.allow_federation {
 return Ok(RoomMessageEventContent::text_plain(
 "Federation is disabled on this homeserver.",
 ));
 }
 
-if server_name == self.services.globals.config.server_name {
+if server_name == self.services.server.name {
 return Ok(RoomMessageEventContent::text_plain(
 "Not allowed to send federation requests to ourselves. Please use `get-pdu` for \
 fetching local PDUs.",

@@ -810,19 +852,27 @@ pub(super) async fn resolve_true_destination(
 }
 
 #[admin_command]
-pub(super) async fn memory_stats(&self) -> Result<RoomMessageEventContent> {
-let html_body = conduwuit::alloc::memory_stats();
+pub(super) async fn memory_stats(&self, opts: Option<String>) -> Result<RoomMessageEventContent> {
+const OPTS: &str = "abcdefghijklmnopqrstuvwxyz";
 
-if html_body.is_none() {
-return Ok(RoomMessageEventContent::text_plain(
-"malloc stats are not supported on your compiled malloc.",
-));
-}
+let opts: String = OPTS
+.chars()
+.filter(|&c| {
+let allow_any = opts.as_ref().is_some_and(|opts| opts == "*");
 
-Ok(RoomMessageEventContent::text_html(
-"This command's output can only be viewed by clients that render HTML.".to_owned(),
-html_body.expect("string result"),
-))
+let allow = allow_any || opts.as_ref().is_some_and(|opts| opts.contains(c));
+
+!allow
+})
+.collect();
+
+let stats = conduwuit::alloc::memory_stats(&opts).unwrap_or_default();
+
+self.write_str("```\n").await?;
+self.write_str(&stats).await?;
+self.write_str("\n```").await?;
+
+Ok(RoomMessageEventContent::text_plain(""))
 }
 
 #[cfg(tokio_unstable)]

@@ -895,7 +945,7 @@ pub(super) async fn list_dependencies(&self, names: bool) -> Result<RoomMessageE
 } else {
 String::new()
 };
-writeln!(out, "{name} | {version} | {feats}")?;
+writeln!(out, "| {name} | {version} | {feats} |")?;
 }
 
 Ok(RoomMessageEventContent::notice_markdown(out))

@@ -907,19 +957,64 @@ pub(super) async fn database_stats(
 property: Option<String>,
 map: Option<String>,
 ) -> Result<RoomMessageEventContent> {
-let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned());
 let map_name = map.as_ref().map_or(EMPTY, String::as_str);
+let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned());
+self.services
+.db
+.iter()
+.filter(|&(&name, _)| map_name.is_empty() || map_name == name)
+.try_stream()
+.try_for_each(|(&name, map)| {
+let res = map.property(&property).expect("invalid property");
+writeln!(self, "##### {name}:\n```\n{}\n```", res.trim())
+})
+.await?;
 
-let mut out = String::new();
-for (&name, map) in self.services.db.iter() {
-if !map_name.is_empty() && map_name != name {
-continue;
-}
-
-let res = map.property(&property)?;
-let res = res.trim();
-writeln!(out, "##### {name}:\n```\n{res}\n```")?;
-}
-
-Ok(RoomMessageEventContent::notice_markdown(out))
+Ok(RoomMessageEventContent::notice_plain(""))
+}
+
+#[admin_command]
+pub(super) async fn database_files(
+&self,
+map: Option<String>,
+level: Option<i32>,
+) -> Result<RoomMessageEventContent> {
+let mut files: Vec<_> = self.services.db.db.file_list().collect::<Result<_>>()?;
+
+files.sort_by_key(|f| f.name.clone());
+
+writeln!(self, "| lev | sst | keys | dels | size | column |").await?;
+writeln!(self, "| ---: | :--- | ---: | ---: | ---: | :--- |").await?;
+files
+.into_iter()
+.filter(|file| {
+map.as_deref()
+.is_none_or(|map| map == file.column_family_name)
+})
+.filter(|file| level.as_ref().is_none_or(|&level| level == file.level))
+.try_stream()
+.try_for_each(|file| {
+writeln!(
+self,
+"| {} | {:<13} | {:7}+ | {:4}- | {:9} | {} |",
+file.level,
+file.name,
+file.num_entries,
+file.num_deletions,
+file.size,
+file.column_family_name,
+)
+})
+.await?;
+
+Ok(RoomMessageEventContent::notice_plain(""))
+}
+
+#[admin_command]
+pub(super) async fn trim_memory(&self) -> Result<RoomMessageEventContent> {
+conduwuit::alloc::trim(None)?;
+
+writeln!(self, "done").await?;
+
+Ok(RoomMessageEventContent::notice_plain(""))
+}
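Note the inversion in the `memory_stats` change above: the string ultimately
handed to the allocator lists statistics to *suppress*, so the handler filters
the known option characters down to the ones the caller did not enable (with
"*" enabling everything). A standalone sketch of that mask logic (hypothetical
helper name; the real option set and the `conduwuit::alloc` API may differ):

```rust
/// Known extended-statistics options, one character each (illustrative set).
const OPTS: &str = "abcdefghijklmnopqrstuvwxyz";

/// Build the suppression mask: every known option character the caller did
/// NOT enable. Passing "*" enables everything, yielding an empty mask.
fn suppression_mask(enabled: Option<&str>) -> String {
    OPTS.chars()
        .filter(|&c| {
            let allow_any = enabled == Some("*");
            let allow = allow_any || enabled.is_some_and(|e| e.contains(c));
            !allow // keep only the characters to suppress
        })
        .collect()
}

fn main() {
    assert_eq!(suppression_mask(Some("*")), "");             // nothing suppressed
    assert!(suppression_mask(Some("ab")).starts_with('c'));  // 'a' and 'b' allowed
    assert_eq!(suppression_mask(None).len(), 26);            // everything suppressed
}
```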
@@ -4,6 +4,7 @@ pub(crate) mod tester;
 use clap::Subcommand;
 use conduwuit::Result;
 use ruma::{EventId, OwnedRoomOrAliasId, RoomId, ServerName};
+use service::rooms::short::{ShortEventId, ShortRoomId};
 
 use self::tester::TesterCommand;
 use crate::admin_command_dispatch;

@@ -31,12 +32,21 @@ pub(super) enum DebugCommand {
 /// the command.
 ParsePdu,
 
-/// - Retrieve and print a PDU by ID from the conduwuit database
+/// - Retrieve and print a PDU by EventID from the conduwuit database
 GetPdu {
 /// An event ID (a $ followed by the base64 reference hash)
 event_id: Box<EventId>,
 },
 
+/// - Retrieve and print a PDU by PduId from the conduwuit database
+GetShortPdu {
+/// Shortroomid integer
+shortroomid: ShortRoomId,
+
+/// Shorteventid integer
+shorteventid: ShortEventId,
+},
+
 /// - Attempts to retrieve a PDU from a remote server. Inserts it into our
 /// database/timeline if found and we do not have this PDU already
 /// (following normal event auth rules, handles it as an incoming PDU).

@@ -181,7 +191,13 @@ pub(super) enum DebugCommand {
 },
 
 /// - Print extended memory usage
-MemoryStats,
+///
+/// Optional argument is a character mask (a sequence of characters in any
+/// order) which enable additional extended statistics. Known characters are
+/// "abdeglmx". For convenience, a '*' will enable everything.
+MemoryStats {
+opts: Option<String>,
+},
 
 /// - Print general tokio runtime metric totals.
 RuntimeMetrics,

@@ -207,6 +223,17 @@ pub(super) enum DebugCommand {
 map: Option<String>,
 },
 
+/// - Trim memory usage
+TrimMemory,
+
+/// - List database files
+DatabaseFiles {
+map: Option<String>,
+
+#[arg(long)]
+level: Option<i32>,
+},
+
 /// - Developer test stubs
 #[command(subcommand)]
 #[allow(non_snake_case)]
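The new `MemoryStats { opts }` and `DatabaseFiles { map, level }` variants
above lean on clap's derive support for optional positional arguments and
`#[arg(long)]` flags. A minimal standalone clap sketch of the same shape
(illustrative only, outside conduwuit's dispatch macros; assumes clap 4 with
the `derive` feature):

```rust
use clap::Parser;

#[derive(Parser, Debug)]
struct DatabaseFiles {
    /// Optional column family name (positional; may be omitted)
    map: Option<String>,

    /// Optional LSM level filter, passed as `--level N`
    #[arg(long)]
    level: Option<i32>,
}

fn main() {
    // e.g. `database-files events --level 6` or just `database-files`
    let args = DatabaseFiles::parse();
    println!("{args:?}");
}
```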
@@ -1,7 +1,7 @@
 use conduwuit::Err;
 use ruma::events::room::message::RoomMessageEventContent;
 
-use crate::{admin_command, admin_command_dispatch, Result};
+use crate::{Result, admin_command, admin_command_dispatch};
 
 #[admin_command_dispatch]
 #[derive(Debug, clap::Subcommand)]

@@ -31,7 +31,7 @@ async fn failure(&self) -> Result<RoomMessageEventContent> {
 #[admin_command]
 async fn tester(&self) -> Result<RoomMessageEventContent> {
 
-Ok(RoomMessageEventContent::notice_plain("completed"))
+Ok(RoomMessageEventContent::notice_plain("legacy"))
 }
 
 #[inline(never)]
@@ -3,7 +3,7 @@ use std::fmt::Write;
 use conduwuit::Result;
 use futures::StreamExt;
 use ruma::{
-events::room::message::RoomMessageEventContent, OwnedRoomId, RoomId, ServerName, UserId,
+OwnedRoomId, RoomId, ServerName, UserId, events::room::message::RoomMessageEventContent,
 };
 
 use crate::{admin_command, get_room_info};

@@ -92,7 +92,7 @@ pub(super) async fn remote_user_in_rooms(
 &self,
 user_id: Box<UserId>,
 ) -> Result<RoomMessageEventContent> {
-if user_id.server_name() == self.services.globals.config.server_name {
+if user_id.server_name() == self.services.server.name {
 return Ok(RoomMessageEventContent::text_plain(
 "User belongs to our server, please use `list-joined-rooms` user admin command \
 instead.",
@@ -1,12 +1,12 @@
 use std::time::Duration;
 
 use conduwuit::{
-debug, debug_info, debug_warn, error, info, trace, utils::time::parse_timepoint_ago, Result,
+Result, debug, debug_info, debug_warn, error, info, trace, utils::time::parse_timepoint_ago,
 };
 use conduwuit_service::media::Dim;
 use ruma::{
-events::room::message::RoomMessageEventContent, EventId, Mxc, MxcUri, OwnedMxcUri,
-OwnedServerName, ServerName,
+EventId, Mxc, MxcUri, OwnedMxcUri, OwnedServerName, ServerName,
+events::room::message::RoomMessageEventContent,
 };
 
 use crate::{admin_command, utils::parse_local_user_id};

@@ -41,7 +41,8 @@ pub(super) async fn delete(
 let mut mxc_urls = Vec::with_capacity(4);
 
 // parsing the PDU for any MXC URLs begins here
-if let Ok(event_json) = self.services.rooms.timeline.get_pdu_json(&event_id).await {
+match self.services.rooms.timeline.get_pdu_json(&event_id).await {
+| Ok(event_json) => {
 if let Some(content_key) = event_json.get("content") {
 debug!("Event ID has \"content\".");
 let content_obj = content_key.as_object();

@@ -58,8 +59,8 @@ pub(super) async fn delete(
 mxc_urls.push(final_url);
 } else {
 info!(
-"Found a URL in the event ID {event_id} but did not start with \
-mxc://, ignoring"
+"Found a URL in the event ID {event_id} but did not start \
+with mxc://, ignoring"
 );
 }
 }

@@ -76,16 +77,16 @@ pub(super) async fn delete(
 
 if thumbnail_url.to_string().starts_with("\"mxc://") {
 debug!(
-"Pushing thumbnail URL {thumbnail_url} to list of MXCs \
-to delete"
+"Pushing thumbnail URL {thumbnail_url} to list of \
+MXCs to delete"
 );
 let final_thumbnail_url =
 thumbnail_url.to_string().replace('"', "");
 mxc_urls.push(final_thumbnail_url);
 } else {
 info!(
-"Found a thumbnail URL in the event ID {event_id} but \
-did not start with mxc://, ignoring"
+"Found a thumbnail URL in the event ID {event_id} \
+but did not start with mxc://, ignoring"
 );
 }
 } else {

@@ -124,8 +125,8 @@ pub(super) async fn delete(
 }
 } else {
 return Ok(RoomMessageEventContent::text_plain(
-"Event ID does not have a \"content\" key or failed parsing the event \
-ID JSON.",
+"Event ID does not have a \"content\" key or failed parsing the \
+event ID JSON.",
 ));
 }
 } else {

@@ -134,10 +135,12 @@ pub(super) async fn delete(
 event type that contains media.",
 ));
 }
-} else {
+},
+| _ => {
 return Ok(RoomMessageEventContent::text_plain(
 "Event ID does not exist or is not known to us.",
 ));
+},
 }
 
 if mxc_urls.is_empty() {
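Several of the media hunks above replace `if let Ok(..) = ... { } else { }`
with an exhaustive `match` using the codebase's leading-pipe arm style, so the
error case can return early as its own arm. A tiny self-contained illustration
of that shape (illustrative types, not conduwuit's):

```rust
fn describe(res: Result<u32, String>) -> String {
    // Leading `|` before each arm is valid Rust and matches the project's
    // rustfmt style; the Err arm replaces the old `else` branch.
    match res {
        | Ok(n) => format!("got {n}"),
        | Err(_) => "Event ID does not exist or is not known to us.".to_owned(),
    }
}

fn main() {
    assert_eq!(describe(Ok(7)), "got 7");
    assert!(describe(Err("db miss".into())).starts_with("Event ID"));
}
```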
@@ -1,3 +1,4 @@
+#![allow(rustdoc::broken_intra_doc_links)]
 mod commands;
 
 use clap::Subcommand;

@@ -27,18 +28,18 @@ pub(super) enum MediaCommand {
 DeleteList,
 
 /// - Deletes all remote (and optionally local) media created before or
-/// after \[duration] time using filesystem metadata first created at
-/// date, or fallback to last modified date. This will always ignore
-/// errors by default.
+/// after [duration] time using filesystem metadata first created at date,
+/// or fallback to last modified date. This will always ignore errors by
+/// default.
 DeletePastRemoteMedia {
 /// - The relative time (e.g. 30s, 5m, 7d) within which to search
 duration: String,
 
-/// - Only delete media created more recently than \[duration] ago
+/// - Only delete media created before [duration] ago
 #[arg(long, short)]
 before: bool,
 
-/// - Only delete media created after \[duration] ago
+/// - Only delete media created after [duration] ago
 #[arg(long, short)]
 after: bool,
 
@@ -1,6 +1,7 @@
 #![recursion_limit = "192"]
 #![allow(clippy::wildcard_imports)]
 #![allow(clippy::enum_glob_use)]
+#![allow(clippy::too_many_arguments)]
 
 pub(crate) mod admin;
 pub(crate) mod command;
@@ -1,5 +1,6 @@
 use std::{
 	fmt::Write,
+	mem::take,
 	panic::AssertUnwindSafe,
 	sync::{Arc, Mutex},
 	time::SystemTime,
@@ -7,7 +8,7 @@ use std::{
 use clap::{CommandFactory, Parser};
 use conduwuit::{
-	debug, error,
+	Error, Result, debug, error,
 	log::{
 		capture,
 		capture::Capture,
@@ -15,24 +16,24 @@ use conduwuit::{
 	},
 	trace,
 	utils::string::{collect_stream, common_prefix},
-	warn, Error, Result,
+	warn,
 };
-use futures::future::FutureExt;
+use futures::{AsyncWriteExt, future::FutureExt, io::BufWriter};
 use ruma::{
+	EventId,
 	events::{
 		relation::InReplyTo,
 		room::message::{Relation::Reply, RoomMessageEventContent},
 	},
-	EventId,
 };
 use service::{
-	admin::{CommandInput, CommandOutput, ProcessorFuture, ProcessorResult},
 	Services,
+	admin::{CommandInput, CommandOutput, ProcessorFuture, ProcessorResult},
 };
 use tracing::Level;
-use tracing_subscriber::{filter::LevelFilter, EnvFilter};
+use tracing_subscriber::{EnvFilter, filter::LevelFilter};
 
-use crate::{admin, admin::AdminCommand, Command};
+use crate::{Command, admin, admin::AdminCommand};
 
 #[must_use]
 pub(super) fn complete(line: &str) -> String { complete_command(AdminCommand::command(), line) }
@@ -62,11 +63,35 @@ async fn process_command(services: Arc<Services>, input: &CommandInput) -> Proce
 		body: &body,
 		timer: SystemTime::now(),
 		reply_id: input.reply_id.as_deref(),
+		output: BufWriter::new(Vec::new()).into(),
 	};
 
-	process(&context, command, &args).await
+	let (result, mut logs) = process(&context, command, &args).await;
+
+	let output = &mut context.output.lock().await;
+	output.flush().await.expect("final flush of output stream");
+
+	let output =
+		String::from_utf8(take(output.get_mut())).expect("invalid utf8 in command output stream");
+
+	match result {
+		| Ok(()) if logs.is_empty() =>
+			Ok(Some(reply(RoomMessageEventContent::notice_markdown(output), context.reply_id))),
+
+		| Ok(()) => {
+			logs.write_str(output.as_str()).expect("output buffer");
+			Ok(Some(reply(RoomMessageEventContent::notice_markdown(logs), context.reply_id)))
+		},
+		| Err(error) => {
+			write!(&mut logs, "Command failed with error:\n```\n{error:#?}\n```")
+				.expect("output buffer");
+
+			Err(reply(RoomMessageEventContent::notice_markdown(logs), context.reply_id))
+		},
+	}
+}
 
 #[allow(clippy::result_large_err)]
 fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult {
 	let link =
 		"Please submit a [bug report](https://github.com/girlbossceo/conduwuit/issues/new). 🥺";
@@ -76,12 +101,12 @@ fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult {
 	Err(reply(content, command.reply_id.as_deref()))
 }
 
-// Parse and process a message from the admin room
+/// Parse and process a message from the admin room
 async fn process(
 	context: &Command<'_>,
 	command: AdminCommand,
 	args: &[String],
-) -> ProcessorResult {
+) -> (Result, String) {
 	let (capture, logs) = capture_create(context);
 
 	let capture_scope = capture.start();
@@ -104,18 +129,7 @@ async fn process(
 	}
 	drop(logs);
 
-	match result {
-		| Ok(content) => {
-			write!(&mut output, "{0}", content.body())
-				.expect("failed to format command result to output buffer");
-			Ok(Some(reply(RoomMessageEventContent::notice_markdown(output), context.reply_id)))
-		},
-		| Err(error) => {
-			write!(&mut output, "Command failed with error:\n```\n{error:#?}\n```")
-				.expect("failed to format command result to output");
-			Err(reply(RoomMessageEventContent::notice_markdown(output), context.reply_id))
-		},
-	}
+	(result, output)
 }
 
 fn capture_create(context: &Command<'_>) -> (Arc<Capture>, Arc<Mutex<String>>) {
@@ -151,7 +165,8 @@ fn capture_create(context: &Command<'_>) -> (Arc<Capture>, Arc<Mutex<String>>) {
 	(capture, logs)
 }
 
-// Parse chat messages from the admin room into an AdminCommand object
+/// Parse chat messages from the admin room into an AdminCommand object
+#[allow(clippy::result_large_err)]
 fn parse<'a>(
 	services: &Arc<Services>,
 	input: &'a CommandInput,
@@ -219,7 +234,7 @@ fn complete_command(mut cmd: clap::Command, line: &str) -> String {
 	ret.join(" ")
 }
 
-// Parse chat messages from the admin room into an AdminCommand object
+/// Parse chat messages from the admin room into an AdminCommand object
 fn parse_line(command_line: &str) -> Vec<String> {
 	let mut argv = command_line
 		.split_whitespace()
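The reworked processor above streams command output through a shared `BufWriter` and only materializes a `String` at the end, merging any captured log lines in front of it. A minimal synchronous sketch of that buffer-then-merge flow, using only the standard library (the real code does the equivalent over `futures::io::BufWriter` behind a mutex; `finalize_output` is a hypothetical helper, not part of the codebase):

use std::io::{BufWriter, Write};

fn finalize_output(buf: BufWriter<Vec<u8>>, mut logs: String) -> String {
    // Flush the writer and take the underlying byte buffer back out.
    let bytes = buf.into_inner().expect("final flush of output stream");
    let output = String::from_utf8(bytes).expect("command output should be valid UTF-8");
    // When logs were captured, prepend them to the command's own output.
    if logs.is_empty() {
        output
    } else {
        logs.push_str(&output);
        logs
    }
}

fn main() {
    let mut buf = BufWriter::new(Vec::new());
    write!(buf, "done").unwrap();
    println!("{}", finalize_output(buf, String::from("log line\n")));
}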
@@ -1,10 +1,11 @@
 use clap::Subcommand;
 use conduwuit::Result;
 use futures::StreamExt;
-use ruma::{events::room::message::RoomMessageEventContent, RoomId, UserId};
+use ruma::{RoomId, UserId, events::room::message::RoomMessageEventContent};
 
-use crate::Command;
+use crate::{admin_command, admin_command_dispatch};
 
+#[admin_command_dispatch]
 #[derive(Debug, Subcommand)]
 /// All the getters and iterators from src/database/key_value/account_data.rs
 pub(crate) enum AccountDataCommand {
@@ -19,7 +20,7 @@ pub(crate) enum AccountDataCommand {
 	},
 
 	/// - Searches the account data for a specific kind.
-	Get {
+	AccountDataGet {
 		/// Full user ID
 		user_id: Box<UserId>,
 		/// Account data event type
@@ -29,19 +30,18 @@ pub(crate) enum AccountDataCommand {
 	},
 }
 
-/// All the getters and iterators from src/database/key_value/account_data.rs
-pub(super) async fn process(
-	subcommand: AccountDataCommand,
-	context: &Command<'_>,
+#[admin_command]
+async fn changes_since(
+	&self,
+	user_id: Box<UserId>,
+	since: u64,
+	room_id: Option<Box<RoomId>>,
 ) -> Result<RoomMessageEventContent> {
-	let services = context.services;
-
-	match subcommand {
-		| AccountDataCommand::ChangesSince { user_id, since, room_id } => {
 	let timer = tokio::time::Instant::now();
-	let results: Vec<_> = services
+	let results: Vec<_> = self
+		.services
 		.account_data
-		.changes_since(room_id.as_deref(), &user_id, since)
+		.changes_since(room_id.as_deref(), &user_id, since, None)
 		.collect()
 		.await;
 	let query_time = timer.elapsed();
@@ -49,10 +49,18 @@ pub(super) async fn process(
 	Ok(RoomMessageEventContent::notice_markdown(format!(
 		"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
 	)))
-		},
-		| AccountDataCommand::Get { user_id, kind, room_id } => {
+}
+
+#[admin_command]
+async fn account_data_get(
+	&self,
+	user_id: Box<UserId>,
+	kind: String,
+	room_id: Option<Box<RoomId>>,
+) -> Result<RoomMessageEventContent> {
 	let timer = tokio::time::Instant::now();
-	let results = services
+	let results = self
+		.services
 		.account_data
 		.get_raw(room_id.as_deref(), &user_id, &kind)
 		.await;
@@ -61,6 +69,4 @@ pub(super) async fn process(
 	Ok(RoomMessageEventContent::notice_markdown(format!(
 		"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
 	)))
-		},
-	}
 }
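The `#[admin_command_dispatch]` attribute replaces the hand-written `process` match above by generating one method call per enum variant. A rough synchronous sketch of the dispatch shape such a macro expands to (the `Context` type and method bodies here are illustrative placeholders, not conduwuit's actual generated code):

// Hand-rolled equivalent of an enum-to-method dispatcher: each variant
// forwards its fields to a method of the same (snake_case) name.
enum AccountDataCommand {
    ChangesSince { user_id: String, since: u64 },
    AccountDataGet { user_id: String, kind: String },
}

struct Context;

impl Context {
    fn changes_since(&self, user_id: String, since: u64) -> String {
        format!("changes for {user_id} since {since}")
    }
    fn account_data_get(&self, user_id: String, kind: String) -> String {
        format!("{kind} for {user_id}")
    }
    // A dispatch macro would generate exactly this kind of match.
    fn dispatch(&self, cmd: AccountDataCommand) -> String {
        match cmd {
            AccountDataCommand::ChangesSince { user_id, since } =>
                self.changes_since(user_id, since),
            AccountDataCommand::AccountDataGet { user_id, kind } =>
                self.account_data_get(user_id, kind),
        }
    }
}

fn main() {
    let out = Context.dispatch(AccountDataCommand::ChangesSince {
        user_id: "@a:example.com".into(),
        since: 0,
    });
    println!("{out}");
}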
@@ -1,6 +1,5 @@
 use clap::Subcommand;
 use conduwuit::Result;
-use ruma::events::room::message::RoomMessageEventContent;
 
 use crate::Command;
 
@@ -18,10 +17,7 @@ pub(crate) enum AppserviceCommand {
 }
 
 /// All the getters and iterators from src/database/key_value/appservice.rs
-pub(super) async fn process(
-	subcommand: AppserviceCommand,
-	context: &Command<'_>,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_>) -> Result {
 	let services = context.services;
 
 	match subcommand {
@@ -31,18 +27,15 @@ pub(super) async fn process(
 
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
-				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-			)))
+			write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")
 		},
 		| AppserviceCommand::All => {
 			let timer = tokio::time::Instant::now();
 			let results = services.appservice.all().await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
-				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-			)))
+			write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")
 		},
 	}
+	.await
 }
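Each query arm above repeats the same shape: start a timer, run the query, and render the `Debug` output inside a fenced code block. A small standard-library sketch of that pattern (the `timed_query` helper is hypothetical, not part of the codebase):

use std::time::{Duration, Instant};

// Time a closure and render its result the way the admin query commands do:
// Debug-printed inside a Markdown code block, with the elapsed time up front.
fn timed_query<T: std::fmt::Debug>(query: impl FnOnce() -> T) -> String {
    let timer = Instant::now();
    let results = query();
    let query_time: Duration = timer.elapsed();
    format!("Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")
}

fn main() {
    println!("{}", timed_query(|| vec!["@appservice:example.com"]));
}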
@@ -1,6 +1,6 @@
 use clap::Subcommand;
 use conduwuit::Result;
-use ruma::{events::room::message::RoomMessageEventContent, ServerName};
+use ruma::ServerName;
 
 use crate::Command;
 
@@ -21,10 +21,7 @@ pub(crate) enum GlobalsCommand {
 }
 
 /// All the getters and iterators from src/database/key_value/globals.rs
-pub(super) async fn process(
-	subcommand: GlobalsCommand,
-	context: &Command<'_>,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) -> Result {
 	let services = context.services;
 
 	match subcommand {
@@ -33,36 +30,29 @@ pub(super) async fn process(
 			let results = services.globals.db.database_version().await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
-				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-			)))
+			write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")
 		},
 		| GlobalsCommand::CurrentCount => {
 			let timer = tokio::time::Instant::now();
 			let results = services.globals.db.current_count();
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
-				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-			)))
+			write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")
 		},
 		| GlobalsCommand::LastCheckForUpdatesId => {
 			let timer = tokio::time::Instant::now();
 			let results = services.updates.last_check_for_updates_id().await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
-				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-			)))
+			write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")
 		},
 		| GlobalsCommand::SigningKeysFor { origin } => {
 			let timer = tokio::time::Instant::now();
 			let results = services.server_keys.verify_keys_for(&origin).await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
-				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-			)))
+			write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")
 		},
 	}
+	.await
 }
@@ -3,10 +3,13 @@ mod appservice;
 mod globals;
 mod presence;
 mod pusher;
+mod raw;
 mod resolver;
 mod room_alias;
 mod room_state_cache;
+mod room_timeline;
 mod sending;
+mod short;
 mod users;
 
 use clap::Subcommand;
@@ -14,9 +17,10 @@ use conduwuit::Result;
 
 use self::{
 	account_data::AccountDataCommand, appservice::AppserviceCommand, globals::GlobalsCommand,
-	presence::PresenceCommand, pusher::PusherCommand, resolver::ResolverCommand,
+	presence::PresenceCommand, pusher::PusherCommand, raw::RawCommand, resolver::ResolverCommand,
 	room_alias::RoomAliasCommand, room_state_cache::RoomStateCacheCommand,
-	sending::SendingCommand, users::UsersCommand,
+	room_timeline::RoomTimelineCommand, sending::SendingCommand, short::ShortCommand,
+	users::UsersCommand,
 };
 use crate::admin_command_dispatch;
 
@@ -44,6 +48,10 @@ pub(super) enum QueryCommand {
 	#[command(subcommand)]
 	RoomStateCache(RoomStateCacheCommand),
 
+	/// - rooms/timeline iterators and getters
+	#[command(subcommand)]
+	RoomTimeline(RoomTimelineCommand),
+
 	/// - globals.rs iterators and getters
 	#[command(subcommand)]
 	Globals(GlobalsCommand),
@@ -63,4 +71,12 @@ pub(super) enum QueryCommand {
 	/// - pusher service
 	#[command(subcommand)]
 	Pusher(PusherCommand),
+
+	/// - short service
+	#[command(subcommand)]
+	Short(ShortCommand),
+
+	/// - raw service
+	#[command(subcommand)]
+	Raw(RawCommand),
 }
@@ -1,7 +1,7 @@
 use clap::Subcommand;
 use conduwuit::Result;
 use futures::StreamExt;
-use ruma::{events::room::message::RoomMessageEventContent, UserId};
+use ruma::UserId;
 
 use crate::Command;
 
@@ -23,21 +23,16 @@ pub(crate) enum PresenceCommand {
 }
 
 /// All the getters and iterators in key_value/presence.rs
-pub(super) async fn process(
-	subcommand: PresenceCommand,
-	context: &Command<'_>,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn process(subcommand: PresenceCommand, context: &Command<'_>) -> Result {
 	let services = context.services;
 
 	match subcommand {
 		| PresenceCommand::GetPresence { user_id } => {
 			let timer = tokio::time::Instant::now();
-			let results = services.presence.db.get_presence(&user_id).await;
+			let results = services.presence.get_presence(&user_id).await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
-				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-			)))
+			write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")
 		},
 		| PresenceCommand::PresenceSince { since } => {
 			let timer = tokio::time::Instant::now();
@@ -49,9 +44,8 @@ pub(super) async fn process(
 			.await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
-				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-			)))
+			write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")
 		},
 	}
+	.await
 }
@@ -1,6 +1,6 @@
 use clap::Subcommand;
 use conduwuit::Result;
-use ruma::{events::room::message::RoomMessageEventContent, UserId};
+use ruma::UserId;
 
 use crate::Command;
 
@@ -13,10 +13,7 @@ pub(crate) enum PusherCommand {
 	},
 }
 
-pub(super) async fn process(
-	subcommand: PusherCommand,
-	context: &Command<'_>,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn process(subcommand: PusherCommand, context: &Command<'_>) -> Result {
 	let services = context.services;
 
 	match subcommand {
@@ -25,9 +22,8 @@ pub(super) async fn process(
 			let results = services.pusher.get_pushers(&user_id).await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
-				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-			)))
+			write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")
 		},
 	}
+	.await
 }
src/admin/query/raw.rs (new file, 504 lines)
@@ -0,0 +1,504 @@
+use std::{borrow::Cow, collections::BTreeMap, ops::Deref, sync::Arc};
+
+use clap::Subcommand;
+use conduwuit::{
+	Err, Result, apply, at, is_zero,
+	utils::{
+		stream::{IterStream, ReadyExt, TryIgnore, TryParallelExt},
+		string::EMPTY,
+	},
+};
+use conduwuit_database::Map;
+use conduwuit_service::Services;
+use futures::{FutureExt, Stream, StreamExt, TryStreamExt};
+use ruma::events::room::message::RoomMessageEventContent;
+use tokio::time::Instant;
+
+use crate::{admin_command, admin_command_dispatch};
+
+#[admin_command_dispatch]
+#[derive(Debug, Subcommand)]
+#[allow(clippy::enum_variant_names)]
+/// Query tables from database
+pub(crate) enum RawCommand {
+	/// - List database maps
+	RawMaps,
+
+	/// - Raw database query
+	RawGet {
+		/// Map name
+		map: String,
+
+		/// Key
+		key: String,
+	},
+
+	/// - Raw database delete (for string keys)
+	RawDel {
+		/// Map name
+		map: String,
+
+		/// Key
+		key: String,
+	},
+
+	/// - Raw database keys iteration
+	RawKeys {
+		/// Map name
+		map: String,
+
+		/// Key prefix
+		prefix: Option<String>,
+	},
+
+	/// - Raw database key size breakdown
+	RawKeysSizes {
+		/// Map name
+		map: Option<String>,
+
+		/// Key prefix
+		prefix: Option<String>,
+	},
+
+	/// - Raw database keys total bytes
+	RawKeysTotal {
+		/// Map name
+		map: Option<String>,
+
+		/// Key prefix
+		prefix: Option<String>,
+	},
+
+	/// - Raw database values size breakdown
+	RawValsSizes {
+		/// Map name
+		map: Option<String>,
+
+		/// Key prefix
+		prefix: Option<String>,
+	},
+
+	/// - Raw database values total bytes
+	RawValsTotal {
+		/// Map name
+		map: Option<String>,
+
+		/// Key prefix
+		prefix: Option<String>,
+	},
+
+	/// - Raw database items iteration
+	RawIter {
+		/// Map name
+		map: String,
+
+		/// Key prefix
+		prefix: Option<String>,
+	},
+
+	/// - Raw database keys iteration
+	RawKeysFrom {
+		/// Map name
+		map: String,
+
+		/// Lower-bound
+		start: String,
+
+		/// Limit
+		#[arg(short, long)]
+		limit: Option<usize>,
+	},
+
+	/// - Raw database items iteration
+	RawIterFrom {
+		/// Map name
+		map: String,
+
+		/// Lower-bound
+		start: String,
+
+		/// Limit
+		#[arg(short, long)]
+		limit: Option<usize>,
+	},
+
+	/// - Raw database record count
+	RawCount {
+		/// Map name
+		map: Option<String>,
+
+		/// Key prefix
+		prefix: Option<String>,
+	},
+
+	/// - Compact database
+	Compact {
+		#[arg(short, long, alias("column"))]
+		map: Option<Vec<String>>,
+
+		#[arg(long)]
+		start: Option<String>,
+
+		#[arg(long)]
+		stop: Option<String>,
+
+		#[arg(long)]
+		from: Option<usize>,
+
+		#[arg(long)]
+		into: Option<usize>,
+
+		/// There is one compaction job per column; then this controls how many
+		/// columns are compacted in parallel. If zero, one compaction job is
+		/// still run at a time here, but in exclusive-mode blocking any other
+		/// automatic compaction jobs until complete.
+		#[arg(long)]
+		parallelism: Option<usize>,
+
+		#[arg(long, default_value("false"))]
+		exhaustive: bool,
+	},
+}
+
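The `parallelism` doc comment above describes one job per column, up to N columns running at a time, with zero meaning a single job in exclusive mode. A hedged std-thread sketch of that scheduling rule; `compact_column` and `run_jobs` are illustrative stand-ins, not the real `paralleln_and_then` machinery:

use std::thread;

// Hypothetical stand-in for a per-column compaction job.
fn compact_column(name: &str) { println!("compacting {name}"); }

// Sketch of the rule described above: parallelism == 0 still runs one job at
// a time, just in an "exclusive" mode; otherwise up to `parallelism` columns
// are processed concurrently.
fn run_jobs(columns: &[&str], parallelism: usize) {
    let exclusive = parallelism == 0;
    let width = parallelism.max(1);
    if exclusive {
        // Placeholder for taking a lock that blocks other compaction jobs.
        println!("running in exclusive mode");
    }
    for batch in columns.chunks(width) {
        thread::scope(|s| {
            for name in batch {
                s.spawn(move || compact_column(name));
            }
        });
    }
}

fn main() { run_jobs(&["pduid_pdu", "eventid_shorteventid"], 2); }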
+#[admin_command]
+pub(super) async fn compact(
+	&self,
+	map: Option<Vec<String>>,
+	start: Option<String>,
+	stop: Option<String>,
+	from: Option<usize>,
+	into: Option<usize>,
+	parallelism: Option<usize>,
+	exhaustive: bool,
+) -> Result<RoomMessageEventContent> {
+	use conduwuit_database::compact::Options;
+
+	let default_all_maps: Option<_> = map.is_none().then(|| {
+		self.services
+			.db
+			.keys()
+			.map(Deref::deref)
+			.map(ToOwned::to_owned)
+	});
+
+	let maps: Vec<_> = map
+		.unwrap_or_default()
+		.into_iter()
+		.chain(default_all_maps.into_iter().flatten())
+		.map(|map| self.services.db.get(&map))
+		.filter_map(Result::ok)
+		.cloned()
+		.collect();
+
+	if maps.is_empty() {
+		return Err!("--map argument invalid. not found in database");
+	}
+
+	let range = (
+		start.as_ref().map(String::as_bytes).map(Into::into),
+		stop.as_ref().map(String::as_bytes).map(Into::into),
+	);
+
+	let options = Options {
+		range,
+		level: (from, into),
+		exclusive: parallelism.is_some_and(is_zero!()),
+		exhaustive,
+	};
+
+	let runtime = self.services.server.runtime().clone();
+	let parallelism = parallelism.unwrap_or(1);
+	let results = maps
+		.into_iter()
+		.try_stream()
+		.paralleln_and_then(runtime, parallelism, move |map| {
+			map.compact_blocking(options.clone())?;
+			Ok(map.name().to_owned())
+		})
+		.collect::<Vec<_>>();
+
+	let timer = Instant::now();
+	let results = results.await;
+	let query_time = timer.elapsed();
+	self.write_str(&format!("Jobs completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"))
+		.await?;
+
+	Ok(RoomMessageEventContent::text_plain(""))
+}
+
+#[admin_command]
+pub(super) async fn raw_count(
+	&self,
+	map: Option<String>,
+	prefix: Option<String>,
+) -> Result<RoomMessageEventContent> {
+	let prefix = prefix.as_deref().unwrap_or(EMPTY);
+
+	let timer = Instant::now();
+	let count = with_maps_or(map.as_deref(), self.services)
+		.then(|map| map.raw_count_prefix(&prefix))
+		.ready_fold(0_usize, usize::saturating_add)
+		.await;
+
+	let query_time = timer.elapsed();
+	self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{count:#?}\n```"))
+		.await?;
+
+	Ok(RoomMessageEventContent::text_plain(""))
+}
+
+#[admin_command]
+pub(super) async fn raw_keys(
+	&self,
+	map: String,
+	prefix: Option<String>,
+) -> Result<RoomMessageEventContent> {
+	writeln!(self, "```").boxed().await?;
+
+	let map = self.services.db.get(map.as_str())?;
+	let timer = Instant::now();
+	prefix
+		.as_deref()
+		.map_or_else(|| map.raw_keys().boxed(), |prefix| map.raw_keys_prefix(prefix).boxed())
+		.map_ok(String::from_utf8_lossy)
+		.try_for_each(|str| writeln!(self, "{str:?}"))
+		.boxed()
+		.await?;
+
+	let query_time = timer.elapsed();
+	let out = format!("\n```\n\nQuery completed in {query_time:?}");
+	self.write_str(out.as_str()).await?;
+
+	Ok(RoomMessageEventContent::text_plain(""))
+}
+
+#[admin_command]
+pub(super) async fn raw_keys_sizes(
+	&self,
+	map: Option<String>,
+	prefix: Option<String>,
+) -> Result<RoomMessageEventContent> {
+	let prefix = prefix.as_deref().unwrap_or(EMPTY);
+
+	let timer = Instant::now();
+	let result = with_maps_or(map.as_deref(), self.services)
+		.map(|map| map.raw_keys_prefix(&prefix))
+		.flatten()
+		.ignore_err()
+		.map(<[u8]>::len)
+		.ready_fold_default(|mut map: BTreeMap<_, usize>, len| {
+			let entry = map.entry(len).or_default();
+			*entry = entry.saturating_add(1);
+			map
+		})
+		.await;
+
+	let query_time = timer.elapsed();
+	let result = format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}");
+	self.write_str(result.as_str()).await?;
+
+	Ok(RoomMessageEventContent::text_plain(""))
+}
+
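`raw_keys_sizes` folds key lengths into a `BTreeMap` histogram, so the breakdown comes out sorted by size. The same fold, reduced to a runnable standard-library example:

use std::collections::BTreeMap;

// Fold a sequence of key lengths into a histogram: length -> occurrence
// count. BTreeMap keeps the breakdown ordered by key size for readable output.
fn key_size_histogram<I: IntoIterator<Item = usize>>(lens: I) -> BTreeMap<usize, usize> {
    lens.into_iter().fold(BTreeMap::new(), |mut map, len| {
        let entry = map.entry(len).or_default();
        *entry = entry.saturating_add(1);
        map
    })
}

fn main() {
    let keys = [b"alpha".len(), b"beta".len(), b"gamma".len()];
    println!("{:#?}", key_size_histogram(keys)); // {4: 1, 5: 2}
}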
+#[admin_command]
+pub(super) async fn raw_keys_total(
+	&self,
+	map: Option<String>,
+	prefix: Option<String>,
+) -> Result<RoomMessageEventContent> {
+	let prefix = prefix.as_deref().unwrap_or(EMPTY);
+
+	let timer = Instant::now();
+	let result = with_maps_or(map.as_deref(), self.services)
+		.map(|map| map.raw_keys_prefix(&prefix))
+		.flatten()
+		.ignore_err()
+		.map(<[u8]>::len)
+		.ready_fold_default(|acc: usize, len| acc.saturating_add(len))
+		.await;
+
+	let query_time = timer.elapsed();
+
+	self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}"))
+		.await?;
+
+	Ok(RoomMessageEventContent::text_plain(""))
+}
+
+#[admin_command]
+pub(super) async fn raw_vals_sizes(
+	&self,
+	map: Option<String>,
+	prefix: Option<String>,
+) -> Result<RoomMessageEventContent> {
+	let prefix = prefix.as_deref().unwrap_or(EMPTY);
+
+	let timer = Instant::now();
+	let result = with_maps_or(map.as_deref(), self.services)
+		.map(|map| map.raw_stream_prefix(&prefix))
+		.flatten()
+		.ignore_err()
+		.map(at!(1))
+		.map(<[u8]>::len)
+		.ready_fold_default(|mut map: BTreeMap<_, usize>, len| {
+			let entry = map.entry(len).or_default();
+			*entry = entry.saturating_add(1);
+			map
+		})
+		.await;
+
+	let query_time = timer.elapsed();
+	let result = format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}");
+	self.write_str(result.as_str()).await?;
+
+	Ok(RoomMessageEventContent::text_plain(""))
+}
+
+#[admin_command]
+pub(super) async fn raw_vals_total(
+	&self,
+	map: Option<String>,
+	prefix: Option<String>,
+) -> Result<RoomMessageEventContent> {
+	let prefix = prefix.as_deref().unwrap_or(EMPTY);
+
+	let timer = Instant::now();
+	let result = with_maps_or(map.as_deref(), self.services)
+		.map(|map| map.raw_stream_prefix(&prefix))
+		.flatten()
+		.ignore_err()
+		.map(at!(1))
+		.map(<[u8]>::len)
+		.ready_fold_default(|acc: usize, len| acc.saturating_add(len))
+		.await;
+
+	let query_time = timer.elapsed();
+
+	self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}"))
+		.await?;
+
+	Ok(RoomMessageEventContent::text_plain(""))
+}
+
+#[admin_command]
+pub(super) async fn raw_iter(
+	&self,
+	map: String,
+	prefix: Option<String>,
+) -> Result<RoomMessageEventContent> {
+	writeln!(self, "```").await?;
+
+	let map = self.services.db.get(&map)?;
+	let timer = Instant::now();
+	prefix
+		.as_deref()
+		.map_or_else(|| map.raw_stream().boxed(), |prefix| map.raw_stream_prefix(prefix).boxed())
+		.map_ok(apply!(2, String::from_utf8_lossy))
+		.map_ok(apply!(2, Cow::into_owned))
+		.try_for_each(|keyval| writeln!(self, "{keyval:?}"))
+		.boxed()
+		.await?;
+
+	let query_time = timer.elapsed();
+	self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}"))
+		.await?;
+
+	Ok(RoomMessageEventContent::text_plain(""))
+}
+
+#[admin_command]
+pub(super) async fn raw_keys_from(
+	&self,
+	map: String,
+	start: String,
+	limit: Option<usize>,
+) -> Result<RoomMessageEventContent> {
+	writeln!(self, "```").await?;
+
+	let map = self.services.db.get(&map)?;
+	let timer = Instant::now();
+	map.raw_keys_from(&start)
+		.map_ok(String::from_utf8_lossy)
+		.take(limit.unwrap_or(usize::MAX))
+		.try_for_each(|str| writeln!(self, "{str:?}"))
+		.boxed()
+		.await?;
+
+	let query_time = timer.elapsed();
+	self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}"))
+		.await?;
+
+	Ok(RoomMessageEventContent::text_plain(""))
+}
+
+#[admin_command]
+pub(super) async fn raw_iter_from(
+	&self,
+	map: String,
+	start: String,
+	limit: Option<usize>,
+) -> Result<RoomMessageEventContent> {
+	let map = self.services.db.get(&map)?;
+	let timer = Instant::now();
+	let result = map
+		.raw_stream_from(&start)
+		.map_ok(apply!(2, String::from_utf8_lossy))
+		.map_ok(apply!(2, Cow::into_owned))
+		.take(limit.unwrap_or(usize::MAX))
+		.try_collect::<Vec<(String, String)>>()
+		.await?;
+
+	let query_time = timer.elapsed();
+	Ok(RoomMessageEventContent::notice_markdown(format!(
+		"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
+	)))
+}
+
+#[admin_command]
+pub(super) async fn raw_del(&self, map: String, key: String) -> Result<RoomMessageEventContent> {
+	let map = self.services.db.get(&map)?;
+	let timer = Instant::now();
+	map.remove(&key);
+	let query_time = timer.elapsed();
+
+	Ok(RoomMessageEventContent::notice_markdown(format!(
+		"Operation completed in {query_time:?}"
+	)))
+}
+
+#[admin_command]
+pub(super) async fn raw_get(&self, map: String, key: String) -> Result<RoomMessageEventContent> {
+	let map = self.services.db.get(&map)?;
+	let timer = Instant::now();
+	let handle = map.get(&key).await?;
+	let query_time = timer.elapsed();
+	let result = String::from_utf8_lossy(&handle);
+
+	Ok(RoomMessageEventContent::notice_markdown(format!(
+		"Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```"
+	)))
+}
+
+#[admin_command]
+pub(super) async fn raw_maps(&self) -> Result<RoomMessageEventContent> {
+	let list: Vec<_> = self.services.db.iter().map(at!(0)).copied().collect();
+
+	Ok(RoomMessageEventContent::notice_markdown(format!("{list:#?}")))
+}
+
+fn with_maps_or<'a>(
+	map: Option<&'a str>,
+	services: &'a Services,
+) -> impl Stream<Item = &'a Arc<Map>> + Send + 'a {
+	let default_all_maps = map
+		.is_none()
+		.then(|| services.db.keys().map(Deref::deref))
+		.into_iter()
+		.flatten();
+
+	map.into_iter()
+		.chain(default_all_maps)
+		.map(|map| services.db.get(map))
+		.filter_map(Result::ok)
+		.stream()
+}
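`with_maps_or` uses the `Option`-to-iterator trick: when no map name is given, an iterator over every known name is chained in; otherwise the chained part is empty. A standalone sketch of that selection pattern (`select` is an illustrative stand-in):

// The "specific item or all items" selection pattern used by with_maps_or:
// when no name is given, fall back to iterating every known name.
fn select<'a>(wanted: Option<&'a str>, all: &'a [&'a str]) -> Vec<&'a str> {
    let default_all = wanted
        .is_none()
        .then(|| all.iter().copied())
        .into_iter()
        .flatten();

    wanted.into_iter().chain(default_all).collect()
}

fn main() {
    let maps = ["pduid_pdu", "eventid_shorteventid"];
    assert_eq!(select(Some("pduid_pdu"), &maps), vec!["pduid_pdu"]);
    assert_eq!(select(None, &maps), maps.to_vec());
    println!("ok");
}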
@@ -1,8 +1,7 @@
-use std::fmt::Write;
-
 use clap::Subcommand;
-use conduwuit::{utils::time, Result};
-use ruma::{events::room::message::RoomMessageEventContent, OwnedServerName};
+use conduwuit::{Result, utils::time};
+use futures::StreamExt;
+use ruma::{OwnedServerName, events::room::message::RoomMessageEventContent};
 
 use crate::{admin_command, admin_command_dispatch};
 
@@ -28,56 +27,48 @@ async fn destinations_cache(
 ) -> Result<RoomMessageEventContent> {
 	use service::resolver::cache::CachedDest;
 
-	let mut out = String::new();
-	writeln!(out, "| Server Name | Destination | Hostname | Expires |")?;
-	writeln!(out, "| ----------- | ----------- | -------- | ------- |")?;
-	let row = |(name, &CachedDest { ref dest, ref host, expire })| {
-		let expire = time::format(expire, "%+");
-		writeln!(out, "| {name} | {dest} | {host} | {expire} |").expect("wrote line");
-	};
+	writeln!(self, "| Server Name | Destination | Hostname | Expires |").await?;
+	writeln!(self, "| ----------- | ----------- | -------- | ------- |").await?;
 
-	let map = self
-		.services
-		.resolver
-		.cache
-		.destinations
-		.read()
-		.expect("locked");
+	let mut destinations = self.services.resolver.cache.destinations().boxed();
 
+	while let Some((name, CachedDest { dest, host, expire })) = destinations.next().await {
 		if let Some(server_name) = server_name.as_ref() {
-			map.get_key_value(server_name).map(row);
-		} else {
-			map.iter().for_each(row);
+			if name != server_name {
+				continue;
+			}
 		}
 
-	Ok(RoomMessageEventContent::notice_markdown(out))
+		let expire = time::format(expire, "%+");
+		self.write_str(&format!("| {name} | {dest} | {host} | {expire} |\n"))
+			.await?;
+	}
+
+	Ok(RoomMessageEventContent::notice_plain(""))
 }
 
 #[admin_command]
 async fn overrides_cache(&self, server_name: Option<String>) -> Result<RoomMessageEventContent> {
 	use service::resolver::cache::CachedOverride;
 
-	let mut out = String::new();
-	writeln!(out, "| Server Name | IP | Port | Expires |")?;
-	writeln!(out, "| ----------- | --- | ----:| ------- |")?;
-	let row = |(name, &CachedOverride { ref ips, port, expire })| {
-		let expire = time::format(expire, "%+");
-		writeln!(out, "| {name} | {ips:?} | {port} | {expire} |").expect("wrote line");
-	};
+	writeln!(self, "| Server Name | IP | Port | Expires | Overriding |").await?;
+	writeln!(self, "| ----------- | --- | ----:| ------- | ---------- |").await?;
 
-	let map = self
-		.services
-		.resolver
-		.cache
-		.overrides
-		.read()
-		.expect("locked");
+	let mut overrides = self.services.resolver.cache.overrides().boxed();
 
+	while let Some((name, CachedOverride { ips, port, expire, overriding })) =
+		overrides.next().await
+	{
 		if let Some(server_name) = server_name.as_ref() {
-			map.get_key_value(server_name).map(row);
-		} else {
-			map.iter().for_each(row);
+			if name != server_name {
+				continue;
+			}
 		}
 
-	Ok(RoomMessageEventContent::notice_markdown(out))
+		let expire = time::format(expire, "%+");
+		self.write_str(&format!("| {name} | {ips:?} | {port} | {expire} | {overriding:?} |\n"))
+			.await?;
+	}
+
+	Ok(RoomMessageEventContent::notice_plain(""))
 }
@@ -1,7 +1,7 @@
 use clap::Subcommand;
 use conduwuit::Result;
 use futures::StreamExt;
-use ruma::{events::room::message::RoomMessageEventContent, RoomAliasId, RoomId};
+use ruma::{RoomAliasId, RoomId};
 
 use crate::Command;
 
@@ -24,10 +24,7 @@ pub(crate) enum RoomAliasCommand {
 }
 
 /// All the getters and iterators in src/database/key_value/rooms/alias.rs
-pub(super) async fn process(
-	subcommand: RoomAliasCommand,
-	context: &Command<'_>,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn process(subcommand: RoomAliasCommand, context: &Command<'_>) -> Result {
 	let services = context.services;
 
 	match subcommand {
@@ -36,9 +33,7 @@ pub(super) async fn process(
 			let results = services.rooms.alias.resolve_local_alias(&alias).await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
-				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-			)))
+			write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")
 		},
 		| RoomAliasCommand::LocalAliasesForRoom { room_id } => {
 			let timer = tokio::time::Instant::now();
@@ -51,9 +46,7 @@ pub(super) async fn process(
 			.await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
-				"Query completed in {query_time:?}:\n\n```rs\n{aliases:#?}\n```"
-			)))
+			write!(context, "Query completed in {query_time:?}:\n\n```rs\n{aliases:#?}\n```")
 		},
 		| RoomAliasCommand::AllLocalAliases => {
 			let timer = tokio::time::Instant::now();
@@ -66,9 +59,8 @@ pub(super) async fn process(
 			.await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
-				"Query completed in {query_time:?}:\n\n```rs\n{aliases:#?}\n```"
-			)))
+			write!(context, "Query completed in {query_time:?}:\n\n```rs\n{aliases:#?}\n```")
 		},
 	}
+	.await
 }
@@ -1,7 +1,7 @@
 use clap::Subcommand;
-use conduwuit::Result;
+use conduwuit::{Error, Result};
 use futures::StreamExt;
-use ruma::{events::room::message::RoomMessageEventContent, RoomId, ServerName, UserId};
+use ruma::{RoomId, ServerName, UserId, events::room::message::RoomMessageEventContent};
 
 use crate::Command;
 
@@ -76,13 +76,10 @@ pub(crate) enum RoomStateCacheCommand {
 	},
 }
 
-pub(super) async fn process(
-	subcommand: RoomStateCacheCommand,
-	context: &Command<'_>,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command<'_>) -> Result {
 	let services = context.services;
 
-	match subcommand {
+	let c = match subcommand {
 		| RoomStateCacheCommand::ServerInRoom { server, room_id } => {
 			let timer = tokio::time::Instant::now();
 			let result = services
@@ -92,7 +89,7 @@ pub(super) async fn process(
 				.await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
+			Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
 				"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
 			)))
 		},
@@ -107,7 +104,7 @@ pub(super) async fn process(
 				.await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
+			Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
 				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
 			)))
 		},
@@ -122,7 +119,7 @@ pub(super) async fn process(
 				.await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
+			Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
 				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
 			)))
 		},
@@ -137,7 +134,7 @@ pub(super) async fn process(
 				.await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
+			Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
 				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
 			)))
 		},
@@ -152,7 +149,7 @@ pub(super) async fn process(
 				.await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
+			Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
 				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
 			)))
 		},
@@ -167,7 +164,7 @@ pub(super) async fn process(
 				.await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
+			Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
 				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
 			)))
 		},
@@ -176,7 +173,7 @@ pub(super) async fn process(
 			let results = services.rooms.state_cache.room_joined_count(&room_id).await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
+			Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
 				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
 			)))
 		},
@@ -189,7 +186,7 @@ pub(super) async fn process(
 				.await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
+			Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
 				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
 			)))
 		},
@@ -204,7 +201,7 @@ pub(super) async fn process(
 				.await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
+			Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
 				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
 			)))
 		},
@@ -219,7 +216,7 @@ pub(super) async fn process(
 				.await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
+			Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
 				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
 			)))
 		},
@@ -232,7 +229,7 @@ pub(super) async fn process(
 				.await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
+			Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
 				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
 			)))
 		},
@@ -245,7 +242,7 @@ pub(super) async fn process(
 				.await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
+			Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
 				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
 			)))
 		},
@@ -260,7 +257,7 @@ pub(super) async fn process(
 				.await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
+			Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
 				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
 			)))
 		},
@@ -274,7 +271,7 @@ pub(super) async fn process(
 				.await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
+			Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
 				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
 			)))
 		},
@@ -288,7 +285,7 @@ pub(super) async fn process(
 				.await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
+			Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
 				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
 			)))
 		},
@@ -301,9 +298,13 @@ pub(super) async fn process(
 				.await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
+			Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
 				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
 			)))
 		},
-	}
+	}?;
+
+	context.write_str(c.body()).await?;
+
+	Ok(())
 }
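The `Result::<_, Error>::Ok` turbofish above exists because every match arm now yields `Ok`, so once the match result is consumed by `?` rather than returned directly, nothing else pins the error type. A minimal sketch of the same situation:

#[derive(Debug)]
struct Error;

fn run() -> Result<(), Error> {
    // Every arm is `Ok`, so nothing constrains the error type of the match
    // expression; the turbofish pins it so `?` can be applied afterwards.
    let c = match 1 {
        | 1 => Result::<_, Error>::Ok("one"),
        | _ => Ok("many"),
    }?;

    println!("{c}");
    Ok(())
}

fn main() { run().unwrap(); }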
@@ -1,7 +1,7 @@
 use clap::Subcommand;
-use conduwuit::{utils::stream::TryTools, PduCount, Result};
+use conduwuit::{PduCount, Result, utils::stream::TryTools};
 use futures::TryStreamExt;
-use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomOrAliasId};
+use ruma::{OwnedRoomOrAliasId, events::room::message::RoomMessageEventContent};
 
 use crate::{admin_command, admin_command_dispatch};
 
@@ -1,7 +1,7 @@
 use clap::Subcommand;
 use conduwuit::Result;
 use futures::StreamExt;
-use ruma::{events::room::message::RoomMessageEventContent, ServerName, UserId};
+use ruma::{ServerName, UserId, events::room::message::RoomMessageEventContent};
 use service::sending::Destination;
 
 use crate::Command;
@@ -62,7 +62,14 @@ pub(crate) enum SendingCommand {
 }
 
 /// All the getters and iterators in key_value/sending.rs
-pub(super) async fn process(
+pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) -> Result {
+	let c = reprocess(subcommand, context).await?;
+	context.write_str(c.body()).await?;
+	Ok(())
+}
+
+/// All the getters and iterators in key_value/sending.rs
+pub(super) async fn reprocess(
 	subcommand: SendingCommand,
 	context: &Command<'_>,
 ) -> Result<RoomMessageEventContent> {
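`process` keeps the old entry-point signature while the renamed `reprocess` produces the message content, which the wrapper then writes through the command context. The wrapper pattern in miniature (synchronous, with placeholder types):

// Wrapper pattern: keep the old entry point's signature, delegate to the
// content-producing function, then emit the body through the context.
struct Context {
    out: String,
}

impl Context {
    fn write_str(&mut self, s: &str) {
        self.out.push_str(s);
    }
}

fn reprocess() -> Result<String, ()> {
    Ok("result body".to_owned())
}

fn process(context: &mut Context) -> Result<(), ()> {
    let c = reprocess()?;
    context.write_str(&c);
    Ok(())
}

fn main() {
    let mut ctx = Context { out: String::new() };
    process(&mut ctx).unwrap();
    println!("{}", ctx.out);
}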
src/admin/query/short.rs (new file, 45 lines)
@@ -0,0 +1,45 @@
+use clap::Subcommand;
+use conduwuit::Result;
+use ruma::{OwnedEventId, OwnedRoomOrAliasId, events::room::message::RoomMessageEventContent};
+
+use crate::{admin_command, admin_command_dispatch};
+
+#[admin_command_dispatch]
+#[derive(Debug, Subcommand)]
+/// Query tables from database
+pub(crate) enum ShortCommand {
+	ShortEventId {
+		event_id: OwnedEventId,
+	},
+
+	ShortRoomId {
+		room_id: OwnedRoomOrAliasId,
+	},
+}
+
+#[admin_command]
+pub(super) async fn short_event_id(
+	&self,
+	event_id: OwnedEventId,
+) -> Result<RoomMessageEventContent> {
+	let shortid = self
+		.services
+		.rooms
+		.short
+		.get_shorteventid(&event_id)
+		.await?;
+
+	Ok(RoomMessageEventContent::notice_markdown(format!("{shortid:#?}")))
+}
+
+#[admin_command]
+pub(super) async fn short_room_id(
+	&self,
+	room_id: OwnedRoomOrAliasId,
+) -> Result<RoomMessageEventContent> {
+	let room_id = self.services.rooms.alias.resolve(&room_id).await?;
+
+	let shortid = self.services.rooms.short.get_shortroomid(&room_id).await?;
+
+	Ok(RoomMessageEventContent::notice_markdown(format!("{shortid:#?}")))
+}
@@ -2,7 +2,7 @@ use clap::Subcommand;
 use conduwuit::Result;
 use futures::stream::StreamExt;
 use ruma::{
-	events::room::message::RoomMessageEventContent, OwnedDeviceId, OwnedRoomId, OwnedUserId,
+	OwnedDeviceId, OwnedRoomId, OwnedUserId, events::room::message::RoomMessageEventContent,
 };
 
 use crate::{admin_command, admin_command_dispatch};
@@ -15,6 +15,8 @@ pub(crate) enum UsersCommand {
 
 	IterUsers,
 
+	IterUsers2,
+
 	PasswordHash {
 		user_id: OwnedUserId,
 	},
@@ -89,6 +91,33 @@ pub(crate) enum UsersCommand {
 		room_id: OwnedRoomId,
 		session_id: String,
 	},
+
+	GetSharedRooms {
+		user_a: OwnedUserId,
+		user_b: OwnedUserId,
+	},
 }
 
+#[admin_command]
+async fn get_shared_rooms(
+	&self,
+	user_a: OwnedUserId,
+	user_b: OwnedUserId,
+) -> Result<RoomMessageEventContent> {
+	let timer = tokio::time::Instant::now();
+	let result: Vec<_> = self
+		.services
+		.rooms
+		.state_cache
+		.get_shared_rooms(&user_a, &user_b)
+		.map(ToOwned::to_owned)
+		.collect()
+		.await;
+	let query_time = timer.elapsed();
+
+	Ok(RoomMessageEventContent::notice_markdown(format!(
+		"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
+	)))
+}
+
 #[admin_command]
@@ -207,6 +236,23 @@ async fn iter_users(&self) -> Result<RoomMessageEventContent> {
 	)))
 }
 
+#[admin_command]
+async fn iter_users2(&self) -> Result<RoomMessageEventContent> {
+	let timer = tokio::time::Instant::now();
+	let result: Vec<_> = self.services.users.stream().collect().await;
+	let result: Vec<_> = result
+		.into_iter()
+		.map(ruma::UserId::as_bytes)
+		.map(String::from_utf8_lossy)
+		.collect();
+
+	let query_time = timer.elapsed();
+
+	Ok(RoomMessageEventContent::notice_markdown(format!(
+		"Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```"
+	)))
+}
+
 #[admin_command]
 async fn count_users(&self) -> Result<RoomMessageEventContent> {
 	let timer = tokio::time::Instant::now();
@@ -367,7 +413,7 @@ async fn get_to_device_events(
 	let result = self
 		.services
 		.users
-		.get_to_device_events(&user_id, &device_id)
+		.get_to_device_events(&user_id, &device_id, None, None)
 		.collect::<Vec<_>>()
 		.await;
 	let query_time = timer.elapsed();
@@ -4,10 +4,10 @@ use clap::Subcommand;
 use conduwuit::Result;
 use futures::StreamExt;
 use ruma::{
-	events::room::message::RoomMessageEventContent, OwnedRoomAliasId, OwnedRoomId, RoomId,
+	OwnedRoomAliasId, OwnedRoomId, RoomId, events::room::message::RoomMessageEventContent,
 };
 
-use crate::{escape_html, Command};
+use crate::{Command, escape_html};
 
 #[derive(Debug, Subcommand)]
 pub(crate) enum RoomAliasCommand {
@@ -44,7 +44,14 @@ pub(crate) enum RoomAliasCommand {
 	},
 }
 
-pub(super) async fn process(
+pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) -> Result {
+	let c = reprocess(command, context).await?;
+	context.write_str(c.body()).await?;
+
+	Ok(())
+}
+
+pub(super) async fn reprocess(
 	command: RoomAliasCommand,
 	context: &Command<'_>,
 ) -> Result<RoomMessageEventContent> {
@@ -59,13 +66,14 @@ pub(super) async fn reprocess(
 			format!("#{}:{}", room_alias_localpart, services.globals.server_name());
 		let room_alias = match OwnedRoomAliasId::parse(room_alias_str) {
 			| Ok(alias) => alias,
-			| Err(err) =>
+			| Err(err) => {
 				return Ok(RoomMessageEventContent::text_plain(format!(
 					"Failed to parse alias: {err}"
-				))),
+				)));
+			},
 		};
 		match command {
-			| RoomAliasCommand::Set { force, room_id, .. } =>
+			| RoomAliasCommand::Set { force, room_id, .. } => {
 				match (force, services.rooms.alias.resolve_local_alias(&room_alias).await) {
 					| (true, Ok(id)) => {
 						match services.rooms.alias.set_alias(
@@ -99,8 +107,9 @@ pub(super) async fn reprocess(
 					))),
 				}
-			},
+				}
+			},
-		| RoomAliasCommand::Remove { .. } =>
+		| RoomAliasCommand::Remove { .. } => {
 			match services.rooms.alias.resolve_local_alias(&room_alias).await {
 				| Ok(id) => match services
 					.rooms
 					.alias
@@ -117,14 +126,16 @@ pub(super) async fn reprocess(
 				},
 				| Err(_) =>
 					Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")),
 			}
+		},
-		| RoomAliasCommand::Which { .. } =>
+		| RoomAliasCommand::Which { .. } => {
 			match services.rooms.alias.resolve_local_alias(&room_alias).await {
 				| Ok(id) => Ok(RoomMessageEventContent::text_plain(format!(
 					"Alias resolves to {id}"
 				))),
 				| Err(_) =>
 					Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")),
 			}
+		},
 		| RoomAliasCommand::List { .. } => unreachable!(),
 	}
@@ -1,8 +1,8 @@
 use conduwuit::Result;
 use futures::StreamExt;
-use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId};
+use ruma::{OwnedRoomId, events::room::message::RoomMessageEventContent};
 
-use crate::{admin_command, get_room_info, PAGE_SIZE};
+use crate::{PAGE_SIZE, admin_command, get_room_info};
 
 #[admin_command]
 pub(super) async fn list_rooms(
@@ -42,7 +42,7 @@ pub(super) async fn list_rooms(
 
 	if rooms.is_empty() {
 		return Ok(RoomMessageEventContent::text_plain("No more rooms."));
-	};
+	}
 
 	let output_plain = format!(
 		"Rooms ({}):\n```\n{}\n```",
@@ -1,9 +1,9 @@
 use clap::Subcommand;
 use conduwuit::Result;
 use futures::StreamExt;
-use ruma::{events::room::message::RoomMessageEventContent, RoomId};
+use ruma::{RoomId, events::room::message::RoomMessageEventContent};
 
-use crate::{get_room_info, Command, PAGE_SIZE};
+use crate::{Command, PAGE_SIZE, get_room_info};
 
 #[derive(Debug, Subcommand)]
 pub(crate) enum RoomDirectoryCommand {
@@ -25,7 +25,13 @@ pub(crate) enum RoomDirectoryCommand {
 	},
 }
 
-pub(super) async fn process(
+pub(super) async fn process(command: RoomDirectoryCommand, context: &Command<'_>) -> Result {
+	let c = reprocess(command, context).await?;
+	context.write_str(c.body()).await?;
+	Ok(())
+}
+
+pub(super) async fn reprocess(
 	command: RoomDirectoryCommand,
 	context: &Command<'_>,
 ) -> Result<RoomMessageEventContent> {
@@ -61,7 +67,7 @@ pub(super) async fn reprocess(
 
 	if rooms.is_empty() {
 		return Ok(RoomMessageEventContent::text_plain("No more rooms."));
-	};
+	}
 
 	let output = format!(
 		"Rooms (page {page}):\n```\n{}\n```",
@@ -1,7 +1,7 @@
 use clap::Subcommand;
-use conduwuit::{utils::ReadyExt, Result};
+use conduwuit::{Result, utils::ReadyExt};
 use futures::StreamExt;
-use ruma::{events::room::message::RoomMessageEventContent, RoomId};
+use ruma::{RoomId, events::room::message::RoomMessageEventContent};
 
 use crate::{admin_command, admin_command_dispatch};
 
@@ -1,14 +1,14 @@
 use api::client::leave_room;
 use clap::Subcommand;
 use conduwuit::{
-	debug, error, info,
+	Result, debug,
 	utils::{IterStream, ReadyExt},
-	warn, Result,
+	warn,
 };
 use futures::StreamExt;
 use ruma::{
-	events::room::message::RoomMessageEventContent, OwnedRoomId, RoomAliasId, RoomId,
-	RoomOrAliasId,
+	OwnedRoomId, RoomAliasId, RoomId, RoomOrAliasId,
+	events::room::message::RoomMessageEventContent,
 };
 
 use crate::{admin_command, admin_command_dispatch, get_room_info};
@@ -17,51 +17,23 @@ use crate::{admin_command, admin_command_dispatch, get_room_info};
 #[derive(Debug, Subcommand)]
 pub(crate) enum RoomModerationCommand {
 	/// - Bans a room from local users joining and evicts all our local users
 	///   (including server
 	/// admins)
 	/// from the room. Also blocks any invites (local and remote) for the
-	/// banned room.
-	///
-	/// Server admins (users in the conduwuit admin room) will not be evicted
-	/// and server admins can still join the room. To evict admins too, use
-	/// --force (also ignores errors) To disable incoming federation of the
-	/// room, use --disable-federation
+	/// banned room, and disables federation entirely with it.
 	BanRoom {
-		#[arg(short, long)]
-		/// Evicts admins out of the room and ignores any potential errors when
-		/// making our local users leave the room
-		force: bool,
-
-		#[arg(long)]
-		/// Disables incoming federation of the room after banning and evicting
-		/// users
-		disable_federation: bool,
-
 		/// The room in the format of `!roomid:example.com` or a room alias in
 		/// the format of `#roomalias:example.com`
 		room: Box<RoomOrAliasId>,
 	},
 
 	/// - Bans a list of rooms (room IDs and room aliases) from a newline
-	/// delimited codeblock similar to `user deactivate-all`
-	BanListOfRooms {
-		#[arg(short, long)]
-		/// Evicts admins out of the room and ignores any potential errors when
-		/// making our local users leave the room
-		force: bool,
-
-		#[arg(long)]
-		/// Disables incoming federation of the room after banning and evicting
-		/// users
-		disable_federation: bool,
-	},
+	/// delimited codeblock similar to `user deactivate-all`. Applies the same
+	/// steps as ban-room
+	BanListOfRooms,
 
 	/// - Unbans a room to allow local users to join again
-	///
-	/// To re-enable incoming federation of the room, use --enable-federation
 	UnbanRoom {
-		#[arg(long)]
-		/// Enables incoming federation of the room after unbanning
-		enable_federation: bool,
-
 		/// The room in the format of `!roomid:example.com` or a room alias in
 		/// the format of `#roomalias:example.com`
 		room: Box<RoomOrAliasId>,
@@ -77,12 +49,7 @@ pub(crate) enum RoomModerationCommand {
 }
 
 #[admin_command]
-async fn ban_room(
-	&self,
-	force: bool,
-	disable_federation: bool,
-	room: Box<RoomOrAliasId>,
-) -> Result<RoomMessageEventContent> {
+async fn ban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventContent> {
 	debug!("Got room alias or ID: {}", room);
 
 	let admin_room_alias = &self.services.globals.admin_alias;
@@ -96,12 +63,13 @@ async fn ban_room(
 	let room_id = if room.is_room_id() {
 		let room_id = match RoomId::parse(&room) {
 			| Ok(room_id) => room_id,
-			| Err(e) =>
+			| Err(e) => {
 				return Ok(RoomMessageEventContent::text_plain(format!(
 					"Failed to parse room ID {room}. Please note that this requires a full room \
 					 ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
 					 (`#roomalias:example.com`): {e}"
-				))),
+				)));
+			},
 		};
 
 		debug!("Room specified is a room ID, banning room ID");
@@ -111,12 +79,13 @@ async fn ban_room(
 	} else if room.is_room_alias_id() {
 		let room_alias = match RoomAliasId::parse(&room) {
 			| Ok(room_alias) => room_alias,
-			| Err(e) =>
+			| Err(e) => {
 				return Ok(RoomMessageEventContent::text_plain(format!(
 					"Failed to parse room ID {room}. Please note that this requires a full room \
 					 ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
 					 (`#roomalias:example.com`): {e}"
-				))),
+				)));
+			},
 		};
 
 		debug!(
@@ -124,18 +93,18 @@ async fn ban_room(
 			 locally, if not using get_alias_helper to fetch room ID remotely"
 		);
 
-		let room_id = if let Ok(room_id) = self
+		let room_id = match self
 			.services
 			.rooms
 			.alias
 			.resolve_local_alias(room_alias)
 			.await
 		{
-			room_id
-		} else {
+			| Ok(room_id) => room_id,
+			| _ => {
 				debug!(
-					"We don't have this room alias to a room ID locally, attempting to fetch room \
-					 ID over federation"
+					"We don't have this room alias to a room ID locally, attempting to fetch \
+					 room ID over federation"
 				);
 
 				match self
@@ -159,6 +128,7 @@ async fn ban_room(
 					)));
 				},
 			}
+			},
 		};
 
 		self.services.rooms.metadata.ban_room(&room_id, true);
@@ -172,98 +142,56 @@ async fn ban_room(
 		));
 	};
 
-	debug!("Making all users leave the room {}", &room);
-	if force {
-		let mut users = self
-			.services
-			.rooms
-			.state_cache
-			.room_members(&room_id)
-			.ready_filter(|user| self.services.globals.user_is_local(user))
-			.boxed();
+	debug!("Making all users leave the room {room_id} and forgetting it");
+	let mut users = self
+		.services
+		.rooms
+		.state_cache
+		.room_members(&room_id)
+		.map(ToOwned::to_owned)
+		.ready_filter(|user| self.services.globals.user_is_local(user))
+		.boxed();
 
-		while let Some(local_user) = users.next().await {
-			debug!(
-				"Attempting leave for user {local_user} in room {room_id} (forced, ignoring all \
-				 errors, evicting admins too)",
-			);
+	while let Some(ref user_id) = users.next().await {
+		debug!(
+			"Attempting leave for user {user_id} in room {room_id} (ignoring all errors, \
+			 evicting admins too)",
+		);
 
-			if let Err(e) = leave_room(self.services, local_user, &room_id, None).await {
-				warn!(%e, "Failed to leave room");
-			}
-		}
-	} else {
-		let mut users = self
-			.services
-			.rooms
-			.state_cache
-			.room_members(&room_id)
-			.ready_filter(|user| self.services.globals.user_is_local(user))
-			.boxed();
-
-		while let Some(local_user) = users.next().await {
-			if self.services.users.is_admin(local_user).await {
-				continue;
-			}
-
-			debug!("Attempting leave for user {} in room {}", &local_user, &room_id);
-			if let Err(e) = leave_room(self.services, local_user, &room_id, None).await {
-				error!(
-					"Error attempting to make local user {} leave room {} during room banning: \
-					 {}",
-					&local_user, &room_id, e
-				);
-				return Ok(RoomMessageEventContent::text_plain(format!(
-					"Error attempting to make local user {} leave room {} during room banning \
-					 (room is still banned but not removing any more users): {}\nIf you would \
-					 like to ignore errors, use --force",
-					&local_user, &room_id, e
-				)));
-			}
+		if let Err(e) = leave_room(self.services, user_id, &room_id, None).await {
+			warn!("Failed to leave room: {e}");
 		}
+
+		self.services.rooms.state_cache.forget(&room_id, user_id);
 	}
 
 	// remove any local aliases, ignore errors
-	for local_alias in &self
-		.services
+	self.services
 		.rooms
 		.alias
 		.local_aliases_for_room(&room_id)
 		.map(ToOwned::to_owned)
-		.collect::<Vec<_>>()
-		.await
-	{
-		_ = self
-			.services
+		.for_each(|local_alias| async move {
+			self.services
 				.rooms
 				.alias
-				.remove_alias(local_alias, &self.services.globals.server_user)
+				.remove_alias(&local_alias, &self.services.globals.server_user)
 				.await
 				.ok();
-	}
+		})
+		.await;
 
-	// unpublish from room directory, ignore errors
+	// unpublish from room directory
 	self.services.rooms.directory.set_not_public(&room_id);
 
-	if disable_federation {
 		self.services.rooms.metadata.disable_room(&room_id, true);
-		return Ok(RoomMessageEventContent::text_plain(
-			"Room banned, removed all our local users, and disabled incoming federation with \
-			 room.",
-		));
-	}
 
 	Ok(RoomMessageEventContent::text_plain(
-		"Room banned and removed all our local users, use `!admin federation disable-room` to \
-		 stop receiving new inbound federation events as well if needed.",
+		"Room banned, removed all our local users, and disabled incoming federation with room.",
 	))
 }
 
 #[admin_command]
-async fn ban_list_of_rooms(
-	&self,
-	force: bool,
-	disable_federation: bool,
-) -> Result<RoomMessageEventContent> {
+async fn ban_list_of_rooms(&self) -> Result<RoomMessageEventContent> {
 	if self.body.len() < 2
 		|| !self.body[0].trim().starts_with("```")
 		|| self.body.last().unwrap_or(&"").trim() != "```"
@@ -290,7 +218,7 @@ async fn ban_list_of_rooms(
 	if let Ok(admin_room_id) = self.services.admin.get_admin_room().await {
 		if room.to_owned().eq(&admin_room_id) || room.to_owned().eq(admin_room_alias)
 		{
-			info!("User specified admin room in bulk ban list, ignoring");
+			warn!("User specified admin room in bulk ban list, ignoring");
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
@ -299,19 +227,12 @@ async fn ban_list_of_rooms(
|
|||
let room_id = match RoomId::parse(room_alias_or_id) {
|
||||
| Ok(room_id) => room_id,
|
||||
| Err(e) => {
|
||||
if force {
|
||||
// ignore rooms we failed to parse if we're force banning
|
||||
// ignore rooms we failed to parse
|
||||
warn!(
|
||||
"Error parsing room \"{room}\" during bulk room banning, \
|
||||
ignoring error and logging here: {e}"
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||
"{room} is not a valid room ID or room alias, please fix the \
|
||||
list and try again: {e}"
|
||||
)));
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -321,15 +242,15 @@ async fn ban_list_of_rooms(
|
|||
if room_alias_or_id.is_room_alias_id() {
|
||||
match RoomAliasId::parse(room_alias_or_id) {
|
||||
| Ok(room_alias) => {
|
||||
let room_id = if let Ok(room_id) = self
|
||||
let room_id = match self
|
||||
.services
|
||||
.rooms
|
||||
.alias
|
||||
.resolve_local_alias(room_alias)
|
||||
.await
|
||||
{
|
||||
room_id
|
||||
} else {
|
||||
| Ok(room_id) => room_id,
|
||||
| _ => {
|
||||
debug!(
|
||||
"We don't have this room alias to a room ID locally, \
|
||||
attempting to fetch room ID over federation"
|
||||
|
@ -346,62 +267,40 @@ async fn ban_list_of_rooms(
|
|||
debug!(
|
||||
?room_id,
|
||||
?servers,
|
||||
"Got federation response fetching room ID for {room}",
|
||||
"Got federation response fetching room ID for \
|
||||
{room}",
|
||||
);
|
||||
room_id
|
||||
},
|
||||
| Err(e) => {
|
||||
// don't fail if force blocking
|
||||
if force {
|
||||
warn!(
|
||||
"Failed to resolve room alias {room} to a room \
|
||||
ID: {e}"
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||
"Failed to resolve room alias {room} to a room ID: \
|
||||
{e}"
|
||||
)));
|
||||
},
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
room_ids.push(room_id);
|
||||
},
|
||||
| Err(e) => {
|
||||
if force {
|
||||
// ignore rooms we failed to parse if we're force deleting
|
||||
error!(
|
||||
warn!(
|
||||
"Error parsing room \"{room}\" during bulk room banning, \
|
||||
ignoring error and logging here: {e}"
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||
"{room} is not a valid room ID or room alias, please fix the \
|
||||
list and try again: {e}"
|
||||
)));
|
||||
},
|
||||
}
|
||||
}
|
||||
},
|
||||
| Err(e) => {
|
||||
if force {
|
||||
// ignore rooms we failed to parse if we're force deleting
|
||||
error!(
|
||||
"Error parsing room \"{room}\" during bulk room banning, ignoring error \
|
||||
and logging here: {e}"
|
||||
warn!(
|
||||
"Error parsing room \"{room}\" during bulk room banning, ignoring error and \
|
||||
logging here: {e}"
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||
"{room} is not a valid room ID or room alias, please fix the list and try \
|
||||
again: {e}"
|
||||
)));
|
||||
},
|
||||
}
|
||||
}
|
||||
|
@ -412,56 +311,27 @@ async fn ban_list_of_rooms(
|
|||
debug!("Banned {room_id} successfully");
|
||||
room_ban_count = room_ban_count.saturating_add(1);
|
||||
|
||||
debug!("Making all users leave the room {}", &room_id);
|
||||
if force {
|
||||
debug!("Making all users leave the room {room_id} and forgetting it");
|
||||
let mut users = self
|
||||
.services
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members(&room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.ready_filter(|user| self.services.globals.user_is_local(user))
|
||||
.boxed();
|
||||
|
||||
while let Some(local_user) = users.next().await {
|
||||
while let Some(ref user_id) = users.next().await {
|
||||
debug!(
|
||||
"Attempting leave for user {local_user} in room {room_id} (forced, ignoring \
|
||||
all errors, evicting admins too)",
|
||||
"Attempting leave for user {user_id} in room {room_id} (ignoring all errors, \
|
||||
evicting admins too)",
|
||||
);
|
||||
|
||||
if let Err(e) = leave_room(self.services, local_user, &room_id, None).await {
|
||||
warn!(%e, "Failed to leave room");
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let mut users = self
|
||||
.services
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members(&room_id)
|
||||
.ready_filter(|user| self.services.globals.user_is_local(user))
|
||||
.boxed();
|
||||
|
||||
while let Some(local_user) = users.next().await {
|
||||
if self.services.users.is_admin(local_user).await {
|
||||
continue;
|
||||
if let Err(e) = leave_room(self.services, user_id, &room_id, None).await {
|
||||
warn!("Failed to leave room: {e}");
|
||||
}
|
||||
|
||||
debug!("Attempting leave for user {local_user} in room {room_id}");
|
||||
if let Err(e) = leave_room(self.services, local_user, &room_id, None).await {
|
||||
error!(
|
||||
"Error attempting to make local user {local_user} leave room {room_id} \
|
||||
during bulk room banning: {e}",
|
||||
);
|
||||
|
||||
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||
"Error attempting to make local user {} leave room {} during room \
|
||||
banning (room is still banned but not removing any more users and not \
|
||||
banning any more rooms): {}\nIf you would like to ignore errors, use \
|
||||
--force",
|
||||
&local_user, &room_id, e
|
||||
)));
|
||||
}
|
||||
}
|
||||
self.services.rooms.state_cache.forget(&room_id, user_id);
|
||||
}
|
||||
|
||||
// remove any local aliases, ignore errors
|
||||
|
@ -483,38 +353,27 @@ async fn ban_list_of_rooms(
|
|||
// unpublish from room directory, ignore errors
|
||||
self.services.rooms.directory.set_not_public(&room_id);
|
||||
|
||||
if disable_federation {
|
||||
self.services.rooms.metadata.disable_room(&room_id, true);
|
||||
}
|
||||
}
|
||||
|
||||
if disable_federation {
|
||||
Ok(RoomMessageEventContent::text_plain(format!(
|
||||
"Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, \
|
||||
and disabled incoming federation with the room."
|
||||
"Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, and \
|
||||
disabled incoming federation with the room."
|
||||
)))
|
||||
} else {
|
||||
Ok(RoomMessageEventContent::text_plain(format!(
|
||||
"Finished bulk room ban, banned {room_ban_count} total rooms and evicted all users."
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn unban_room(
|
||||
&self,
|
||||
enable_federation: bool,
|
||||
room: Box<RoomOrAliasId>,
|
||||
) -> Result<RoomMessageEventContent> {
|
||||
async fn unban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventContent> {
|
||||
let room_id = if room.is_room_id() {
|
||||
let room_id = match RoomId::parse(&room) {
|
||||
| Ok(room_id) => room_id,
|
||||
| Err(e) =>
|
||||
| Err(e) => {
|
||||
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||
"Failed to parse room ID {room}. Please note that this requires a full room \
|
||||
ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
|
||||
(`#roomalias:example.com`): {e}"
|
||||
))),
|
||||
)));
|
||||
},
|
||||
};
|
||||
|
||||
debug!("Room specified is a room ID, unbanning room ID");
|
||||
|
@ -524,12 +383,13 @@ async fn unban_room(
|
|||
} else if room.is_room_alias_id() {
|
||||
let room_alias = match RoomAliasId::parse(&room) {
|
||||
| Ok(room_alias) => room_alias,
|
||||
| Err(e) =>
|
||||
| Err(e) => {
|
||||
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||
"Failed to parse room ID {room}. Please note that this requires a full room \
|
||||
ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
|
||||
(`#roomalias:example.com`): {e}"
|
||||
))),
|
||||
)));
|
||||
},
|
||||
};
|
||||
|
||||
debug!(
|
||||
|
@ -537,18 +397,18 @@ async fn unban_room(
|
|||
locally, if not using get_alias_helper to fetch room ID remotely"
|
||||
);
|
||||
|
||||
let room_id = if let Ok(room_id) = self
|
||||
let room_id = match self
|
||||
.services
|
||||
.rooms
|
||||
.alias
|
||||
.resolve_local_alias(room_alias)
|
||||
.await
|
||||
{
|
||||
room_id
|
||||
} else {
|
||||
| Ok(room_id) => room_id,
|
||||
| _ => {
|
||||
debug!(
|
||||
"We don't have this room alias to a room ID locally, attempting to fetch room \
|
||||
ID over federation"
|
||||
"We don't have this room alias to a room ID locally, attempting to fetch \
|
||||
room ID over federation"
|
||||
);
|
||||
|
||||
match self
|
||||
|
@ -572,6 +432,7 @@ async fn unban_room(
|
|||
)));
|
||||
},
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
self.services.rooms.metadata.ban_room(&room_id, false);
|
||||
|
@ -585,15 +446,8 @@ async fn unban_room(
|
|||
));
|
||||
};
|
||||
|
||||
if enable_federation {
|
||||
self.services.rooms.metadata.disable_room(&room_id, false);
|
||||
return Ok(RoomMessageEventContent::text_plain("Room unbanned."));
|
||||
}
|
||||
|
||||
Ok(RoomMessageEventContent::text_plain(
|
||||
"Room unbanned, you may need to re-enable federation with the room using enable-room if \
|
||||
this is a remote room to make it fully functional.",
|
||||
))
|
||||
Ok(RoomMessageEventContent::text_plain("Room unbanned and federation re-enabled."))
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
|
|
|
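The hunks above repeat one refactor: alias resolution that used `if let Ok(room_id) = … { … } else { … }` now uses a `match` with explicit `Ok`/`_` arms, so the federation fallback reads as an ordinary arm rather than a trailing `else`. A minimal, self-contained sketch of the pattern; `resolve_local`, `resolve_remote`, and the plain `String` ids are hypothetical stand-ins, not the real conduwuit service API:

```rust
// Sketch of the `if let ... else` -> `match` refactor used throughout the
// moderation commands; the helper functions are illustrative stubs.
fn resolve(alias: &str) -> Result<String, String> {
    match resolve_local(alias) {
        // Found locally: use it directly.
        Ok(room_id) => Ok(room_id),
        // Any failure: fall back to a remote (federation) lookup.
        _ => resolve_remote(alias),
    }
}

fn resolve_local(_alias: &str) -> Result<String, String> {
    Err("not cached".to_owned())
}

fn resolve_remote(alias: &str) -> Result<String, String> {
    Ok(format!("!resolved-for-{alias}:example.com"))
}

fn main() {
    assert!(resolve("#room:example.com").is_ok());
}
```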
@@ -1,6 +1,6 @@
use std::{fmt::Write, sync::Arc};
use std::{fmt::Write, path::PathBuf, sync::Arc};

use conduwuit::{info, utils::time, warn, Err, Result};
use conduwuit::{Err, Result, info, utils::time, warn};
use ruma::events::room::message::RoomMessageEventContent;

use crate::admin_command;

@@ -22,11 +22,22 @@ pub(super) async fn uptime(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn show_config(&self) -> Result<RoomMessageEventContent> {
// Construct and send the response
Ok(RoomMessageEventContent::text_markdown(format!(
"```\n{}\n```",
self.services.globals.config
"{}",
*self.services.server.config
)))
}

#[admin_command]
pub(super) async fn reload_config(
&self,
path: Option<PathBuf>,
) -> Result<RoomMessageEventContent> {
let path = path.as_deref().into_iter();
self.services.config.reload(path)?;

Ok(RoomMessageEventContent::text_plain("Successfully reconfigured."))
}

#[admin_command]
pub(super) async fn list_features(
&self,

@@ -81,7 +92,7 @@ pub(super) async fn clear_caches(&self) -> Result<RoomMessageEventContent> {

#[admin_command]
pub(super) async fn list_backups(&self) -> Result<RoomMessageEventContent> {
let result = self.services.globals.db.backup_list()?;
let result = self.services.db.db.backup_list()?;

if result.is_empty() {
Ok(RoomMessageEventContent::text_plain("No backups found."))

@@ -92,31 +103,24 @@ pub(super) async fn list_backups(&self) -> Result<RoomMessageEventContent> {

#[admin_command]
pub(super) async fn backup_database(&self) -> Result<RoomMessageEventContent> {
let globals = Arc::clone(&self.services.globals);
let db = Arc::clone(&self.services.db);
let mut result = self
.services
.server
.runtime()
.spawn_blocking(move || match globals.db.backup() {
.spawn_blocking(move || match db.db.backup() {
| Ok(()) => String::new(),
| Err(e) => e.to_string(),
})
.await?;

if result.is_empty() {
result = self.services.globals.db.backup_list()?;
result = self.services.db.db.backup_list()?;
}

Ok(RoomMessageEventContent::notice_markdown(result))
}

#[admin_command]
pub(super) async fn list_database_files(&self) -> Result<RoomMessageEventContent> {
let result = self.services.globals.db.file_list()?;

Ok(RoomMessageEventContent::notice_markdown(result))
}

#[admin_command]
pub(super) async fn admin_notice(&self, message: Vec<String>) -> Result<RoomMessageEventContent> {
let message = message.join(" ");

@@ -1,5 +1,7 @@
mod commands;

use std::path::PathBuf;

use clap::Subcommand;
use conduwuit::Result;

@@ -14,6 +16,11 @@ pub(super) enum ServerCommand {
/// - Show configuration values
ShowConfig,

/// - Reload configuration values
ReloadConfig {
path: Option<PathBuf>,
},

/// - List the features built into the server
ListFeatures {
#[arg(short, long)]

@@ -39,9 +46,6 @@ pub(super) enum ServerCommand {
/// - List database backups
ListBackups,

/// - List database files
ListDatabaseFiles,

/// - Send a message to the admin room.
AdminNotice {
message: Vec<String>,

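The new `reload_config` command above turns its optional path into an iterator before calling `services.config.reload(path)`. A small sketch of that `Option` → iterator idiom, standard library only; `paths_to_reload` is an illustrative name, not conduwuit code:

```rust
use std::path::{Path, PathBuf};

// `as_deref().into_iter()` yields zero or one `&Path`, so the same reload
// call can handle both "no path given" and "reload this specific file".
fn paths_to_reload(path: Option<PathBuf>) -> Vec<PathBuf> {
    path.as_deref().into_iter().map(Path::to_path_buf).collect()
}

fn main() {
    assert!(paths_to_reload(None).is_empty());
    assert_eq!(paths_to_reload(Some(PathBuf::from("/etc/conduwuit.toml"))).len(), 1);
}
```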
@@ -2,23 +2,24 @@ use std::{collections::BTreeMap, fmt::Write as _};

use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room};
use conduwuit::{
debug_warn, error, info, is_equal_to,
Result, debug, debug_warn, error, info, is_equal_to,
matrix::pdu::PduBuilder,
utils::{self, ReadyExt},
warn, PduBuilder, Result,
warn,
};
use conduwuit_api::client::{leave_all_rooms, update_avatar_url, update_displayname};
use futures::StreamExt;
use ruma::{
EventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId, UserId,
events::{
RoomAccountDataEventType, StateEventType,
room::{
message::RoomMessageEventContent,
power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
redaction::RoomRedactionEventContent,
},
tag::{TagEvent, TagEventContent, TagInfo},
RoomAccountDataEventType, StateEventType,
},
EventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId, UserId,
};

use crate::{

@@ -31,19 +32,21 @@ const BULK_JOIN_REASON: &str = "Bulk force joining this room as initiated by the

#[admin_command]
pub(super) async fn list_users(&self) -> Result<RoomMessageEventContent> {
let users = self
let users: Vec<_> = self
.services
.users
.list_local_users()
.map(ToString::to_string)
.collect::<Vec<_>>()
.collect()
.await;

let mut plain_msg = format!("Found {} local user account(s):\n```\n", users.len());
plain_msg += users.join("\n").as_str();
plain_msg += "\n```";

Ok(RoomMessageEventContent::notice_markdown(plain_msg))
self.write_str(plain_msg.as_str()).await?;

Ok(RoomMessageEventContent::text_plain(""))
}

#[admin_command]

@@ -55,16 +58,16 @@ pub(super) async fn create_user(
// Validate user id
let user_id = parse_local_user_id(self.services, &username)?;

if self.services.users.exists(&user_id).await {
if let Err(e) = user_id.validate_strict() {
if self.services.config.emergency_password.is_none() {
return Ok(RoomMessageEventContent::text_plain(format!(
"Userid {user_id} already exists"
"Username {user_id} contains disallowed characters or spaces: {e}"
)));
}
}

if user_id.is_historical() {
return Ok(RoomMessageEventContent::text_plain(format!(
"User ID {user_id} does not conform to new Matrix identifier spec"
)));
if self.services.users.exists(&user_id).await {
return Ok(RoomMessageEventContent::text_plain(format!("User {user_id} already exists")));
}

let password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH));

@@ -81,12 +84,12 @@ pub(super) async fn create_user(
// content is set to the user's display name with a space before it
if !self
.services
.globals
.server
.config
.new_user_displayname_suffix
.is_empty()
{
write!(displayname, " {}", self.services.globals.config.new_user_displayname_suffix)
write!(displayname, " {}", self.services.server.config.new_user_displayname_suffix)
.expect("should be able to write to string buffer");
}

@@ -112,8 +115,8 @@ pub(super) async fn create_user(
)
.await?;

if !self.services.globals.config.auto_join_rooms.is_empty() {
for room in &self.services.globals.config.auto_join_rooms {
if !self.services.server.config.auto_join_rooms.is_empty() {
for room in &self.services.server.config.auto_join_rooms {
let Ok(room_id) = self.services.rooms.alias.resolve(room).await else {
error!(%user_id, "Failed to resolve room alias to room ID when attempting to auto join {room}, skipping");
continue;

@@ -164,7 +167,7 @@ pub(super) async fn create_user(
"Failed to automatically join room {room} for user {user_id}: {e}"
);
},
};
}
}
}
}

@@ -183,12 +186,12 @@ pub(super) async fn create_user(
.is_ok_and(is_equal_to!(1))
{
self.services.admin.make_user_admin(&user_id).await?;

warn!("Granting {user_id} admin privileges as the first user");
}
} else {
debug!("create_user admin command called without an admin room being available");
}

// Inhibit login does not work for guests
Ok(RoomMessageEventContent::text_plain(format!(
"Created user with user_id: {user_id} and password: `{password}`"
)))

@@ -548,7 +551,7 @@ pub(super) async fn force_join_list_of_local_users(
debug_warn!("Failed force joining {user_id} to {room_id} during bulk join: {e}");
failed_joins = failed_joins.saturating_add(1);
},
};
}
}

Ok(RoomMessageEventContent::notice_markdown(format!(

@@ -644,7 +647,7 @@ pub(super) async fn force_join_all_local_users(
debug_warn!("Failed force joining {user_id} to {room_id} during bulk join: {e}");
failed_joins = failed_joins.saturating_add(1);
},
};
}
}

Ok(RoomMessageEventContent::notice_markdown(format!(

@@ -692,6 +695,19 @@ pub(super) async fn force_leave_room(
self.services.globals.user_is_local(&user_id),
"Parsed user_id must be a local user"
);

if !self
.services
.rooms
.state_cache
.is_joined(&user_id, &room_id)
.await
{
return Ok(RoomMessageEventContent::notice_markdown(format!(
"{user_id} is not joined in the room"
)));
}

leave_room(self.services, &user_id, &room_id, None).await?;

Ok(RoomMessageEventContent::notice_markdown(format!(

@@ -912,10 +928,10 @@ pub(super) async fn redact_event(
self.services.globals.server_name()
);

let redaction_event_id = {
let state_lock = self.services.rooms.state.mutex.lock(&room_id).await;

let redaction_event_id = self
.services
self.services
.rooms
.timeline
.build_and_append_pdu(

@@ -930,11 +946,12 @@ pub(super) async fn redact_event(
&room_id,
&state_lock,
)
.await?;
.await?
};

drop(state_lock);
let out = format!("Successfully redacted event. Redaction event ID: {redaction_event_id}");

Ok(RoomMessageEventContent::text_plain(format!(
"Successfully redacted event. Redaction event ID: {redaction_event_id}"
)))
self.write_str(out.as_str()).await?;

Ok(RoomMessageEventContent::text_plain(""))
}

@@ -1,4 +1,4 @@
use conduwuit_core::{err, Err, Result};
use conduwuit_core::{Err, Result, err};
use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId};
use service::Services;

@@ -35,6 +35,7 @@ brotli_compression = [
]

[dependencies]
async-trait.workspace = true
axum-client-ip.workspace = true
axum-extra.workspace = true
axum.workspace = true

@@ -50,7 +51,7 @@ http.workspace = true
http-body-util.workspace = true
hyper.workspace = true
ipaddress.workspace = true
jsonwebtoken.workspace = true
itertools.workspace = true
log.workspace = true
rand.workspace = true
reqwest.workspace = true

@@ -3,34 +3,38 @@ use std::fmt::Write;
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{
debug_info, error, info, is_equal_to, utils, utils::ReadyExt, warn, Error, PduBuilder, Result,
Err, Error, Result, debug_info, err, error, info, is_equal_to,
matrix::pdu::PduBuilder,
utils,
utils::{ReadyExt, stream::BroadbandExt},
warn,
};
use conduwuit_service::Services;
use futures::{FutureExt, StreamExt};
use register::RegistrationKind;
use ruma::{
OwnedRoomId, UserId,
api::client::{
account::{
change_password, check_registration_token_validity, deactivate, get_3pids,
get_username_availability,
ThirdPartyIdRemovalStatus, change_password, check_registration_token_validity,
deactivate, get_3pids, get_username_availability,
register::{self, LoginType},
request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn,
whoami, ThirdPartyIdRemovalStatus,
whoami,
},
error::ErrorKind,
uiaa::{AuthFlow, AuthType, UiaaInfo},
},
events::{
GlobalAccountDataEventType, StateEventType,
room::{
message::RoomMessageEventContent,
power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
},
GlobalAccountDataEventType, StateEventType,
},
push, OwnedRoomId, UserId,
push,
};
use service::Services;

use super::{join_room_by_id_helper, DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH, join_room_by_id_helper};
use crate::Ruma;

const RANDOM_USER_ID_LENGTH: usize = 10;

@@ -59,6 +63,14 @@ pub(crate) async fn get_register_available_route(
|| appservice.registration.id.contains("matrix_appservice_irc")
});

if services
.globals
.forbidden_usernames()
.is_match(&body.username)
{
return Err!(Request(Forbidden("Username is forbidden")));
}

// don't force the username lowercase if it's from matrix-appservice-irc
let body_username = if is_matrix_appservice_irc {
body.username.clone()

@@ -67,30 +79,45 @@ pub(crate) async fn get_register_available_route(
};

// Validate user id
let user_id = UserId::parse_with_server_name(body_username, services.globals.server_name())
.ok()
.filter(|user_id| {
(!user_id.is_historical() || is_matrix_appservice_irc)
&& services.globals.user_is_local(user_id)
})
.ok_or(Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
let user_id =
match UserId::parse_with_server_name(&body_username, services.globals.server_name()) {
| Ok(user_id) => {
if let Err(e) = user_id.validate_strict() {
// unless the username is from the broken matrix appservice IRC bridge, we
// should follow synapse's behaviour on not allowing things like spaces
// and UTF-8 characters in usernames
if !is_matrix_appservice_irc {
return Err!(Request(InvalidUsername(debug_warn!(
"Username {body_username} contains disallowed characters or spaces: \
{e}"
))));
}
}

user_id
},
| Err(e) => {
return Err!(Request(InvalidUsername(debug_warn!(
"Username {body_username} is not valid: {e}"
))));
},
};

// Check if username is creative enough
if services.users.exists(&user_id).await {
return Err(Error::BadRequest(ErrorKind::UserInUse, "Desired user ID is already taken."));
return Err!(Request(UserInUse("User ID is not available.")));
}

if services
.globals
.forbidden_usernames()
.is_match(user_id.localpart())
{
return Err(Error::BadRequest(ErrorKind::Unknown, "Username is forbidden."));
if let Some(ref info) = body.appservice_info {
if !info.is_user_match(&user_id) {
return Err!(Request(Exclusive("Username is not in an appservice namespace.")));
}
}

// TODO add check for appservice namespaces
if services.appservice.is_exclusive_user_id(&user_id).await {
return Err!(Request(Exclusive("Username is reserved by an appservice.")));
}

// If no if check is true we have an username that's available to be used.
Ok(get_username_availability::v3::Response { available: true })
}

@@ -118,20 +145,31 @@ pub(crate) async fn register_route(
InsecureClientIp(client): InsecureClientIp,
body: Ruma<register::v3::Request>,
) -> Result<register::v3::Response> {
if !services.globals.allow_registration() && body.appservice_info.is_none() {
info!(
"Registration disabled and request not from known appservice, rejecting \
registration attempt for username \"{}\"",
body.username.as_deref().unwrap_or("")
);
return Err(Error::BadRequest(ErrorKind::forbidden(), "Registration has been disabled."));
let is_guest = body.kind == RegistrationKind::Guest;
let emergency_mode_enabled = services.config.emergency_password.is_some();

if !services.config.allow_registration && body.appservice_info.is_none() {
match (body.username.as_ref(), body.initial_device_display_name.as_ref()) {
| (Some(username), Some(device_display_name)) => {
info!(%is_guest, user = %username, device_name = %device_display_name, "Rejecting registration attempt as registration is disabled");
},
| (Some(username), _) => {
info!(%is_guest, user = %username, "Rejecting registration attempt as registration is disabled");
},
| (_, Some(device_display_name)) => {
info!(%is_guest, device_name = %device_display_name, "Rejecting registration attempt as registration is disabled");
},
| (None, _) => {
info!(%is_guest, "Rejecting registration attempt as registration is disabled");
},
}

let is_guest = body.kind == RegistrationKind::Guest;
return Err!(Request(Forbidden("Registration has been disabled.")));
}

if is_guest
&& (!services.globals.allow_guest_registration()
|| (services.globals.allow_registration()
&& (!services.config.allow_guest_registration
|| (services.config.allow_registration
&& services.globals.registration_token.is_some()))
{
info!(

@@ -139,10 +177,7 @@ pub(crate) async fn register_route(
rejecting guest registration attempt, initial device name: \"{}\"",
body.initial_device_display_name.as_deref().unwrap_or("")
);
return Err(Error::BadRequest(
ErrorKind::GuestAccessForbidden,
"Guest registration is disabled.",
));
return Err!(Request(GuestAccessForbidden("Guest registration is disabled.")));
}

// forbid guests from registering if there is not a real admin user yet. give

@@ -153,13 +188,10 @@ pub(crate) async fn register_route(
rejecting registration. Guest's initial device name: \"{}\"",
body.initial_device_display_name.as_deref().unwrap_or("")
);
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"Registration temporarily disabled.",
));
return Err!(Request(Forbidden("Registration is temporarily disabled.")));
}

let user_id = match (&body.username, is_guest) {
let user_id = match (body.username.as_ref(), is_guest) {
| (Some(username), false) => {
// workaround for https://github.com/matrix-org/matrix-appservice-irc/issues/1780 due to inactivity of fixing the issue
let is_matrix_appservice_irc =

@@ -169,6 +201,12 @@ pub(crate) async fn register_route(
|| appservice.registration.id.contains("matrix_appservice_irc")
});

if services.globals.forbidden_usernames().is_match(username)
&& !emergency_mode_enabled
{
return Err!(Request(Forbidden("Username is forbidden")));
}

// don't force the username lowercase if it's from matrix-appservice-irc
let body_username = if is_matrix_appservice_irc {
username.clone()

@@ -176,31 +214,34 @@ pub(crate) async fn register_route(
username.to_lowercase()
};

let proposed_user_id =
UserId::parse_with_server_name(body_username, services.globals.server_name())
.ok()
.filter(|user_id| {
(!user_id.is_historical() || is_matrix_appservice_irc)
&& services.globals.user_is_local(user_id)
})
.ok_or(Error::BadRequest(
ErrorKind::InvalidUsername,
"Username is invalid.",
))?;

if services.users.exists(&proposed_user_id).await {
return Err(Error::BadRequest(
ErrorKind::UserInUse,
"Desired user ID is already taken.",
));
let proposed_user_id = match UserId::parse_with_server_name(
&body_username,
services.globals.server_name(),
) {
| Ok(user_id) => {
if let Err(e) = user_id.validate_strict() {
// unless the username is from the broken matrix appservice IRC bridge, or
// we are in emergency mode, we should follow synapse's behaviour on
// not allowing things like spaces and UTF-8 characters in usernames
if !is_matrix_appservice_irc && !emergency_mode_enabled {
return Err!(Request(InvalidUsername(debug_warn!(
"Username {body_username} contains disallowed characters or \
spaces: {e}"
))));
}
}

if services
.globals
.forbidden_usernames()
.is_match(proposed_user_id.localpart())
{
return Err(Error::BadRequest(ErrorKind::Unknown, "Username is forbidden."));
user_id
},
| Err(e) => {
return Err!(Request(InvalidUsername(debug_warn!(
"Username {body_username} is not valid: {e}"
))));
},
};

if services.users.exists(&proposed_user_id).await {
return Err!(Request(UserInUse("User ID is not available.")));
}

proposed_user_id

@@ -218,15 +259,20 @@ pub(crate) async fn register_route(
};

if body.body.login_type == Some(LoginType::ApplicationService) {
if let Some(ref info) = body.appservice_info {
if !info.is_user_match(&user_id) {
return Err(Error::BadRequest(ErrorKind::Exclusive, "User is not in namespace."));
match body.appservice_info {
| Some(ref info) =>
if !info.is_user_match(&user_id) && !emergency_mode_enabled {
return Err!(Request(Exclusive(
"Username is not in an appservice namespace."
)));
},
| _ => {
return Err!(Request(MissingToken("Missing appservice token.")));
},
}
} else {
return Err(Error::BadRequest(ErrorKind::MissingToken, "Missing appservice token."));
}
} else if services.appservice.is_exclusive_user_id(&user_id).await {
return Err(Error::BadRequest(ErrorKind::Exclusive, "User ID reserved by appservice."));
} else if services.appservice.is_exclusive_user_id(&user_id).await && !emergency_mode_enabled
{
return Err!(Request(Exclusive("Username is reserved by an appservice.")));
}

// UIAA

@@ -256,12 +302,13 @@ pub(crate) async fn register_route(
};

if !skip_auth {
if let Some(auth) = &body.auth {
match &body.auth {
| Some(auth) => {
let (worked, uiaainfo) = services
.uiaa
.try_auth(
&UserId::parse_with_server_name("", services.globals.server_name())
.expect("we know this is valid"),
.unwrap(),
"".into(),
auth,
&uiaainfo,

@@ -271,18 +318,23 @@ pub(crate) async fn register_route(
return Err(Error::Uiaa(uiaainfo));
}
// Success!
} else if let Some(json) = body.json_body {
},
| _ => match body.json_body {
| Some(ref json) => {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
services.uiaa.create(
&UserId::parse_with_server_name("", services.globals.server_name())
.expect("we know this is valid"),
.unwrap(),
"".into(),
&uiaainfo,
&json,
json,
);
return Err(Error::Uiaa(uiaainfo));
} else {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
},
| _ => {
return Err!(Request(NotJson("JSON body is not valid")));
},
},
}
}

@@ -299,7 +351,7 @@ pub(crate) async fn register_route(
if !services.globals.new_user_displayname_suffix().is_empty()
&& body.appservice_info.is_none()
{
write!(displayname, " {}", services.globals.config.new_user_displayname_suffix)
write!(displayname, " {}", services.server.config.new_user_displayname_suffix)
.expect("should be able to write to string buffer");
}

@@ -323,8 +375,12 @@ pub(crate) async fn register_route(
)
.await?;

// Inhibit login does not work for guests
if !is_guest && body.inhibit_login {
if (!is_guest && body.inhibit_login)
|| body
.appservice_info
.as_ref()
.is_some_and(|appservice| appservice.registration.device_management)
{
return Ok(register::v3::Response {
access_token: None,
user_id,

@@ -365,7 +421,7 @@ pub(crate) async fn register_route(
\"{device_display_name}\""
);

if services.globals.config.admin_room_notices {
if services.server.config.admin_room_notices {
services
.admin
.send_message(RoomMessageEventContent::notice_plain(format!(

@@ -378,7 +434,7 @@ pub(crate) async fn register_route(
} else {
info!("New user \"{user_id}\" registered on this server.");

if services.globals.config.admin_room_notices {
if services.server.config.admin_room_notices {
services
.admin
.send_message(RoomMessageEventContent::notice_plain(format!(

@@ -391,11 +447,11 @@ pub(crate) async fn register_route(
}

// log in conduit admin channel if a guest registered
if body.appservice_info.is_none() && is_guest && services.globals.log_guest_registrations() {
info!("New guest user \"{user_id}\" registered on this server.");
if body.appservice_info.is_none() && is_guest && services.config.log_guest_registrations {
debug_info!("New guest user \"{user_id}\" registered on this server.");

if !device_display_name.is_empty() {
if services.globals.config.admin_room_notices {
if services.server.config.admin_room_notices {
services
.admin
.send_message(RoomMessageEventContent::notice_plain(format!(

@@ -407,7 +463,7 @@ pub(crate) async fn register_route(
}
} else {
#[allow(clippy::collapsible_else_if)]
if services.globals.config.admin_room_notices {
if services.server.config.admin_room_notices {
services
.admin
.send_message(RoomMessageEventContent::notice_plain(format!(

@@ -421,7 +477,8 @@ pub(crate) async fn register_route(
}

// If this is the first real user, grant them admin privileges except for guest
// users Note: the server user, @conduit:servername, is generated first
// users
// Note: the server user is generated first
if !is_guest {
if let Ok(admin_room) = services.admin.get_admin_room().await {
if services

@@ -438,10 +495,10 @@ pub(crate) async fn register_route(
}

if body.appservice_info.is_none()
&& !services.globals.config.auto_join_rooms.is_empty()
&& (services.globals.allow_guests_auto_join_rooms() || !is_guest)
&& !services.server.config.auto_join_rooms.is_empty()
&& (services.config.allow_guests_auto_join_rooms || !is_guest)
{
for room in &services.globals.config.auto_join_rooms {
for room in &services.server.config.auto_join_rooms {
let Ok(room_id) = services.rooms.alias.resolve(room).await else {
error!(
"Failed to resolve room alias to room ID when attempting to auto join \

@@ -463,7 +520,7 @@ pub(crate) async fn register_route(
}

if let Some(room_server_name) = room.server_name() {
if let Err(e) = join_room_by_id_helper(
match join_room_by_id_helper(
&services,
&user_id,
&room_id,

@@ -475,11 +532,16 @@ pub(crate) async fn register_route(
.boxed()
.await
{
| Err(e) => {
// don't return this error so we don't fail registrations
error!("Failed to automatically join room {room} for user {user_id}: {e}");
} else {
error!(
"Failed to automatically join room {room} for user {user_id}: {e}"
);
},
| _ => {
info!("Automatically joined room {room} for user {user_id}");
};
},
}
}
}
}

@@ -521,8 +583,8 @@ pub(crate) async fn change_password_route(
let sender_user = body
.sender_user
.as_ref()
.ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?;
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
.ok_or_else(|| err!(Request(MissingToken("Missing access token."))))?;
let sender_device = body.sender_device();

let mut uiaainfo = UiaaInfo {
flows: vec![AuthFlow { stages: vec![AuthType::Password] }],

@@ -532,7 +594,8 @@ pub(crate) async fn change_password_route(
auth_error: None,
};

if let Some(auth) = &body.auth {
match &body.auth {
| Some(auth) => {
let (worked, uiaainfo) = services
.uiaa
.try_auth(sender_user, sender_device, auth, &uiaainfo)

@@ -543,15 +606,20 @@ pub(crate) async fn change_password_route(
}

// Success!
} else if let Some(json) = body.json_body {
},
| _ => match body.json_body {
| Some(ref json) => {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
services
.uiaa
.create(sender_user, sender_device, &uiaainfo, &json);
.create(sender_user, sender_device, &uiaainfo, json);

return Err(Error::Uiaa(uiaainfo));
} else {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
},
| _ => {
return Err!(Request(NotJson("JSON body is not valid")));
},
},
}

services

@@ -563,14 +631,34 @@ pub(crate) async fn change_password_route(
services
.users
.all_device_ids(sender_user)
.ready_filter(|id| id != sender_device)
.ready_filter(|id| *id != sender_device)
.for_each(|id| services.users.remove_device(sender_user, id))
.await;

// Remove all pushers except the ones associated with this session
services
.pusher
.get_pushkeys(sender_user)
.map(ToOwned::to_owned)
.broad_filter_map(|pushkey| async move {
services
.pusher
.get_pusher_device(&pushkey)
.await
.ok()
.filter(|pusher_device| pusher_device != sender_device)
.is_some()
.then_some(pushkey)
})
.for_each(|pushkey| async move {
services.pusher.delete_pusher(sender_user, &pushkey).await;
})
.await;
}

info!("User {sender_user} changed their password.");

if services.globals.config.admin_room_notices {
if services.server.config.admin_room_notices {
services
.admin
.send_message(RoomMessageEventContent::notice_plain(format!(

@@ -625,8 +713,8 @@ pub(crate) async fn deactivate_route(
let sender_user = body
.sender_user
.as_ref()
.ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?;
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
.ok_or_else(|| err!(Request(MissingToken("Missing access token."))))?;
let sender_device = body.sender_device();

let mut uiaainfo = UiaaInfo {
flows: vec![AuthFlow { stages: vec![AuthType::Password] }],

@@ -636,7 +724,8 @@ pub(crate) async fn deactivate_route(
auth_error: None,
};

if let Some(auth) = &body.auth {
match &body.auth {
| Some(auth) => {
let (worked, uiaainfo) = services
.uiaa
.try_auth(sender_user, sender_device, auth, &uiaainfo)

@@ -646,15 +735,20 @@ pub(crate) async fn deactivate_route(
return Err(Error::Uiaa(uiaainfo));
}
// Success!
} else if let Some(json) = body.json_body {
},
| _ => match body.json_body {
| Some(ref json) => {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
services
.uiaa
.create(sender_user, sender_device, &uiaainfo, &json);
.create(sender_user, sender_device, &uiaainfo, json);

return Err(Error::Uiaa(uiaainfo));
} else {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
},
| _ => {
return Err!(Request(NotJson("JSON body is not valid")));
},
},
}

// Remove profile pictures and display name

@@ -673,7 +767,7 @@ pub(crate) async fn deactivate_route(

info!("User {sender_user} deactivated their account.");

if services.globals.config.admin_room_notices {
if services.server.config.admin_room_notices {
services
.admin
.send_message(RoomMessageEventContent::notice_plain(format!(

@@ -711,10 +805,7 @@ pub(crate) async fn third_party_route(
pub(crate) async fn request_3pid_management_token_via_email_route(
_body: Ruma<request_3pid_management_token_via_email::v3::Request>,
) -> Result<request_3pid_management_token_via_email::v3::Response> {
Err(Error::BadRequest(
ErrorKind::ThreepidDenied,
"Third party identifier is not allowed",
))
Err!(Request(ThreepidDenied("Third party identifiers are not implemented")))
}

/// # `POST /_matrix/client/v3/account/3pid/msisdn/requestToken`

@@ -727,10 +818,7 @@ pub(crate) async fn request_3pid_management_token_via_email_route(
pub(crate) async fn request_3pid_management_token_via_msisdn_route(
_body: Ruma<request_3pid_management_token_via_msisdn::v3::Request>,
) -> Result<request_3pid_management_token_via_msisdn::v3::Response> {
Err(Error::BadRequest(
ErrorKind::ThreepidDenied,
"Third party identifier is not allowed",
))
Err!(Request(ThreepidDenied("Third party identifiers are not implemented")))
}

/// # `GET /_matrix/client/v1/register/m.login.registration_token/validity`

@@ -744,10 +832,7 @@ pub(crate) async fn check_registration_token_validity(
body: Ruma<check_registration_token_validity::v1::Request>,
) -> Result<check_registration_token_validity::v1::Response> {
let Some(reg_token) = services.globals.registration_token.clone() else {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"Server does not allow token registration.",
));
return Err!(Request(Forbidden("Server does not allow token registration")));
};

Ok(check_registration_token_validity::v1::Response { valid: reg_token == body.token })

@@ -809,7 +894,7 @@ pub async fn full_user_deactivate(
power_levels_content.users.remove(user_id);

// ignore errors so deactivation doesn't fail
if let Err(e) = services
match services
.rooms
.timeline
.build_and_append_pdu(

@@ -820,9 +905,12 @@ pub async fn full_user_deactivate(
)
.await
{
| Err(e) => {
warn!(%room_id, %user_id, "Failed to demote user's own power level: {e}");
} else {
},
| _ => {
info!("Demoted {user_id} in {room_id} as part of account deactivation");
},
}
}
}

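The account hunks above gate registration on `UserId::validate_strict()`, waived only for the matrix-appservice-irc workaround or when an emergency password is configured. A boolean sketch of that gate; the names and the standalone shape are illustrative, not the real handler:

```rust
// `strict_ok` stands for the result of `user_id.validate_strict().is_ok()`;
// the username passes if it is strictly valid, or if one of the two
// documented exemptions applies.
fn allow_username(
    strict_ok: bool,
    is_matrix_appservice_irc: bool,
    emergency_mode_enabled: bool,
) -> bool {
    strict_ok || is_matrix_appservice_irc || emergency_mode_enabled
}

fn main() {
    // A spaced username fails strict validation but is tolerated for the bridge.
    assert!(allow_username(false, true, false));
    assert!(!allow_username(false, false, false));
}
```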
@@ -1,6 +1,8 @@
use axum::extract::State;
use conduwuit::{err, Err};
use conduwuit::{Err, Result, err};
use conduwuit_service::Services;
use ruma::{
RoomId, UserId,
api::client::config::{
get_global_account_data, get_room_account_data, set_global_account_data,
set_room_account_data,

@@ -10,12 +12,11 @@ use ruma::{
GlobalAccountDataEventType, RoomAccountDataEventType,
},
serde::Raw,
RoomId, UserId,
};
use serde::Deserialize;
use serde_json::{json, value::RawValue as RawJsonValue};

use crate::{service::Services, Result, Ruma};
use crate::Ruma;

/// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}`
///

@@ -1,12 +1,12 @@
use axum::extract::State;
use conduwuit::{debug, Err, Result};
use conduwuit::{Err, Result, debug};
use conduwuit_service::Services;
use futures::StreamExt;
use rand::seq::SliceRandom;
use ruma::{
api::client::alias::{create_alias, delete_alias, get_alias},
OwnedServerName, RoomAliasId, RoomId,
api::client::alias::{create_alias, delete_alias, get_alias},
};
use service::Services;

use crate::Ruma;

@@ -128,18 +128,26 @@ async fn room_available_servers(

// insert our server as the very first choice if in list, else check if we can
// prefer the room alias server first
if let Some(server_index) = servers
match servers
.iter()
.position(|server_name| services.globals.server_is_ours(server_name))
{
| Some(server_index) => {
servers.swap_remove(server_index);
servers.insert(0, services.globals.server_name().to_owned());
} else if let Some(alias_server_index) = servers
},
| _ => {
match servers
.iter()
.position(|server| server == room_alias.server_name())
{
| Some(alias_server_index) => {
servers.swap_remove(alias_server_index);
servers.insert(0, room_alias.server_name().into());
},
| _ => {},
}
},
}

servers

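The `room_available_servers` hunk reorders the candidate list: our own server moves to the front when present, otherwise the alias's server is promoted. A self-contained sketch of that reordering, with plain `String`s standing in for `OwnedServerName`:

```rust
// Move `ours` to index 0 if present; otherwise promote `alias_server`.
// `swap_remove` is O(1) and order of the remaining tail does not matter here.
fn prioritize(servers: &mut Vec<String>, ours: &str, alias_server: &str) {
    match servers.iter().position(|s| s.as_str() == ours) {
        Some(i) => {
            servers.swap_remove(i);
            servers.insert(0, ours.to_owned());
        },
        None => {
            if let Some(i) = servers.iter().position(|s| s.as_str() == alias_server) {
                servers.swap_remove(i);
                servers.insert(0, alias_server.to_owned());
            }
        },
    }
}

fn main() {
    let mut servers = vec!["a.org".to_owned(), "ours.org".to_owned()];
    prioritize(&mut servers, "ours.org", "a.org");
    assert_eq!(servers[0], "ours.org");
}
```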
@@ -1,5 +1,5 @@
use axum::extract::State;
use conduwuit::{err, Err, Result};
use conduwuit::{Err, Result, err};
use ruma::api::{appservice::ping, client::appservice::request_ping};

use crate::Ruma;

@@ -22,7 +22,13 @@ pub(crate) async fn appservice_ping(
)));
}

if appservice_info.registration.url.is_none() {
if appservice_info.registration.url.is_none()
|| appservice_info
.registration
.url
.as_ref()
.is_some_and(|url| url.is_empty() || url == "null")
{
return Err!(Request(UrlNotSet(
"Appservice does not have a URL set, there is nothing to ping."
)));

@@ -1,6 +1,9 @@
use std::cmp::Ordering;

use axum::extract::State;
use conduwuit::{err, Err};
use conduwuit::{Err, Result, err};
use ruma::{
UInt,
api::client::backup::{
add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session,
create_backup_version, delete_backup_keys, delete_backup_keys_for_room,

@@ -8,10 +11,9 @@ use ruma::{
get_backup_keys_for_room, get_backup_keys_for_session, get_latest_backup_info,
update_backup_version,
},
UInt,
};

use crate::{Result, Ruma};
use crate::Ruma;

/// # `POST /_matrix/client/r0/room_keys/version`
///

@@ -232,6 +234,66 @@ pub(crate) async fn add_backup_keys_for_session_route(
)));
}

// Check if we already have a better key
let mut ok_to_replace = true;
if let Some(old_key) = &services
.key_backups
.get_session(body.sender_user(), &body.version, &body.room_id, &body.session_id)
.await
.ok()
{
let old_is_verified = old_key
.get_field::<bool>("is_verified")?
.unwrap_or_default();

let new_is_verified = body
.session_data
.get_field::<bool>("is_verified")?
.ok_or_else(|| err!(Request(BadJson("`is_verified` field should exist"))))?;

// Prefer key that `is_verified`
if old_is_verified != new_is_verified {
if old_is_verified {
ok_to_replace = false;
}
} else {
// If both have same `is_verified`, prefer the one with lower
// `first_message_index`
let old_first_message_index = old_key
.get_field::<UInt>("first_message_index")?
.unwrap_or(UInt::MAX);

let new_first_message_index = body
.session_data
.get_field::<UInt>("first_message_index")?
.ok_or_else(|| {
err!(Request(BadJson("`first_message_index` field should exist")))
})?;

ok_to_replace = match new_first_message_index.cmp(&old_first_message_index) {
| Ordering::Less => true,
| Ordering::Greater => false,
| Ordering::Equal => {
// If both have same `first_message_index`, prefer the one with lower
// `forwarded_count`
let old_forwarded_count = old_key
.get_field::<UInt>("forwarded_count")?
.unwrap_or(UInt::MAX);

let new_forwarded_count = body
.session_data
.get_field::<UInt>("forwarded_count")?
.ok_or_else(|| {
err!(Request(BadJson("`forwarded_count` field should exist")))
})?;

new_forwarded_count < old_forwarded_count
},
};
}
}

if ok_to_replace {
services
.key_backups
.add_key(

@@ -242,6 +304,7 @@ pub(crate) async fn add_backup_keys_for_session_route(
&body.session_data,
)
.await?;
}

Ok(add_backup_keys_for_session::v3::Response {
count: services

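The backup hunk above only overwrites an existing session key when the upload is strictly preferable: verified beats unverified, then lower `first_message_index`, then lower `forwarded_count`. A self-contained sketch of that ordering; tuples of plain integers stand in for the ruma `UInt` fields:

```rust
use std::cmp::Ordering;

// (is_verified, first_message_index, forwarded_count) for old and new keys;
// returns true when the new key should replace the stored one.
fn ok_to_replace(old: (bool, u64, u64), new: (bool, u64, u64)) -> bool {
    if old.0 != new.0 {
        return new.0; // a verified key always wins over an unverified one
    }
    match new.1.cmp(&old.1) {
        Ordering::Less => true,     // new key covers earlier messages
        Ordering::Greater => false, // old key covers earlier messages
        Ordering::Equal => new.2 < old.2, // fewer forwards is more trustworthy
    }
}

fn main() {
    assert!(ok_to_replace((false, 0, 0), (true, 5, 5)));
    assert!(!ok_to_replace((true, 0, 0), (false, 0, 0)));
    assert!(ok_to_replace((true, 3, 2), (true, 3, 1)));
}
```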
@@ -1,16 +1,17 @@
use std::collections::BTreeMap;

use axum::extract::State;
use conduwuit::{Result, Server};
use ruma::{
	RoomVersionId,
	api::client::discovery::get_capabilities::{
		self, Capabilities, GetLoginTokenCapability, RoomVersionStability,
		RoomVersionsCapability, ThirdPartyIdChangesCapability,
	},
	RoomVersionId,
};
use serde_json::json;

use crate::{Result, Ruma};
use crate::Ruma;

/// # `GET /_matrix/client/v3/capabilities`
///

@@ -21,7 +22,7 @@ pub(crate) async fn get_capabilities_route(
	_body: Ruma<get_capabilities::v3::Request>,
) -> Result<get_capabilities::v3::Response> {
	let available: BTreeMap<RoomVersionId, RoomVersionStability> =
		services.server.available_room_versions().collect();
		Server::available_room_versions().collect();

	let mut capabilities = Capabilities::default();
	capabilities.room_versions = RoomVersionsCapability {

@@ -32,13 +33,21 @@ pub(crate) async fn get_capabilities_route(
	// we do not implement 3PID stuff
	capabilities.thirdparty_id_changes = ThirdPartyIdChangesCapability { enabled: false };

	// we dont support generating tokens yet
	capabilities.get_login_token = GetLoginTokenCapability { enabled: false };
	capabilities.get_login_token = GetLoginTokenCapability {
		enabled: services.server.config.login_via_existing_session,
	};

	// MSC4133 capability
	capabilities
		.set("uk.tcpip.msc4133.profile_fields", json!({"enabled": true}))
		.expect("this is valid JSON we created");

	capabilities
		.set(
			"org.matrix.msc4267.forget_forced_upon_leave",
			json!({"enabled": services.config.forget_forced_upon_leave}),
		)
		.expect("valid JSON we created");

	Ok(get_capabilities::v3::Response { capabilities })
}
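Beyond wiring `m.get_login_token` to the `login_via_existing_session` config, the hunk advertises unstable capabilities under their vendor-prefixed names. A sketch of that pattern using ruma's Capabilities::set, isolated from the route; the capability names are the ones in the hunk, and the config flag is passed in as a plain bool for the sake of the example:

use ruma::api::client::discovery::get_capabilities::Capabilities;
use serde_json::json;

fn advertise_unstable(forget_forced_upon_leave: bool) -> Capabilities {
	let mut capabilities = Capabilities::default();

	// Unstable capabilities are keyed by their vendor-prefixed identifiers.
	capabilities
		.set("uk.tcpip.msc4133.profile_fields", json!({"enabled": true}))
		.expect("static JSON serializes");

	capabilities
		.set(
			"org.matrix.msc4267.forget_forced_upon_leave",
			json!({"enabled": forget_forced_upon_leave}),
		)
		.expect("static JSON serializes");

	capabilities
}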
@@ -1,23 +1,24 @@
use axum::extract::State;
use conduwuit::{
	at, err, ref_at,
	Err, Result, at, debug_warn, err,
	matrix::pdu::PduEvent,
	ref_at,
	utils::{
		IterStream,
		future::TryExtExt,
		stream::{BroadbandExt, ReadyExt, TryIgnore, WidebandExt},
		IterStream,
	},
	Err, PduEvent, Result,
};
use futures::{join, try_join, FutureExt, StreamExt, TryFutureExt};
use ruma::{
	api::client::{context::get_context, filter::LazyLoadOptions},
	events::StateEventType,
	OwnedEventId, UserId,
use conduwuit_service::rooms::{lazy_loading, lazy_loading::Options, short::ShortStateKey};
use futures::{
	FutureExt, StreamExt, TryFutureExt, TryStreamExt,
	future::{OptionFuture, join, join3, try_join3},
};
use ruma::{OwnedEventId, UserId, api::client::context::get_context, events::StateEventType};

use crate::{
	client::message::{event_filter, ignored_filter, update_lazy, visibility_filter, LazySet},
	Ruma,
	client::message::{event_filter, ignored_filter, lazy_loading_witness, visibility_filter},
};

const LIMIT_MAX: usize = 100;

@@ -33,10 +34,15 @@ pub(crate) async fn get_context_route(
	State(services): State<crate::State>,
	body: Ruma<get_context::v3::Request>,
) -> Result<get_context::v3::Response> {
	let filter = &body.filter;
	let sender = body.sender();
	let (sender_user, _) = sender;
	let (sender_user, sender_device) = sender;
	let room_id = &body.room_id;
	let event_id = &body.event_id;
	let filter = &body.filter;

	if !services.rooms.metadata.exists(room_id).await {
		return Err!(Request(Forbidden("Room does not exist to this server")));
	}

	// Use limit or else 10, with maximum 100
	let limit: usize = body

@@ -45,44 +51,33 @@ pub(crate) async fn get_context_route(
		.unwrap_or(LIMIT_DEFAULT)
		.min(LIMIT_MAX);

	// some clients, at least element, seem to require knowledge of redundant
	// members for "inline" profiles on the timeline to work properly
	let lazy_load_enabled = matches!(filter.lazy_load_options, LazyLoadOptions::Enabled { .. });

	let lazy_load_redundant = if let LazyLoadOptions::Enabled { include_redundant_members } =
		filter.lazy_load_options
	{
		include_redundant_members
	} else {
		false
	};

	let base_id = services
		.rooms
		.timeline
		.get_pdu_id(&body.event_id)
		.get_pdu_id(event_id)
		.map_err(|_| err!(Request(NotFound("Event not found."))));

	let base_pdu = services
		.rooms
		.timeline
		.get_pdu(&body.event_id)
		.get_pdu(event_id)
		.map_err(|_| err!(Request(NotFound("Base event not found."))));

	let visible = services
		.rooms
		.state_accessor
		.user_can_see_event(sender_user, &body.room_id, &body.event_id)
		.user_can_see_event(sender_user, room_id, event_id)
		.map(Ok);

	let (base_id, base_pdu, visible) = try_join!(base_id, base_pdu, visible)?;
	let (base_id, base_pdu, visible) = try_join3(base_id, base_pdu, visible).await?;

	if base_pdu.room_id != body.room_id || base_pdu.event_id != body.event_id {
	if base_pdu.room_id != *room_id || base_pdu.event_id != *event_id {
		return Err!(Request(NotFound("Base event not found.")));
	}

	if !visible {
		return Err!(Request(Forbidden("You don't have permission to view this event.")));
		debug_warn!(req_evt = ?event_id, ?base_id, ?room_id, "Event requested by {sender_user} but is not allowed to see it, returning 404");
		return Err!(Request(NotFound("Event not found.")));
	}

	let base_count = base_id.pdu_count();

@@ -112,65 +107,84 @@ pub(crate) async fn get_context_route(
		.collect();

	let (base_event, events_before, events_after): (_, Vec<_>, Vec<_>) =
		join!(base_event, events_before, events_after);
		join3(base_event, events_before, events_after).boxed().await;

	let lazy_loading_context = lazy_loading::Context {
		user_id: sender_user,
		device_id: sender_device,
		room_id,
		token: Some(base_count.into_unsigned()),
		options: Some(&filter.lazy_load_options),
	};

	let lazy_loading_witnessed: OptionFuture<_> = filter
		.lazy_load_options
		.is_enabled()
		.then_some(
			base_event
				.iter()
				.chain(events_before.iter())
				.chain(events_after.iter()),
		)
		.map(|witnessed| lazy_loading_witness(&services, &lazy_loading_context, witnessed))
		.into();

	let state_at = events_after
		.last()
		.map(ref_at!(1))
		.map_or(body.event_id.as_ref(), |e| e.event_id.as_ref());
		.map_or(body.event_id.as_ref(), |pdu| pdu.event_id.as_ref());

	let state_ids = services
		.rooms
		.state_accessor
		.pdu_shortstatehash(state_at)
		.or_else(|_| services.rooms.state.get_room_shortstatehash(room_id))
		.and_then(|shortstatehash| services.rooms.state_accessor.state_full_ids(shortstatehash))
		.map_err(|e| err!(Database("State not found: {e}")))
		.await?;

	let lazy = base_event
		.iter()
		.chain(events_before.iter())
		.chain(events_after.iter())
		.stream()
		.fold(LazySet::new(), |lazy, item| {
			update_lazy(&services, room_id, sender, lazy, item, lazy_load_redundant)
		})
		.await;

	let lazy = &lazy;
	let state: Vec<_> = state_ids
		.iter()
		.stream()
		.broad_filter_map(|(shortstatekey, event_id)| {
		.map_ok(|shortstatehash| {
			services
				.rooms
				.short
				.get_statekey_from_short(*shortstatekey)
				.map_ok(move |(event_type, state_key)| (event_type, state_key, event_id))
				.ok()
				.state_accessor
				.state_full_ids(shortstatehash)
				.map(Ok)
		})
		.ready_filter_map(|(event_type, state_key, event_id)| {
			if !lazy_load_enabled || event_type != StateEventType::RoomMember {
				return Some(event_id);
			}
		.map_err(|e| err!(Database("State not found: {e}")))
		.try_flatten_stream()
		.try_collect()
		.boxed();

			state_key
	let (lazy_loading_witnessed, state_ids) = join(lazy_loading_witnessed, state_ids).await;

	let state_ids: Vec<(ShortStateKey, OwnedEventId)> = state_ids?;
	let shortstatekeys = state_ids.iter().map(at!(0)).stream();
	let shorteventids = state_ids.iter().map(ref_at!(1)).stream();
	let lazy_loading_witnessed = lazy_loading_witnessed.unwrap_or_default();
	let state: Vec<_> = services
		.rooms
		.short
		.multi_get_statekey_from_short(shortstatekeys)
		.zip(shorteventids)
		.ready_filter_map(|item| Some((item.0.ok()?, item.1)))
		.ready_filter_map(|((event_type, state_key), event_id)| {
			if filter.lazy_load_options.is_enabled()
				&& event_type == StateEventType::RoomMember
				&& state_key
					.as_str()
					.try_into()
					.ok()
					.filter(|&user_id: &&UserId| lazy.contains(user_id))
					.map(|_| event_id)
					.is_ok_and(|user_id: &UserId| !lazy_loading_witnessed.contains(user_id))
			{
				return None;
			}

			Some(event_id)
		})
		.broad_filter_map(|event_id: &OwnedEventId| {
			services.rooms.timeline.get_pdu(event_id).ok()
			services.rooms.timeline.get_pdu(event_id.as_ref()).ok()
		})
		.map(|pdu| pdu.to_state_event())
		.map(PduEvent::into_state_event)
		.collect()
		.await;

	Ok(get_context::v3::Response {
		event: base_event.map(at!(1)).as_ref().map(PduEvent::to_room_event),
		event: base_event.map(at!(1)).map(PduEvent::into_room_event),

		start: events_before
			.last()

@@ -189,13 +203,13 @@ pub(crate) async fn get_context_route(
		events_before: events_before
			.into_iter()
			.map(at!(1))
			.map(|pdu| pdu.to_room_event())
			.map(PduEvent::into_room_event)
			.collect(),

		events_after: events_after
			.into_iter()
			.map(at!(1))
			.map(|pdu| pdu.to_room_event())
			.map(PduEvent::into_room_event)
			.collect(),

		state,
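The rewritten `/context` handler joins state-ID resolution with a lazy-loading "witness" computation, then drops `m.room.member` state for any user the client has not witnessed. A rough standalone sketch of that filtering predicate, assuming a plain HashSet<OwnedUserId> in place of the service's Witness type (the real code streams shortstatekeys instead of working on materialized values):

use std::collections::HashSet;

use ruma::{OwnedUserId, UserId, events::StateEventType};

fn keep_state_event(
	lazy_load_enabled: bool,
	event_type: &StateEventType,
	state_key: &str,
	witnessed: &HashSet<OwnedUserId>,
) -> bool {
	// Non-member state is always returned; member state only survives
	// for users the client has actually seen in the window.
	if !lazy_load_enabled || *event_type != StateEventType::RoomMember {
		return true;
	}
	<&UserId>::try_from(state_key).is_ok_and(|user_id| witnessed.contains(user_id))
}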
@@ -1,18 +1,18 @@
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{err, Err};
use conduwuit::{Err, Error, Result, debug, err, utils};
use futures::StreamExt;
use ruma::{
	MilliSecondsSinceUnixEpoch, OwnedDeviceId,
	api::client::{
		device::{self, delete_device, delete_devices, get_device, get_devices, update_device},
		error::ErrorKind,
		uiaa::{AuthFlow, AuthType, UiaaInfo},
	},
	MilliSecondsSinceUnixEpoch,
};

use super::SESSION_ID_LENGTH;
use crate::{utils, Error, Result, Ruma};
use crate::{Ruma, client::DEVICE_ID_LENGTH};

/// # `GET /_matrix/client/r0/devices`
///

@@ -59,14 +59,15 @@ pub(crate) async fn update_device_route(
	InsecureClientIp(client): InsecureClientIp,
	body: Ruma<update_device::v3::Request>,
) -> Result<update_device::v3::Response> {
	let sender_user = body.sender_user.as_ref().expect("user is authenticated");
	let sender_user = body.sender_user();
	let appservice = body.appservice_info.as_ref();

	let mut device = services
	match services
		.users
		.get_device_metadata(sender_user, &body.device_id)
		.await
		.map_err(|_| err!(Request(NotFound("Device not found."))))?;

	{
		| Ok(mut device) => {
			device.display_name.clone_from(&body.display_name);
			device.last_seen_ip.clone_from(&Some(client.to_string()));
			device

@@ -79,6 +80,37 @@ pub(crate) async fn update_device_route(
				.await?;

			Ok(update_device::v3::Response {})
		},
		| Err(_) => {
			let Some(appservice) = appservice else {
				return Err!(Request(NotFound("Device not found.")));
			};
			if !appservice.registration.device_management {
				return Err!(Request(NotFound("Device not found.")));
			}

			debug!(
				"Creating new device for {sender_user} from appservice {} as MSC4190 is enabled \
				 and device ID does not exist",
				appservice.registration.id
			);

			let device_id = OwnedDeviceId::from(utils::random_string(DEVICE_ID_LENGTH));

			services
				.users
				.create_device(
					sender_user,
					&device_id,
					&appservice.registration.as_token,
					None,
					Some(client.to_string()),
				)
				.await?;

			return Ok(update_device::v3::Response {});
		},
	}
}

/// # `DELETE /_matrix/client/r0/devices/{deviceId}`

@@ -95,8 +127,21 @@ pub(crate) async fn delete_device_route(
	State(services): State<crate::State>,
	body: Ruma<delete_device::v3::Request>,
) -> Result<delete_device::v3::Response> {
	let sender_user = body.sender_user.as_ref().expect("user is authenticated");
	let sender_device = body.sender_device.as_ref().expect("user is authenticated");
	let (sender_user, sender_device) = body.sender();
	let appservice = body.appservice_info.as_ref();

	if appservice.is_some_and(|appservice| appservice.registration.device_management) {
		debug!(
			"Skipping UIAA for {sender_user} as this is from an appservice and MSC4190 is \
			 enabled"
		);
		services
			.users
			.remove_device(sender_user, &body.device_id)
			.await;

		return Ok(delete_device::v3::Response {});
	}

	// UIAA
	let mut uiaainfo = UiaaInfo {

@@ -107,7 +152,8 @@ pub(crate) async fn delete_device_route(
		auth_error: None,
	};

	if let Some(auth) = &body.auth {
	match &body.auth {
		| Some(auth) => {
			let (worked, uiaainfo) = services
				.uiaa
				.try_auth(sender_user, sender_device, auth, &uiaainfo)

@@ -117,15 +163,20 @@ pub(crate) async fn delete_device_route(
				return Err!(Uiaa(uiaainfo));
			}
			// Success!
	} else if let Some(json) = body.json_body {
		},
		| _ => match body.json_body {
			| Some(ref json) => {
				uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
				services
					.uiaa
					.create(sender_user, sender_device, &uiaainfo, &json);
					.create(sender_user, sender_device, &uiaainfo, json);

				return Err!(Uiaa(uiaainfo));
	} else {
			},
			| _ => {
				return Err!(Request(NotJson("Not json.")));
			},
		},
	}

	services

@@ -136,11 +187,12 @@ pub(crate) async fn delete_device_route(
	Ok(delete_device::v3::Response {})
}

/// # `PUT /_matrix/client/r0/devices/{deviceId}`
/// # `POST /_matrix/client/v3/delete_devices`
///
/// Deletes the given device.
/// Deletes the given list of devices.
///
/// - Requires UIAA to verify user password
/// - Requires UIAA to verify user password unless from an appservice with
///   MSC4190 enabled.
///
/// For each device:
/// - Invalidates access token

@@ -152,8 +204,20 @@ pub(crate) async fn delete_devices_route(
	State(services): State<crate::State>,
	body: Ruma<delete_devices::v3::Request>,
) -> Result<delete_devices::v3::Response> {
	let sender_user = body.sender_user.as_ref().expect("user is authenticated");
	let sender_device = body.sender_device.as_ref().expect("user is authenticated");
	let (sender_user, sender_device) = body.sender();
	let appservice = body.appservice_info.as_ref();

	if appservice.is_some_and(|appservice| appservice.registration.device_management) {
		debug!(
			"Skipping UIAA for {sender_user} as this is from an appservice and MSC4190 is \
			 enabled"
		);
		for device_id in &body.devices {
			services.users.remove_device(sender_user, device_id).await;
		}

		return Ok(delete_devices::v3::Response {});
	}

	// UIAA
	let mut uiaainfo = UiaaInfo {

@@ -164,7 +228,8 @@ pub(crate) async fn delete_devices_route(
		auth_error: None,
	};

	if let Some(auth) = &body.auth {
	match &body.auth {
		| Some(auth) => {
			let (worked, uiaainfo) = services
				.uiaa
				.try_auth(sender_user, sender_device, auth, &uiaainfo)

@@ -174,15 +239,20 @@ pub(crate) async fn delete_devices_route(
				return Err(Error::Uiaa(uiaainfo));
			}
			// Success!
	} else if let Some(json) = body.json_body {
		},
		| _ => match body.json_body {
			| Some(ref json) => {
				uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
				services
					.uiaa
					.create(sender_user, sender_device, &uiaainfo, &json);
					.create(sender_user, sender_device, &uiaainfo, json);

				return Err(Error::Uiaa(uiaainfo));
	} else {
			},
			| _ => {
				return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
			},
		},
	}

	for device_id in &body.devices {
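All of the device routes now share the same MSC4190 gate: a request from an appservice whose registration enables device_management bypasses UIAA entirely. A minimal sketch of that check, with hypothetical Registration and AppserviceInfo structs that mirror only the fields the routes above actually touch:

// Hypothetical mirrors of the appservice registration fields used above.
struct Registration {
	device_management: bool,
}

struct AppserviceInfo {
	registration: Registration,
}

/// True when MSC4190 applies and the route may skip user-interactive auth.
fn can_skip_uiaa(appservice: Option<&AppserviceInfo>) -> bool {
	appservice.is_some_and(|appservice| appservice.registration.device_management)
}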
@@ -1,30 +1,41 @@
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{info, warn, Err, Error, Result};
use futures::{StreamExt, TryFutureExt};
use conduwuit::{
	Err, Result, err, info,
	utils::{
		TryFutureExtExt,
		math::Expected,
		result::FlatOk,
		stream::{ReadyExt, WidebandExt},
	},
};
use conduwuit_service::Services;
use futures::{
	FutureExt, StreamExt, TryFutureExt,
	future::{join, join4, join5},
};
use ruma::{
	OwnedRoomId, RoomId, ServerName, UInt, UserId,
	api::{
		client::{
			directory::{
				get_public_rooms, get_public_rooms_filtered, get_room_visibility,
				set_room_visibility,
			},
			error::ErrorKind,
			room,
		},
		federation,
	},
	directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork},
	directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork, RoomTypeFilter},
	events::{
		StateEventType,
		room::{
			join_rules::{JoinRule, RoomJoinRulesEventContent},
			power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
		},
		StateEventType,
	},
	uint, OwnedRoomId, RoomId, ServerName, UInt, UserId,
	uint,
};
use service::Services;

use crate::Ruma;

@@ -41,10 +52,13 @@ pub(crate) async fn get_public_rooms_filtered_route(
) -> Result<get_public_rooms_filtered::v3::Response> {
	if let Some(server) = &body.server {
		if services
			.server
			.config
			.forbidden_remote_room_directory_server_names
			.contains(server)
			.is_match(server.host())
			|| services
				.config
				.forbidden_remote_server_names
				.is_match(server.host())
		{
			return Err!(Request(Forbidden("Server is banned on this homeserver.")));
		}

@@ -60,11 +74,7 @@ pub(crate) async fn get_public_rooms_filtered_route(
	)
	.await
	.map_err(|e| {
		warn!(?body.server, "Failed to return /publicRooms: {e}");
		Error::BadRequest(
			ErrorKind::Unknown,
			"Failed to return the requested server's public room list.",
		)
		err!(Request(Unknown(warn!(?body.server, "Failed to return /publicRooms: {e}"))))
	})?;

	Ok(response)

@@ -83,10 +93,13 @@ pub(crate) async fn get_public_rooms_route(
) -> Result<get_public_rooms::v3::Response> {
	if let Some(server) = &body.server {
		if services
			.server
			.config
			.forbidden_remote_room_directory_server_names
			.contains(server)
			.is_match(server.host())
			|| services
				.config
				.forbidden_remote_server_names
				.is_match(server.host())
		{
			return Err!(Request(Forbidden("Server is banned on this homeserver.")));
		}

@@ -102,11 +115,7 @@ pub(crate) async fn get_public_rooms_route(
	)
	.await
	.map_err(|e| {
		warn!(?body.server, "Failed to return /publicRooms: {e}");
		Error::BadRequest(
			ErrorKind::Unknown,
			"Failed to return the requested server's public room list.",
		)
		err!(Request(Unknown(warn!(?body.server, "Failed to return /publicRooms: {e}"))))
	})?;

	Ok(get_public_rooms::v3::Response {

@@ -126,11 +135,11 @@ pub(crate) async fn set_room_visibility_route(
	InsecureClientIp(client): InsecureClientIp,
	body: Ruma<set_room_visibility::v3::Request>,
) -> Result<set_room_visibility::v3::Response> {
	let sender_user = body.sender_user.as_ref().expect("user is authenticated");
	let sender_user = body.sender_user();

	if !services.rooms.metadata.exists(&body.room_id).await {
		// Return 404 if the room doesn't exist
		return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found"));
		return Err!(Request(NotFound("Room not found")));
	}

	if services

@@ -144,15 +153,12 @@ pub(crate) async fn set_room_visibility_route(
	}

	if !user_can_publish_room(&services, sender_user, &body.room_id).await? {
		return Err(Error::BadRequest(
			ErrorKind::forbidden(),
			"User is not allowed to publish this room",
		));
		return Err!(Request(Forbidden("User is not allowed to publish this room")));
	}

	match &body.visibility {
		| room::Visibility::Public => {
			if services.globals.config.lockdown_public_room_directory
			if services.server.config.lockdown_public_room_directory
				&& !services.users.is_admin(sender_user).await
				&& body.appservice_info.is_none()
			{

@@ -162,7 +168,7 @@ pub(crate) async fn set_room_visibility_route(
					body.room_id
				);

				if services.globals.config.admin_room_notices {
				if services.server.config.admin_room_notices {
					services
						.admin
						.send_text(&format!(

@@ -173,15 +179,14 @@ pub(crate) async fn set_room_visibility_route(
						.await;
				}

				return Err(Error::BadRequest(
					ErrorKind::forbidden(),
				return Err!(Request(Forbidden(
					"Publishing rooms to the room directory is not allowed",
				));
				)));
			}

			services.rooms.directory.set_public(&body.room_id);

			if services.globals.config.admin_room_notices {
			if services.server.config.admin_room_notices {
				services
					.admin
					.send_text(&format!(

@@ -194,10 +199,7 @@ pub(crate) async fn set_room_visibility_route(
		},
		| room::Visibility::Private => services.rooms.directory.set_not_public(&body.room_id),
		| _ => {
			return Err(Error::BadRequest(
				ErrorKind::InvalidParam,
				"Room visibility type is not supported.",
			));
			return Err!(Request(InvalidParam("Room visibility type is not supported.",)));
		},
	}

@@ -213,7 +215,7 @@ pub(crate) async fn get_room_visibility_route(
) -> Result<get_room_visibility::v3::Response> {
	if !services.rooms.metadata.exists(&body.room_id).await {
		// Return 404 if the room doesn't exist
		return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found"));
		return Err!(Request(NotFound("Room not found")));
	}

	Ok(get_room_visibility::v3::Response {

@@ -261,22 +263,23 @@ pub(crate) async fn get_public_rooms_filtered_helper(
	}

	// Use limit or else 10, with maximum 100
	let limit = limit.map_or(10, u64::from);
	let mut num_since: u64 = 0;
	let limit: usize = limit.map_or(10_u64, u64::from).try_into()?;
	let mut num_since: usize = 0;

	if let Some(s) = &since {
		let mut characters = s.chars();
		let backwards = match characters.next() {
			| Some('n') => false,
			| Some('p') => true,
			| _ =>
				return Err(Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token")),
			| _ => {
				return Err!(Request(InvalidParam("Invalid `since` token")));
			},
		};

		num_since = characters
			.collect::<String>()
			.parse()
			.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token."))?;
			.map_err(|_| err!(Request(InvalidParam("Invalid `since` token."))))?;

		if backwards {
			num_since = num_since.saturating_sub(limit);

@@ -288,8 +291,12 @@ pub(crate) async fn get_public_rooms_filtered_helper(
		.directory
		.public_rooms()
		.map(ToOwned::to_owned)
		.then(|room_id| public_rooms_chunk(services, room_id))
		.filter_map(|chunk| async move {
		.wide_then(|room_id| public_rooms_chunk(services, room_id))
		.ready_filter_map(|chunk| {
			if !filter.room_types.is_empty() && !filter.room_types.contains(&RoomTypeFilter::from(chunk.room_type.clone())) {
				return None;
			}

			if let Some(query) = filter.generic_search_term.as_ref().map(|q| q.to_lowercase()) {
				if let Some(name) = &chunk.name {
					if name.as_str().to_lowercase().contains(&query) {

@@ -321,40 +328,24 @@ pub(crate) async fn get_public_rooms_filtered_helper(

	all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members));

	let total_room_count_estimate = UInt::try_from(all_rooms.len()).unwrap_or_else(|_| uint!(0));
	let total_room_count_estimate = UInt::try_from(all_rooms.len())
		.unwrap_or_else(|_| uint!(0))
		.into();

	let chunk: Vec<_> = all_rooms
		.into_iter()
		.skip(
			num_since
				.try_into()
				.expect("num_since should not be this high"),
		)
		.take(limit.try_into().expect("limit should not be this high"))
		.collect();
	let chunk: Vec<_> = all_rooms.into_iter().skip(num_since).take(limit).collect();

	let prev_batch = if num_since == 0 {
		None
	} else {
		Some(format!("p{num_since}"))
	};
	let prev_batch = num_since.ne(&0).then_some(format!("p{num_since}"));

	let next_batch = if chunk.len() < limit.try_into().unwrap() {
		None
	} else {
		Some(format!(
			"n{}",
			num_since
				.checked_add(limit)
				.expect("num_since and limit should not be that large")
		))
	};
	let next_batch = chunk
		.len()
		.ge(&limit)
		.then_some(format!("n{}", num_since.expected_add(limit)));

	Ok(get_public_rooms_filtered::v3::Response {
		chunk,
		prev_batch,
		next_batch,
		total_room_count_estimate: Some(total_room_count_estimate),
		total_room_count_estimate,
	})
}

@@ -365,71 +356,50 @@ async fn user_can_publish_room(
	user_id: &UserId,
	room_id: &RoomId,
) -> Result<bool> {
	if let Ok(event) = services
	match services
		.rooms
		.state_accessor
		.room_state_get(room_id, &StateEventType::RoomPowerLevels, "")
		.await
	{
		serde_json::from_str(event.content.get())
			.map_err(|_| Error::bad_database("Invalid event content for m.room.power_levels"))
		| Ok(event) => serde_json::from_str(event.content.get())
			.map_err(|_| err!(Database("Invalid event content for m.room.power_levels")))
			.map(|content: RoomPowerLevelsEventContent| {
				RoomPowerLevels::from(content)
					.user_can_send_state(user_id, StateEventType::RoomHistoryVisibility)
			})
	} else if let Ok(event) = services
			}),
		| _ => {
			match services
				.rooms
				.state_accessor
				.room_state_get(room_id, &StateEventType::RoomCreate, "")
				.await
			{
		Ok(event.sender == user_id)
	} else {
		return Err(Error::BadRequest(
			ErrorKind::forbidden(),
			"User is not allowed to publish this room",
		));
				| Ok(event) => Ok(event.sender == user_id),
				| _ => Err!(Request(Forbidden("User is not allowed to publish this room"))),
			}
		},
	}
}

async fn public_rooms_chunk(services: &Services, room_id: OwnedRoomId) -> PublicRoomsChunk {
	PublicRoomsChunk {
		canonical_alias: services
	let name = services.rooms.state_accessor.get_name(&room_id).ok();

	let room_type = services.rooms.state_accessor.get_room_type(&room_id).ok();

	let canonical_alias = services
		.rooms
		.state_accessor
		.get_canonical_alias(&room_id)
		.await
		.ok(),
		name: services.rooms.state_accessor.get_name(&room_id).await.ok(),
		num_joined_members: services
			.rooms
			.state_cache
			.room_joined_count(&room_id)
			.await
			.unwrap_or(0)
			.try_into()
			.expect("joined count overflows ruma UInt"),
		topic: services
			.rooms
			.state_accessor
			.get_room_topic(&room_id)
			.await
			.ok(),
		world_readable: services
			.rooms
			.state_accessor
			.is_world_readable(&room_id)
			.await,
		guest_can_join: services.rooms.state_accessor.guest_can_join(&room_id).await,
		avatar_url: services
			.rooms
			.state_accessor
			.get_avatar(&room_id)
			.await
			.into_option()
			.unwrap_or_default()
			.url,
		join_rule: services
		.ok();

	let avatar_url = services.rooms.state_accessor.get_avatar(&room_id);

	let topic = services.rooms.state_accessor.get_room_topic(&room_id).ok();

	let world_readable = services.rooms.state_accessor.is_world_readable(&room_id);

	let join_rule = services
		.rooms
		.state_accessor
		.room_state_get_content(&room_id, &StateEventType::RoomJoinRules, "")

@@ -438,15 +408,36 @@ async fn public_rooms_chunk(services: &Services, room_id: OwnedRoomId) -> Public
			| JoinRule::Knock => "knock".into(),
			| JoinRule::KnockRestricted(_) => "knock_restricted".into(),
			| _ => "invite".into(),
		})
		.await
		.unwrap_or_default(),
		room_type: services
			.rooms
			.state_accessor
			.get_room_type(&room_id)
			.await
			.ok(),
	});
		});

	let guest_can_join = services.rooms.state_accessor.guest_can_join(&room_id);

	let num_joined_members = services.rooms.state_cache.room_joined_count(&room_id);

	let (
		(avatar_url, canonical_alias, guest_can_join, join_rule, name),
		(num_joined_members, room_type, topic, world_readable),
	) = join(
		join5(avatar_url, canonical_alias, guest_can_join, join_rule, name),
		join4(num_joined_members, room_type, topic, world_readable),
	)
	.boxed()
	.await;

	PublicRoomsChunk {
		avatar_url: avatar_url.into_option().unwrap_or_default().url,
		canonical_alias,
		guest_can_join,
		join_rule: join_rule.unwrap_or_default(),
		name,
		num_joined_members: num_joined_members
			.map(TryInto::try_into)
			.map(Result::ok)
			.flat_ok()
			.unwrap_or_else(|| uint!(0)),
		room_id,
		room_type,
		topic,
		world_readable,
	}
}
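The pagination in `get_public_rooms_filtered_helper` encodes the offset into the `since` token: `n{offset}` pages forward, `p{offset}` pages backward, and the response recomputes `prev_batch`/`next_batch` from `num_since` and `limit`. A simplified sketch of that scheme, with error handling reduced to Option where the real code returns typed request errors:

fn parse_since(since: &str, limit: usize) -> Option<usize> {
	let mut characters = since.chars();
	let backwards = match characters.next()? {
		| 'n' => false,
		| 'p' => true,
		| _ => return None,
	};

	let mut num_since: usize = characters.collect::<String>().parse().ok()?;
	if backwards {
		// A "prev" token rewinds one page from the stored offset.
		num_since = num_since.saturating_sub(limit);
	}

	Some(num_since)
}

fn batch_tokens(num_since: usize, limit: usize, returned: usize) -> (Option<String>, Option<String>) {
	let prev_batch = (num_since != 0).then(|| format!("p{num_since}"));
	// A full page implies more rooms may follow this window.
	let next_batch = (returned >= limit).then(|| format!("n{}", num_since + limit));
	(prev_batch, next_batch)
}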
@@ -1,8 +1,8 @@
use axum::extract::State;
use conduwuit::err;
use conduwuit::{Result, err};
use ruma::api::client::filter::{create_filter, get_filter};

use crate::{Result, Ruma};
use crate::Ruma;

/// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}`
///
@@ -1,30 +1,30 @@
use std::collections::{BTreeMap, HashMap, HashSet};

use axum::extract::State;
use conduwuit::{err, utils, Error, Result};
use futures::{stream::FuturesUnordered, StreamExt};
use conduwuit::{Err, Error, Result, debug, debug_warn, err, result::NotFound, utils};
use conduwuit_service::{Services, users::parse_master_key};
use futures::{StreamExt, stream::FuturesUnordered};
use ruma::{
	OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId,
	api::{
		client::{
			error::ErrorKind,
			keys::{
				claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures,
				claim_keys, get_key_changes, get_keys, upload_keys,
				upload_signatures::{self},
				upload_signing_keys,
			},
			uiaa::{AuthFlow, AuthType, UiaaInfo},
		},
		federation,
	},
	encryption::CrossSigningKey,
	serde::Raw,
	OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId,
};
use serde_json::json;

use super::SESSION_ID_LENGTH;
use crate::{
	service::{users::parse_master_key, Services},
	Ruma,
};
use crate::Ruma;

/// # `POST /_matrix/client/r0/keys/upload`
///

@@ -40,6 +40,20 @@ pub(crate) async fn upload_keys_route(
	let (sender_user, sender_device) = body.sender();

	for (key_id, one_time_key) in &body.one_time_keys {
		if one_time_key
			.deserialize()
			.inspect_err(|e| {
				debug_warn!(
					?key_id,
					?one_time_key,
					"Invalid one time key JSON submitted by client, skipping: {e}"
				);
			})
			.is_err()
		{
			continue;
		}

		services
			.users
			.add_one_time_key(sender_user, sender_device, key_id, one_time_key)

@@ -47,14 +61,44 @@ pub(crate) async fn upload_keys_route(
	}

	if let Some(device_keys) = &body.device_keys {
		// TODO: merge this and the existing event?
		// This check is needed to assure that signatures are kept
		if services
		let deser_device_keys = device_keys.deserialize().map_err(|e| {
			err!(Request(BadJson(debug_warn!(
				?device_keys,
				"Invalid device keys JSON uploaded by client: {e}"
			))))
		})?;

		if deser_device_keys.user_id != sender_user {
			return Err!(Request(Unknown(
				"User ID in keys uploaded does not match your own user ID"
			)));
		}
		if deser_device_keys.device_id != sender_device {
			return Err!(Request(Unknown(
				"Device ID in keys uploaded does not match your own device ID"
			)));
		}

		if let Ok(existing_keys) = services
			.users
			.get_device_keys(sender_user, sender_device)
			.await
			.is_err()
		{
			if existing_keys.json().get() == device_keys.json().get() {
				debug!(
					?sender_user,
					?sender_device,
					?device_keys,
					"Ignoring user uploaded keys as they are an exact copy already in the \
					 database"
				);
			} else {
				services
					.users
					.add_device_keys(sender_user, sender_device, device_keys)
					.await;
			}
		} else {
			services
				.users
				.add_device_keys(sender_user, sender_device, device_keys)

@@ -125,7 +169,30 @@ pub(crate) async fn upload_signing_keys_route(
		auth_error: None,
	};

	if let Some(auth) = &body.auth {
	match check_for_new_keys(
		services,
		sender_user,
		body.self_signing_key.as_ref(),
		body.user_signing_key.as_ref(),
		body.master_key.as_ref(),
	)
	.await
	.inspect_err(|e| debug!(?e))
	{
		| Ok(exists) => {
			if let Some(result) = exists {
				// No-op, they tried to reupload the same set of keys
				// (lost connection for example)
				return Ok(result);
			}
			debug!(
				"Skipping UIA in accordance with MSC3967, the user didn't have any existing keys"
			);
			// Some of the keys weren't found, so we let them upload
		},
		| _ => {
			match &body.auth {
				| Some(auth) => {
					let (worked, uiaainfo) = services
						.uiaa
						.try_auth(sender_user, sender_device, auth, &uiaainfo)

@@ -135,83 +202,165 @@ pub(crate) async fn upload_signing_keys_route(
						return Err(Error::Uiaa(uiaainfo));
					}
					// Success!
	} else if let Some(json) = body.json_body {
				},
				| _ => match body.json_body {
					| Some(json) => {
						uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
						services
							.uiaa
							.create(sender_user, sender_device, &uiaainfo, &json);

						return Err(Error::Uiaa(uiaainfo));
	} else {
					},
					| _ => {
						return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
					},
				},
			}
		},
	}

	if let Some(master_key) = &body.master_key {
		services
			.users
			.add_cross_signing_keys(
				sender_user,
				master_key,
				&body.master_key,
				&body.self_signing_key,
				&body.user_signing_key,
				true, // notify so that other users see the new keys
			)
			.await?;
	}

	Ok(upload_signing_keys::v3::Response {})
}

async fn check_for_new_keys(
	services: crate::State,
	user_id: &UserId,
	self_signing_key: Option<&Raw<CrossSigningKey>>,
	user_signing_key: Option<&Raw<CrossSigningKey>>,
	master_signing_key: Option<&Raw<CrossSigningKey>>,
) -> Result<Option<upload_signing_keys::v3::Response>> {
	debug!("checking for existing keys");
	let mut empty = false;
	if let Some(master_signing_key) = master_signing_key {
		let (key, value) = parse_master_key(user_id, master_signing_key)?;
		let result = services
			.users
			.get_master_key(None, user_id, &|_| true)
			.await;
		if result.is_not_found() {
			empty = true;
		} else {
			let existing_master_key = result?;
			let (existing_key, existing_value) = parse_master_key(user_id, &existing_master_key)?;
			if existing_key != key || existing_value != value {
				return Err!(Request(Forbidden(
					"Tried to change an existing master key, UIA required"
				)));
			}
		}
	}
	if let Some(user_signing_key) = user_signing_key {
		let key = services.users.get_user_signing_key(user_id).await;
		if key.is_not_found() && !empty {
			return Err!(Request(Forbidden(
				"Tried to update an existing user signing key, UIA required"
			)));
		}
		if !key.is_not_found() {
			let existing_signing_key = key?.deserialize()?;
			if existing_signing_key != user_signing_key.deserialize()? {
				return Err!(Request(Forbidden(
					"Tried to change an existing user signing key, UIA required"
				)));
			}
		}
	}
	if let Some(self_signing_key) = self_signing_key {
		let key = services
			.users
			.get_self_signing_key(None, user_id, &|_| true)
			.await;
		if key.is_not_found() && !empty {
			debug!(?key);
			return Err!(Request(Forbidden(
				"Tried to add a new signing key independently from the master key"
			)));
		}
		if !key.is_not_found() {
			let existing_signing_key = key?.deserialize()?;
			if existing_signing_key != self_signing_key.deserialize()? {
				return Err!(Request(Forbidden(
					"Tried to update an existing self signing key, UIA required"
				)));
			}
		}
	}
	if empty {
		return Ok(None);
	}

	Ok(Some(upload_signing_keys::v3::Response {}))
}

/// # `POST /_matrix/client/r0/keys/signatures/upload`
///
/// Uploads end-to-end key signatures from the sender user.
///
/// TODO: clean this timo-code up more and integrate failures. tried to improve
/// it a bit to stop exploding the entire request on bad sigs, but needs way
/// more work.
pub(crate) async fn upload_signatures_route(
	State(services): State<crate::State>,
	body: Ruma<upload_signatures::v3::Request>,
) -> Result<upload_signatures::v3::Response> {
	let sender_user = body.sender_user.as_ref().expect("user is authenticated");
	if body.signed_keys.is_empty() {
		debug!("Empty signed_keys sent in key signature upload");
		return Ok(upload_signatures::v3::Response::new());
	}

	let sender_user = body.sender_user();

	for (user_id, keys) in &body.signed_keys {
		for (key_id, key) in keys {
			let key = serde_json::to_value(key)
				.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON"))?;
			let Ok(key) = serde_json::to_value(key)
				.inspect_err(|e| debug_warn!(?key_id, "Invalid \"key\" JSON: {e}"))
			else {
				continue;
			};

			for signature in key
				.get("signatures")
				.ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Missing signatures field."))?
				.get(sender_user.to_string())
				.ok_or(Error::BadRequest(
					ErrorKind::InvalidParam,
					"Invalid user in signatures field.",
				))?
				.as_object()
				.ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Invalid signature."))?
				.clone()
			{
				// Signature validation?
				let signature = (
					signature.0,
					signature
						.1
						.as_str()
						.ok_or(Error::BadRequest(
							ErrorKind::InvalidParam,
							"Invalid signature value.",
						))?
						.to_owned(),
				);
			let Some(signatures) = key.get("signatures") else {
				continue;
			};

				services
			let Some(sender_user_val) = signatures.get(sender_user.to_string()) else {
				continue;
			};

			let Some(sender_user_object) = sender_user_val.as_object() else {
				continue;
			};

			for (signature, val) in sender_user_object.clone() {
				let Some(val) = val.as_str().map(ToOwned::to_owned) else {
					continue;
				};
				let signature = (signature, val);

				if let Err(_e) = services
					.users
					.sign_key(user_id, key_id, signature, sender_user)
					.await?;
					.await
					.inspect_err(|e| debug_warn!("{e}"))
				{
					continue;
				}
			}
		}
	}

	Ok(upload_signatures::v3::Response {
		failures: BTreeMap::new(), // TODO: integrate
	})
	Ok(upload_signatures::v3::Response { failures: BTreeMap::new() })
}

/// # `POST /_matrix/client/r0/keys/changes`

@@ -385,7 +534,8 @@ where
	.collect();

	while let Some((server, response)) = futures.next().await {
		if let Ok(response) = response {
		match response {
			| Ok(response) => {
				for (user, master_key) in response.master_keys {
					let (master_key_id, mut master_key) = parse_master_key(&user, &master_key)?;

@@ -403,17 +553,21 @@ where
					.users
					.add_cross_signing_keys(
						&user, &raw, &None, &None,
						false, /* Dont notify. A notification would trigger another key request
						        * resulting in an endless loop */
						false, /* Dont notify. A notification would trigger another key
						        * request resulting in an endless loop */
					)
					.await?;
				if let Some(raw) = raw {
					master_keys.insert(user.clone(), raw);
				}
				}

				self_signing_keys.extend(response.self_signing_keys);
				device_keys.extend(response.device_keys);
			} else {
			},
			| _ => {
				failures.insert(server.to_string(), json!({}));
			},
		}
	}
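`upload_signing_keys_route` now consults `check_for_new_keys` before demanding auth: an exact re-upload short-circuits to success, a user with no stored keys may upload without UIA per MSC3967, and anything that would change a stored key still falls through to UIAA. A rough decision table under the simplifying assumption that every slot is present in the request; the real code skips slots the client omitted and has special-case handling for non-master keys:

#[derive(Clone, Copy, PartialEq)]
enum KeySlot {
	Absent,    // nothing stored for this key
	Unchanged, // exact re-upload of the stored key
	Changed,   // differs from the stored key
}

enum UiaDecision {
	SkipAndAccept, // pure no-op re-upload: return success immediately
	SkipFreshUser, // nothing stored yet: MSC3967 lets the upload proceed
	RequireUia,    // would alter stored keys: interactive auth required
}

fn decide(slots: [KeySlot; 3]) -> UiaDecision {
	if slots.iter().any(|&s| s == KeySlot::Changed) {
		return UiaDecision::RequireUia;
	}
	if slots.iter().all(|&s| s == KeySlot::Absent) {
		return UiaDecision::SkipFreshUser;
	}
	if slots.iter().any(|&s| s == KeySlot::Absent) {
		// Adding a new key next to existing ones still needs auth.
		return UiaDecision::RequireUia;
	}
	UiaDecision::SkipAndAccept
}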
@@ -3,16 +3,16 @@ use std::time::Duration;
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{
	err,
	Err, Result, err,
	utils::{self, content_disposition::make_content_disposition, math::ruma_from_usize},
	Err, Result,
};
use conduwuit_service::{
	media::{Dim, FileMeta, CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, MXC_LENGTH},
	Services,
	media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta, MXC_LENGTH},
};
use reqwest::Url;
use ruma::{
	Mxc, UserId,
	api::client::{
		authenticated_media::{
			get_content, get_content_as_filename, get_content_thumbnail, get_media_config,

@@ -20,7 +20,6 @@ use ruma::{
		},
		media::create_content,
	},
	Mxc, UserId,
};

use crate::Ruma;

@@ -31,7 +30,7 @@ pub(crate) async fn get_media_config_route(
	_body: Ruma<get_media_config::v1::Request>,
) -> Result<get_media_config::v1::Response> {
	Ok(get_media_config::v1::Response {
		upload_size: ruma_from_usize(services.globals.config.max_request_size),
		upload_size: ruma_from_usize(services.server.config.max_request_size),
	})
}

@@ -57,18 +56,27 @@ pub(crate) async fn create_content_route(
	let filename = body.filename.as_deref();
	let content_type = body.content_type.as_deref();
	let content_disposition = make_content_disposition(None, content_type, filename);
	let mxc = Mxc {
	let ref mxc = Mxc {
		server_name: services.globals.server_name(),
		media_id: &utils::random_string(MXC_LENGTH),
	};

	services
		.media
		.create(&mxc, Some(user), Some(&content_disposition), content_type, &body.file)
		.await
		.map(|()| create_content::v3::Response {
		.create(mxc, Some(user), Some(&content_disposition), content_type, &body.file)
		.await?;

	let blurhash = body.generate_blurhash.then(|| {
		services
			.media
			.create_blurhash(&body.file, content_type, filename)
			.ok()
			.flatten()
	});

	Ok(create_content::v3::Response {
		content_uri: mxc.to_string().into(),
		blurhash: None,
		blurhash: blurhash.flatten(),
	})
}
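`create_content_route` now honors the client's `generate_blurhash` flag (MSC2448) instead of always returning `blurhash: None`, and generation failures are swallowed so the upload still succeeds. A sketch of the Option-flattening pattern, with a hypothetical generate() standing in for services.media.create_blurhash:

// `generate` is a hypothetical stand-in for services.media.create_blurhash,
// which can fail or be disabled; errors are swallowed so uploads succeed.
fn generate(file: &[u8]) -> Option<String> {
	(!file.is_empty()).then(|| "LEHV6nWB2yk8pyo0adR*.7kCMdnj".to_owned())
}

fn blurhash_for(generate_blurhash: bool, file: &[u8]) -> Option<String> {
	// Outer Option: did the client ask? Inner Option: did generation work?
	let blurhash: Option<Option<String>> = generate_blurhash.then(|| generate(file));
	blurhash.flatten()
}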
@@ -3,21 +3,20 @@
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{
	err,
	Err, Result, err,
	utils::{content_disposition::make_content_disposition, math::ruma_from_usize},
	Err, Result,
};
use conduwuit_service::media::{Dim, FileMeta, CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN};
use conduwuit_service::media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta};
use reqwest::Url;
use ruma::{
	Mxc,
	api::client::media::{
		create_content, get_content, get_content_as_filename, get_content_thumbnail,
		get_media_config, get_media_preview,
	},
	Mxc,
};

use crate::{client::create_content_route, Ruma, RumaResponse};
use crate::{Ruma, RumaResponse, client::create_content_route};

/// # `GET /_matrix/media/v3/config`
///

@@ -27,7 +26,7 @@ pub(crate) async fn get_media_config_legacy_route(
	_body: Ruma<get_media_config::v3::Request>,
) -> Result<get_media_config::v3::Response> {
	Ok(get_media_config::v3::Response {
		upload_size: ruma_from_usize(services.globals.config.max_request_size),
		upload_size: ruma_from_usize(services.server.config.max_request_size),
	})
}

@@ -50,7 +49,7 @@ pub(crate) async fn get_media_config_legacy_legacy_route(
/// # `GET /_matrix/media/v3/preview_url`
///
/// Returns URL preview.
#[tracing::instrument(skip_all, fields(%client), name = "url_preview_legacy")]
#[tracing::instrument(skip_all, fields(%client), name = "url_preview_legacy", level = "debug")]
pub(crate) async fn get_media_preview_legacy_route(
	State(services): State<crate::State>,
	InsecureClientIp(client): InsecureClientIp,

@@ -131,7 +130,7 @@ pub(crate) async fn create_content_legacy_route(
/// - Only redirects if `allow_redirect` is true
/// - Uses client-provided `timeout_ms` if available, else defaults to 20
///   seconds
#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy")]
#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy", level = "debug")]
pub(crate) async fn get_content_legacy_route(
	State(services): State<crate::State>,
	InsecureClientIp(client): InsecureClientIp,

@@ -142,14 +141,17 @@ pub(crate) async fn get_content_legacy_route(
		media_id: &body.media_id,
	};

	if let Some(FileMeta {
	match services.media.get(&mxc).await? {
		| Some(FileMeta {
			content,
			content_type,
			content_disposition,
	}) = services.media.get(&mxc).await?
	{
		let content_disposition =
			make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None);
		}) => {
			let content_disposition = make_content_disposition(
				content_disposition.as_ref(),
				content_type.as_deref(),
				None,
			);

			Ok(get_content::v3::Response {
				file: content.expect("entire file contents"),

@@ -158,7 +160,9 @@ pub(crate) async fn get_content_legacy_route(
				cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
				cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
			})
	} else if !services.globals.server_is_ours(&body.server_name) && body.allow_remote {
		},
		| _ =>
			if !services.globals.server_is_ours(&body.server_name) && body.allow_remote {
				let response = services
					.media
					.fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms)

@@ -182,6 +186,7 @@ pub(crate) async fn get_content_legacy_route(
			})
		} else {
			Err!(Request(NotFound("Media not found.")))
		},
	}
}

@@ -197,7 +202,7 @@ pub(crate) async fn get_content_legacy_route(
/// - Only redirects if `allow_redirect` is true
/// - Uses client-provided `timeout_ms` if available, else defaults to 20
///   seconds
#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy")]
#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy", level = "debug")]
pub(crate) async fn get_content_legacy_legacy_route(
	State(services): State<crate::State>,
	InsecureClientIp(client): InsecureClientIp,

@@ -216,7 +221,7 @@ pub(crate) async fn get_content_legacy_legacy_route(
/// - Only redirects if `allow_redirect` is true
/// - Uses client-provided `timeout_ms` if available, else defaults to 20
///   seconds
#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy")]
#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy", level = "debug")]
pub(crate) async fn get_content_as_filename_legacy_route(
	State(services): State<crate::State>,
	InsecureClientIp(client): InsecureClientIp,

@@ -227,12 +232,12 @@ pub(crate) async fn get_content_as_filename_legacy_route(
		media_id: &body.media_id,
	};

	if let Some(FileMeta {
	match services.media.get(&mxc).await? {
		| Some(FileMeta {
			content,
			content_type,
			content_disposition,
	}) = services.media.get(&mxc).await?
	{
		}) => {
			let content_disposition = make_content_disposition(
				content_disposition.as_ref(),
				content_type.as_deref(),

@@ -246,7 +251,9 @@ pub(crate) async fn get_content_as_filename_legacy_route(
				cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
				cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
			})
	} else if !services.globals.server_is_ours(&body.server_name) && body.allow_remote {
		},
		| _ =>
			if !services.globals.server_is_ours(&body.server_name) && body.allow_remote {
				let response = services
					.media
					.fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms)

@@ -270,6 +277,7 @@ pub(crate) async fn get_content_as_filename_legacy_route(
			})
		} else {
			Err!(Request(NotFound("Media not found.")))
		},
	}
}

@@ -303,7 +311,7 @@ pub(crate) async fn get_content_as_filename_legacy_legacy_route(
/// - Only redirects if `allow_redirect` is true
/// - Uses client-provided `timeout_ms` if available, else defaults to 20
///   seconds
#[tracing::instrument(skip_all, fields(%client), name = "media_thumbnail_get_legacy")]
#[tracing::instrument(skip_all, fields(%client), name = "media_thumbnail_get_legacy", level = "debug")]
pub(crate) async fn get_content_thumbnail_legacy_route(
	State(services): State<crate::State>,
	InsecureClientIp(client): InsecureClientIp,

@@ -315,14 +323,17 @@ pub(crate) async fn get_content_thumbnail_legacy_route(
	};

	let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?;
	if let Some(FileMeta {
	match services.media.get_thumbnail(&mxc, &dim).await? {
		| Some(FileMeta {
			content,
			content_type,
			content_disposition,
	}) = services.media.get_thumbnail(&mxc, &dim).await?
	{
		let content_disposition =
			make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None);
		}) => {
			let content_disposition = make_content_disposition(
				content_disposition.as_ref(),
				content_type.as_deref(),
				None,
			);

			Ok(get_content_thumbnail::v3::Response {
				file: content.expect("entire file contents"),

@@ -331,7 +342,9 @@ pub(crate) async fn get_content_thumbnail_legacy_route(
				cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
				content_disposition: Some(content_disposition),
			})
	} else if !services.globals.server_is_ours(&body.server_name) && body.allow_remote {
		},
		| _ =>
			if !services.globals.server_is_ours(&body.server_name) && body.allow_remote {
				let response = services
					.media
					.fetch_remote_thumbnail_legacy(&body)

@@ -355,6 +368,7 @@ pub(crate) async fn get_content_thumbnail_legacy_route(
			})
		} else {
			Err!(Request(NotFound("Media not found.")))
		},
	}
}
File diff suppressed because it is too large
@@ -1,33 +1,39 @@
use std::collections::HashSet;

use axum::extract::State;
use conduwuit::{
	at, is_equal_to,
	Err, Result, at,
	matrix::{
		Event,
		pdu::{PduCount, PduEvent},
	},
	utils::{
		IterStream, ReadyExt,
		result::{FlatOk, LogErr},
		stream::{BroadbandExt, TryIgnore, WidebandExt},
		IterStream, ReadyExt,
	},
	Event, PduCount, Result,
};
use futures::{FutureExt, StreamExt};
use conduwuit_service::{
	Services,
	rooms::{
		lazy_loading,
		lazy_loading::{Options, Witness},
		timeline::PdusIterItem,
	},
};
use futures::{FutureExt, StreamExt, TryFutureExt, future::OptionFuture, pin_mut};
use ruma::{
	RoomId, UserId,
	api::{
		client::{filter::RoomEventFilter, message::get_message_events},
		Direction,
		client::{filter::RoomEventFilter, message::get_message_events},
	},
	events::{AnyStateEvent, StateEventType, TimelineEventType, TimelineEventType::*},
	serde::Raw,
	DeviceId, OwnedUserId, RoomId, UserId,
};
use service::{rooms::timeline::PdusIterItem, Services};

use crate::Ruma;

pub(crate) type LazySet = HashSet<OwnedUserId>;

/// list of safe and common non-state events to ignore if the user is ignored
const IGNORED_MESSAGE_TYPES: &[TimelineEventType; 17] = &[
const IGNORED_MESSAGE_TYPES: &[TimelineEventType] = &[
	Audio,
	CallInvite,
	Emote,

@@ -66,6 +72,10 @@ pub(crate) async fn get_message_events_route(
	let room_id = &body.room_id;
	let filter = &body.filter;

	if !services.rooms.metadata.exists(room_id).await {
		return Err!(Request(Forbidden("Room does not exist to this server")));
	}

	let from: PduCount = body
		.from
		.as_deref()

@@ -84,13 +94,6 @@ pub(crate) async fn get_message_events_route(
		.unwrap_or(LIMIT_DEFAULT)
		.min(LIMIT_MAX);

	services.rooms.lazy_loading.lazy_load_confirm_delivery(
		sender_user,
		sender_device,
		room_id,
		from,
	);

	if matches!(body.dir, Direction::Backward) {
		services
			.rooms

@@ -127,39 +130,38 @@ pub(crate) async fn get_message_events_route(
		.collect()
		.await;

	let lazy = events
		.iter()
		.stream()
		.fold(LazySet::new(), |lazy, item| {
			update_lazy(&services, room_id, sender, lazy, item, false)
		})
		.await;
	let lazy_loading_context = lazy_loading::Context {
		user_id: sender_user,
		device_id: sender_device,
		room_id,
		token: Some(from.into_unsigned()),
		options: Some(&filter.lazy_load_options),
	};

	let state = lazy
		.iter()
		.stream()
		.broad_filter_map(|user_id| get_member_event(&services, room_id, user_id))
	let witness: OptionFuture<_> = filter
		.lazy_load_options
		.is_enabled()
		.then(|| lazy_loading_witness(&services, &lazy_loading_context, events.iter()))
		.into();

	let state = witness
		.map(Option::into_iter)
		.map(|option| option.flat_map(Witness::into_iter))
		.map(IterStream::stream)
		.into_stream()
		.flatten()
		.broad_filter_map(|user_id| async move {
			get_member_event(&services, room_id, &user_id).await
		})
		.collect()
		.await;

	let next_token = events.last().map(at!(0));

	if !cfg!(feature = "element_hacks") {
		if let Some(next_token) = next_token {
			services.rooms.lazy_loading.lazy_load_mark_sent(
				sender_user,
				sender_device,
				room_id,
				lazy,
				next_token,
			);
		}
	}

	let chunk = events
		.into_iter()
		.map(at!(1))
		.map(|pdu| pdu.to_room_event())
		.map(PduEvent::into_room_event)
		.collect();

	Ok(get_message_events::v3::Response {

@@ -170,6 +172,52 @@ pub(crate) async fn get_message_events_route(
	})
}

pub(crate) async fn lazy_loading_witness<'a, I>(
	services: &Services,
	lazy_loading_context: &lazy_loading::Context<'_>,
	events: I,
) -> Witness
where
	I: Iterator<Item = &'a PdusIterItem> + Clone + Send,
{
	let oldest = events
		.clone()
		.map(|(count, _)| count)
		.copied()
		.min()
		.unwrap_or_else(PduCount::max);

	let newest = events
		.clone()
		.map(|(count, _)| count)
		.copied()
		.max()
		.unwrap_or_else(PduCount::max);

	let receipts = services
		.rooms
		.read_receipt
		.readreceipts_since(lazy_loading_context.room_id, oldest.into_unsigned());

	pin_mut!(receipts);
	let witness: Witness = events
		.stream()
		.map(|(_, pdu)| pdu.sender.clone())
		.chain(
			receipts
				.ready_take_while(|(_, c, _)| *c <= newest.into_unsigned())
				.map(|(user_id, ..)| user_id.to_owned()),
		)
		.collect()
		.await;

	services
		.rooms
		.lazy_loading
		.witness_retain(witness, lazy_loading_context)
		.await
}
||||
async fn get_member_event(
|
||||
services: &Services,
|
||||
room_id: &RoomId,
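The added `lazy_loading_witness` bounds its read-receipt scan by the oldest and newest `PduCount` in the requested window, then collects event senders plus in-window receipt senders into the witness set. A self-contained sketch of that bounding logic, using plain `(u64, &str)` pairs as stand-ins for conduwuit's `PdusIterItem` and receipt stream (all names here are illustrative):

```rust
// Sketch: bound a receipt scan by the oldest/newest count in an event window.
// (u64, &str) pairs stand in for (PduCount, sender); not conduwuit's real types.
fn window_bounds(events: &[(u64, &str)]) -> (u64, u64) {
	let oldest = events.iter().map(|(count, _)| *count).min().unwrap_or(u64::MAX);
	let newest = events.iter().map(|(count, _)| *count).max().unwrap_or(u64::MAX);
	(oldest, newest)
}

fn main() {
	let events = [(7, "@a:example.org"), (3, "@b:example.org"), (5, "@c:example.org")];
	let (oldest, newest) = window_bounds(&events);
	assert_eq!((oldest, newest), (3, 7));

	// Receipts are read since `oldest` and taken only while their count
	// stays inside the window, mirroring the ready_take_while above.
	let receipts = [(3u64, "@d:example.org"), (6, "@e:example.org"), (9, "@f:example.org")];
	let in_window: Vec<_> = receipts
		.iter()
		.take_while(|(count, _)| *count <= newest)
		.map(|(_, user)| *user)
		.collect();
	assert_eq!(in_window, ["@d:example.org", "@e:example.org"]);
}
```

Falling back to a maximal count on an empty window means an empty chunk scans no receipts, which appears to be the intent of the `unwrap_or_else(PduCount::max)` fallback.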
@@ -179,75 +227,54 @@ async fn get_member_event(
 		.rooms
 		.state_accessor
 		.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())
+		.map_ok(PduEvent::into_state_event)
 		.await
-		.map(|member_event| member_event.to_state_event())
 		.ok()
 }
 
-pub(crate) async fn update_lazy(
-	services: &Services,
-	room_id: &RoomId,
-	sender: (&UserId, &DeviceId),
-	mut lazy: LazySet,
-	item: &PdusIterItem,
-	force: bool,
-) -> LazySet {
-	let (_, event) = &item;
-	let (sender_user, sender_device) = sender;
-
-	/* TODO: Remove the "element_hacks" check when these are resolved:
-	 * https://github.com/vector-im/element-android/issues/3417
-	 * https://github.com/vector-im/element-web/issues/21034
-	 */
-	if force || cfg!(features = "element_hacks") {
-		lazy.insert(event.sender().into());
-		return lazy;
-	}
-
-	if lazy.contains(event.sender()) {
-		return lazy;
-	}
-
-	if !services
-		.rooms
-		.lazy_loading
-		.lazy_load_was_sent_before(sender_user, sender_device, room_id, event.sender())
-		.await
-	{
-		lazy.insert(event.sender().into());
-	}
-
-	lazy
-}
-
 #[inline]
 pub(crate) async fn ignored_filter(
 	services: &Services,
 	item: PdusIterItem,
 	user_id: &UserId,
 ) -> Option<PdusIterItem> {
-	let (_, pdu) = &item;
+	let (_, ref pdu) = item;
+
+	is_ignored_pdu(services, pdu, user_id)
+		.await
+		.eq(&false)
+		.then_some(item)
+}
+
+#[inline]
+pub(crate) async fn is_ignored_pdu(
+	services: &Services,
+	pdu: &PduEvent,
+	user_id: &UserId,
+) -> bool {
 	// exclude Synapse's dummy events from bloating up response bodies. clients
 	// don't need to see this.
 	if pdu.kind.to_cow_str() == "org.matrix.dummy_event" {
-		return None;
+		return true;
 	}
 
-	if IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok()
-		&& (services.users.user_is_ignored(&pdu.sender, user_id).await
-			|| services
-				.server
-				.config
-				.forbidden_remote_server_names
-				.iter()
-				.any(is_equal_to!(pdu.sender().server_name())))
+	let ignored_type = IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok();
+
+	let ignored_server = services
+		.config
+		.forbidden_remote_server_names
+		.is_match(pdu.sender().server_name().host());
+
+	if ignored_type
+		&& (ignored_server || services.users.user_is_ignored(&pdu.sender, user_id).await)
 	{
-		return None;
+		return true;
 	}
 
-	Some(item)
+	false
 }
 
 #[inline]
 pub(crate) async fn visibility_filter(
 	services: &Services,
 	item: PdusIterItem,
@@ -263,7 +290,16 @@ pub(crate) async fn visibility_filter(
 		.then_some(item)
 }
 
+#[inline]
 pub(crate) fn event_filter(item: PdusIterItem, filter: &RoomEventFilter) -> Option<PdusIterItem> {
 	let (_, pdu) = &item;
 	pdu.matches(filter).then_some(item)
 }
+
+#[cfg_attr(debug_assertions, conduwuit::ctor)]
+fn _is_sorted() {
+	debug_assert!(
+		IGNORED_MESSAGE_TYPES.is_sorted(),
+		"IGNORED_MESSAGE_TYPES must be sorted by the developer"
+	);
+}
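`is_ignored_pdu` above looks event types up with `binary_search`, which is only correct on sorted input; that is exactly the invariant the new `_is_sorted` ctor asserts in debug builds. A minimal standalone sketch of the same pattern (table contents are illustrative; `slice::is_sorted` requires a reasonably recent stable Rust):

```rust
// A table probed with binary_search must stay sorted, or lookups can
// silently miss entries that are actually present.
const LOOKUP: &[&str] = &["m.audio", "m.call.invite", "m.emote"]; // keep sorted

fn is_ignored(kind: &str) -> bool {
	debug_assert!(LOOKUP.is_sorted(), "LOOKUP must be sorted");
	LOOKUP.binary_search(&kind).is_ok()
}

fn main() {
	assert!(is_ignored("m.emote"));
	assert!(!is_ignored("m.text"));
}
```

Running the check from a ctor under `debug_assertions` keeps the cost out of release builds while still failing fast during development.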
@@ -1,14 +1,14 @@
 use std::time::Duration;
 
 use axum::extract::State;
-use conduwuit::utils;
+use conduwuit::{Error, Result, utils};
 use ruma::{
 	api::client::{account, error::ErrorKind},
 	authentication::TokenType,
 };
 
 use super::TOKEN_LENGTH;
-use crate::{Error, Result, Ruma};
+use crate::Ruma;
 
 /// # `POST /_matrix/client/v3/user/{userId}/openid/request_token`
 ///

@@ -37,7 +37,7 @@ pub(crate) async fn create_openid_token_route(
 	Ok(account::request_openid_token::v3::Response {
 		access_token,
 		token_type: TokenType::Bearer,
-		matrix_server_name: services.globals.config.server_name.clone(),
+		matrix_server_name: services.server.name.clone(),
 		expires_in: Duration::from_secs(expires_in),
 	})
 }
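The only behavioral change in the OpenID hunk is where the server name is read from; the response itself keeps the shape fixed by the Matrix client-server spec. A hedged sketch of that shape, with a hand-rolled struct standing in for ruma's generated response type:

```rust
use std::time::Duration;

// Illustrative shape of the request_token response assembled above; the
// field names follow the client-server spec, the struct is a stand-in.
struct OpenIdTokenResponse {
	access_token: String,
	token_type: &'static str, // always "Bearer" here
	matrix_server_name: String,
	expires_in: Duration,
}

fn main() {
	let response = OpenIdTokenResponse {
		access_token: "abcdef123456".to_owned(),
		token_type: "Bearer",
		matrix_server_name: "example.org".to_owned(),
		expires_in: Duration::from_secs(3600),
	};

	// A federation peer would validate this token against example.org.
	assert_eq!(response.token_type, "Bearer");
	assert_eq!(response.matrix_server_name, "example.org");
	assert!(!response.access_token.is_empty());
}
```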
@@ -1,12 +1,10 @@
 use std::time::Duration;
 
 use axum::extract::State;
-use ruma::api::client::{
-	error::ErrorKind,
-	presence::{get_presence, set_presence},
-};
+use conduwuit::{Err, Result};
+use ruma::api::client::presence::{get_presence, set_presence};
 
-use crate::{Error, Result, Ruma};
+use crate::Ruma;
 
 /// # `PUT /_matrix/client/r0/presence/{userId}/status`
 ///

@@ -15,24 +13,17 @@ pub(crate) async fn set_presence_route(
 	State(services): State<crate::State>,
 	body: Ruma<set_presence::v3::Request>,
 ) -> Result<set_presence::v3::Response> {
-	if !services.globals.allow_local_presence() {
-		return Err(Error::BadRequest(
-			ErrorKind::forbidden(),
-			"Presence is disabled on this server",
-		));
+	if !services.config.allow_local_presence {
+		return Err!(Request(Forbidden("Presence is disabled on this server")));
 	}
 
-	let sender_user = body.sender_user.as_ref().expect("user is authenticated");
-	if sender_user != &body.user_id && body.appservice_info.is_none() {
-		return Err(Error::BadRequest(
-			ErrorKind::InvalidParam,
-			"Not allowed to set presence of other users",
-		));
+	if body.sender_user() != body.user_id && body.appservice_info.is_none() {
+		return Err!(Request(InvalidParam("Not allowed to set presence of other users")));
 	}
 
 	services
 		.presence
-		.set_presence(sender_user, &body.presence, None, None, body.status_msg.clone())
+		.set_presence(body.sender_user(), &body.presence, None, None, body.status_msg.clone())
 		.await?;
 
 	Ok(set_presence::v3::Response {})

@@ -47,21 +38,15 @@ pub(crate) async fn get_presence_route(
 	State(services): State<crate::State>,
 	body: Ruma<get_presence::v3::Request>,
 ) -> Result<get_presence::v3::Response> {
-	if !services.globals.allow_local_presence() {
-		return Err(Error::BadRequest(
-			ErrorKind::forbidden(),
-			"Presence is disabled on this server",
-		));
+	if !services.config.allow_local_presence {
+		return Err!(Request(Forbidden("Presence is disabled on this server",)));
 	}
 
-	let sender_user = body.sender_user.as_ref().expect("user is authenticated");
-
 	let mut presence_event = None;
 
 	let has_shared_rooms = services
 		.rooms
 		.state_cache
-		.user_sees_user(sender_user, &body.user_id)
+		.user_sees_user(body.sender_user(), &body.user_id)
 		.await;
 
 	if has_shared_rooms {

@@ -70,7 +55,8 @@ pub(crate) async fn get_presence_route(
 		}
 	}
 
-	if let Some(presence) = presence_event {
+	match presence_event {
+		| Some(presence) => {
 			let status_msg = if presence
 				.content
 				.status_msg

@@ -82,20 +68,22 @@ pub(crate) async fn get_presence_route(
 				presence.content.status_msg
 			};
 
+			let last_active_ago = match presence.content.currently_active {
+				| Some(true) => None,
+				| _ => presence
+					.content
+					.last_active_ago
+					.map(|millis| Duration::from_millis(millis.into())),
+			};
+
 			Ok(get_presence::v3::Response {
 				// TODO: Should ruma just use the presenceeventcontent type here?
 				status_msg,
 				currently_active: presence.content.currently_active,
-				last_active_ago: presence
-					.content
-					.last_active_ago
-					.map(|millis| Duration::from_millis(millis.into())),
+				last_active_ago,
 				presence: presence.content.presence,
 			})
-	} else {
-		Err(Error::BadRequest(
-			ErrorKind::NotFound,
-			"Presence state for this user was not found",
-		))
+		},
+		| _ => Err!(Request(NotFound("Presence state for this user was not found"))),
 	}
 }
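A pattern repeated across these presence hunks: the multi-line `Error::BadRequest(ErrorKind::..., "...")` construction collapses into a one-line `Err!(Request(...))` macro call. A toy illustration of how such a macro tightens early returns (this is not conduwuit's actual `Err!` implementation, only the shape of the idea):

```rust
// Illustrative error plumbing; all types here are hypothetical.
#[derive(Debug)]
#[allow(dead_code)]
enum ErrorKind { Forbidden, InvalidParam, NotFound }

#[derive(Debug)]
#[allow(dead_code)]
struct ApiError { kind: ErrorKind, message: String }

macro_rules! err_request {
	($kind:ident, $msg:expr) => {
		Err(ApiError { kind: ErrorKind::$kind, message: $msg.to_owned() })
	};
}

fn check_presence_enabled(allow_local_presence: bool) -> Result<(), ApiError> {
	if !allow_local_presence {
		// One line replaces the four-line Error::BadRequest construction.
		return err_request!(Forbidden, "Presence is disabled on this server");
	}
	Ok(())
}

fn main() {
	assert!(check_presence_enabled(true).is_ok());
	assert!(check_presence_enabled(false).is_err());
}
```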
@@ -2,12 +2,15 @@ use std::collections::BTreeMap;
 
 use axum::extract::State;
 use conduwuit::{
-	pdu::PduBuilder,
-	utils::{stream::TryIgnore, IterStream},
-	warn, Err, Error, Result,
+	Err, Error, Result,
+	matrix::pdu::PduBuilder,
+	utils::{IterStream, stream::TryIgnore},
+	warn,
 };
-use futures::{future::join3, StreamExt, TryStreamExt};
+use conduwuit_service::Services;
+use futures::{StreamExt, TryStreamExt, future::join3};
 use ruma::{
+	OwnedMxcUri, OwnedRoomId, UserId,
 	api::{
 		client::{
 			error::ErrorKind,

@@ -19,9 +22,7 @@ use ruma::{
 	},
 	events::room::member::{MembershipState, RoomMemberEventContent},
 	presence::PresenceState,
-	OwnedMxcUri, OwnedRoomId, UserId,
 };
-use service::Services;
 
 use crate::Ruma;
 

@@ -51,7 +52,7 @@ pub(crate) async fn set_displayname_route(
 	update_displayname(&services, &body.user_id, body.displayname.clone(), &all_joined_rooms)
 		.await;
 
-	if services.globals.allow_local_presence() {
+	if services.config.allow_local_presence {
 		// Presence update
 		services
 			.presence

@@ -146,7 +147,7 @@ pub(crate) async fn set_avatar_url_route(
 	)
 	.await;
 
-	if services.globals.allow_local_presence() {
+	if services.config.allow_local_presence {
 		// Presence update
 		services
 			.presence
@@ -1,6 +1,8 @@
 use axum::extract::State;
-use conduwuit::{err, Err};
+use conduwuit::{Err, Error, Result, err};
+use conduwuit_service::Services;
 use ruma::{
+	CanonicalJsonObject, CanonicalJsonValue,
 	api::client::{
 		error::ErrorKind,
 		push::{

@@ -10,18 +12,16 @@ use ruma::{
 		},
 	},
 	events::{
-		push_rules::{PushRulesEvent, PushRulesEventContent},
 		GlobalAccountDataEventType,
+		push_rules::{PushRulesEvent, PushRulesEventContent},
 	},
 	push::{
 		InsertPushRuleError, PredefinedContentRuleId, PredefinedOverrideRuleId,
 		RemovePushRuleError, Ruleset,
 	},
-	CanonicalJsonObject, CanonicalJsonValue,
 };
-use service::Services;
 
-use crate::{Error, Result, Ruma};
+use crate::Ruma;
 
 /// # `GET /_matrix/client/r0/pushrules/`
 ///

@@ -503,7 +503,7 @@ pub(crate) async fn set_pushers_route(
 
 	services
 		.pusher
-		.set_pusher(sender_user, &body.action)
+		.set_pusher(sender_user, body.sender_device(), &body.action)
 		.await?;
 
 	Ok(set_pusher::v3::Response::new())
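The pusher hunk threads `body.sender_device()` into `set_pusher`, so a pusher is now keyed by user and device rather than by user alone. A toy sketch of that keying change (hypothetical types, not the service's real storage):

```rust
use std::collections::HashMap;

// Before: one pusher slot per user. After: one per (user, device).
type UserId = String;
type DeviceId = String;
type Pusher = String;

#[derive(Default)]
struct Pushers {
	by_user_device: HashMap<(UserId, DeviceId), Pusher>,
}

impl Pushers {
	fn set_pusher(&mut self, user: &str, device: &str, pusher: &str) {
		self.by_user_device
			.insert((user.to_owned(), device.to_owned()), pusher.to_owned());
	}
}

fn main() {
	let mut pushers = Pushers::default();
	pushers.set_pusher("@alice:example.org", "PHONE", "http-pusher");
	pushers.set_pusher("@alice:example.org", "LAPTOP", "email-pusher");
	assert_eq!(pushers.by_user_device.len(), 2); // one pusher per device
}
```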
@@ -1,17 +1,17 @@
 use std::collections::BTreeMap;
 
 use axum::extract::State;
-use conduwuit::{err, Err, PduCount};
+use conduwuit::{Err, PduCount, Result, err};
 use ruma::{
+	MilliSecondsSinceUnixEpoch,
 	api::client::{read_marker::set_read_marker, receipt::create_receipt},
 	events::{
-		receipt::{ReceiptThread, ReceiptType},
 		RoomAccountDataEventType,
+		receipt::{ReceiptThread, ReceiptType},
 	},
-	MilliSecondsSinceUnixEpoch,
 };
 
-use crate::{Result, Ruma};
+use crate::Ruma;
 
 /// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers`
 ///

@@ -50,7 +50,7 @@ pub(crate) async fn set_read_marker_route(
 	}
 
 	// ping presence
-	if services.globals.allow_local_presence() {
+	if services.config.allow_local_presence {
 		services
 			.presence
 			.ping_presence(sender_user, &ruma::presence::PresenceState::Online)

@@ -126,7 +126,7 @@ pub(crate) async fn create_receipt_route(
 	}
 
 	// ping presence
-	if services.globals.allow_local_presence() {
+	if services.config.allow_local_presence {
 		services
 			.presence
 			.ping_presence(sender_user, &ruma::presence::PresenceState::Online)

@@ -197,11 +197,12 @@ pub(crate) async fn create_receipt_route(
 			.read_receipt
 			.private_read_set(&body.room_id, sender_user, count);
 		},
-		| _ =>
+		| _ => {
 			return Err!(Request(InvalidParam(warn!(
 				"Received unknown read receipt type: {}",
 				&body.receipt_type
-			)))),
+			))));
+		},
 	}
 
 	Ok(create_receipt::v3::Response {})
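The last receipt hunk turns the fallback arm from an expression arm into a block arm, so the early `return Err!(...)` reads as a statement shaped like the arms above it. A minimal illustration with a toy enum (not the route's real types):

```rust
enum ReceiptType { Read, ReadPrivate, Unknown(String) }

fn handle(receipt: ReceiptType) -> Result<(), String> {
	match receipt {
		| ReceiptType::Read => { /* set public read receipt */ },
		| ReceiptType::ReadPrivate => { /* set private read marker */ },
		// A block arm lets the early return be a normal statement and
		// keeps this arm's shape consistent with its siblings.
		| ReceiptType::Unknown(t) => {
			return Err(format!("Received unknown read receipt type: {t}"));
		},
	}
	Ok(())
}

fn main() {
	assert!(handle(ReceiptType::Read).is_ok());
	assert!(handle(ReceiptType::ReadPrivate).is_ok());
	assert!(handle(ReceiptType::Unknown("m.bogus".into())).is_err());
}
```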
Some files were not shown because too many files have changed in this diff.