Mirror of https://gitlab.com/famedly/conduit.git
Synced 2024-11-16 02:08:16 -07:00

Commit 5cf9f3df48

Merge branch 'lints' into 'next'

fix CI

See merge request famedly/conduit!564
.gitignore (vendored): 3 lines added

@@ -68,3 +68,6 @@ cached_target

 # Direnv cache
 /.direnv
+
+# Gitlab CI cache
+/.gitlab-ci.d
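A quick way to confirm the new entry behaves as intended (illustrative; the path mirrors the CARGO_HOME that the new CI configuration places under .gitlab-ci.d):

    git check-ignore -v .gitlab-ci.d/cargo
    # prints the .gitignore line and the pattern (/.gitlab-ci.d) that matches the path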
.gitlab-ci.yml: 284 lines changed

@@ -1,244 +1,66 @@

The old 244-line Docker-based pipeline is removed. It defined the stages "build", "build docker image", "test" and "upload artifacts", speed/cache variables (GIT_SUBMODULE_STRATEGY: recursive, FF_USE_FASTZIP, CACHE_COMPRESSION_LEVEL), and these jobs:

  - .docker-shared-settings plus docker:next, docker:master, docker:tags and "docker build debugging": Docker-in-Docker jobs that ran docker buildx over the Dockerfile for linux/arm/v7, linux/arm64 and linux/amd64, pushed the images to the GitLab registry (and, when credentials were present, to Docker Hub), and extracted .deb packages and binaries from the packager-result and builder-result stages.
  - cargo check and the .test-shared-settings jobs test:cargo, test:clippy, test:format, test:audit and test:dockerlint (hadolint), running on rust:1.70.0-bullseye and a rust-with-tools image.
  - A commented-out publish:package job for uploading binaries and .deb files to the generic package registry, plus workflow rules for avoiding duplicate branch/MR pipelines.

The replacement is a 66-line Nix-based pipeline:

stages:
  - ci
  - artifacts

variables:
  # Makes some things print in color
  TERM: ansi

before_script:
  # Enable nix-command and flakes
  - if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi

  # Add nix-community binary cache
  - if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi
  - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi

  # Install direnv and nix-direnv
  - if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi

  # Allow .envrc
  - if command -v nix > /dev/null; then direnv allow; fi

  # Set CARGO_HOME to a cacheable path
  - export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo"

ci:
  stage: ci
  image: nixos/nix:2.19.2
  script:
    - direnv exec . engage
  cache:
    key: nix
    paths:
      - target
      - .gitlab-ci.d

docker:
  stage: artifacts
  image: nixos/nix:2.19.2
  script:
    - nix build .#oci-image

    # Make the output less difficult to find
    - cp result docker-image.tar.gz
  artifacts:
    paths:
      - docker-image.tar.gz

debian:
  stage: artifacts
  image: rust:1.70.0
  script:
    - apt-get update && apt-get install -y --no-install-recommends libclang-dev
    - cargo install cargo-deb
    - cargo deb

    # Make the output less difficult to find
    - mv target/debian/*.deb .
  artifacts:
    paths:
      - "*.deb"
  cache:
    key: debian
    paths:
      - target
      - .gitlab-ci.d
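Assuming a local Nix installation with flakes enabled and direnv set up the same way the pipeline does it, the ci job can be approximated on a developer machine; a sketch, not the exact CI environment:

    # Roughly what the "ci" job does, from a local checkout
    direnv allow            # trust the repository's .envrc
    direnv exec . engage    # run the version/lint/test tasks defined in engage.toml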
Cargo.lock (generated): 136 lines changed

With the heed backend disabled in Cargo.toml, the lock file drops the entries that only it pulled in. Removed packages: bincode 1.3.3, crossbeam 0.8.2, crossbeam-deque 0.8.3, crossbeam-epoch 0.9.15, crossbeam-queue 0.3.8, heed 0.10.6, heed-traits 0.7.0 and heed-types 0.7.2 (all three from the timokoesters/heed fork at rev f6f825da), lmdb-rkv-sys 0.11.2, memoffset 0.9.0, page_size 0.4.2 and synchronoise 1.0.1. The conduit package no longer lists "crossbeam" and "heed" among its dependencies, and with only one memoffset version left, the nix entry now depends on plain "memoffset" instead of "memoffset 0.7.1".
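This kind of Cargo.lock pruning does not need to be done by hand; once the dependencies are commented out of Cargo.toml, any command that re-resolves the dependency graph rewrites the lock file (a sketch, assuming an ordinary checkout):

    # Re-resolve dependencies; entries nothing references any more are dropped
    cargo check
    git diff --stat Cargo.lock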
Cargo.toml: 16 lines changed

@@ -9,10 +9,12 @@ readme = "README.md"
 version = "0.7.0-alpha"
 edition = "2021"
 
-# When changing this, make sure to update the `flake.lock` file by running
-# `nix flake update`. If you don't have Nix installed or otherwise don't know
-# how to do this, ping `@charles:computer.surgery` or `@dusk:gaze.systems` in
-# the matrix room.
+# When changing this, make sure to update the hash near the text "THE
+# rust-version HASH" in `flake.nix`. If you don't have Nix installed or
+# otherwise don't know how to do this, ping `@charles:computer.surgery` or
+# `@dusk:gaze.systems` in the matrix room.
+#
+# Also make sure to update the docker image tags in `.gitlab-ci.yml`.
 rust-version = "1.70.0"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -78,10 +80,10 @@ tracing-opentelemetry = "0.18.0"
 lru-cache = "0.1.2"
 rusqlite = { version = "0.29.0", optional = true, features = ["bundled"] }
 parking_lot = { version = "0.12.1", optional = true }
-crossbeam = { version = "0.8.2", optional = true }
+# crossbeam = { version = "0.8.2", optional = true }
 num_cpus = "1.15.0"
 threadpool = "1.8.1"
-heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
+# heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
 # Used for ruma wrapper
 serde_html_form = "0.2.0"
 
@@ -112,7 +114,7 @@ default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"]
 #backend_sled = ["sled"]
 backend_persy = ["persy", "parking_lot"]
 backend_sqlite = ["sqlite"]
-backend_heed = ["heed", "crossbeam"]
+#backend_heed = ["heed", "crossbeam"]
 backend_rocksdb = ["rocksdb"]
 jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"]
 sqlite = ["rusqlite", "parking_lot", "tokio/signal"]
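With backend_heed commented out, only the remaining features can be selected at build time. A hedged sketch of choosing backends explicitly; the feature names come from the default list in the hunk header above, but the exact invocation is illustrative:

    # Build with only the SQLite backend instead of the defaults
    cargo build --release --no-default-features --features conduit_bin,backend_sqlite

    # This now fails, because the feature no longer exists:
    # cargo build --features backend_heed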
Dockerfile: 132 lines, deleted

The multi-stage Dockerfile is removed entirely; image building now happens through the flake's oci-image package instead. The deleted file contained:

  - base and builder stages on docker.io/rust:1.70-bullseye that installed libclang-dev (for rocksdb), built the dependencies against a fake main.rs for layer caching (see https://github.com/rust-lang/cargo/issues/2644), touched the real sources and built them, plus a scratch builder-result stage used only by CI to extract the binary.
  - build-cargo-deb, packager and packager-result stages that installed cargo-deb and packaged the already-compiled binary into a .deb with "cargo deb --no-build".
  - a runner stage on docker.io/debian:bullseye-slim that exposed port 6167, set CONDUIT_PORT, CONDUIT_ADDRESS, CONDUIT_DATABASE_PATH (/var/lib/matrix-conduit) and an empty CONDUIT_CONFIG, installed dpkg, ca-certificates, iproute2 and wget, added docker/healthcheck.sh as a HEALTHCHECK, installed the .deb, created an unprivileged conduit user (1000:1000) with /srv/conduit as its home, set RUST_BACKTRACE=1 and ran ENTRYPOINT [ "/usr/sbin/matrix-conduit" ].
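A sketch of reproducing the docker job locally and loading the result into Docker; the nix build line is taken from the new CI job, while the docker commands and the final image name are illustrative assumptions (the flake tags the image as <package name>:latest):

    # Build the OCI image with Nix, as the new "docker" CI job does
    nix build .#oci-image

    # "result" points at a gzipped image tarball; hand it to a local Docker daemon
    docker load < result

    # Image name/tag come from the flake (package pname + "latest"); adjust if needed
    docker run --rm -p 6167:6167 conduit:latest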
engage.toml: new file, 64 lines

interpreter = ["bash", "-euo", "pipefail", "-c"]

[[task]]
name = "engage"
group = "versions"
script = "engage --version"

[[task]]
name = "rustc"
group = "versions"
script = "rustc --version"

[[task]]
name = "cargo"
group = "versions"
script = "cargo --version"

[[task]]
name = "cargo-fmt"
group = "versions"
script = "cargo fmt --version"

[[task]]
name = "rustdoc"
group = "versions"
script = "rustdoc --version"

[[task]]
name = "cargo-clippy"
group = "versions"
script = "cargo clippy -- --version"

[[task]]
name = "cargo-fmt"
group = "lints"
script = "cargo fmt --check -- --color=always"

[[task]]
name = "cargo-doc"
group = "lints"
script = """
RUSTDOCFLAGS="-D warnings" cargo doc \
    --workspace \
    --no-deps \
    --document-private-items \
    --color always
"""

[[task]]
name = "cargo-clippy"
group = "lints"
script = "cargo clippy --workspace --all-targets --color=always -- -D warnings"

[[task]]
name = "cargo"
group = "tests"
script = """
cargo test \
    --workspace \
    --all-targets \
    --color=always \
    -- \
    --color=always
"""
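The tasks fall into three groups (versions, lints, tests), which the new ci job runs via "direnv exec . engage". Adding another check would follow the same shape; for example, a hypothetical task folding the old test:audit job into the lints group (not part of this commit, shown only to illustrate the format):

    [[task]]
    name = "cargo-audit"    # hypothetical, not in this commit
    group = "lints"
    script = "cargo audit --color always"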
flake.lock: 94 lines changed

The lock file is refreshed to match the new inputs in flake.nix:

  - crane: updated from rev 8b08e96c9af8c6e3a2b69af5a7fa168750fcf88e to 742170d82cd65c925dcddc5c3d6185699fbbad08; it no longer carries its own flake-compat, flake-utils and rust-overlay inputs, only nixpkgs.
  - fenix: rev 39096fe3f379036ff4a5fa198950b8e79defe939 -> e132ea0eb0c799a2109a91688e499d7bf4962801.
  - flake-compat and rust-overlay: nodes removed entirely.
  - flake-utils: rev 919d646de7be200f3bf08cb76ae1f09402b6f9b4 -> 1ef2e671c3b0c19053962c07dbda38332dcebf26.
  - nix-filter: new node (github:numtide/nix-filter, rev 3449dc925982ad46246cfc36469baf66e1b64f17), also added to the root inputs.
  - nixpkgs: rev 8acef304efe70152463a6399f73e636bcc363813 -> 842d9d80cfd4560648c785f8a4e6f3b096790e19.
  - rust-analyzer-src: rev 996e054f1eb1dbfc8455ecabff0f6ff22ba7f7c8 -> 9d9b34354d2f13e33568c9c55b226dd014a146a0.
  - systems: unchanged.
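These are the kind of changes produced by refreshing the lock file rather than editing it by hand; a sketch using the standard command (the same one the old Cargo.toml comment referred to):

    # Re-pin all flake inputs and rewrite flake.lock
    nix flake update
    git diff flake.lock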
flake.nix: 99 lines changed

@@ -2,6 +2,7 @@
   inputs = {
     nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
     flake-utils.url = "github:numtide/flake-utils";
+    nix-filter.url = "github:numtide/nix-filter";
 
     fenix = {
       url = "github:nix-community/fenix";
@@ -10,7 +11,6 @@
     crane = {
       url = "github:ipetkov/crane";
       inputs.nixpkgs.follows = "nixpkgs";
-      inputs.flake-utils.follows = "flake-utils";
     };
   };
 
@@ -18,6 +18,7 @@
     { self
     , nixpkgs
     , flake-utils
+    , nix-filter
 
     , fenix
     , crane
@@ -43,51 +44,91 @@

The body of the per-system output is reworked. The bare ROCKSDB_INCLUDE_DIR / ROCKSDB_LIB_DIR bindings and the single toolchain.toolchain handed to crane are replaced by explicit build and dev toolchains plus a shared env set:

      mkToolchain = fenix.packages.${system}.combine;

      buildToolchain = mkToolchain (with toolchain; [
        cargo
        rustc
      ]);

      devToolchain = mkToolchain (with toolchain; [
        cargo
        clippy
        rust-src
        rustc

        # Always use nightly rustfmt because most of its options are unstable
        fenix.packages.${system}.latest.rustfmt
      ]);

      builder =
        ((crane.mkLib pkgs).overrideToolchain buildToolchain).buildPackage;

      nativeBuildInputs = (with pkgs.rustPlatform; [
        bindgenHook
      ]);

      env = {
        ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include";
        ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib";
      };

packages.default now builds from a filtered source tree and skips the check phase:

      packages.default = builder {
        src = nix-filter {
          root = ./.;
          include = [
            "src"
            "Cargo.toml"
            "Cargo.lock"
          ];
        };

        # This is redundant with CI
        doCheck = false;

        inherit
          env
          nativeBuildInputs
          stdenv;

        meta.mainProgram = cargoToml.package.name;
      };

A new packages.oci-image wraps the default package with dockerTools:

      packages.oci-image =
        let
          package = self.packages.${system}.default;
        in
        pkgs.dockerTools.buildImage {
          name = package.pname;
          tag = "latest";
          config = {
            # Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT) are
            # handled as expected
            Entrypoint = [
              "${pkgs.lib.getExe' pkgs.tini "tini"}"
              "--"
            ];
            Cmd = [
              "${pkgs.lib.getExe package}"
            ];
          };
        };

The devShell takes its environment from env, points RUST_SRC_PATH at the combined devToolchain instead of toolchain.rust-src, and adds the devToolchain and pkgs.engage to nativeBuildInputs in place of the individual cargo/clippy/rust-src/rustc/rustfmt components:

      devShells.default = (pkgs.mkShell.override { inherit stdenv; }) {
        env = env // {
          # Rust Analyzer needs to be able to find the path to default crate
          # sources, and it can read this environment variable to do so. The
          # `rust-src` component is required in order for this to work.
          RUST_SRC_PATH = "${devToolchain}/lib/rustlib/src/rust/library";
        };

        # Development tools
        nativeBuildInputs = nativeBuildInputs ++ [
          devToolchain
        ] ++ (with pkgs; [
          engage
        ]);
      };

Finally, a checks attribute is added so the flake can verify both outputs:

      checks = {
        packagesDefault = self.packages.${system}.default;
        devShellsDefault = self.devShells.${system}.default;
      };
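Given these outputs, the usual flake entry points apply; standard Nix commands, listed as a sketch rather than something this diff adds:

    nix build              # builds packages.default (the conduit binary)
    nix build .#oci-image  # builds the container image tarball used by CI
    nix develop            # enters devShells.default (toolchain plus engage)
    nix flake check        # builds the new checks attribute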
@ -360,7 +360,7 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
|||||||
.bad_query_ratelimiter
|
.bad_query_ratelimiter
|
||||||
.read()
|
.read()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.get(&*server)
|
.get(server)
|
||||||
{
|
{
|
||||||
// Exponential backoff
|
// Exponential backoff
|
||||||
let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
|
let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
|
||||||
@ -393,7 +393,7 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
|||||||
),
|
),
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
.map_err(|e| Error::BadServerResponse("Query took too long")),
|
.map_err(|_e| Error::BadServerResponse("Query took too long")),
|
||||||
)
|
)
|
||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
|
@ -400,7 +400,7 @@ pub async fn get_member_events_route(
|
|||||||
if !services()
|
if !services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.user_can_see_state_events(&sender_user, &body.room_id)?
|
.user_can_see_state_events(sender_user, &body.room_id)?
|
||||||
{
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
@ -435,7 +435,7 @@ pub async fn joined_members_route(
|
|||||||
if !services()
|
if !services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.user_can_see_state_events(&sender_user, &body.room_id)?
|
.user_can_see_state_events(sender_user, &body.room_id)?
|
||||||
{
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
@ -712,7 +712,7 @@ async fn join_room_by_id_helper(
|
|||||||
}
|
}
|
||||||
|
|
||||||
info!("Running send_join auth check");
|
info!("Running send_join auth check");
|
||||||
if !state_res::event_auth::auth_check(
|
let authenticated = state_res::event_auth::auth_check(
|
||||||
&state_res::RoomVersion::new(&room_version_id).expect("room version is supported"),
|
&state_res::RoomVersion::new(&room_version_id).expect("room version is supported"),
|
||||||
&parsed_join_pdu,
|
&parsed_join_pdu,
|
||||||
None::<PduEvent>, // TODO: third party invite
|
None::<PduEvent>, // TODO: third party invite
|
||||||
@ -735,7 +735,9 @@ async fn join_room_by_id_helper(
|
|||||||
.map_err(|e| {
|
.map_err(|e| {
|
||||||
warn!("Auth check failed: {e}");
|
warn!("Auth check failed: {e}");
|
||||||
Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")
|
Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")
|
||||||
})? {
|
})?;
|
||||||
|
|
||||||
|
if !authenticated {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::InvalidParam,
|
ErrorKind::InvalidParam,
|
||||||
"Auth check failed",
|
"Auth check failed",
|
||||||
|
@ -124,7 +124,7 @@ pub async fn get_message_events_route(
|
|||||||
let to = body
|
let to = body
|
||||||
.to
|
.to
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.and_then(|t| PduCount::try_from_string(&t).ok());
|
.and_then(|t| PduCount::try_from_string(t).ok());
|
||||||
|
|
||||||
services().rooms.lazy_loading.lazy_load_confirm_delivery(
|
services().rooms.lazy_loading.lazy_load_confirm_delivery(
|
||||||
sender_user,
|
sender_user,
|
||||||
|
@ -23,7 +23,7 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
|
|||||||
let to = body
|
let to = body
|
||||||
.to
|
.to
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.and_then(|t| PduCount::try_from_string(&t).ok());
|
.and_then(|t| PduCount::try_from_string(t).ok());
|
||||||
|
|
||||||
// Use limit or else 10, with maximum 100
|
// Use limit or else 10, with maximum 100
|
||||||
let limit = body
|
let limit = body
|
||||||
@ -73,7 +73,7 @@ pub async fn get_relating_events_with_rel_type_route(
|
|||||||
let to = body
|
let to = body
|
||||||
.to
|
.to
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.and_then(|t| PduCount::try_from_string(&t).ok());
|
.and_then(|t| PduCount::try_from_string(t).ok());
|
||||||
|
|
||||||
// Use limit or else 10, with maximum 100
|
// Use limit or else 10, with maximum 100
|
||||||
let limit = body
|
let limit = body
|
||||||
@ -121,7 +121,7 @@ pub async fn get_relating_events_route(
|
|||||||
let to = body
|
let to = body
|
||||||
.to
|
.to
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.and_then(|t| PduCount::try_from_string(&t).ok());
|
.and_then(|t| PduCount::try_from_string(t).ok());
|
||||||
|
|
||||||
// Use limit or else 10, with maximum 100
|
// Use limit or else 10, with maximum 100
|
||||||
let limit = body
|
let limit = body
|
||||||
|
@ -117,12 +117,10 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
|
|||||||
} else {
|
} else {
|
||||||
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
|
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
|
||||||
};
|
};
|
||||||
let user_id =
|
|
||||||
UserId::parse_with_server_name(username, services().globals.server_name())
|
UserId::parse_with_server_name(username, services().globals.server_name()).map_err(
|
||||||
.map_err(|_| {
|
|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."),
|
||||||
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
|
)?
|
||||||
})?;
|
|
||||||
user_id
|
|
||||||
}
|
}
|
||||||
_ => {
|
_ => {
|
||||||
warn!("Unsupported or unknown login type: {:?}", &body.login_info);
|
warn!("Unsupported or unknown login type: {:?}", &body.login_info);
|
||||||
@ -163,6 +161,8 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
|
|||||||
|
|
||||||
info!("{} logged in", user_id);
|
info!("{} logged in", user_id);
|
||||||
|
|
||||||
|
// Homeservers are still required to send the `home_server` field
|
||||||
|
#[allow(deprecated)]
|
||||||
Ok(login::v3::Response {
|
Ok(login::v3::Response {
|
||||||
user_id,
|
user_id,
|
||||||
access_token: token,
|
access_token: token,
|
||||||
|
@ -85,7 +85,7 @@ pub async fn get_state_events_route(
|
|||||||
if !services()
|
if !services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.user_can_see_state_events(&sender_user, &body.room_id)?
|
.user_can_see_state_events(sender_user, &body.room_id)?
|
||||||
{
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
@ -118,7 +118,7 @@ pub async fn get_state_events_for_key_route(
|
|||||||
if !services()
|
if !services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.user_can_see_state_events(&sender_user, &body.room_id)?
|
.user_can_see_state_events(sender_user, &body.room_id)?
|
||||||
{
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
@ -157,7 +157,7 @@ pub async fn get_state_events_for_empty_key_route(
|
|||||||
if !services()
|
if !services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.user_can_see_state_events(&sender_user, &body.room_id)?
|
.user_can_see_state_events(sender_user, &body.room_id)?
|
||||||
{
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
|
@ -554,6 +554,7 @@ async fn sync_helper(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::too_many_arguments)]
|
||||||
async fn load_joined_room(
|
async fn load_joined_room(
|
||||||
sender_user: &UserId,
|
sender_user: &UserId,
|
||||||
sender_device: &DeviceId,
|
sender_device: &DeviceId,
|
||||||
@ -590,7 +591,7 @@ async fn load_joined_room(
|
|||||||
|| services()
|
|| services()
|
||||||
.rooms
|
.rooms
|
||||||
.user
|
.user
|
||||||
.last_notification_read(&sender_user, &room_id)?
|
.last_notification_read(sender_user, room_id)?
|
||||||
> since;
|
> since;
|
||||||
|
|
||||||
let mut timeline_users = HashSet::new();
|
let mut timeline_users = HashSet::new();
|
||||||
@ -599,16 +600,16 @@ async fn load_joined_room(
|
|||||||
}
|
}
|
||||||
|
|
||||||
services().rooms.lazy_loading.lazy_load_confirm_delivery(
|
services().rooms.lazy_loading.lazy_load_confirm_delivery(
|
||||||
&sender_user,
|
sender_user,
|
||||||
&sender_device,
|
sender_device,
|
||||||
&room_id,
|
room_id,
|
||||||
sincecount,
|
sincecount,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// Database queries:
|
// Database queries:
|
||||||
|
|
||||||
let current_shortstatehash =
|
let current_shortstatehash =
|
||||||
if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? {
|
if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? {
|
||||||
s
|
s
|
||||||
} else {
|
} else {
|
||||||
error!("Room {} has no state", room_id);
|
error!("Room {} has no state", room_id);
|
||||||
@ -618,7 +619,7 @@ async fn load_joined_room(
|
|||||||
let since_shortstatehash = services()
|
let since_shortstatehash = services()
|
||||||
.rooms
|
.rooms
|
||||||
.user
|
.user
|
||||||
.get_token_shortstatehash(&room_id, since)?;
|
.get_token_shortstatehash(room_id, since)?;
|
||||||
|
|
||||||
let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) =
|
let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) =
|
||||||
if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) {
|
if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) {
|
||||||
@ -630,12 +631,12 @@ async fn load_joined_room(
|
|||||||
let joined_member_count = services()
|
let joined_member_count = services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_cache
|
.state_cache
|
||||||
.room_joined_count(&room_id)?
|
.room_joined_count(room_id)?
|
||||||
.unwrap_or(0);
|
.unwrap_or(0);
|
||||||
let invited_member_count = services()
|
let invited_member_count = services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_cache
|
.state_cache
|
||||||
.room_invited_count(&room_id)?
|
.room_invited_count(room_id)?
|
||||||
.unwrap_or(0);
|
.unwrap_or(0);
|
||||||
|
|
||||||
// Recalculate heroes (first 5 members)
|
// Recalculate heroes (first 5 members)
|
||||||
@ -648,7 +649,7 @@ async fn load_joined_room(
|
|||||||
for hero in services()
|
for hero in services()
|
||||||
.rooms
|
.rooms
|
||||||
.timeline
|
.timeline
|
||||||
.all_pdus(&sender_user, &room_id)?
|
.all_pdus(sender_user, room_id)?
|
||||||
.filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
|
.filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
|
||||||
.filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember)
|
.filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember)
|
||||||
.map(|(_, pdu)| {
|
.map(|(_, pdu)| {
|
||||||
@ -669,11 +670,11 @@ async fn load_joined_room(
|
|||||||
) && (services()
|
) && (services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_cache
|
.state_cache
|
||||||
.is_joined(&user_id, &room_id)?
|
.is_joined(&user_id, room_id)?
|
||||||
|| services()
|
|| services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_cache
|
.state_cache
|
||||||
.is_invited(&user_id, &room_id)?)
|
.is_invited(&user_id, room_id)?)
|
||||||
{
|
{
|
||||||
Ok::<_, Error>(Some(state_key.clone()))
|
Ok::<_, Error>(Some(state_key.clone()))
|
||||||
} else {
|
} else {
|
||||||
@ -789,17 +790,17 @@ async fn load_joined_room(
|
|||||||
|
|
||||||
// Reset lazy loading because this is an initial sync
|
// Reset lazy loading because this is an initial sync
|
||||||
services().rooms.lazy_loading.lazy_load_reset(
|
services().rooms.lazy_loading.lazy_load_reset(
|
||||||
&sender_user,
|
sender_user,
|
||||||
&sender_device,
|
sender_device,
|
||||||
&room_id,
|
room_id,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// The state_events above should contain all timeline_users, let's mark them as lazy
|
// The state_events above should contain all timeline_users, let's mark them as lazy
|
||||||
// loaded.
|
// loaded.
|
||||||
services().rooms.lazy_loading.lazy_load_mark_sent(
|
services().rooms.lazy_loading.lazy_load_mark_sent(
|
||||||
&sender_user,
|
sender_user,
|
||||||
&sender_device,
|
sender_device,
|
||||||
&room_id,
|
room_id,
|
||||||
lazy_loaded,
|
lazy_loaded,
|
||||||
next_batchcount,
|
next_batchcount,
|
||||||
);
|
);
|
||||||
@ -866,14 +867,14 @@ async fn load_joined_room(
|
|||||||
}
|
}
|
||||||
|
|
||||||
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
|
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
|
||||||
&sender_user,
|
sender_user,
|
||||||
&sender_device,
|
sender_device,
|
||||||
&room_id,
|
room_id,
|
||||||
&event.sender,
|
&event.sender,
|
||||||
)? || lazy_load_send_redundant
|
)? || lazy_load_send_redundant
|
||||||
{
|
{
|
||||||
if let Some(member_event) = services().rooms.state_accessor.room_state_get(
|
if let Some(member_event) = services().rooms.state_accessor.room_state_get(
|
||||||
&room_id,
|
room_id,
|
||||||
&StateEventType::RoomMember,
|
&StateEventType::RoomMember,
|
||||||
event.sender.as_str(),
|
event.sender.as_str(),
|
||||||
)? {
|
)? {
|
||||||
@ -884,9 +885,9 @@ async fn load_joined_room(
|
|||||||
}
|
}
|
||||||
|
|
||||||
services().rooms.lazy_loading.lazy_load_mark_sent(
|
services().rooms.lazy_loading.lazy_load_mark_sent(
|
||||||
&sender_user,
|
sender_user,
|
||||||
&sender_device,
|
sender_device,
|
||||||
&room_id,
|
room_id,
|
||||||
lazy_loaded,
|
lazy_loaded,
|
||||||
next_batchcount,
|
next_batchcount,
|
||||||
);
|
);
|
||||||
@@ -934,7 +935,7 @@ async fn load_joined_room(
 match new_membership {
 MembershipState::Join => {
 // A new user joined an encrypted room
-if !share_encrypted_room(&sender_user, &user_id, &room_id)? {
+if !share_encrypted_room(sender_user, &user_id, room_id)? {
 device_list_updates.insert(user_id);
 }
 }
@@ -954,15 +955,15 @@ async fn load_joined_room(
 services()
 .rooms
 .state_cache
-.room_members(&room_id)
+.room_members(room_id)
 .flatten()
 .filter(|user_id| {
 // Don't send key updates from the sender to the sender
-&sender_user != user_id
+sender_user != user_id
 })
 .filter(|user_id| {
 // Only send keys if the sender doesn't share an encrypted room with the target already
-!share_encrypted_room(&sender_user, user_id, &room_id)
+!share_encrypted_room(sender_user, user_id, room_id)
 .unwrap_or(false)
 }),
 );
@@ -997,7 +998,7 @@ async fn load_joined_room(
 services()
 .rooms
 .user
-.notification_count(&sender_user, &room_id)?
+.notification_count(sender_user, room_id)?
 .try_into()
 .expect("notification count can't go that high"),
 )
@@ -1010,7 +1011,7 @@ async fn load_joined_room(
 services()
 .rooms
 .user
-.highlight_count(&sender_user, &room_id)?
+.highlight_count(sender_user, room_id)?
 .try_into()
 .expect("highlight count can't go that high"),
 )
@@ -1039,15 +1040,15 @@ async fn load_joined_room(
 .rooms
 .edus
 .read_receipt
-.readreceipts_since(&room_id, since)
+.readreceipts_since(room_id, since)
 .filter_map(|r| r.ok()) // Filter out buggy events
 .map(|(_, _, v)| v)
 .collect();

-if services().rooms.edus.typing.last_typing_update(&room_id)? > since {
+if services().rooms.edus.typing.last_typing_update(room_id)? > since {
 edus.push(
 serde_json::from_str(
-&serde_json::to_string(&services().rooms.edus.typing.typings_all(&room_id)?)
+&serde_json::to_string(&services().rooms.edus.typing.typings_all(room_id)?)
 .expect("event is valid, we just created it"),
 )
 .expect("event is valid, we just created it"),
@@ -1056,7 +1057,7 @@ async fn load_joined_room(

 // Save the state after this sync so we can send the correct state diff next sync
 services().rooms.user.associate_token_shortstatehash(
-&room_id,
+room_id,
 next_batch,
 current_shortstatehash,
 )?;
@@ -1065,7 +1066,7 @@ async fn load_joined_room(
 account_data: RoomAccountData {
 events: services()
 .account_data
-.changes_since(Some(&room_id), &sender_user, since)?
+.changes_since(Some(room_id), sender_user, since)?
 .into_iter()
 .filter_map(|(_, v)| {
 serde_json::from_str(v.json().get())
@@ -1110,13 +1111,13 @@ fn load_timeline(
 if services()
 .rooms
 .timeline
-.last_timeline_count(&sender_user, &room_id)?
+.last_timeline_count(sender_user, room_id)?
 > roomsincecount
 {
 let mut non_timeline_pdus = services()
 .rooms
 .timeline
-.pdus_until(&sender_user, &room_id, PduCount::max())?
+.pdus_until(sender_user, room_id, PduCount::max())?
 .filter_map(|r| {
 // Filter out buggy events
 if r.is_err() {
@@ -1172,7 +1173,6 @@ fn share_encrypted_room(
 pub async fn sync_events_v4_route(
 body: Ruma<sync_events::v4::Request>,
 ) -> Result<sync_events::v4::Response, RumaResponse<UiaaResponse>> {
-dbg!(&body.body);
 let sender_user = body.sender_user.expect("user is authenticated");
 let sender_device = body.sender_device.expect("user is authenticated");
 let mut body = body.body;
@@ -1232,7 +1232,7 @@ pub async fn sync_events_v4_route(

 for room_id in &all_joined_rooms {
 let current_shortstatehash =
-if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? {
+if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? {
 s
 } else {
 error!("Room {} has no state", room_id);
@@ -1242,7 +1242,7 @@ pub async fn sync_events_v4_route(
 let since_shortstatehash = services()
 .rooms
 .user
-.get_token_shortstatehash(&room_id, globalsince)?;
+.get_token_shortstatehash(room_id, globalsince)?;

 let since_sender_member: Option<RoomMemberEventContent> = since_shortstatehash
 .and_then(|shortstatehash| {
@@ -1331,7 +1331,7 @@ pub async fn sync_events_v4_route(
 if !share_encrypted_room(
 &sender_user,
 &user_id,
-&room_id,
+room_id,
 )? {
 device_list_changes.insert(user_id);
 }
@@ -1352,7 +1352,7 @@ pub async fn sync_events_v4_route(
 services()
 .rooms
 .state_cache
-.room_members(&room_id)
+.room_members(room_id)
 .flatten()
 .filter(|user_id| {
 // Don't send key updates from the sender to the sender
@@ -1360,7 +1360,7 @@ pub async fn sync_events_v4_route(
 })
 .filter(|user_id| {
 // Only send keys if the sender doesn't share an encrypted room with the target already
-!share_encrypted_room(&sender_user, user_id, &room_id)
+!share_encrypted_room(&sender_user, user_id, room_id)
 .unwrap_or(false)
 }),
 );
@@ -1451,7 +1451,7 @@ pub async fn sync_events_v4_route(
 }
 sync_events::v4::SyncOp {
 op: SlidingOp::Sync,
-range: Some(r.clone()),
+range: Some(r),
 index: None,
 room_ids,
 room_id: None,
@@ -1523,7 +1523,7 @@ pub async fn sync_events_v4_route(
 let roomsincecount = PduCount::Normal(*roomsince);

 let (timeline_pdus, limited) =
-load_timeline(&sender_user, &room_id, roomsincecount, *timeline_limit)?;
+load_timeline(&sender_user, room_id, roomsincecount, *timeline_limit)?;

 if roomsince != &0 && timeline_pdus.is_empty() {
 continue;
@@ -1555,63 +1555,58 @@ pub async fn sync_events_v4_route(

 let required_state = required_state_request
 .iter()
-.map(|state| {
+.flat_map(|state| {
 services()
 .rooms
 .state_accessor
-.room_state_get(&room_id, &state.0, &state.1)
+.room_state_get(room_id, &state.0, &state.1)
+.ok()
+.flatten()
+.map(|state| state.to_sync_state_event())
 })
-.filter_map(|r| r.ok())
-.filter_map(|o| o)
-.map(|state| state.to_sync_state_event())
 .collect();

 // Heroes
 let heroes = services()
 .rooms
 .state_cache
-.room_members(&room_id)
+.room_members(room_id)
 .filter_map(|r| r.ok())
 .filter(|member| member != &sender_user)
-.map(|member| {
-Ok::<_, Error>(
-services()
-.rooms
-.state_accessor
-.get_member(&room_id, &member)?
-.map(|memberevent| {
-(
-memberevent
-.displayname
-.unwrap_or_else(|| member.to_string()),
-memberevent.avatar_url,
-)
-}),
-)
+.flat_map(|member| {
+services()
+.rooms
+.state_accessor
+.get_member(room_id, &member)
+.ok()
+.flatten()
+.map(|memberevent| {
+(
+memberevent
+.displayname
+.unwrap_or_else(|| member.to_string()),
+memberevent.avatar_url,
+)
+})
 })
-.filter_map(|r| r.ok())
-.filter_map(|o| o)
 .take(5)
 .collect::<Vec<_>>();
-let name = if heroes.len() > 1 {
-let last = heroes[0].0.clone();
-Some(
-heroes[1..]
+let name = match &heroes[..] {
+[] => None,
+[only] => Some(only.0.clone()),
+[firsts @ .., last] => Some(
+firsts
 .iter()
 .map(|h| h.0.clone())
 .collect::<Vec<_>>()
 .join(", ")
 + " and "
-+ &last,
-)
-} else if heroes.len() == 1 {
-Some(heroes[0].0.clone())
-} else {
-None
++ &last.0,
+),
 };

-let avatar = if heroes.len() == 1 {
-heroes[0].1.clone()
+let avatar = if let [only] = &heroes[..] {
+only.1.clone()
 } else {
 None
 };
@@ -1619,15 +1614,11 @@ pub async fn sync_events_v4_route(
 rooms.insert(
 room_id.clone(),
 sync_events::v4::SlidingSyncRoom {
-name: services()
-.rooms
-.state_accessor
-.get_name(&room_id)?
-.or_else(|| name),
+name: services().rooms.state_accessor.get_name(room_id)?.or(name),
 avatar: services()
 .rooms
 .state_accessor
-.get_avatar(&room_id)?
+.get_avatar(room_id)?
 .map_or(avatar, |a| a.url),
 initial: Some(roomsince == &0),
 is_dm: None,
@@ -1637,7 +1628,7 @@ pub async fn sync_events_v4_route(
 services()
 .rooms
 .user
-.highlight_count(&sender_user, &room_id)?
+.highlight_count(&sender_user, room_id)?
 .try_into()
 .expect("notification count can't go that high"),
 ),
@@ -1645,7 +1636,7 @@ pub async fn sync_events_v4_route(
 services()
 .rooms
 .user
-.notification_count(&sender_user, &room_id)?
+.notification_count(&sender_user, room_id)?
 .try_into()
 .expect("notification count can't go that high"),
 ),
@@ -1658,7 +1649,7 @@ pub async fn sync_events_v4_route(
 (services()
 .rooms
 .state_cache
-.room_joined_count(&room_id)?
+.room_joined_count(room_id)?
 .unwrap_or(0) as u32)
 .into(),
 ),
@@ -1666,7 +1657,7 @@ pub async fn sync_events_v4_route(
 (services()
 .rooms
 .state_cache
-.room_invited_count(&room_id)?
+.room_invited_count(room_id)?
 .unwrap_or(0) as u32)
 .into(),
 ),
@@ -1689,7 +1680,7 @@ pub async fn sync_events_v4_route(
 let _ = tokio::time::timeout(duration, watcher).await;
 }

-Ok(dbg!(sync_events::v4::Response {
+Ok(sync_events::v4::Response {
 initial: globalsince == 0,
 txn_id: body.txn_id.clone(),
 pos: next_batch.to_string(),
@@ -1744,5 +1735,5 @@ pub async fn sync_events_v4_route(
 },
 },
 delta_token: None,
-}))
+})
 }
@@ -55,7 +55,7 @@ use std::{
 time::{Duration, Instant, SystemTime},
 };

-use tracing::{debug, error, trace, warn};
+use tracing::{debug, error, warn};

 /// Wraps either an literal IP address plus port, or a hostname plus complement
 /// (colon-plus-port if it was specified).
@@ -341,7 +341,7 @@ fn add_port_to_hostname(destination_str: &str) -> FedDest {
 }

 /// Returns: actual_destination, host header
-/// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names
+/// Implemented according to the specification at <https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names>
 /// Numbers in comments below refer to bullet points in linked section of specification
 async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) {
 debug!("Finding actual destination for {destination}");
@@ -666,7 +666,7 @@ pub fn parse_incoming_pdu(

 let room_version_id = services().rooms.state.get_room_version(&room_id)?;

-let (event_id, value) = match gen_event_id_canonical_json(&pdu, &room_version_id) {
+let (event_id, value) = match gen_event_id_canonical_json(pdu, &room_version_id) {
 Ok(t) => t,
 Err(_) => {
 // Event could not be converted to canonical json
@@ -724,7 +724,7 @@ pub async fn send_transaction_message_route(
 continue;
 }

-let r = parse_incoming_pdu(&pdu);
+let r = parse_incoming_pdu(pdu);
 let (event_id, value, room_id) = match r {
 Ok(t) => t,
 Err(e) => {
@@ -992,7 +992,7 @@ pub async fn get_event_route(

 if !services().rooms.state_accessor.server_can_see_event(
 sender_servername,
-&room_id,
+room_id,
 &body.event_id,
 )? {
 return Err(Error::BadRequest(
@@ -1058,7 +1058,7 @@ pub async fn get_backfill_route(
 let all_events = services()
 .rooms
 .timeline
-.pdus_until(&user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)?
+.pdus_until(user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)?
 .take(limit.try_into().unwrap());

 let events = all_events
@@ -1075,7 +1075,7 @@ pub async fn get_backfill_route(
 })
 .map(|(_, pdu)| services().rooms.timeline.get_pdu_json(&pdu.event_id))
 .filter_map(|r| r.ok().flatten())
-.map(|pdu| PduEvent::convert_to_outgoing_federation_event(pdu))
+.map(PduEvent::convert_to_outgoing_federation_event)
 .collect();

 Ok(get_backfill::v1::Response {
@@ -29,7 +29,9 @@ use crate::Result;
 /// would be used for `ordinary.onion`, `matrix.myspecial.onion`, but not `hello.myspecial.onion`.
 #[derive(Clone, Debug, Deserialize)]
 #[serde(rename_all = "snake_case")]
+#[derive(Default)]
 pub enum ProxyConfig {
+#[default]
 None,
 Global {
 #[serde(deserialize_with = "crate::utils::deserialize_from_str")]
@@ -48,11 +50,6 @@ impl ProxyConfig {
 })
 }
 }
-impl Default for ProxyConfig {
-fn default() -> Self {
-ProxyConfig::None
-}
-}

 #[derive(Clone, Debug, Deserialize)]
 pub struct PartialProxyConfig {
@@ -8,6 +8,7 @@ use tokio::sync::watch;

 #[derive(Default)]
 pub(super) struct Watchers {
+#[allow(clippy::type_complexity)]
 watchers: RwLock<HashMap<Vec<u8>, (watch::Sender<()>, watch::Receiver<()>)>>,
 }

@@ -20,7 +20,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
 let parsed = services()
 .rooms
 .state_compressor
-.parse_compressed_state_event(&compressed)?;
+.parse_compressed_state_event(compressed)?;
 result.insert(parsed.0, parsed.1);

 i += 1;
@@ -49,7 +49,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
 let (_, eventid) = services()
 .rooms
 .state_compressor
-.parse_compressed_state_event(&compressed)?;
+.parse_compressed_state_event(compressed)?;
 if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? {
 result.insert(
 (
@@ -101,7 +101,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
 services()
 .rooms
 .state_compressor
-.parse_compressed_state_event(&compressed)
+.parse_compressed_state_event(compressed)
 .ok()
 .map(|(_, id)| id)
 }))
@@ -471,6 +471,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
 }

 /// Returns an iterator over all rooms a user was invited to.
+#[allow(clippy::type_complexity)]
 #[tracing::instrument(skip(self))]
 fn rooms_invited<'a>(
 &'a self,
@@ -549,6 +550,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
 }

 /// Returns an iterator over all rooms a user left.
+#[allow(clippy::type_complexity)]
 #[tracing::instrument(skip(self))]
 fn rooms_left<'a>(
 &'a self,
@@ -10,7 +10,7 @@ impl service::rooms::threads::Data for KeyValueDatabase {
 user_id: &'a UserId,
 room_id: &'a RoomId,
 until: u64,
-include: &'a IncludeThreads,
+_include: &'a IncludeThreads,
 ) -> Result<Box<dyn Iterator<Item = Result<(u64, PduEvent)>> + 'a>> {
 let prefix = services()
 .rooms
@@ -27,7 +27,7 @@ impl service::rooms::threads::Data for KeyValueDatabase {
 self.threadid_userids
 .iter_from(&current, true)
 .take_while(move |(k, _)| k.starts_with(&prefix))
-.map(move |(pduid, users)| {
+.map(move |(pduid, _users)| {
 let count = utils::u64_from_bytes(&pduid[(mem::size_of::<u64>())..])
 .map_err(|_| Error::bad_database("Invalid pduid in threadid_userids."))?;
 let mut pdu = services()
@@ -52,13 +52,13 @@ impl service::rooms::threads::Data for KeyValueDatabase {
 .collect::<Vec<_>>()
 .join(&[0xff][..]);

-self.threadid_userids.insert(&root_id, &users)?;
+self.threadid_userids.insert(root_id, &users)?;

 Ok(())
 }

 fn get_participants(&self, root_id: &[u8]) -> Result<Option<Vec<OwnedUserId>>> {
-if let Some(users) = self.threadid_userids.get(&root_id)? {
+if let Some(users) = self.threadid_userids.get(root_id)? {
 Ok(Some(
 users
 .split(|b| *b == 0xff)
@@ -39,11 +39,10 @@ impl service::rooms::timeline::Data for KeyValueDatabase {

 /// Returns the `count` of this pdu's id.
 fn get_pdu_count(&self, event_id: &EventId) -> Result<Option<PduCount>> {
-Ok(self
-.eventid_pduid
+self.eventid_pduid
 .get(event_id.as_bytes())?
 .map(|pdu_id| pdu_count(&pdu_id))
-.transpose()?)
+.transpose()
 }

 /// Returns the json of a pdu.
@@ -80,7 +79,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {

 /// Returns the pdu's id.
 fn get_pdu_id(&self, event_id: &EventId) -> Result<Option<Vec<u8>>> {
-Ok(self.eventid_pduid.get(event_id.as_bytes())?)
+self.eventid_pduid.get(event_id.as_bytes())
 }

 /// Returns the pdu.
@@ -230,7 +229,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
 room_id: &RoomId,
 until: PduCount,
 ) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> {
-let (prefix, current) = count_to_id(&room_id, until, 1, true)?;
+let (prefix, current) = count_to_id(room_id, until, 1, true)?;

 let user_id = user_id.to_owned();

@@ -257,7 +256,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
 room_id: &RoomId,
 from: PduCount,
 ) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> {
-let (prefix, current) = count_to_id(&room_id, from, 1, false)?;
+let (prefix, current) = count_to_id(room_id, from, 1, false)?;

 let user_id = user_id.to_owned();

@@ -238,7 +238,7 @@ async fn spawn_task<B: Send + 'static>(
 .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)
 }

-async fn unrecognized_method<B>(
+async fn unrecognized_method<B: Send>(
 req: axum::http::Request<B>,
 next: axum::middleware::Next<B>,
 ) -> std::result::Result<axum::response::Response, StatusCode> {
@@ -50,7 +50,7 @@ enum AdminCommand {
 /// Registering a new bridge using the ID of an existing bridge will replace
 /// the old one.
 ///
-/// [commandbody]
+/// [commandbody]()
 /// # ```
 /// # yaml content here
 /// # ```
@@ -96,7 +96,7 @@ enum AdminCommand {
 /// Removing a mass amount of users from a room may cause a significant amount of leave events.
 /// The time to leave rooms may depend significantly on joined rooms and servers.
 ///
-/// [commandbody]
+/// [commandbody]()
 /// # ```
 /// # User list here
 /// # ```
@@ -121,7 +121,7 @@ enum AdminCommand {
 /// The PDU event is only checked for validity and is not added to the
 /// database.
 ///
-/// [commandbody]
+/// [commandbody]()
 /// # ```
 /// # PDU json content here
 /// # ```
@@ -165,14 +165,14 @@ enum AdminCommand {
 EnableRoom { room_id: Box<RoomId> },

 /// Verify json signatures
-/// [commandbody]
+/// [commandbody]()
 /// # ```
 /// # json here
 /// # ```
 SignJson,

 /// Verify json signatures
-/// [commandbody]
+/// [commandbody]()
 /// # ```
 /// # json here
 /// # ```
@@ -858,12 +858,15 @@ impl Service {
 .expect("Regex compilation should not fail");
 let text = re.replace_all(&text, "<code>$1</code>: $4");

-// Look for a `[commandbody]` tag. If it exists, use all lines below it that
+// Look for a `[commandbody]()` tag. If it exists, use all lines below it that
 // start with a `#` in the USAGE section.
 let mut text_lines: Vec<&str> = text.lines().collect();
 let mut command_body = String::new();

-if let Some(line_index) = text_lines.iter().position(|line| *line == "[commandbody]") {
+if let Some(line_index) = text_lines
+.iter()
+.position(|line| *line == "[commandbody]()")
+{
 text_lines.remove(line_index);

 while text_lines
@@ -11,6 +11,7 @@ pub trait Data: Send + Sync {
 ) -> Result<()>;

 /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`.
+#[allow(clippy::type_complexity)]
 fn readreceipts_since<'a>(
 &'a self,
 room_id: &RoomId,
@@ -92,7 +92,7 @@ impl Service {
 ));
 }

-services().rooms.event_handler.acl_check(origin, &room_id)?;
+services().rooms.event_handler.acl_check(origin, room_id)?;

 // 1. Skip the PDU if we already have it as a timeline event
 if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? {
@@ -276,6 +276,7 @@ impl Service {
 r
 }

+#[allow(clippy::type_complexity, clippy::too_many_arguments)]
 #[tracing::instrument(skip(self, create_event, value, pub_key_map))]
 fn handle_outlier_pdu<'a>(
 &'a self,
@@ -1009,6 +1010,7 @@ impl Service {
 /// b. Look at outlier pdu tree
 /// c. Ask origin server over federation
 /// d. TODO: Ask other servers over federation?
+#[allow(clippy::type_complexity)]
 #[tracing::instrument(skip_all)]
 pub(crate) fn fetch_and_handle_outliers<'a>(
 &'a self,
@@ -14,6 +14,7 @@ use super::timeline::PduCount;
 pub struct Service {
 pub db: &'static dyn Data,

+#[allow(clippy::type_complexity)]
 pub lazy_load_waiting:
 Mutex<HashMap<(OwnedUserId, OwnedDeviceId, OwnedRoomId, PduCount), HashSet<OwnedUserId>>>,
 }
@@ -5,6 +5,7 @@ use ruma::{EventId, RoomId, UserId};

 pub trait Data: Send + Sync {
 fn add_relation(&self, from: u64, to: u64) -> Result<()>;
+#[allow(clippy::type_complexity)]
 fn relations_until<'a>(
 &'a self,
 user_id: &'a UserId,
@@ -40,6 +40,7 @@ impl Service {
 }
 }

+#[allow(clippy::too_many_arguments)]
 pub fn paginate_relations_with_filter(
 &self,
 sender_user: &UserId,
@@ -82,7 +83,7 @@ impl Service {
 services()
 .rooms
 .state_accessor
-.user_can_see_event(sender_user, &room_id, &pdu.event_id)
+.user_can_see_event(sender_user, room_id, &pdu.event_id)
 .unwrap_or(false)
 })
 .take_while(|&(k, _)| Some(k) != to) // Stop at `to`
@@ -106,7 +107,7 @@ impl Service {
 let events_before: Vec<_> = services()
 .rooms
 .pdu_metadata
-.relations_until(sender_user, &room_id, target, from)?
+.relations_until(sender_user, room_id, target, from)?
 .filter(|r| {
 r.as_ref().map_or(true, |(_, pdu)| {
 filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
@@ -129,7 +130,7 @@ impl Service {
 services()
 .rooms
 .state_accessor
-.user_can_see_event(sender_user, &room_id, &pdu.event_id)
+.user_can_see_event(sender_user, room_id, &pdu.event_id)
 .unwrap_or(false)
 })
 .take_while(|&(k, _)| Some(k) != to) // Stop at `to`
@@ -4,6 +4,7 @@ use ruma::RoomId;
 pub trait Data: Send + Sync {
 fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>;

+#[allow(clippy::type_complexity)]
 fn search_pdus<'a>(
 &'a self,
 room_id: &RoomId,
@@ -197,7 +197,7 @@ impl Service {
 if let Ok(response) = services()
 .sending
 .send_federation_request(
-&server,
+server,
 federation::space::get_hierarchy::v1::Request {
 room_id: current_room.to_owned(),
 suggested_only,
@@ -235,7 +235,7 @@ impl Service {
 .room
 .allowed_room_ids
 .into_iter()
-.map(|room| AllowRule::room_membership(room))
+.map(AllowRule::room_membership)
 .collect(),
 })
 }
@@ -245,7 +245,7 @@ impl Service {
 .room
 .allowed_room_ids
 .into_iter()
-.map(|room| AllowRule::room_membership(room))
+.map(AllowRule::room_membership)
 .collect(),
 })
 }
@@ -313,7 +313,7 @@ impl Service {
 canonical_alias: services()
 .rooms
 .state_accessor
-.room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")?
+.room_state_get(room_id, &StateEventType::RoomCanonicalAlias, "")?
 .map_or(Ok(None), |s| {
 serde_json::from_str(s.content.get())
 .map(|c: RoomCanonicalAliasEventContent| c.alias)
@@ -321,11 +321,11 @@ impl Service {
 Error::bad_database("Invalid canonical alias event in database.")
 })
 })?,
-name: services().rooms.state_accessor.get_name(&room_id)?,
+name: services().rooms.state_accessor.get_name(room_id)?,
 num_joined_members: services()
 .rooms
 .state_cache
-.room_joined_count(&room_id)?
+.room_joined_count(room_id)?
 .unwrap_or_else(|| {
 warn!("Room {} has no member count", room_id);
 0
@@ -336,7 +336,7 @@ impl Service {
 topic: services()
 .rooms
 .state_accessor
-.room_state_get(&room_id, &StateEventType::RoomTopic, "")?
+.room_state_get(room_id, &StateEventType::RoomTopic, "")?
 .map_or(Ok(None), |s| {
 serde_json::from_str(s.content.get())
 .map(|c: RoomTopicEventContent| Some(c.topic))
@@ -348,7 +348,7 @@ impl Service {
 world_readable: services()
 .rooms
 .state_accessor
-.room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
+.room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")?
 .map_or(Ok(false), |s| {
 serde_json::from_str(s.content.get())
 .map(|c: RoomHistoryVisibilityEventContent| {
@@ -363,7 +363,7 @@ impl Service {
 guest_can_join: services()
 .rooms
 .state_accessor
-.room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")?
+.room_state_get(room_id, &StateEventType::RoomGuestAccess, "")?
 .map_or(Ok(false), |s| {
 serde_json::from_str(s.content.get())
 .map(|c: RoomGuestAccessEventContent| {
@@ -376,7 +376,7 @@ impl Service {
 avatar_url: services()
 .rooms
 .state_accessor
-.room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
+.room_state_get(room_id, &StateEventType::RoomAvatar, "")?
 .map(|s| {
 serde_json::from_str(s.content.get())
 .map(|c: RoomAvatarEventContent| c.url)
@@ -389,7 +389,7 @@ impl Service {
 let join_rule = services()
 .rooms
 .state_accessor
-.room_state_get(&room_id, &StateEventType::RoomJoinRules, "")?
+.room_state_get(room_id, &StateEventType::RoomJoinRules, "")?
 .map(|s| {
 serde_json::from_str(s.content.get())
 .map(|c: RoomJoinRulesEventContent| c.join_rule)
@@ -415,7 +415,7 @@ impl Service {
 room_type: services()
 .rooms
 .state_accessor
-.room_state_get(&room_id, &StateEventType::RoomCreate, "")?
+.room_state_get(room_id, &StateEventType::RoomCreate, "")?
 .map(|s| {
 serde_json::from_str::<RoomCreateEventContent>(s.content.get()).map_err(|e| {
 error!("Invalid room create event in database: {}", e);
@@ -455,7 +455,7 @@ impl Service {
 SpaceRoomJoinRule::Invite => services()
 .rooms
 .state_cache
-.is_joined(sender_user, &room_id)?,
+.is_joined(sender_user, room_id)?,
 _ => false,
 };

@@ -479,17 +479,14 @@ impl Service {
 match join_rule {
 JoinRule::Restricted(r) => {
 for rule in &r.allow {
-match rule {
-join_rules::AllowRule::RoomMembership(rm) => {
-if let Ok(true) = services()
-.rooms
-.state_cache
-.is_joined(sender_user, &rm.room_id)
-{
-return Ok(true);
-}
+if let join_rules::AllowRule::RoomMembership(rm) = rule {
+if let Ok(true) = services()
+.rooms
+.state_cache
+.is_joined(sender_user, &rm.room_id)
+{
+return Ok(true);
 }
-_ => {}
 }
 }

@@ -41,7 +41,7 @@ impl Service {
 services()
 .rooms
 .state_compressor
-.parse_compressed_state_event(&new)
+.parse_compressed_state_event(new)
 .ok()
 .map(|(_, id)| id)
 }) {
@@ -412,7 +412,7 @@ impl Service {
 services()
 .rooms
 .state_compressor
-.parse_compressed_state_event(&compressed)
+.parse_compressed_state_event(compressed)
 .ok()
 })
 .filter_map(|(shortstatekey, event_id)| {
@@ -180,7 +180,7 @@ impl Service {
 return Ok(*visibility);
 }

-let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?;
+let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?;

 let history_visibility = self
 .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")?
@@ -197,11 +197,11 @@ impl Service {
 HistoryVisibility::Shared => currently_member,
 HistoryVisibility::Invited => {
 // Allow if any member on requesting server was AT LEAST invited, else deny
-self.user_was_invited(shortstatehash, &user_id)
+self.user_was_invited(shortstatehash, user_id)
 }
 HistoryVisibility::Joined => {
 // Allow if any member on requested server was joined, else deny
-self.user_was_joined(shortstatehash, &user_id)
+self.user_was_joined(shortstatehash, user_id)
 }
 _ => {
 error!("Unknown history visibility {history_visibility}");
@@ -221,10 +221,10 @@ impl Service {
 /// the room's history_visibility at that event's state.
 #[tracing::instrument(skip(self, user_id, room_id))]
 pub fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
-let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?;
+let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?;

 let history_visibility = self
-.room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
+.room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")?
 .map_or(Ok(HistoryVisibility::Shared), |s| {
 serde_json::from_str(s.content.get())
 .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility)
@@ -276,7 +276,7 @@ impl Service {
 services()
 .rooms
 .state_accessor
-.room_state_get(&room_id, &StateEventType::RoomName, "")?
+.room_state_get(room_id, &StateEventType::RoomName, "")?
 .map_or(Ok(None), |s| {
 serde_json::from_str(s.content.get())
 .map(|c: RoomNameEventContent| c.name)
@@ -288,7 +288,7 @@ impl Service {
 services()
 .rooms
 .state_accessor
-.room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
+.room_state_get(room_id, &StateEventType::RoomAvatar, "")?
 .map_or(Ok(None), |s| {
 serde_json::from_str(s.content.get())
 .map_err(|_| Error::bad_database("Invalid room avatar event in database."))
@@ -303,7 +303,7 @@ impl Service {
 services()
 .rooms
 .state_accessor
-.room_state_get(&room_id, &StateEventType::RoomMember, user_id.as_str())?
+.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())?
 .map_or(Ok(None), |s| {
 serde_json::from_str(s.content.get())
 .map_err(|_| Error::bad_database("Invalid room member event in database."))
@@ -78,6 +78,7 @@ pub trait Data: Send + Sync {
 ) -> Box<dyn Iterator<Item = Result<OwnedRoomId>> + 'a>;

 /// Returns an iterator over all rooms a user was invited to.
+#[allow(clippy::type_complexity)]
 fn rooms_invited<'a>(
 &'a self,
 user_id: &UserId,
@@ -96,6+97,7 @@ pub trait Data: Send + Sync {
 ) -> Result<Option<Vec<Raw<AnyStrippedStateEvent>>>>;

 /// Returns an iterator over all rooms a user left.
+#[allow(clippy::type_complexity)]
 fn rooms_left<'a>(
 &'a self,
 user_id: &UserId,
@@ -16,6 +16,7 @@ use self::data::StateDiff;
 pub struct Service {
 pub db: &'static dyn Data,

+#[allow(clippy::type_complexity)]
 pub stateinfo_cache: Mutex<
 LruCache<
 u64,
@@ -33,6 +34,7 @@ pub type CompressedStateEvent = [u8; 2 * size_of::<u64>()];

 impl Service {
 /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer.
+#[allow(clippy::type_complexity)]
 #[tracing::instrument(skip(self))]
 pub fn load_shortstatehash_info(
 &self,
@@ -131,6 +133,7 @@ impl Service {
 /// * `statediffremoved` - Removed from base. Each vec is shortstatekey+shorteventid
 /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer
 /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer
+#[allow(clippy::type_complexity)]
 #[tracing::instrument(skip(
 self,
 statediffnew,
@@ -164,7 +167,7 @@ impl Service {
 for removed in statediffremoved.iter() {
 if !parent_new.remove(removed) {
 // It was not added in the parent and we removed it
-parent_removed.insert(removed.clone());
+parent_removed.insert(*removed);
 }
 // Else it was added in the parent and we removed it again. We can forget this change
 }
@@ -172,7 +175,7 @@ impl Service {
 for new in statediffnew.iter() {
 if !parent_removed.remove(new) {
 // It was not touched in the parent and we added it
-parent_new.insert(new.clone());
+parent_new.insert(*new);
 }
 // Else it was removed in the parent and we added it again. We can forget this change
 }
@@ -217,7 +220,7 @@ impl Service {
 for removed in statediffremoved.iter() {
 if !parent_new.remove(removed) {
 // It was not added in the parent and we removed it
-parent_removed.insert(removed.clone());
+parent_removed.insert(*removed);
 }
 // Else it was added in the parent and we removed it again. We can forget this change
 }
@@ -225,7 +228,7 @@ impl Service {
 for new in statediffnew.iter() {
 if !parent_removed.remove(new) {
 // It was not touched in the parent and we added it
-parent_new.insert(new.clone());
+parent_new.insert(*new);
 }
 // Else it was removed in the parent and we added it again. We can forget this change
 }
@@ -253,6 +256,7 @@ impl Service {
 }

 /// Returns the new shortstatehash, and the state diff from the previous room state
+#[allow(clippy::type_complexity)]
 pub fn save_state(
 &self,
 room_id: &RoomId,
@@ -2,6 +2,7 @@ use crate::{PduEvent, Result};
 use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId};

 pub trait Data: Send + Sync {
+#[allow(clippy::type_complexity)]
 fn threads_until<'a>(
 &'a self,
 user_id: &'a UserId,
@@ -26,7 +26,7 @@ impl Service {
 self.db.threads_until(user_id, room_id, until, include)
 }

-pub fn add_to_thread<'a>(&'a self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> {
+pub fn add_to_thread(&self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> {
 let root_id = &services()
 .rooms
 .timeline
@@ -103,7 +103,7 @@ impl Service {
 }

 let mut users = Vec::new();
-if let Some(userids) = self.db.get_participants(&root_id)? {
+if let Some(userids) = self.db.get_participants(root_id)? {
 users.extend_from_slice(&userids);
 users.push(pdu.sender.clone());
 } else {
@@ -66,6 +66,7 @@ pub trait Data: Send + Sync {

 /// Returns an iterator over all events and their tokens in a room that happened before the
 /// event with id `until` in reverse-chronological order.
+#[allow(clippy::type_complexity)]
 fn pdus_until<'a>(
 &'a self,
 user_id: &UserId,
@@ -75,6+76,7 @@ pub trait Data: Send + Sync {

 /// Returns an iterator over all events in a room that happened after the event with id `from`
 /// in chronological order.
+#[allow(clippy::type_complexity)]
 fn pdus_after<'a>(
 &'a self,
 user_id: &UserId,
@ -58,8 +58,8 @@ impl PduCount {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn try_from_string(token: &str) -> Result<Self> {
|
pub fn try_from_string(token: &str) -> Result<Self> {
|
||||||
if token.starts_with('-') {
|
if let Some(stripped) = token.strip_prefix('-') {
|
||||||
token[1..].parse().map(PduCount::Backfilled)
|
stripped.parse().map(PduCount::Backfilled)
|
||||||
} else {
|
} else {
|
||||||
token.parse().map(PduCount::Normal)
|
token.parse().map(PduCount::Normal)
|
||||||
}
|
}
|
||||||
@@ -112,7 +112,7 @@ pub struct Service {
 impl Service {
     #[tracing::instrument(skip(self))]
     pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result<Option<Arc<PduEvent>>> {
-        self.all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)?
+        self.all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)?
            .next()
            .map(|o| o.map(|(_, p)| Arc::new(p)))
            .transpose()
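As the diff shows, `user_id!(...)` and the `room_id: &RoomId` parameter are already references, so the extra `&` created a double reference that the compiler immediately dereferenced; clippy reports this as `needless_borrow`. A minimal sketch of the same fix, with illustrative names:

// `name` is already `&str`; borrowing it again yields `&&str`.
fn greet(name: &str) {
    println!("hello {name}");
}

fn caller(name: &str) {
    greet(&name); // clippy::needless_borrow warns here
    greet(name);  // the form the diff moves to
}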
@@ -458,7 +458,7 @@ impl Service {
         let to_conduit = body.starts_with(&format!("{server_user}: "))
             || body.starts_with(&format!("{server_user} "))
             || body == format!("{server_user}:")
-            || body == format!("{server_user}");
+            || body == server_user;
 
         // This will evaluate to false if the emergency password is set up so that
         // the administrator can execute commands as conduit
@@ -842,7 +842,7 @@ impl Service {
 
                 let target = pdu
                     .state_key()
-                    .filter(|v| v.starts_with("@"))
+                    .filter(|v| v.starts_with('@'))
                     .unwrap_or(sender.as_str());
                 let server_name = services().globals.server_name();
                 let server_user = format!("@conduit:{}", server_name);
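Switching `"@"` to `'@'` in the `starts_with` call matches clippy's `single_char_pattern` lint, which prefers a `char` pattern when the needle is a single character. A one-function sketch:

// A one-character string-literal pattern can be the cheaper char form;
// clippy::single_char_pattern suggests the latter.
fn looks_like_user_id(s: &str) -> bool {
    s.starts_with('@') // rather than s.starts_with("@")
}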
@@ -850,7 +850,7 @@ impl Service {
                     .map_err(|_| Error::bad_database("Invalid content in pdu."))?;
 
                 if content.membership == MembershipState::Leave {
-                    if target == &server_user {
+                    if target == server_user {
                         warn!("Conduit user cannot leave from admins room");
                         return Err(Error::BadRequest(
                             ErrorKind::Forbidden,
@@ -876,7 +876,7 @@ impl Service {
                 }
 
                 if content.membership == MembershipState::Ban && pdu.state_key().is_some() {
-                    if target == &server_user {
+                    if target == server_user {
                         warn!("Conduit user cannot be banned in admins room");
                         return Err(Error::BadRequest(
                             ErrorKind::Forbidden,
@@ -1048,7 +1048,7 @@ impl Service {
     #[tracing::instrument(skip(self, room_id))]
     pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> {
         let first_pdu = self
-            .all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)?
+            .all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)?
             .next()
             .expect("Room is not empty")?;
 
@@ -1060,7 +1060,7 @@ impl Service {
         let power_levels: RoomPowerLevelsEventContent = services()
             .rooms
             .state_accessor
-            .room_state_get(&room_id, &StateEventType::RoomPowerLevels, "")?
+            .room_state_get(room_id, &StateEventType::RoomPowerLevels, "")?
             .map(|ev| {
                 serde_json::from_str(ev.content.get())
                     .map_err(|_| Error::bad_database("invalid m.room.power_levels event"))
@@ -1091,11 +1091,9 @@ impl Service {
                 .await;
             match response {
                 Ok(response) => {
-                    let mut pub_key_map = RwLock::new(BTreeMap::new());
+                    let pub_key_map = RwLock::new(BTreeMap::new());
                     for pdu in response.pdus {
-                        if let Err(e) = self
-                            .backfill_pdu(backfill_server, pdu, &mut pub_key_map)
-                            .await
+                        if let Err(e) = self.backfill_pdu(backfill_server, pdu, &pub_key_map).await
                         {
                             warn!("Failed to add backfilled pdu: {e}");
                         }
@@ -1142,7 +1140,7 @@ impl Service {
         services()
             .rooms
             .event_handler
-            .handle_incoming_pdu(origin, &event_id, &room_id, value, false, &pub_key_map)
+            .handle_incoming_pdu(origin, &event_id, &room_id, value, false, pub_key_map)
             .await?;
 
         let value = self.get_pdu_json(&event_id)?.expect("We just created it");
@@ -1175,24 +1173,21 @@ impl Service {
 
         drop(insert_lock);
 
-        match pdu.kind {
-            TimelineEventType::RoomMessage => {
-                #[derive(Deserialize)]
-                struct ExtractBody {
-                    body: Option<String>,
-                }
-
-                let content = serde_json::from_str::<ExtractBody>(pdu.content.get())
-                    .map_err(|_| Error::bad_database("Invalid content in pdu."))?;
-
-                if let Some(body) = content.body {
-                    services()
-                        .rooms
-                        .search
-                        .index_pdu(shortroomid, &pdu_id, &body)?;
-                }
-            }
-            _ => {}
+        if pdu.kind == TimelineEventType::RoomMessage {
+            #[derive(Deserialize)]
+            struct ExtractBody {
+                body: Option<String>,
+            }
+
+            let content = serde_json::from_str::<ExtractBody>(pdu.content.get())
+                .map_err(|_| Error::bad_database("Invalid content in pdu."))?;
+
+            if let Some(body) = content.body {
+                services()
+                    .rooms
+                    .search
+                    .index_pdu(shortroomid, &pdu_id, &body)?;
+            }
         }
         drop(mutex_lock);
 
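Collapsing a `match` that has one meaningful arm plus an empty `_ => {}` into an `if` is the shape clippy's `single_match` lint points at; an equality check works here because the event-type enum can be compared directly. A standalone sketch with a hypothetical enum, not ruma's `TimelineEventType`:

#[derive(PartialEq)]
enum EventKind {
    Message,
    Other,
}

// One real arm plus an empty catch-all: clippy::single_match fires on this.
fn index_body_match(kind: &EventKind) {
    match kind {
        EventKind::Message => println!("index the message body"),
        _ => {}
    }
}

// Collapsed form, mirroring the diff; the derived PartialEq makes == possible.
fn index_body(kind: &EventKind) {
    if *kind == EventKind::Message {
        println!("index the message body");
    }
}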
@@ -5,6 +5,7 @@ use crate::Result;
 use super::{OutgoingKind, SendingEventType};
 
 pub trait Data: Send + Sync {
+    #[allow(clippy::type_complexity)]
     fn active_requests<'a>(
         &'a self,
     ) -> Box<dyn Iterator<Item = Result<(Vec<u8>, OutgoingKind, SendingEventType)>> + 'a>;
@@ -34,6 +34,7 @@ pub struct SlidingSyncCache {
 
 pub struct Service {
     pub db: &'static dyn Data,
+    #[allow(clippy::type_complexity)]
     pub connections:
         Mutex<BTreeMap<(OwnedUserId, OwnedDeviceId, String), Arc<Mutex<SlidingSyncCache>>>>,
 }