author    Timo Kösters <timo@koesters.xyz>  2022-06-19 22:56:14 +0200
committer Nyaaori <+@nyaaori.cat>           2022-08-15 16:30:34 +0200
commit    025b64befc5872aa7ffcc0ba348005e326d347d5
tree      375664a65bcc2b908e4274a4fd0a4a93204d954b /src/api
parent    92e59f14e07070bc7646dbeeaa58751d3a05a642
refactor: renames and split room.rs
Diffstat (limited to 'src/api')
 src/api/appservice_server.rs             |   90
 src/api/client_server/account.rs         |  432
 src/api/client_server/alias.rs           |  151
 src/api/client_server/backup.rs          |  352
 src/api/client_server/capabilities.rs    |   35
 src/api/client_server/config.rs          |  130
 src/api/client_server/context.rs         |  188
 src/api/client_server/device.rs          |  183
 src/api/client_server/directory.rs       |  357
 src/api/client_server/filter.rs          |   36
 src/api/client_server/keys.rs            |  477
 src/api/client_server/media.rs           |  225
 src/api/client_server/membership.rs      | 1288
 src/api/client_server/message.rs         |  246
 src/api/client_server/mod.rs             |   68
 src/api/client_server/presence.rs        |   87
 src/api/client_server/profile.rs         |  321
 src/api/client_server/push.rs            |  584
 src/api/client_server/read_marker.rs     |  127
 src/api/client_server/redact.rs          |   56
 src/api/client_server/report.rs          |   72
 src/api/client_server/room.rs            |  730
 src/api/client_server/search.rs          |  119
 src/api/client_server/session.rs         |  200
 src/api/client_server/state.rs           |  299
 src/api/client_server/sync.rs            |  952
 src/api/client_server/tag.rs             |  117
 src/api/client_server/thirdparty.rs      |   16
 src/api/client_server/to_device.rs       |   94
 src/api/client_server/typing.rs          |   36
 src/api/client_server/unversioned.rs     |   31
 src/api/client_server/user_directory.rs  |   91
 src/api/client_server/voip.rs            |   48
 src/api/ruma_wrapper/axum.rs             |  367
 src/api/ruma_wrapper/mod.rs              |   42
 src/api/server_server.rs                 | 3644
36 files changed, 12291 insertions, 0 deletions
diff --git a/src/api/appservice_server.rs b/src/api/appservice_server.rs
new file mode 100644
index 0000000..ce122da
--- /dev/null
+++ b/src/api/appservice_server.rs
@@ -0,0 +1,90 @@
+use crate::{utils, Error, Result};
+use bytes::BytesMut;
+use ruma::api::{IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken};
+use std::{fmt::Debug, mem, time::Duration};
+use tracing::warn;
+
+#[tracing::instrument(skip(globals, request))]
+pub(crate) async fn send_request<T: OutgoingRequest>(
+ globals: &crate::database::globals::Globals,
+ registration: serde_yaml::Value,
+ request: T,
+) -> Result<T::IncomingResponse>
+where
+ T: Debug,
+{
+ let destination = registration.get("url").unwrap().as_str().unwrap();
+ let hs_token = registration.get("hs_token").unwrap().as_str().unwrap();
+
+ let mut http_request = request
+ .try_into_http_request::<BytesMut>(
+ destination,
+ SendAccessToken::IfRequired(""),
+ &[MatrixVersion::V1_0],
+ )
+ .unwrap()
+ .map(|body| body.freeze());
+
+ let mut parts = http_request.uri().clone().into_parts();
+ let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned();
+ let symbol = if old_path_and_query.contains('?') {
+ "&"
+ } else {
+ "?"
+ };
+
+ parts.path_and_query = Some(
+ (old_path_and_query + symbol + "access_token=" + hs_token)
+ .parse()
+ .unwrap(),
+ );
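+    // e.g. "/path?foo=bar" becomes "/path?foo=bar&access_token=<hs_token>",
+    // while "/path" becomes "/path?access_token=<hs_token>"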
+ *http_request.uri_mut() = parts.try_into().expect("our manipulation is always valid");
+
+ let mut reqwest_request = reqwest::Request::try_from(http_request)
+ .expect("all http requests are valid reqwest requests");
+
+ *reqwest_request.timeout_mut() = Some(Duration::from_secs(30));
+
+ let url = reqwest_request.url().clone();
+ let mut response = globals.default_client().execute(reqwest_request).await?;
+
+ // reqwest::Response -> http::Response conversion
+ let status = response.status();
+ let mut http_response_builder = http::Response::builder()
+ .status(status)
+ .version(response.version());
+ mem::swap(
+ response.headers_mut(),
+ http_response_builder
+ .headers_mut()
+ .expect("http::response::Builder is usable"),
+ );
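+    // (Swapping moves the header map out of the reqwest response without cloning it.)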
+
+ let body = response.bytes().await.unwrap_or_else(|e| {
+ warn!("server error: {}", e);
+ Vec::new().into()
+ }); // TODO: handle timeout
+
+ if status != 200 {
+ warn!(
+ "Appservice returned bad response {} {}\n{}\n{:?}",
+ destination,
+ status,
+ url,
+ utils::string_from_bytes(&body)
+ );
+ }
+
+ let response = T::IncomingResponse::try_from_http_response(
+ http_response_builder
+ .body(body)
+ .expect("reqwest body is valid http body"),
+ );
+ response.map_err(|_| {
+ warn!(
+ "Appservice returned invalid response bytes {}\n{}",
+ destination, url
+ );
+ Error::BadServerResponse("Server returned bad response.")
+ })
+}
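+
+// Illustrative usage sketch (not part of this diff): `registration` is the parsed
+// appservice registration YAML and `SomeAppserviceRequest` is a stand-in for any
+// type implementing `OutgoingRequest`:
+//
+//     let response = send_request(&db.globals, registration, SomeAppserviceRequest::new()).await?;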
diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs
new file mode 100644
index 0000000..dc0782d
--- /dev/null
+++ b/src/api/client_server/account.rs
@@ -0,0 +1,432 @@
+use std::sync::Arc;
+
+use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
+use crate::{
+ database::{admin::make_user_admin, DatabaseGuard},
+ pdu::PduBuilder,
+ utils, Database, Error, Result, Ruma,
+};
+use ruma::{
+ api::client::{
+ account::{
+ change_password, deactivate, get_3pids, get_username_availability, register, whoami,
+ ThirdPartyIdRemovalStatus,
+ },
+ error::ErrorKind,
+ uiaa::{AuthFlow, AuthType, UiaaInfo},
+ },
+ events::{
+ room::{
+ member::{MembershipState, RoomMemberEventContent},
+ message::RoomMessageEventContent,
+ },
+ GlobalAccountDataEventType, RoomEventType,
+ },
+ push, UserId,
+};
+use serde_json::value::to_raw_value;
+use tracing::{info, warn};
+
+use register::RegistrationKind;
+
+const RANDOM_USER_ID_LENGTH: usize = 10;
+
+/// # `GET /_matrix/client/r0/register/available`
+///
+/// Checks if a username is valid and available on this server.
+///
+/// Conditions for returning true:
+/// - The user id is not historical
+/// - The server name of the user id matches this server
+/// - No user or appservice on this server already claimed this username
+///
+/// Note: This will not reserve the username, so it may no longer be available by the
+/// time the client actually tries to register it
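+///
+/// Illustrative exchange (not part of this diff):
+/// `GET /_matrix/client/r0/register/available?username=alice` responds with
+/// `{"available": true}` when `@alice:<server_name>` can still be registered.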
+pub async fn get_register_available_route(
+ db: DatabaseGuard,
+ body: Ruma<get_username_availability::v3::IncomingRequest>,
+) -> Result<get_username_availability::v3::Response> {
+ // Validate user id
+ let user_id =
+ UserId::parse_with_server_name(body.username.to_lowercase(), db.globals.server_name())
+ .ok()
+ .filter(|user_id| {
+ !user_id.is_historical() && user_id.server_name() == db.globals.server_name()
+ })
+ .ok_or(Error::BadRequest(
+ ErrorKind::InvalidUsername,
+ "Username is invalid.",
+ ))?;
+
+    // Check whether the username is already taken
+ if db.users.exists(&user_id)? {
+ return Err(Error::BadRequest(
+ ErrorKind::UserInUse,
+ "Desired user ID is already taken.",
+ ));
+ }
+
+ // TODO add check for appservice namespaces
+
+    // If none of the checks above returned an error, the username is available.
+ Ok(get_username_availability::v3::Response { available: true })
+}
+
+/// # `POST /_matrix/client/r0/register`
+///
+/// Register an account on this homeserver.
+///
+/// You can use [`GET /_matrix/client/r0/register/available`](fn.get_register_available_route.html)
+/// to check if the user id is valid and available.
+///
+/// - Only works if registration is enabled
+/// - If type is guest: ignores all parameters except initial_device_display_name
+/// - If sender is not appservice: Requires UIAA (but we only use a dummy stage)
+/// - If type is not guest and no username is given: Always fails after UIAA check
+/// - Creates a new account and populates it with default account data
+/// - If `inhibit_login` is false: Creates a device and returns device id and access_token
+pub async fn register_route(
+ db: DatabaseGuard,
+ body: Ruma<register::v3::IncomingRequest>,
+) -> Result<register::v3::Response> {
+ if !db.globals.allow_registration() && !body.from_appservice {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "Registration has been disabled.",
+ ));
+ }
+
+ let is_guest = body.kind == RegistrationKind::Guest;
+
+ let user_id = match (&body.username, is_guest) {
+ (Some(username), false) => {
+ let proposed_user_id =
+ UserId::parse_with_server_name(username.to_lowercase(), db.globals.server_name())
+ .ok()
+ .filter(|user_id| {
+ !user_id.is_historical()
+ && user_id.server_name() == db.globals.server_name()
+ })
+ .ok_or(Error::BadRequest(
+ ErrorKind::InvalidUsername,
+ "Username is invalid.",
+ ))?;
+ if db.users.exists(&proposed_user_id)? {
+ return Err(Error::BadRequest(
+ ErrorKind::UserInUse,
+ "Desired user ID is already taken.",
+ ));
+ }
+ proposed_user_id
+ }
+ _ => loop {
+ let proposed_user_id = UserId::parse_with_server_name(
+ utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(),
+ db.globals.server_name(),
+ )
+ .unwrap();
+ if !db.users.exists(&proposed_user_id)? {
+ break proposed_user_id;
+ }
+ },
+ };
+
+ // UIAA
+ let mut uiaainfo = UiaaInfo {
+ flows: vec![AuthFlow {
+ stages: vec![AuthType::Dummy],
+ }],
+ completed: Vec::new(),
+ params: Default::default(),
+ session: None,
+ auth_error: None,
+ };
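+
+    // UIAA handshake (sketch): a request without an `auth` field is answered with a
+    // 401 carrying this `uiaainfo` (including a fresh session id); the client then
+    // retries the same request with `"auth": {"type": "m.login.dummy", "session": "<id>"}`.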
+
+ if !body.from_appservice {
+ if let Some(auth) = &body.auth {
+ let (worked, uiaainfo) = db.uiaa.try_auth(
+ &UserId::parse_with_server_name("", db.globals.server_name())
+ .expect("we know this is valid"),
+ "".into(),
+ auth,
+ &uiaainfo,
+ &db.users,
+ &db.globals,
+ )?;
+ if !worked {
+ return Err(Error::Uiaa(uiaainfo));
+ }
+ // Success!
+ } else if let Some(json) = body.json_body {
+ uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
+ db.uiaa.create(
+ &UserId::parse_with_server_name("", db.globals.server_name())
+ .expect("we know this is valid"),
+ "".into(),
+ &uiaainfo,
+ &json,
+ )?;
+ return Err(Error::Uiaa(uiaainfo));
+ } else {
+ return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
+ }
+ }
+
+ let password = if is_guest {
+ None
+ } else {
+ body.password.as_deref()
+ };
+
+ // Create user
+ db.users.create(&user_id, password)?;
+
+ // Default to pretty displayname
+ let displayname = format!("{} ⚡️", user_id.localpart());
+ db.users
+ .set_displayname(&user_id, Some(displayname.clone()))?;
+
+ // Initial account data
+ db.account_data.update(
+ None,
+ &user_id,
+ GlobalAccountDataEventType::PushRules.to_string().into(),
+ &ruma::events::push_rules::PushRulesEvent {
+ content: ruma::events::push_rules::PushRulesEventContent {
+ global: push::Ruleset::server_default(&user_id),
+ },
+ },
+ &db.globals,
+ )?;
+
+ // Inhibit login does not work for guests
+ if !is_guest && body.inhibit_login {
+ return Ok(register::v3::Response {
+ access_token: None,
+ user_id,
+ device_id: None,
+ });
+ }
+
+ // Generate new device id if the user didn't specify one
+ let device_id = if is_guest {
+ None
+ } else {
+ body.device_id.clone()
+ }
+ .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());
+
+ // Generate new token for the device
+ let token = utils::random_string(TOKEN_LENGTH);
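+    // This opaque random string is what the client later presents as its Bearer
+    // `access_token` on authenticated requests.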
+
+ // Create device for this account
+ db.users.create_device(
+ &user_id,
+ &device_id,
+ &token,
+ body.initial_device_display_name.clone(),
+ )?;
+
+ info!("New user {} registered on this server.", user_id);
+ db.admin
+ .send_message(RoomMessageEventContent::notice_plain(format!(
+ "New user {} registered on this server.",
+ user_id
+ )));
+
+ // If this is the first real user, grant them admin privileges
+ // Note: the server user, @conduit:servername, is generated first
+ if db.users.count()? == 2 {
+ make_user_admin(&db, &user_id, displayname).await?;
+
+ warn!("Granting {} admin privileges as the first user", user_id);
+ }
+
+ db.flush()?;
+
+ Ok(register::v3::Response {
+ access_token: Some(token),
+ user_id,
+ device_id: Some(device_id),
+ })
+}
+
+/// # `POST /_matrix/client/r0/account/password`
+///
+/// Changes the password of this account.
+///
+/// - Requires UIAA to verify user password
+/// - Changes the password of the sender user
+/// - The password hash is calculated using argon2 with a 32 character salt; the plain
+///   password is not saved
+///
+/// If logout_devices is true it does the following for each device except the sender device:
+/// - Invalidates access token
+/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
+/// - Forgets to-device events
+/// - Triggers device list updates
+pub async fn change_password_route(
+ db: DatabaseGuard,
+ body: Ruma<change_password::v3::IncomingRequest>,
+) -> Result<change_password::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+ let sender_device = body.sender_device.as_ref().expect("user is authenticated");
+
+ let mut uiaainfo = UiaaInfo {
+ flows: vec![AuthFlow {
+ stages: vec![AuthType::Password],
+ }],
+ completed: Vec::new(),
+ params: Default::default(),
+ session: None,
+ auth_error: None,
+ };
+
+ if let Some(auth) = &body.auth {
+ let (worked, uiaainfo) = db.uiaa.try_auth(
+ sender_user,
+ sender_device,
+ auth,
+ &uiaainfo,
+ &db.users,
+ &db.globals,
+ )?;
+ if !worked {
+ return Err(Error::Uiaa(uiaainfo));
+ }
+ // Success!
+ } else if let Some(json) = body.json_body {
+ uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
+ db.uiaa
+ .create(sender_user, sender_device, &uiaainfo, &json)?;
+ return Err(Error::Uiaa(uiaainfo));
+ } else {
+ return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
+ }
+
+ db.users
+ .set_password(sender_user, Some(&body.new_password))?;
+
+ if body.logout_devices {
+ // Logout all devices except the current one
+ for id in db
+ .users
+ .all_device_ids(sender_user)
+ .filter_map(|id| id.ok())
+ .filter(|id| id != sender_device)
+ {
+ db.users.remove_device(sender_user, &id)?;
+ }
+ }
+
+ db.flush()?;
+
+ info!("User {} changed their password.", sender_user);
+ db.admin
+ .send_message(RoomMessageEventContent::notice_plain(format!(
+ "User {} changed their password.",
+ sender_user
+ )));
+
+ Ok(change_password::v3::Response {})
+}
+
+/// # `GET /_matrix/client/r0/account/whoami`
+///
+/// Get user_id of the sender user.
+///
+/// Note: Also works for Application Services
+pub async fn whoami_route(
+ db: DatabaseGuard,
+ body: Ruma<whoami::v3::Request>,
+) -> Result<whoami::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+ let device_id = body.sender_device.as_ref().cloned();
+
+ Ok(whoami::v3::Response {
+ user_id: sender_user.clone(),
+ device_id,
+ is_guest: db.users.is_deactivated(&sender_user)?,
+ })
+}
+
+/// # `POST /_matrix/client/r0/account/deactivate`
+///
+/// Deactivate sender user account.
+///
+/// - Leaves all rooms and rejects all invitations
+/// - Invalidates all access tokens
+/// - Deletes all device metadata (device id, device display name, last seen ip, last seen ts)
+/// - Forgets all to-device events
+/// - Triggers device list updates
+/// - Removes ability to log in again
+pub async fn deactivate_route(
+ db: DatabaseGuard,
+ body: Ruma<deactivate::v3::IncomingRequest>,
+) -> Result<deactivate::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+ let sender_device = body.sender_device.as_ref().expect("user is authenticated");
+
+ let mut uiaainfo = UiaaInfo {
+ flows: vec![AuthFlow {
+ stages: vec![AuthType::Password],
+ }],
+ completed: Vec::new(),
+ params: Default::default(),
+ session: None,
+ auth_error: None,
+ };
+
+ if let Some(auth) = &body.auth {
+ let (worked, uiaainfo) = db.uiaa.try_auth(
+ sender_user,
+ sender_device,
+ auth,
+ &uiaainfo,
+ &db.users,
+ &db.globals,
+ )?;
+ if !worked {
+ return Err(Error::Uiaa(uiaainfo));
+ }
+ // Success!
+ } else if let Some(json) = body.json_body {
+ uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
+ db.uiaa
+ .create(sender_user, sender_device, &uiaainfo, &json)?;
+ return Err(Error::Uiaa(uiaainfo));
+ } else {
+ return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
+ }
+
+ // Make the user leave all rooms before deactivation
+ db.rooms.leave_all_rooms(&sender_user, &db).await?;
+
+ // Remove devices and mark account as deactivated
+ db.users.deactivate_account(sender_user)?;
+
+ info!("User {} deactivated their account.", sender_user);
+ db.admin
+ .send_message(RoomMessageEventContent::notice_plain(format!(
+ "User {} deactivated their account.",
+ sender_user
+ )));
+
+ db.flush()?;
+
+ Ok(deactivate::v3::Response {
+ id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport,
+ })
+}
+
+/// # `GET /_matrix/client/r0/account/3pid`
+///
+/// Get a list of third party identifiers associated with this account.
+///
+/// - Currently always returns empty list
+pub async fn third_party_route(
+ body: Ruma<get_3pids::v3::Request>,
+) -> Result<get_3pids::v3::Response> {
+ let _sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ Ok(get_3pids::v3::Response::new(Vec::new()))
+}
diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs
new file mode 100644
index 0000000..90e9d2c
--- /dev/null
+++ b/src/api/client_server/alias.rs
@@ -0,0 +1,151 @@
+use crate::{database::DatabaseGuard, Database, Error, Result, Ruma};
+use regex::Regex;
+use ruma::{
+ api::{
+ appservice,
+ client::{
+ alias::{create_alias, delete_alias, get_alias},
+ error::ErrorKind,
+ },
+ federation,
+ },
+ RoomAliasId,
+};
+
+/// # `PUT /_matrix/client/r0/directory/room/{roomAlias}`
+///
+/// Creates a new room alias on this server.
+pub async fn create_alias_route(
+ db: DatabaseGuard,
+ body: Ruma<create_alias::v3::IncomingRequest>,
+) -> Result<create_alias::v3::Response> {
+ if body.room_alias.server_name() != db.globals.server_name() {
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Alias is from another server.",
+ ));
+ }
+
+ if db.rooms.id_from_alias(&body.room_alias)?.is_some() {
+ return Err(Error::Conflict("Alias already exists."));
+ }
+
+ db.rooms
+ .set_alias(&body.room_alias, Some(&body.room_id), &db.globals)?;
+
+ db.flush()?;
+
+ Ok(create_alias::v3::Response::new())
+}
+
+/// # `DELETE /_matrix/client/r0/directory/room/{roomAlias}`
+///
+/// Deletes a room alias from this server.
+///
+/// - TODO: additional access control checks
+/// - TODO: Update canonical alias event
+pub async fn delete_alias_route(
+ db: DatabaseGuard,
+ body: Ruma<delete_alias::v3::IncomingRequest>,
+) -> Result<delete_alias::v3::Response> {
+ if body.room_alias.server_name() != db.globals.server_name() {
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Alias is from another server.",
+ ));
+ }
+
+ db.rooms.set_alias(&body.room_alias, None, &db.globals)?;
+
+ // TODO: update alt_aliases?
+
+ db.flush()?;
+
+ Ok(delete_alias::v3::Response::new())
+}
+
+/// # `GET /_matrix/client/r0/directory/room/{roomAlias}`
+///
+/// Resolve an alias locally or over federation.
+///
+/// - TODO: Suggest more servers to join via
+pub async fn get_alias_route(
+ db: DatabaseGuard,
+ body: Ruma<get_alias::v3::IncomingRequest>,
+) -> Result<get_alias::v3::Response> {
+ get_alias_helper(&db, &body.room_alias).await
+}
+
+pub(crate) async fn get_alias_helper(
+ db: &Database,
+ room_alias: &RoomAliasId,
+) -> Result<get_alias::v3::Response> {
+ if room_alias.server_name() != db.globals.server_name() {
+ let response = db
+ .sending
+ .send_federation_request(
+ &db.globals,
+ room_alias.server_name(),
+ federation::query::get_room_information::v1::Request { room_alias },
+ )
+ .await?;
+
+ return Ok(get_alias::v3::Response::new(
+ response.room_id,
+ response.servers,
+ ));
+ }
+
+ let mut room_id = None;
+ match db.rooms.id_from_alias(room_alias)? {
+ Some(r) => room_id = Some(r),
+ None => {
+ for (_id, registration) in db.appservice.all()? {
+ let aliases = registration
+ .get("namespaces")
+ .and_then(|ns| ns.get("aliases"))
+ .and_then(|aliases| aliases.as_sequence())
+ .map_or_else(Vec::new, |aliases| {
+ aliases
+ .iter()
+ .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok())
+ .collect::<Vec<_>>()
+ });
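+
+                // An appservice registration's namespace section looks roughly like
+                // this (illustrative):
+                //
+                //     namespaces:
+                //       aliases:
+                //         - exclusive: true
+                //           regex: "#_irc_.*:example.org"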
+
+ if aliases
+ .iter()
+ .any(|aliases| aliases.is_match(room_alias.as_str()))
+ && db
+ .sending
+ .send_appservice_request(
+ &db.globals,
+ registration,
+ appservice::query::query_room_alias::v1::Request { room_alias },
+ )
+ .await
+ .is_ok()
+ {
+ room_id = Some(db.rooms.id_from_alias(room_alias)?.ok_or_else(|| {
+ Error::bad_config("Appservice lied to us. Room does not exist.")
+ })?);
+ break;
+ }
+ }
+ }
+ };
+
+ let room_id = match room_id {
+ Some(room_id) => room_id,
+ None => {
+ return Err(Error::BadRequest(
+ ErrorKind::NotFound,
+ "Room with alias not found.",
+ ))
+ }
+ };
+
+ Ok(get_alias::v3::Response::new(
+ room_id,
+ vec![db.globals.server_name().to_owned()],
+ ))
+}
diff --git a/src/api/client_server/backup.rs b/src/api/client_server/backup.rs
new file mode 100644
index 0000000..067f20c
--- /dev/null
+++ b/src/api/client_server/backup.rs
@@ -0,0 +1,352 @@
+use crate::{database::DatabaseGuard, Error, Result, Ruma};
+use ruma::api::client::{
+ backup::{
+ add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session,
+ create_backup_version, delete_backup_keys, delete_backup_keys_for_room,
+ delete_backup_keys_for_session, delete_backup_version, get_backup_info, get_backup_keys,
+ get_backup_keys_for_room, get_backup_keys_for_session, get_latest_backup_info,
+ update_backup_version,
+ },
+ error::ErrorKind,
+};
+
+/// # `POST /_matrix/client/r0/room_keys/version`
+///
+/// Creates a new backup.
+pub async fn create_backup_version_route(
+ db: DatabaseGuard,
+ body: Ruma<create_backup_version::v3::Request>,
+) -> Result<create_backup_version::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+ let version = db
+ .key_backups
+ .create_backup(sender_user, &body.algorithm, &db.globals)?;
+
+ db.flush()?;
+
+ Ok(create_backup_version::v3::Response { version })
+}
+
+/// # `PUT /_matrix/client/r0/room_keys/version/{version}`
+///
+/// Update information about an existing backup. Only `auth_data` can be modified.
+pub async fn update_backup_version_route(
+ db: DatabaseGuard,
+ body: Ruma<update_backup_version::v3::IncomingRequest>,
+) -> Result<update_backup_version::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+ db.key_backups
+ .update_backup(sender_user, &body.version, &body.algorithm, &db.globals)?;
+
+ db.flush()?;
+
+ Ok(update_backup_version::v3::Response {})
+}
+
+/// # `GET /_matrix/client/r0/room_keys/version`
+///
+/// Get information about the latest backup version.
+pub async fn get_latest_backup_info_route(
+ db: DatabaseGuard,
+ body: Ruma<get_latest_backup_info::v3::Request>,
+) -> Result<get_latest_backup_info::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let (version, algorithm) =
+ db.key_backups
+ .get_latest_backup(sender_user)?
+ .ok_or(Error::BadRequest(
+ ErrorKind::NotFound,
+ "Key backup does not exist.",
+ ))?;
+
+ Ok(get_latest_backup_info::v3::Response {
+ algorithm,
+ count: (db.key_backups.count_keys(sender_user, &version)? as u32).into(),
+ etag: db.key_backups.get_etag(sender_user, &version)?,
+ version,
+ })
+}
+
+/// # `GET /_matrix/client/r0/room_keys/version/{version}`
+///
+/// Get information about an existing backup.
+pub async fn get_backup_info_route(
+ db: DatabaseGuard,
+ body: Ruma<get_backup_info::v3::IncomingRequest>,
+) -> Result<get_backup_info::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+ let algorithm = db
+ .key_backups
+ .get_backup(sender_user, &body.version)?
+ .ok_or(Error::BadRequest(
+ ErrorKind::NotFound,
+ "Key backup does not exist.",
+ ))?;
+
+ Ok(get_backup_info::v3::Response {
+ algorithm,
+ count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
+ etag: db.key_backups.get_etag(sender_user, &body.version)?,
+ version: body.version.to_owned(),
+ })
+}
+
+/// # `DELETE /_matrix/client/r0/room_keys/version/{version}`
+///
+/// Delete an existing key backup.
+///
+/// - Deletes both the backup metadata and all key data belonging to the backup
+pub async fn delete_backup_version_route(
+ db: DatabaseGuard,
+ body: Ruma<delete_backup_version::v3::IncomingRequest>,
+) -> Result<delete_backup_version::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ db.key_backups.delete_backup(sender_user, &body.version)?;
+
+ db.flush()?;
+
+ Ok(delete_backup_version::v3::Response {})
+}
+
+/// # `PUT /_matrix/client/r0/room_keys/keys`
+///
+/// Add the received backup keys to the database.
+///
+/// - Only manipulating the most recently created version of the backup is allowed
+/// - Adds the keys to the backup
+/// - Returns the new number of keys in this backup and the etag
+pub async fn add_backup_keys_route(
+ db: DatabaseGuard,
+ body: Ruma<add_backup_keys::v3::IncomingRequest>,
+) -> Result<add_backup_keys::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ if Some(&body.version)
+ != db
+ .key_backups
+ .get_latest_backup_version(sender_user)?
+ .as_ref()
+ {
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "You may only manipulate the most recently created version of the backup.",
+ ));
+ }
+
+ for (room_id, room) in &body.rooms {
+ for (session_id, key_data) in &room.sessions {
+ db.key_backups.add_key(
+ sender_user,
+ &body.version,
+ room_id,
+ session_id,
+ key_data,
+ &db.globals,
+ )?
+ }
+ }
+
+ db.flush()?;
+
+ Ok(add_backup_keys::v3::Response {
+ count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
+ etag: db.key_backups.get_etag(sender_user, &body.version)?,
+ })
+}
+
+/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}`
+///
+/// Add the received backup keys to the database.
+///
+/// - Only manipulating the most recently created version of the backup is allowed
+/// - Adds the keys to the backup
+/// - Returns the new number of keys in this backup and the etag
+pub async fn add_backup_keys_for_room_route(
+ db: DatabaseGuard,
+ body: Ruma<add_backup_keys_for_room::v3::IncomingRequest>,
+) -> Result<add_backup_keys_for_room::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ if Some(&body.version)
+ != db
+ .key_backups
+ .get_latest_backup_version(sender_user)?
+ .as_ref()
+ {
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "You may only manipulate the most recently created version of the backup.",
+ ));
+ }
+
+ for (session_id, key_data) in &body.sessions {
+ db.key_backups.add_key(
+ sender_user,
+ &body.version,
+ &body.room_id,
+ session_id,
+ key_data,
+ &db.globals,
+ )?
+ }
+
+ db.flush()?;
+
+ Ok(add_backup_keys_for_room::v3::Response {
+ count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
+ etag: db.key_backups.get_etag(sender_user, &body.version)?,
+ })
+}
+
+/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
+///
+/// Add the received backup key to the database.
+///
+/// - Only manipulating the most recently created version of the backup is allowed
+/// - Adds the keys to the backup
+/// - Returns the new number of keys in this backup and the etag
+pub async fn add_backup_keys_for_session_route(
+ db: DatabaseGuard,
+ body: Ruma<add_backup_keys_for_session::v3::IncomingRequest>,
+) -> Result<add_backup_keys_for_session::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ if Some(&body.version)
+ != db
+ .key_backups
+ .get_latest_backup_version(sender_user)?
+ .as_ref()
+ {
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "You may only manipulate the most recently created version of the backup.",
+ ));
+ }
+
+ db.key_backups.add_key(
+ sender_user,
+ &body.version,
+ &body.room_id,
+ &body.session_id,
+ &body.session_data,
+ &db.globals,
+ )?;
+
+ db.flush()?;
+
+ Ok(add_backup_keys_for_session::v3::Response {
+ count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
+ etag: db.key_backups.get_etag(sender_user, &body.version)?,
+ })
+}
+
+/// # `GET /_matrix/client/r0/room_keys/keys`
+///
+/// Retrieves all keys from the backup.
+pub async fn get_backup_keys_route(
+ db: DatabaseGuard,
+ body: Ruma<get_backup_keys::v3::IncomingRequest>,
+) -> Result<get_backup_keys::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let rooms = db.key_backups.get_all(sender_user, &body.version)?;
+
+ Ok(get_backup_keys::v3::Response { rooms })
+}
+
+/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}`
+///
+/// Retrieves all keys from the backup for a given room.
+pub async fn get_backup_keys_for_room_route(
+ db: DatabaseGuard,
+ body: Ruma<get_backup_keys_for_room::v3::IncomingRequest>,
+) -> Result<get_backup_keys_for_room::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let sessions = db
+ .key_backups
+ .get_room(sender_user, &body.version, &body.room_id)?;
+
+ Ok(get_backup_keys_for_room::v3::Response { sessions })
+}
+
+/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
+///
+/// Retrieves a key from the backup.
+pub async fn get_backup_keys_for_session_route(
+ db: DatabaseGuard,
+ body: Ruma<get_backup_keys_for_session::v3::IncomingRequest>,
+) -> Result<get_backup_keys_for_session::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let key_data = db
+ .key_backups
+ .get_session(sender_user, &body.version, &body.room_id, &body.session_id)?
+ .ok_or(Error::BadRequest(
+ ErrorKind::NotFound,
+ "Backup key not found for this user's session.",
+ ))?;
+
+ Ok(get_backup_keys_for_session::v3::Response { key_data })
+}
+
+/// # `DELETE /_matrix/client/r0/room_keys/keys`
+///
+/// Delete the keys from the backup.
+pub async fn delete_backup_keys_route(
+ db: DatabaseGuard,
+ body: Ruma<delete_backup_keys::v3::IncomingRequest>,
+) -> Result<delete_backup_keys::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ db.key_backups.delete_all_keys(sender_user, &body.version)?;
+
+ db.flush()?;
+
+ Ok(delete_backup_keys::v3::Response {
+ count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
+ etag: db.key_backups.get_etag(sender_user, &body.version)?,
+ })
+}
+
+/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}`
+///
+/// Delete the keys from the backup for a given room.
+pub async fn delete_backup_keys_for_room_route(
+ db: DatabaseGuard,
+ body: Ruma<delete_backup_keys_for_room::v3::IncomingRequest>,
+) -> Result<delete_backup_keys_for_room::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ db.key_backups
+ .delete_room_keys(sender_user, &body.version, &body.room_id)?;
+
+ db.flush()?;
+
+ Ok(delete_backup_keys_for_room::v3::Response {
+ count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
+ etag: db.key_backups.get_etag(sender_user, &body.version)?,
+ })
+}
+
+/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
+///
+/// Delete a key from the backup.
+pub async fn delete_backup_keys_for_session_route(
+ db: DatabaseGuard,
+ body: Ruma<delete_backup_keys_for_session::v3::IncomingRequest>,
+) -> Result<delete_backup_keys_for_session::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ db.key_backups
+ .delete_room_key(sender_user, &body.version, &body.room_id, &body.session_id)?;
+
+ db.flush()?;
+
+ Ok(delete_backup_keys_for_session::v3::Response {
+ count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
+ etag: db.key_backups.get_etag(sender_user, &body.version)?,
+ })
+}
diff --git a/src/api/client_server/capabilities.rs b/src/api/client_server/capabilities.rs
new file mode 100644
index 0000000..417ad29
--- /dev/null
+++ b/src/api/client_server/capabilities.rs
@@ -0,0 +1,35 @@
+use crate::{database::DatabaseGuard, Result, Ruma};
+use ruma::api::client::discovery::get_capabilities::{
+ self, Capabilities, RoomVersionStability, RoomVersionsCapability,
+};
+use std::collections::BTreeMap;
+
+/// # `GET /_matrix/client/r0/capabilities`
+///
+/// Get information on the supported feature set and other relevant capabilities of this server.
+pub async fn get_capabilities_route(
+ db: DatabaseGuard,
+ _body: Ruma<get_capabilities::v3::IncomingRequest>,
+) -> Result<get_capabilities::v3::Response> {
+ let mut available = BTreeMap::new();
+ if db.globals.allow_unstable_room_versions() {
+ for room_version in &db.globals.unstable_room_versions {
+ available.insert(room_version.clone(), RoomVersionStability::Stable);
+ }
+ } else {
+ for room_version in &db.globals.unstable_room_versions {
+ available.insert(room_version.clone(), RoomVersionStability::Unstable);
+ }
+ }
+ for room_version in &db.globals.stable_room_versions {
+ available.insert(room_version.clone(), RoomVersionStability::Stable);
+ }
+
+ let mut capabilities = Capabilities::new();
+ capabilities.room_versions = RoomVersionsCapability {
+ default: db.globals.default_room_version(),
+ available,
+ };
+
+ Ok(get_capabilities::v3::Response { capabilities })
+}
diff --git a/src/api/client_server/config.rs b/src/api/client_server/config.rs
new file mode 100644
index 0000000..6184e0b
--- /dev/null
+++ b/src/api/client_server/config.rs
@@ -0,0 +1,130 @@
+use crate::{database::DatabaseGuard, Error, Result, Ruma};
+use ruma::{
+ api::client::{
+ config::{
+ get_global_account_data, get_room_account_data, set_global_account_data,
+ set_room_account_data,
+ },
+ error::ErrorKind,
+ },
+ events::{AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent},
+ serde::Raw,
+};
+use serde::Deserialize;
+use serde_json::{json, value::RawValue as RawJsonValue};
+
+/// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}`
+///
+/// Sets some account data for the sender user.
+pub async fn set_global_account_data_route(
+ db: DatabaseGuard,
+ body: Ruma<set_global_account_data::v3::IncomingRequest>,
+) -> Result<set_global_account_data::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let data: serde_json::Value = serde_json::from_str(body.data.json().get())
+ .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?;
+
+ let event_type = body.event_type.to_string();
+
+ db.account_data.update(
+ None,
+ sender_user,
+ event_type.clone().into(),
+ &json!({
+ "type": event_type,
+ "content": data,
+ }),
+ &db.globals,
+ )?;
+
+ db.flush()?;
+
+ Ok(set_global_account_data::v3::Response {})
+}
+
+/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}`
+///
+/// Sets some room account data for the sender user.
+pub async fn set_room_account_data_route(
+ db: DatabaseGuard,
+ body: Ruma<set_room_account_data::v3::IncomingRequest>,
+) -> Result<set_room_account_data::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let data: serde_json::Value = serde_json::from_str(body.data.json().get())
+ .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?;
+
+ let event_type = body.event_type.to_string();
+
+ db.account_data.update(
+ Some(&body.room_id),
+ sender_user,
+ event_type.clone().into(),
+ &json!({
+ "type": event_type,
+ "content": data,
+ }),
+ &db.globals,
+ )?;
+
+ db.flush()?;
+
+ Ok(set_room_account_data::v3::Response {})
+}
+
+/// # `GET /_matrix/client/r0/user/{userId}/account_data/{type}`
+///
+/// Gets some account data for the sender user.
+pub async fn get_global_account_data_route(
+ db: DatabaseGuard,
+ body: Ruma<get_global_account_data::v3::IncomingRequest>,
+) -> Result<get_global_account_data::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let event: Box<RawJsonValue> = db
+ .account_data
+ .get(None, sender_user, body.event_type.clone().into())?
+ .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
+
+ let account_data = serde_json::from_str::<ExtractGlobalEventContent>(event.get())
+ .map_err(|_| Error::bad_database("Invalid account data event in db."))?
+ .content;
+
+ Ok(get_global_account_data::v3::Response { account_data })
+}
+
+/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}`
+///
+/// Gets some room account data for the sender user.
+pub async fn get_room_account_data_route(
+ db: DatabaseGuard,
+ body: Ruma<get_room_account_data::v3::IncomingRequest>,
+) -> Result<get_room_account_data::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let event: Box<RawJsonValue> = db
+ .account_data
+ .get(
+ Some(&body.room_id),
+ sender_user,
+ body.event_type.clone().into(),
+ )?
+ .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
+
+ let account_data = serde_json::from_str::<ExtractRoomEventContent>(event.get())
+ .map_err(|_| Error::bad_database("Invalid account data event in db."))?
+ .content;
+
+ Ok(get_room_account_data::v3::Response { account_data })
+}
+
+#[derive(Deserialize)]
+struct ExtractRoomEventContent {
+ content: Raw<AnyRoomAccountDataEventContent>,
+}
+
+#[derive(Deserialize)]
+struct ExtractGlobalEventContent {
+ content: Raw<AnyGlobalAccountDataEventContent>,
+}
diff --git a/src/api/client_server/context.rs b/src/api/client_server/context.rs
new file mode 100644
index 0000000..e93f5a5
--- /dev/null
+++ b/src/api/client_server/context.rs
@@ -0,0 +1,188 @@
+use crate::{database::DatabaseGuard, Error, Result, Ruma};
+use ruma::{
+ api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions},
+ events::StateEventType,
+};
+use std::{collections::HashSet, convert::TryFrom};
+use tracing::error;
+
+/// # `GET /_matrix/client/r0/rooms/{roomId}/context`
+///
+/// Allows loading room history around an event.
+///
+/// - Only works if the user is joined (TODO: always allow, but only show events if the user was
+/// joined, depending on history_visibility)
+pub async fn get_context_route(
+ db: DatabaseGuard,
+ body: Ruma<get_context::v3::IncomingRequest>,
+) -> Result<get_context::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+ let sender_device = body.sender_device.as_ref().expect("user is authenticated");
+
+ let (lazy_load_enabled, lazy_load_send_redundant) = match &body.filter.lazy_load_options {
+ LazyLoadOptions::Enabled {
+ include_redundant_members,
+ } => (true, *include_redundant_members),
+ _ => (false, false),
+ };
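+
+    // Lazy loading (sketch): when enabled, m.room.member state is only returned for
+    // senders of the events in the returned chunk, unless the client asked for
+    // redundant members to be resent.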
+
+ let mut lazy_loaded = HashSet::new();
+
+ let base_pdu_id = db
+ .rooms
+ .get_pdu_id(&body.event_id)?
+ .ok_or(Error::BadRequest(
+ ErrorKind::NotFound,
+ "Base event id not found.",
+ ))?;
+
+ let base_token = db.rooms.pdu_count(&base_pdu_id)?;
+
+ let base_event = db
+ .rooms
+ .get_pdu_from_id(&base_pdu_id)?
+ .ok_or(Error::BadRequest(
+ ErrorKind::NotFound,
+ "Base event not found.",
+ ))?;
+
+ let room_id = base_event.room_id.clone();
+
+ if !db.rooms.is_joined(sender_user, &room_id)? {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "You don't have permission to view this room.",
+ ));
+ }
+
+ if !db.rooms.lazy_load_was_sent_before(
+ sender_user,
+ sender_device,
+ &room_id,
+ &base_event.sender,
+ )? || lazy_load_send_redundant
+ {
+ lazy_loaded.insert(base_event.sender.as_str().to_owned());
+ }
+
+ let base_event = base_event.to_room_event();
+
+ let events_before: Vec<_> = db
+ .rooms
+ .pdus_until(sender_user, &room_id, base_token)?
+ .take(
+ u32::try_from(body.limit).map_err(|_| {
+ Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.")
+ })? as usize
+ / 2,
+ )
+ .filter_map(|r| r.ok()) // Remove buggy events
+ .collect();
+
+ for (_, event) in &events_before {
+ if !db.rooms.lazy_load_was_sent_before(
+ sender_user,
+ sender_device,
+ &room_id,
+ &event.sender,
+ )? || lazy_load_send_redundant
+ {
+ lazy_loaded.insert(event.sender.as_str().to_owned());
+ }
+ }
+
+ let start_token = events_before
+ .last()
+ .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok())
+ .map(|count| count.to_string());
+
+ let events_before: Vec<_> = events_before
+ .into_iter()
+ .map(|(_, pdu)| pdu.to_room_event())
+ .collect();
+
+ let events_after: Vec<_> = db
+ .rooms
+ .pdus_after(sender_user, &room_id, base_token)?
+ .take(
+ u32::try_from(body.limit).map_err(|_| {
+ Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.")
+ })? as usize
+ / 2,
+ )
+ .filter_map(|r| r.ok()) // Remove buggy events
+ .collect();
+
+ for (_, event) in &events_after {
+ if !db.rooms.lazy_load_was_sent_before(
+ sender_user,
+ sender_device,
+ &room_id,
+ &event.sender,
+ )? || lazy_load_send_redundant
+ {
+ lazy_loaded.insert(event.sender.as_str().to_owned());
+ }
+ }
+
+ let shortstatehash = match db.rooms.pdu_shortstatehash(
+ events_after
+ .last()
+ .map_or(&*body.event_id, |(_, e)| &*e.event_id),
+ )? {
+ Some(s) => s,
+ None => db
+ .rooms
+ .current_shortstatehash(&room_id)?
+ .expect("All rooms have state"),
+ };
+
+ let state_ids = db.rooms.state_full_ids(shortstatehash).await?;
+
+ let end_token = events_after
+ .last()
+ .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok())
+ .map(|count| count.to_string());
+
+ let events_after: Vec<_> = events_after
+ .into_iter()
+ .map(|(_, pdu)| pdu.to_room_event())
+ .collect();
+
+ let mut state = Vec::new();
+
+ for (shortstatekey, id) in state_ids {
+ let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?;
+
+ if event_type != StateEventType::RoomMember {
+ let pdu = match db.rooms.get_pdu(&id)? {
+ Some(pdu) => pdu,
+ None => {
+ error!("Pdu in state not found: {}", id);
+ continue;
+ }
+ };
+ state.push(pdu.to_state_event());
+ } else if !lazy_load_enabled || lazy_loaded.contains(&state_key) {
+ let pdu = match db.rooms.get_pdu(&id)? {
+ Some(pdu) => pdu,
+ None => {
+ error!("Pdu in state not found: {}", id);
+ continue;
+ }
+ };
+ state.push(pdu.to_state_event());
+ }
+ }
+
+ let resp = get_context::v3::Response {
+ start: start_token,
+ end: end_token,
+ events_before,
+ event: Some(base_event),
+ events_after,
+ state,
+ };
+
+ Ok(resp)
+}
diff --git a/src/api/client_server/device.rs b/src/api/client_server/device.rs
new file mode 100644
index 0000000..b100bf2
--- /dev/null
+++ b/src/api/client_server/device.rs
@@ -0,0 +1,183 @@
+use crate::{database::DatabaseGuard, utils, Error, Result, Ruma};
+use ruma::api::client::{
+ device::{self, delete_device, delete_devices, get_device, get_devices, update_device},
+ error::ErrorKind,
+ uiaa::{AuthFlow, AuthType, UiaaInfo},
+};
+
+use super::SESSION_ID_LENGTH;
+
+/// # `GET /_matrix/client/r0/devices`
+///
+/// Get metadata on all devices of the sender user.
+pub async fn get_devices_route(
+ db: DatabaseGuard,
+ body: Ruma<get_devices::v3::Request>,
+) -> Result<get_devices::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let devices: Vec<device::Device> = db
+ .users
+ .all_devices_metadata(sender_user)
+ .filter_map(|r| r.ok()) // Filter out buggy devices
+ .collect();
+
+ Ok(get_devices::v3::Response { devices })
+}
+
+/// # `GET /_matrix/client/r0/devices/{deviceId}`
+///
+/// Get metadata on a single device of the sender user.
+pub async fn get_device_route(
+ db: DatabaseGuard,
+ body: Ruma<get_device::v3::IncomingRequest>,
+) -> Result<get_device::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let device = db
+ .users
+ .get_device_metadata(sender_user, &body.body.device_id)?
+ .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;
+
+ Ok(get_device::v3::Response { device })
+}
+
+/// # `PUT /_matrix/client/r0/devices/{deviceId}`
+///
+/// Updates the metadata on a given device of the sender user.
+pub async fn update_device_route(
+ db: DatabaseGuard,
+ body: Ruma<update_device::v3::IncomingRequest>,
+) -> Result<update_device::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let mut device = db
+ .users
+ .get_device_metadata(sender_user, &body.device_id)?
+ .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;
+
+ device.display_name = body.display_name.clone();
+
+ db.users
+ .update_device_metadata(sender_user, &body.device_id, &device)?;
+
+ db.flush()?;
+
+ Ok(update_device::v3::Response {})
+}
+
+/// # `DELETE /_matrix/client/r0/devices/{deviceId}`
+///
+/// Deletes the given device.
+///
+/// - Requires UIAA to verify user password
+/// - Invalidates access token
+/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
+/// - Forgets to-device events
+/// - Triggers device list updates
+pub async fn delete_device_route(
+ db: DatabaseGuard,
+ body: Ruma<delete_device::v3::IncomingRequest>,
+) -> Result<delete_device::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+ let sender_device = body.sender_device.as_ref().expect("user is authenticated");
+
+ // UIAA
+ let mut uiaainfo = UiaaInfo {
+ flows: vec![AuthFlow {
+ stages: vec![AuthType::Password],
+ }],
+ completed: Vec::new(),
+ params: Default::default(),
+ session: None,
+ auth_error: None,
+ };
+
+ if let Some(auth) = &body.auth {
+ let (worked, uiaainfo) = db.uiaa.try_auth(
+ sender_user,
+ sender_device,
+ auth,
+ &uiaainfo,
+ &db.users,
+ &db.globals,
+ )?;
+ if !worked {
+ return Err(Error::Uiaa(uiaainfo));
+ }
+ // Success!
+ } else if let Some(json) = body.json_body {
+ uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
+ db.uiaa
+ .create(sender_user, sender_device, &uiaainfo, &json)?;
+ return Err(Error::Uiaa(uiaainfo));
+ } else {
+ return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
+ }
+
+ db.users.remove_device(sender_user, &body.device_id)?;
+
+ db.flush()?;
+
+ Ok(delete_device::v3::Response {})
+}
+
+/// # `POST /_matrix/client/r0/delete_devices`
+///
+/// Deletes the given list of devices.
+///
+/// - Requires UIAA to verify user password
+///
+/// For each device:
+/// - Invalidates access token
+/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
+/// - Forgets to-device events
+/// - Triggers device list updates
+pub async fn delete_devices_route(
+ db: DatabaseGuard,
+ body: Ruma<delete_devices::v3::IncomingRequest>,
+) -> Result<delete_devices::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+ let sender_device = body.sender_device.as_ref().expect("user is authenticated");
+
+ // UIAA
+ let mut uiaainfo = UiaaInfo {
+ flows: vec![AuthFlow {
+ stages: vec![AuthType::Password],
+ }],
+ completed: Vec::new(),
+ params: Default::default(),
+ session: None,
+ auth_error: None,
+ };
+
+ if let Some(auth) = &body.auth {
+ let (worked, uiaainfo) = db.uiaa.try_auth(
+ sender_user,
+ sender_device,
+ auth,
+ &uiaainfo,
+ &db.users,
+ &db.globals,
+ )?;
+ if !worked {
+ return Err(Error::Uiaa(uiaainfo));
+ }
+ // Success!
+ } else if let Some(json) = body.json_body {
+ uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
+ db.uiaa
+ .create(sender_user, sender_device, &uiaainfo, &json)?;
+ return Err(Error::Uiaa(uiaainfo));
+ } else {
+ return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
+ }
+
+ for device_id in &body.devices {
+ db.users.remove_device(sender_user, device_id)?
+ }
+
+ db.flush()?;
+
+ Ok(delete_devices::v3::Response {})
+}
diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs
new file mode 100644
index 0000000..4e4a322
--- /dev/null
+++ b/src/api/client_server/directory.rs
@@ -0,0 +1,357 @@
+use crate::{database::DatabaseGuard, Database, Error, Result, Ruma};
+use ruma::{
+ api::{
+ client::{
+ directory::{
+ get_public_rooms, get_public_rooms_filtered, get_room_visibility,
+ set_room_visibility,
+ },
+ error::ErrorKind,
+ room,
+ },
+ federation,
+ },
+ directory::{
+ Filter, IncomingFilter, IncomingRoomNetwork, PublicRoomJoinRule, PublicRoomsChunk,
+ RoomNetwork,
+ },
+ events::{
+ room::{
+ avatar::RoomAvatarEventContent,
+ canonical_alias::RoomCanonicalAliasEventContent,
+ guest_access::{GuestAccess, RoomGuestAccessEventContent},
+ history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
+ join_rules::{JoinRule, RoomJoinRulesEventContent},
+ name::RoomNameEventContent,
+ topic::RoomTopicEventContent,
+ },
+ StateEventType,
+ },
+ ServerName, UInt,
+};
+use tracing::{info, warn};
+
+/// # `POST /_matrix/client/r0/publicRooms`
+///
+/// Lists the public rooms on this server.
+///
+/// - Rooms are ordered by the number of joined members
+pub async fn get_public_rooms_filtered_route(
+ db: DatabaseGuard,
+ body: Ruma<get_public_rooms_filtered::v3::IncomingRequest>,
+) -> Result<get_public_rooms_filtered::v3::Response> {
+ get_public_rooms_filtered_helper(
+ &db,
+ body.server.as_deref(),
+ body.limit,
+ body.since.as_deref(),
+ &body.filter,
+ &body.room_network,
+ )
+ .await
+}
+
+/// # `GET /_matrix/client/r0/publicRooms`
+///
+/// Lists the public rooms on this server.
+///
+/// - Rooms are ordered by the number of joined members
+pub async fn get_public_rooms_route(
+ db: DatabaseGuard,
+ body: Ruma<get_public_rooms::v3::IncomingRequest>,
+) -> Result<get_public_rooms::v3::Response> {
+ let response = get_public_rooms_filtered_helper(
+ &db,
+ body.server.as_deref(),
+ body.limit,
+ body.since.as_deref(),
+ &IncomingFilter::default(),
+ &IncomingRoomNetwork::Matrix,
+ )
+ .await?;
+
+ Ok(get_public_rooms::v3::Response {
+ chunk: response.chunk,
+ prev_batch: response.prev_batch,
+ next_batch: response.next_batch,
+ total_room_count_estimate: response.total_room_count_estimate,
+ })
+}
+
+/// # `PUT /_matrix/client/r0/directory/list/room/{roomId}`
+///
+/// Sets the visibility of a given room in the room directory.
+///
+/// - TODO: Access control checks
+pub async fn set_room_visibility_route(
+ db: DatabaseGuard,
+ body: Ruma<set_room_visibility::v3::IncomingRequest>,
+) -> Result<set_room_visibility::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ match &body.visibility {
+ room::Visibility::Public => {
+ db.rooms.set_public(&body.room_id, true)?;
+ info!("{} made {} public", sender_user, body.room_id);
+ }
+ room::Visibility::Private => db.rooms.set_public(&body.room_id, false)?,
+ _ => {
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Room visibility type is not supported.",
+ ));
+ }
+ }
+
+ db.flush()?;
+
+ Ok(set_room_visibility::v3::Response {})
+}
+
+/// # `GET /_matrix/client/r0/directory/list/room/{roomId}`
+///
+/// Gets the visibility of a given room in the room directory.
+pub async fn get_room_visibility_route(
+ db: DatabaseGuard,
+ body: Ruma<get_room_visibility::v3::IncomingRequest>,
+) -> Result<get_room_visibility::v3::Response> {
+ Ok(get_room_visibility::v3::Response {
+ visibility: if db.rooms.is_public_room(&body.room_id)? {
+ room::Visibility::Public
+ } else {
+ room::Visibility::Private
+ },
+ })
+}
+
+pub(crate) async fn get_public_rooms_filtered_helper(
+ db: &Database,
+ server: Option<&ServerName>,
+ limit: Option<UInt>,
+ since: Option<&str>,
+ filter: &IncomingFilter,
+ _network: &IncomingRoomNetwork,
+) -> Result<get_public_rooms_filtered::v3::Response> {
+ if let Some(other_server) = server.filter(|server| *server != db.globals.server_name().as_str())
+ {
+ let response = db
+ .sending
+ .send_federation_request(
+ &db.globals,
+ other_server,
+ federation::directory::get_public_rooms_filtered::v1::Request {
+ limit,
+ since,
+ filter: Filter {
+ generic_search_term: filter.generic_search_term.as_deref(),
+ },
+ room_network: RoomNetwork::Matrix,
+ },
+ )
+ .await?;
+
+ return Ok(get_public_rooms_filtered::v3::Response {
+ chunk: response.chunk,
+ prev_batch: response.prev_batch,
+ next_batch: response.next_batch,
+ total_room_count_estimate: response.total_room_count_estimate,
+ });
+ }
+
+ let limit = limit.map_or(10, u64::from);
+ let mut num_since = 0_u64;
+
+ if let Some(s) = &since {
+ let mut characters = s.chars();
+ let backwards = match characters.next() {
+ Some('n') => false,
+ Some('p') => true,
+ _ => {
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Invalid `since` token",
+ ))
+ }
+ };
+
+ num_since = characters
+ .collect::<String>()
+ .parse()
+ .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token."))?;
+
+ if backwards {
+ num_since = num_since.saturating_sub(limit);
+ }
+ }
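+
+    // Token sketch: "n20" continues forwards from offset 20, while "p20" pages
+    // backwards, which is served by rewinding the offset by `limit`.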
+
+ let mut all_rooms: Vec<_> = db
+ .rooms
+ .public_rooms()
+ .map(|room_id| {
+ let room_id = room_id?;
+
+ let chunk = PublicRoomsChunk {
+ canonical_alias: db
+ .rooms
+ .room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")?
+ .map_or(Ok(None), |s| {
+ serde_json::from_str(s.content.get())
+ .map(|c: RoomCanonicalAliasEventContent| c.alias)
+ .map_err(|_| {
+ Error::bad_database("Invalid canonical alias event in database.")
+ })
+ })?,
+ name: db
+ .rooms
+ .room_state_get(&room_id, &StateEventType::RoomName, "")?
+ .map_or(Ok(None), |s| {
+ serde_json::from_str(s.content.get())
+ .map(|c: RoomNameEventContent| c.name)
+ .map_err(|_| {
+ Error::bad_database("Invalid room name event in database.")
+ })
+ })?,
+ num_joined_members: db
+ .rooms
+ .room_joined_count(&room_id)?
+ .unwrap_or_else(|| {
+ warn!("Room {} has no member count", room_id);
+ 0
+ })
+ .try_into()
+ .expect("user count should not be that big"),
+ topic: db
+ .rooms
+ .room_state_get(&room_id, &StateEventType::RoomTopic, "")?
+ .map_or(Ok(None), |s| {
+ serde_json::from_str(s.content.get())
+ .map(|c: RoomTopicEventContent| Some(c.topic))
+ .map_err(|_| {
+ Error::bad_database("Invalid room topic event in database.")
+ })
+ })?,
+ world_readable: db
+ .rooms
+ .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
+ .map_or(Ok(false), |s| {
+ serde_json::from_str(s.content.get())
+ .map(|c: RoomHistoryVisibilityEventContent| {
+ c.history_visibility == HistoryVisibility::WorldReadable
+ })
+ .map_err(|_| {
+ Error::bad_database(
+ "Invalid room history visibility event in database.",
+ )
+ })
+ })?,
+ guest_can_join: db
+ .rooms
+ .room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")?
+ .map_or(Ok(false), |s| {
+ serde_json::from_str(s.content.get())
+ .map(|c: RoomGuestAccessEventContent| {
+ c.guest_access == GuestAccess::CanJoin
+ })
+ .map_err(|_| {
+ Error::bad_database("Invalid room guest access event in database.")
+ })
+ })?,
+ avatar_url: db
+ .rooms
+ .room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
+ .map(|s| {
+ serde_json::from_str(s.content.get())
+ .map(|c: RoomAvatarEventContent| c.url)
+ .map_err(|_| {
+ Error::bad_database("Invalid room avatar event in database.")
+ })
+ })
+ .transpose()?
+ // url is now an Option<String> so we must flatten
+ .flatten(),
+ join_rule: db
+ .rooms
+ .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")?
+ .map(|s| {
+ serde_json::from_str(s.content.get())
+ .map(|c: RoomJoinRulesEventContent| match c.join_rule {
+ JoinRule::Public => Some(PublicRoomJoinRule::Public),
+ JoinRule::Knock => Some(PublicRoomJoinRule::Knock),
+ _ => None,
+ })
+ .map_err(|_| {
+ Error::bad_database("Invalid room join rule event in database.")
+ })
+ })
+ .transpose()?
+ .flatten()
+ .ok_or(Error::bad_database(
+ "Invalid room join rule event in database.",
+ ))?,
+ room_id,
+ };
+ Ok(chunk)
+ })
+ .filter_map(|r: Result<_>| r.ok()) // Filter out buggy rooms
+ .filter(|chunk| {
+ if let Some(query) = filter
+ .generic_search_term
+ .as_ref()
+ .map(|q| q.to_lowercase())
+ {
+ if let Some(name) = &chunk.name {
+ if name.as_str().to_lowercase().contains(&query) {
+ return true;
+ }
+ }
+
+ if let Some(topic) = &chunk.topic {
+ if topic.to_lowercase().contains(&query) {
+ return true;
+ }
+ }
+
+ if let Some(canonical_alias) = &chunk.canonical_alias {
+ if canonical_alias.as_str().to_lowercase().contains(&query) {
+ return true;
+ }
+ }
+
+ false
+ } else {
+ // No search term
+ true
+ }
+ })
+ // We need to collect all, so we can sort by member count
+ .collect();
+
+ all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members));
+
+ let total_room_count_estimate = (all_rooms.len() as u32).into();
+
+ let chunk: Vec<_> = all_rooms
+ .into_iter()
+ .skip(num_since as usize)
+ .take(limit as usize)
+ .collect();
+
+ let prev_batch = if num_since == 0 {
+ None
+ } else {
+ Some(format!("p{}", num_since))
+ };
+
+ let next_batch = if chunk.len() < limit as usize {
+ None
+ } else {
+ Some(format!("n{}", num_since + limit))
+ };
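+
+    // Example: with `limit` 10 and `num_since` 20, a full page yields
+    // `prev_batch = Some("p20")` and `next_batch = Some("n30")`.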
+
+ Ok(get_public_rooms_filtered::v3::Response {
+ chunk,
+ prev_batch,
+ next_batch,
+ total_room_count_estimate: Some(total_room_count_estimate),
+ })
+}
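
The p/n batch tokens minted above are plain string-encoded offsets into the member-count-sorted room list. A minimal sketch of decoding one back into a numeric offset, assuming only the token format visible here (parse_batch_token is an illustrative name, not part of this diff):

    // Hypothetical decoder for the "p{offset}" / "n{offset}" tokens built above.
    fn parse_batch_token(token: &str) -> Option<u64> {
        // Both prev ("p") and next ("n") tokens carry the offset after the prefix.
        token
            .strip_prefix('p')
            .or_else(|| token.strip_prefix('n'))
            .and_then(|offset| offset.parse().ok())
    }

    fn main() {
        assert_eq!(parse_batch_token("n30"), Some(30));
        assert_eq!(parse_batch_token("p10"), Some(10));
        assert_eq!(parse_batch_token("bogus"), None);
    }
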
diff --git a/src/api/client_server/filter.rs b/src/api/client_server/filter.rs
new file mode 100644
index 0000000..6522c90
--- /dev/null
+++ b/src/api/client_server/filter.rs
@@ -0,0 +1,36 @@
+use crate::{database::DatabaseGuard, Error, Result, Ruma};
+use ruma::api::client::{
+ error::ErrorKind,
+ filter::{create_filter, get_filter},
+};
+
+/// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}`
+///
+/// Loads a filter that was previously created.
+///
+/// - A user can only access their own filters
+pub async fn get_filter_route(
+ db: DatabaseGuard,
+ body: Ruma<get_filter::v3::IncomingRequest>,
+) -> Result<get_filter::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+ let filter = match db.users.get_filter(sender_user, &body.filter_id)? {
+ Some(filter) => filter,
+ None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")),
+ };
+
+ Ok(get_filter::v3::Response::new(filter))
+}
+
+/// # `POST /_matrix/client/r0/user/{userId}/filter`
+///
+/// Creates a new filter to be used by other endpoints.
+pub async fn create_filter_route(
+ db: DatabaseGuard,
+ body: Ruma<create_filter::v3::IncomingRequest>,
+) -> Result<create_filter::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+ Ok(create_filter::v3::Response::new(
+ db.users.create_filter(sender_user, &body.filter)?,
+ ))
+}
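
The two filter routes above form a plain create-then-fetch round trip: create_filter_route stores the filter body and hands back an opaque id, and get_filter_route later resolves that id or answers with a NotFound error. A minimal sketch of the same contract over an in-memory map (FilterStore is an illustrative stand-in, not conduit's storage layer):

    use std::collections::HashMap;

    // Illustrative stand-in for the per-user filter storage used by the routes above.
    struct FilterStore {
        next_id: u64,
        filters: HashMap<String, String>, // filter_id -> filter JSON
    }

    impl FilterStore {
        fn create_filter(&mut self, filter_json: String) -> String {
            let id = self.next_id.to_string();
            self.next_id += 1;
            self.filters.insert(id.clone(), filter_json);
            id // handed to the client, echoed back on GET
        }

        fn get_filter(&self, filter_id: &str) -> Option<&String> {
            self.filters.get(filter_id)
        }
    }

    fn main() {
        let mut store = FilterStore { next_id: 0, filters: HashMap::new() };
        let id = store.create_filter(r#"{"room":{"timeline":{"limit":10}}}"#.to_owned());
        assert!(store.get_filter(&id).is_some()); // round trip succeeds
        assert!(store.get_filter("missing").is_none()); // maps to the NotFound error above
    }
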
diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs
new file mode 100644
index 0000000..c4f91cb
--- /dev/null
+++ b/src/api/client_server/keys.rs
@@ -0,0 +1,477 @@
+use super::SESSION_ID_LENGTH;
+use crate::{database::DatabaseGuard, utils, Database, Error, Result, Ruma};
+use futures_util::{stream::FuturesUnordered, StreamExt};
+use ruma::{
+ api::{
+ client::{
+ error::ErrorKind,
+ keys::{
+ claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures,
+ upload_signing_keys,
+ },
+ uiaa::{AuthFlow, AuthType, UiaaInfo},
+ },
+ federation,
+ },
+ serde::Raw,
+ DeviceId, DeviceKeyAlgorithm, UserId,
+};
+use serde_json::json;
+use std::collections::{BTreeMap, HashMap, HashSet};
+
+/// # `POST /_matrix/client/r0/keys/upload`
+///
+/// Publish end-to-end encryption keys for the sender device.
+///
+/// - Adds one-time keys
+/// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?)
+pub async fn upload_keys_route(
+ db: DatabaseGuard,
+ body: Ruma<upload_keys::v3::Request>,
+) -> Result<upload_keys::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+ let sender_device = body.sender_device.as_ref().expect("user is authenticated");
+
+ for (key_key, key_value) in &body.one_time_keys {
+ db.users
+ .add_one_time_key(sender_user, sender_device, key_key, key_value, &db.globals)?;
+ }
+
+ if let Some(device_keys) = &body.device_keys {
+ // TODO: merge this and the existing event?
+ // This check is needed to ensure that signatures are kept
+ if db
+ .users
+ .get_device_keys(sender_user, sender_device)?
+ .is_none()
+ {
+ db.users.add_device_keys(
+ sender_user,
+ sender_device,
+ device_keys,
+ &db.rooms,
+ &db.globals,
+ )?;
+ }
+ }
+
+ db.flush()?;
+
+ Ok(upload_keys::v3::Response {
+ one_time_key_counts: db.users.count_one_time_keys(sender_user, sender_device)?,
+ })
+}
+
+/// # `POST /_matrix/client/r0/keys/query`
+///
+/// Get end-to-end encryption keys for the given users.
+///
+/// - Always fetches users from other servers over federation
+/// - Gets master keys, self-signing keys, user signing keys and device keys.
+/// - The master and self-signing keys contain signatures that the user is allowed to see
+pub async fn get_keys_route(
+ db: DatabaseGuard,
+ body: Ruma<get_keys::v3::IncomingRequest>,
+) -> Result<get_keys::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let response = get_keys_helper(
+ Some(sender_user),
+ &body.device_keys,
+ |u| u == sender_user,
+ &db,
+ )
+ .await?;
+
+ Ok(response)
+}
+
+/// # `POST /_matrix/client/r0/keys/claim`
+///
+/// Claims one-time keys
+pub async fn claim_keys_route(
+ db: DatabaseGuard,
+ body: Ruma<claim_keys::v3::Request>,
+) -> Result<claim_keys::v3::Response> {
+ let response = claim_keys_helper(&body.one_time_keys, &db).await?;
+
+ db.flush()?;
+
+ Ok(response)
+}
+
+/// # `POST /_matrix/client/r0/keys/device_signing/upload`
+///
+/// Uploads end-to-end key information for the sender user.
+///
+/// - Requires UIAA to verify password
+pub async fn upload_signing_keys_route(
+ db: DatabaseGuard,
+ body: Ruma<upload_signing_keys::v3::IncomingRequest>,
+) -> Result<upload_signing_keys::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+ let sender_device = body.sender_device.as_ref().expect("user is authenticated");
+
+ // UIAA
+ let mut uiaainfo = UiaaInfo {
+ flows: vec![AuthFlow {
+ stages: vec![AuthType::Password],
+ }],
+ completed: Vec::new(),
+ params: Default::default(),
+ session: None,
+ auth_error: None,
+ };
+
+ if let Some(auth) = &body.auth {
+ let (worked, uiaainfo) = db.uiaa.try_auth(
+ sender_user,
+ sender_device,
+ auth,
+ &uiaainfo,
+ &db.users,
+ &db.globals,
+ )?;
+ if !worked {
+ return Err(Error::Uiaa(uiaainfo));
+ }
+ // Success!
+ } else if let Some(json) = body.json_body {
+ uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
+ db.uiaa
+ .create(sender_user, sender_device, &uiaainfo, &json)?;
+ return Err(Error::Uiaa(uiaainfo));
+ } else {
+ return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
+ }
+
+ if let Some(master_key) = &body.master_key {
+ db.users.add_cross_signing_keys(
+ sender_user,
+ master_key,
+ &body.self_signing_key,
+ &body.user_signing_key,
+ &db.rooms,
+ &db.globals,
+ )?;
+ }
+
+ db.flush()?;
+
+ Ok(upload_signing_keys::v3::Response {})
+}
+
+/// # `POST /_matrix/client/r0/keys/signatures/upload`
+///
+/// Uploads end-to-end key signatures from the sender user.
+pub async fn upload_signatures_route(
+ db: DatabaseGuard,
+ body: Ruma<upload_signatures::v3::Request>,
+) -> Result<upload_signatures::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ for (user_id, signed_keys) in &body.signed_keys {
+ for (key_id, signed_key) in signed_keys {
+ let signed_key = serde_json::to_value(signed_key).unwrap();
+
+ for signature in signed_key
+ .get("signatures")
+ .ok_or(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Missing signatures field.",
+ ))?
+ .get(sender_user.to_string())
+ .ok_or(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Invalid user in signatures field.",
+ ))?
+ .as_object()
+ .ok_or(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Invalid signature.",
+ ))?
+ .clone()
+ .into_iter()
+ {
+ // Signature validation?
+ let signature = (
+ signature.0,
+ signature
+ .1
+ .as_str()
+ .ok_or(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Invalid signature value.",
+ ))?
+ .to_owned(),
+ );
+ db.users.sign_key(
+ user_id,
+ key_id,
+ signature,
+ sender_user,
+ &db.rooms,
+ &db.globals,
+ )?;
+ }
+ }
+ }
+
+ db.flush()?;
+
+ Ok(upload_signatures::v3::Response {
+ failures: BTreeMap::new(), // TODO: integrate
+ })
+}
+
+/// # `GET /_matrix/client/r0/keys/changes`
+///
+/// Gets a list of users who have updated their device identity keys since the previous sync token.
+///
+/// - TODO: left users
+pub async fn get_key_changes_route(
+ db: DatabaseGuard,
+ body: Ruma<get_key_changes::v3::IncomingRequest>,
+) -> Result<get_key_changes::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let mut device_list_updates = HashSet::new();
+
+ device_list_updates.extend(
+ db.users
+ .keys_changed(
+ sender_user.as_str(),
+ body.from
+ .parse()
+ .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?,
+ Some(
+ body.to
+ .parse()
+ .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?,
+ ),
+ )
+ .filter_map(|r| r.ok()),
+ );
+
+ for room_id in db.rooms.rooms_joined(sender_user).filter_map(|r| r.ok()) {
+ device_list_updates.extend(
+ db.users
+ .keys_changed(
+ &room_id.to_string(),
+ body.from.parse().map_err(|_| {
+ Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`.")
+ })?,
+ Some(body.to.parse().map_err(|_| {
+ Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`.")
+ })?),
+ )
+ .filter_map(|r| r.ok()),
+ );
+ }
+ Ok(get_key_changes::v3::Response {
+ changed: device_list_updates.into_iter().collect(),
+ left: Vec::new(), // TODO
+ })
+}
+
+pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
+ sender_user: Option<&UserId>,
+ device_keys_input: &BTreeMap<Box<UserId>, Vec<Box<DeviceId>>>,
+ allowed_signatures: F,
+ db: &Database,
+) -> Result<get_keys::v3::Response> {
+ let mut master_keys = BTreeMap::new();
+ let mut self_signing_keys = BTreeMap::new();
+ let mut user_signing_keys = BTreeMap::new();
+ let mut device_keys = BTreeMap::new();
+
+ let mut get_over_federation = HashMap::new();
+
+ for (user_id, device_ids) in device_keys_input {
+ let user_id: &UserId = &**user_id;
+
+ if user_id.server_name() != db.globals.server_name() {
+ get_over_federation
+ .entry(user_id.server_name())
+ .or_insert_with(Vec::new)
+ .push((user_id, device_ids));
+ continue;
+ }
+
+ if device_ids.is_empty() {
+ let mut container = BTreeMap::new();
+ for device_id in db.users.all_device_ids(user_id) {
+ let device_id = device_id?;
+ if let Some(mut keys) = db.users.get_device_keys(user_id, &device_id)? {
+ let metadata = db
+ .users
+ .get_device_metadata(user_id, &device_id)?
+ .ok_or_else(|| {
+ Error::bad_database("all_device_keys contained nonexistent device.")
+ })?;
+
+ add_unsigned_device_display_name(&mut keys, metadata)
+ .map_err(|_| Error::bad_database("invalid device keys in database"))?;
+ container.insert(device_id, keys);
+ }
+ }
+ device_keys.insert(user_id.to_owned(), container);
+ } else {
+ let mut container = BTreeMap::new();
+ for device_id in device_ids {
+ if let Some(mut keys) = db.users.get_device_keys(user_id, device_id)? {
+ let metadata = db.users.get_device_metadata(user_id, device_id)?.ok_or(
+ Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Tried to get keys for nonexistent device.",
+ ),
+ )?;
+
+ add_unsigned_device_display_name(&mut keys, metadata)
+ .map_err(|_| Error::bad_database("invalid device keys in database"))?;
+ container.insert(device_id.to_owned(), keys);
+ }
+ }
+ device_keys.insert(user_id.to_owned(), container);
+ }
+
+ if let Some(master_key) = db.users.get_master_key(user_id, &allowed_signatures)? {
+ master_keys.insert(user_id.to_owned(), master_key);
+ }
+ if let Some(self_signing_key) = db
+ .users
+ .get_self_signing_key(user_id, &allowed_signatures)?
+ {
+ self_signing_keys.insert(user_id.to_owned(), self_signing_key);
+ }
+ if Some(user_id) == sender_user {
+ if let Some(user_signing_key) = db.users.get_user_signing_key(user_id)? {
+ user_signing_keys.insert(user_id.to_owned(), user_signing_key);
+ }
+ }
+ }
+
+ let mut failures = BTreeMap::new();
+
+ let mut futures: FuturesUnordered<_> = get_over_federation
+ .into_iter()
+ .map(|(server, vec)| async move {
+ let mut device_keys_input_fed = BTreeMap::new();
+ for (user_id, keys) in vec {
+ device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
+ }
+ (
+ server,
+ db.sending
+ .send_federation_request(
+ &db.globals,
+ server,
+ federation::keys::get_keys::v1::Request {
+ device_keys: device_keys_input_fed,
+ },
+ )
+ .await,
+ )
+ })
+ .collect();
+
+ while let Some((server, response)) = futures.next().await {
+ match response {
+ Ok(response) => {
+ master_keys.extend(response.master_keys);
+ self_signing_keys.extend(response.self_signing_keys);
+ device_keys.extend(response.device_keys);
+ }
+ Err(_e) => {
+ failures.insert(server.to_string(), json!({}));
+ }
+ }
+ }
+
+ Ok(get_keys::v3::Response {
+ master_keys,
+ self_signing_keys,
+ user_signing_keys,
+ device_keys,
+ failures,
+ })
+}
+
+fn add_unsigned_device_display_name(
+ keys: &mut Raw<ruma::encryption::DeviceKeys>,
+ metadata: ruma::api::client::device::Device,
+) -> serde_json::Result<()> {
+ if let Some(display_name) = metadata.display_name {
+ let mut object = keys.deserialize_as::<serde_json::Map<String, serde_json::Value>>()?;
+
+ let unsigned = object.entry("unsigned").or_insert_with(|| json!({}));
+ if let serde_json::Value::Object(unsigned_object) = unsigned {
+ unsigned_object.insert("device_display_name".to_owned(), display_name.into());
+ }
+
+ *keys = Raw::from_json(serde_json::value::to_raw_value(&object)?);
+ }
+
+ Ok(())
+}
+
+pub(crate) async fn claim_keys_helper(
+ one_time_keys_input: &BTreeMap<Box<UserId>, BTreeMap<Box<DeviceId>, DeviceKeyAlgorithm>>,
+ db: &Database,
+) -> Result<claim_keys::v3::Response> {
+ let mut one_time_keys = BTreeMap::new();
+
+ let mut get_over_federation = BTreeMap::new();
+
+ for (user_id, map) in one_time_keys_input {
+ if user_id.server_name() != db.globals.server_name() {
+ get_over_federation
+ .entry(user_id.server_name())
+ .or_insert_with(Vec::new)
+ .push((user_id, map));
+ }
+
+ let mut container = BTreeMap::new();
+ for (device_id, key_algorithm) in map {
+ if let Some(one_time_keys) =
+ db.users
+ .take_one_time_key(user_id, device_id, key_algorithm, &db.globals)?
+ {
+ let mut c = BTreeMap::new();
+ c.insert(one_time_keys.0, one_time_keys.1);
+ container.insert(device_id.clone(), c);
+ }
+ }
+ one_time_keys.insert(user_id.clone(), container);
+ }
+
+ let mut failures = BTreeMap::new();
+
+ for (server, vec) in get_over_federation {
+ let mut one_time_keys_input_fed = BTreeMap::new();
+ for (user_id, keys) in vec {
+ one_time_keys_input_fed.insert(user_id.clone(), keys.clone());
+ }
+ // Ignore failures
+ if let Ok(keys) = db
+ .sending
+ .send_federation_request(
+ &db.globals,
+ server,
+ federation::keys::claim_keys::v1::Request {
+ one_time_keys: one_time_keys_input_fed,
+ },
+ )
+ .await
+ {
+ one_time_keys.extend(keys.one_time_keys);
+ } else {
+ failures.insert(server.to_string(), json!({}));
+ }
+ }
+
+ Ok(claim_keys::v3::Response {
+ failures,
+ one_time_keys,
+ })
+}
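
get_keys_helper and claim_keys_helper above share one pattern: requested users are partitioned by server name, local users are answered straight from the database, and remote users are batched into one federation request per server, with unreachable servers recorded in the failures map. A minimal sketch of that partitioning step, using plain strings where the real code uses ruma's UserId and ServerName types:

    use std::collections::BTreeMap;

    // Partition "@user:server" ids into local users and per-server remote batches.
    fn split_by_server<'a>(
        users: &[&'a str],
        own_server: &str,
    ) -> (Vec<&'a str>, BTreeMap<&'a str, Vec<&'a str>>) {
        let mut local = Vec::new();
        let mut remote: BTreeMap<&str, Vec<&str>> = BTreeMap::new();
        for user in users {
            // Everything after the first ':' is the server name (ports included).
            let server = user.split_once(':').map(|(_, s)| s).unwrap_or("");
            if server == own_server {
                local.push(*user); // answered from our own database
            } else {
                remote.entry(server).or_default().push(*user); // one request per server
            }
        }
        (local, remote)
    }

    fn main() {
        let (local, remote) = split_by_server(&["@a:here", "@b:there", "@c:there"], "here");
        assert_eq!(local, vec!["@a:here"]);
        assert_eq!(remote["there"], vec!["@b:there", "@c:there"]); // one batched call
    }
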
diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs
new file mode 100644
index 0000000..a9a6d6c
--- /dev/null
+++ b/src/api/client_server/media.rs
@@ -0,0 +1,225 @@
+use crate::{
+ database::{media::FileMeta, DatabaseGuard},
+ utils, Error, Result, Ruma,
+};
+use ruma::api::client::{
+ error::ErrorKind,
+ media::{
+ create_content, get_content, get_content_as_filename, get_content_thumbnail,
+ get_media_config,
+ },
+};
+
+const MXC_LENGTH: usize = 32;
+
+/// # `GET /_matrix/media/r0/config`
+///
+/// Returns max upload size.
+pub async fn get_media_config_route(
+ db: DatabaseGuard,
+ _body: Ruma<get_media_config::v3::Request>,
+) -> Result<get_media_config::v3::Response> {
+ Ok(get_media_config::v3::Response {
+ upload_size: db.globals.max_request_size().into(),
+ })
+}
+
+/// # `POST /_matrix/media/r0/upload`
+///
+/// Permanently save media on the server.
+///
+/// - Some metadata will be saved in the database
+/// - Media will be saved in the media/ directory
+pub async fn create_content_route(
+ db: DatabaseGuard,
+ body: Ruma<create_content::v3::IncomingRequest>,
+) -> Result<create_content::v3::Response> {
+ let mxc = format!(
+ "mxc://{}/{}",
+ db.globals.server_name(),
+ utils::random_string(MXC_LENGTH)
+ );
+
+ db.media
+ .create(
+ mxc.clone(),
+ &db.globals,
+ &body
+ .filename
+ .as_ref()
+ .map(|filename| "inline; filename=".to_owned() + filename)
+ .as_deref(),
+ &body.content_type.as_deref(),
+ &body.file,
+ )
+ .await?;
+
+ db.flush()?;
+
+ Ok(create_content::v3::Response {
+ content_uri: mxc.try_into().expect("Invalid mxc:// URI"),
+ blurhash: None,
+ })
+}
+
+pub async fn get_remote_content(
+ db: &DatabaseGuard,
+ mxc: &str,
+ server_name: &ruma::ServerName,
+ media_id: &str,
+) -> Result<get_content::v3::Response, Error> {
+ let content_response = db
+ .sending
+ .send_federation_request(
+ &db.globals,
+ server_name,
+ get_content::v3::Request {
+ allow_remote: false,
+ server_name,
+ media_id,
+ },
+ )
+ .await?;
+
+ db.media
+ .create(
+ mxc.to_string(),
+ &db.globals,
+ &content_response.content_disposition.as_deref(),
+ &content_response.content_type.as_deref(),
+ &content_response.file,
+ )
+ .await?;
+
+ Ok(content_response)
+}
+
+/// # `GET /_matrix/media/r0/download/{serverName}/{mediaId}`
+///
+/// Load media from our server or over federation.
+///
+/// - Only allows federation if `allow_remote` is true
+pub async fn get_content_route(
+ db: DatabaseGuard,
+ body: Ruma<get_content::v3::IncomingRequest>,
+) -> Result<get_content::v3::Response> {
+ let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
+
+ if let Some(FileMeta {
+ content_disposition,
+ content_type,
+ file,
+ }) = db.media.get(&db.globals, &mxc).await?
+ {
+ Ok(get_content::v3::Response {
+ file,
+ content_type,
+ content_disposition,
+ })
+ } else if &*body.server_name != db.globals.server_name() && body.allow_remote {
+ let remote_content_response =
+ get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?;
+ Ok(remote_content_response)
+ } else {
+ Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
+ }
+}
+
+/// # `GET /_matrix/media/r0/download/{serverName}/{mediaId}/{fileName}`
+///
+/// Load media from our server or over federation, permitting a custom filename.
+///
+/// - Only allows federation if `allow_remote` is true
+pub async fn get_content_as_filename_route(
+ db: DatabaseGuard,
+ body: Ruma<get_content_as_filename::v3::IncomingRequest>,
+) -> Result<get_content_as_filename::v3::Response> {
+ let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
+
+ if let Some(FileMeta {
+ content_disposition: _,
+ content_type,
+ file,
+ }) = db.media.get(&db.globals, &mxc).await?
+ {
+ Ok(get_content_as_filename::v3::Response {
+ file,
+ content_type,
+ content_disposition: Some(format!("inline; filename={}", body.filename)),
+ })
+ } else if &*body.server_name != db.globals.server_name() && body.allow_remote {
+ let remote_content_response =
+ get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?;
+
+ Ok(get_content_as_filename::v3::Response {
+ content_disposition: Some(format!("inline: filename={}", body.filename)),
+ content_type: remote_content_response.content_type,
+ file: remote_content_response.file,
+ })
+ } else {
+ Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
+ }
+}
+
+/// # `GET /_matrix/media/r0/thumbnail/{serverName}/{mediaId}`
+///
+/// Load media thumbnail from our server or over federation.
+///
+/// - Only allows federation if `allow_remote` is true
+pub async fn get_content_thumbnail_route(
+ db: DatabaseGuard,
+ body: Ruma<get_content_thumbnail::v3::IncomingRequest>,
+) -> Result<get_content_thumbnail::v3::Response> {
+ let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
+
+ if let Some(FileMeta {
+ content_type, file, ..
+ }) = db
+ .media
+ .get_thumbnail(
+ &mxc,
+ &db.globals,
+ body.width
+ .try_into()
+ .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
+ body.height
+ .try_into()
+ .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
+ )
+ .await?
+ {
+ Ok(get_content_thumbnail::v3::Response { file, content_type })
+ } else if &*body.server_name != db.globals.server_name() && body.allow_remote {
+ let get_thumbnail_response = db
+ .sending
+ .send_federation_request(
+ &db.globals,
+ &body.server_name,
+ get_content_thumbnail::v3::Request {
+ allow_remote: false,
+ height: body.height,
+ width: body.width,
+ method: body.method.clone(),
+ server_name: &body.server_name,
+ media_id: &body.media_id,
+ },
+ )
+ .await?;
+
+ db.media
+ .upload_thumbnail(
+ mxc,
+ &db.globals,
+ &None,
+ &get_thumbnail_response.content_type,
+ body.width.try_into().expect("width was already validated above"),
+ body.height.try_into().expect("height was already validated above"),
+ &get_thumbnail_response.file,
+ )
+ .await?;
+
+ Ok(get_thumbnail_response)
+ } else {
+ Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
+ }
+}
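
Every media route above keys its storage on the full mxc URI that create_content_route mints: mxc://{server_name}/{media_id}, with MXC_LENGTH random characters as the media id. A minimal sketch of building and splitting such a URI with plain string handling, standing in for the ruma types the real routes use:

    // Illustrative helpers; not the actual storage key handling.
    fn make_mxc(server_name: &str, media_id: &str) -> String {
        format!("mxc://{}/{}", server_name, media_id)
    }

    fn split_mxc(mxc: &str) -> Option<(&str, &str)> {
        // "mxc://server/media_id" -> ("server", "media_id")
        mxc.strip_prefix("mxc://")?.split_once('/')
    }

    fn main() {
        let mxc = make_mxc("example.org", "abcdefabcdefabcdefabcdefabcdefab");
        assert_eq!(split_mxc(&mxc), Some(("example.org", "abcdefabcdefabcdefabcdefabcdefab")));
        assert_eq!(split_mxc("https://nope"), None);
    }
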
diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs
new file mode 100644
index 0000000..4dda11a
--- /dev/null
+++ b/src/api/client_server/membership.rs
@@ -0,0 +1,1288 @@
+use crate::{
+ client_server,
+ database::DatabaseGuard,
+ pdu::{EventHash, PduBuilder, PduEvent},
+ server_server, utils, Database, Error, Result, Ruma,
+};
+use ruma::{
+ api::{
+ client::{
+ error::ErrorKind,
+ membership::{
+ ban_user, forget_room, get_member_events, invite_user, join_room_by_id,
+ join_room_by_id_or_alias, joined_members, joined_rooms, kick_user, leave_room,
+ unban_user, IncomingThirdPartySigned,
+ },
+ },
+ federation::{self, membership::create_invite},
+ },
+ events::{
+ room::{
+ create::RoomCreateEventContent,
+ member::{MembershipState, RoomMemberEventContent},
+ },
+ RoomEventType, StateEventType,
+ },
+ serde::{to_canonical_value, Base64, CanonicalJsonObject, CanonicalJsonValue},
+ state_res::{self, RoomVersion},
+ uint, EventId, RoomId, RoomVersionId, ServerName, UserId,
+};
+use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
+use std::{
+ collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
+ iter,
+ sync::{Arc, RwLock},
+ time::{Duration, Instant},
+};
+use tracing::{debug, error, warn};
+
+/// # `POST /_matrix/client/r0/rooms/{roomId}/join`
+///
+/// Tries to join the sender user into a room.
+///
+/// - If the server knows about this room: creates the join event and checks the auth rules locally
+/// - If the server does not know about the room: asks other servers over federation
+pub async fn join_room_by_id_route(
+ db: DatabaseGuard,
+ body: Ruma<join_room_by_id::v3::IncomingRequest>,
+) -> Result<join_room_by_id::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let mut servers = Vec::new(); // There is no body.server_name for /roomId/join
+ servers.extend(
+ db.rooms
+ .invite_state(sender_user, &body.room_id)?
+ .unwrap_or_default()
+ .iter()
+ .filter_map(|event| serde_json::from_str(event.json().get()).ok())
+ .filter_map(|event: serde_json::Value| event.get("sender").cloned())
+ .filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
+ .filter_map(|sender| UserId::parse(sender).ok())
+ .map(|user| user.server_name().to_owned()),
+ );
+
+ servers.push(body.room_id.server_name().to_owned());
+
+ let ret = join_room_by_id_helper(
+ &db,
+ body.sender_user.as_deref(),
+ &body.room_id,
+ &servers,
+ body.third_party_signed.as_ref(),
+ )
+ .await;
+
+ db.flush()?;
+
+ ret
+}
+
+/// # `POST /_matrix/client/r0/join/{roomIdOrAlias}`
+///
+/// Tries to join the sender user into a room.
+///
+/// - If the server knows about this room: creates the join event and checks the auth rules locally
+/// - If the server does not know about the room: asks other servers over federation
+pub async fn join_room_by_id_or_alias_route(
+ db: DatabaseGuard,
+ body: Ruma<join_room_by_id_or_alias::v3::IncomingRequest>,
+) -> Result<join_room_by_id_or_alias::v3::Response> {
+ let sender_user = body.sender_user.as_deref().expect("user is authenticated");
+ let body = body.body;
+
+ let (servers, room_id) = match Box::<RoomId>::try_from(body.room_id_or_alias) {
+ Ok(room_id) => {
+ let mut servers = body.server_name.clone();
+ servers.extend(
+ db.rooms
+ .invite_state(sender_user, &room_id)?
+ .unwrap_or_default()
+ .iter()
+ .filter_map(|event| serde_json::from_str(event.json().get()).ok())
+ .filter_map(|event: serde_json::Value| event.get("sender").cloned())
+ .filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
+ .filter_map(|sender| UserId::parse(sender).ok())
+ .map(|user| user.server_name().to_owned()),
+ );
+
+ servers.push(room_id.server_name().to_owned());
+ (servers, room_id)
+ }
+ Err(room_alias) => {
+ let response = client_server::get_alias_helper(&db, &room_alias).await?;
+
+ (response.servers.into_iter().collect(), response.room_id)
+ }
+ };
+
+ let join_room_response = join_room_by_id_helper(
+ &db,
+ Some(sender_user),
+ &room_id,
+ &servers,
+ body.third_party_signed.as_ref(),
+ )
+ .await?;
+
+ db.flush()?;
+
+ Ok(join_room_by_id_or_alias::v3::Response {
+ room_id: join_room_response.room_id,
+ })
+}
+
+/// # `POST /_matrix/client/r0/rooms/{roomId}/leave`
+///
+/// Tries to leave the sender user from a room.
+///
+/// - This should always work if the user is currently joined.
+pub async fn leave_room_route(
+ db: DatabaseGuard,
+ body: Ruma<leave_room::v3::IncomingRequest>,
+) -> Result<leave_room::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ db.rooms.leave_room(sender_user, &body.room_id, &db).await?;
+
+ db.flush()?;
+
+ Ok(leave_room::v3::Response::new())
+}
+
+/// # `POST /_matrix/client/r0/rooms/{roomId}/invite`
+///
+/// Tries to send an invite event into the room.
+pub async fn invite_user_route(
+ db: DatabaseGuard,
+ body: Ruma<invite_user::v3::IncomingRequest>,
+) -> Result<invite_user::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ if let invite_user::v3::IncomingInvitationRecipient::UserId { user_id } = &body.recipient {
+ invite_helper(sender_user, user_id, &body.room_id, &db, false).await?;
+ db.flush()?;
+ Ok(invite_user::v3::Response {})
+ } else {
+ Err(Error::BadRequest(ErrorKind::NotFound, "User not found."))
+ }
+}
+
+/// # `POST /_matrix/client/r0/rooms/{roomId}/kick`
+///
+/// Tries to send a kick event into the room.
+pub async fn kick_user_route(
+ db: DatabaseGuard,
+ body: Ruma<kick_user::v3::IncomingRequest>,
+) -> Result<kick_user::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let mut event: RoomMemberEventContent = serde_json::from_str(
+ db.rooms
+ .room_state_get(
+ &body.room_id,
+ &StateEventType::RoomMember,
+ &body.user_id.to_string(),
+ )?
+ .ok_or(Error::BadRequest(
+ ErrorKind::BadState,
+ "Cannot kick member that's not in the room.",
+ ))?
+ .content
+ .get(),
+ )
+ .map_err(|_| Error::bad_database("Invalid member event in database."))?;
+
+ event.membership = MembershipState::Leave;
+ // TODO: reason
+
+ let mutex_state = Arc::clone(
+ db.globals
+ .roomid_mutex_state
+ .write()
+ .unwrap()
+ .entry(body.room_id.clone())
+ .or_default(),
+ );
+ let state_lock = mutex_state.lock().await;
+
+ db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: RoomEventType::RoomMember,
+ content: to_raw_value(&event).expect("event is valid, we just created it"),
+ unsigned: None,
+ state_key: Some(body.user_id.to_string()),
+ redacts: None,
+ },
+ sender_user,
+ &body.room_id,
+ &db,
+ &state_lock,
+ )?;
+
+ drop(state_lock);
+
+ db.flush()?;
+
+ Ok(kick_user::v3::Response::new())
+}
+
+/// # `POST /_matrix/client/r0/rooms/{roomId}/ban`
+///
+/// Tries to send a ban event into the room.
+pub async fn ban_user_route(
+ db: DatabaseGuard,
+ body: Ruma<ban_user::v3::IncomingRequest>,
+) -> Result<ban_user::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ // TODO: reason
+
+ let event = db
+ .rooms
+ .room_state_get(
+ &body.room_id,
+ &StateEventType::RoomMember,
+ &body.user_id.to_string(),
+ )?
+ .map_or(
+ Ok(RoomMemberEventContent {
+ membership: MembershipState::Ban,
+ displayname: db.users.displayname(&body.user_id)?,
+ avatar_url: db.users.avatar_url(&body.user_id)?,
+ is_direct: None,
+ third_party_invite: None,
+ blurhash: db.users.blurhash(&body.user_id)?,
+ reason: None,
+ join_authorized_via_users_server: None,
+ }),
+ |event| {
+ serde_json::from_str(event.content.get())
+ .map(|event: RoomMemberEventContent| RoomMemberEventContent {
+ membership: MembershipState::Ban,
+ ..event
+ })
+ .map_err(|_| Error::bad_database("Invalid member event in database."))
+ },
+ )?;
+
+ let mutex_state = Arc::clone(
+ db.globals
+ .roomid_mutex_state
+ .write()
+ .unwrap()
+ .entry(body.room_id.clone())
+ .or_default(),
+ );
+ let state_lock = mutex_state.lock().await;
+
+ db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: RoomEventType::RoomMember,
+ content: to_raw_value(&event).expect("event is valid, we just created it"),
+ unsigned: None,
+ state_key: Some(body.user_id.to_string()),
+ redacts: None,
+ },
+ sender_user,
+ &body.room_id,
+ &db,
+ &state_lock,
+ )?;
+
+ drop(state_lock);
+
+ db.flush()?;
+
+ Ok(ban_user::v3::Response::new())
+}
+
+/// # `POST /_matrix/client/r0/rooms/{roomId}/unban`
+///
+/// Tries to send an unban event into the room.
+pub async fn unban_user_route(
+ db: DatabaseGuard,
+ body: Ruma<unban_user::v3::IncomingRequest>,
+) -> Result<unban_user::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let mut event: RoomMemberEventContent = serde_json::from_str(
+ db.rooms
+ .room_state_get(
+ &body.room_id,
+ &StateEventType::RoomMember,
+ &body.user_id.to_string(),
+ )?
+ .ok_or(Error::BadRequest(
+ ErrorKind::BadState,
+ "Cannot unban a user who is not banned.",
+ ))?
+ .content
+ .get(),
+ )
+ .map_err(|_| Error::bad_database("Invalid member event in database."))?;
+
+ event.membership = MembershipState::Leave;
+
+ let mutex_state = Arc::clone(
+ db.globals
+ .roomid_mutex_state
+ .write()
+ .unwrap()
+ .entry(body.room_id.clone())
+ .or_default(),
+ );
+ let state_lock = mutex_state.lock().await;
+
+ db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: RoomEventType::RoomMember,
+ content: to_raw_value(&event).expect("event is valid, we just created it"),
+ unsigned: None,
+ state_key: Some(body.user_id.to_string()),
+ redacts: None,
+ },
+ sender_user,
+ &body.room_id,
+ &db,
+ &state_lock,
+ )?;
+
+ drop(state_lock);
+
+ db.flush()?;
+
+ Ok(unban_user::v3::Response::new())
+}
+
+/// # `POST /_matrix/client/r0/rooms/{roomId}/forget`
+///
+/// Forgets about a room.
+///
+/// - If the sender user has left the room: stops the sender user from receiving information about the room
+///
+/// Note: Other devices of the user have no way of knowing the room was forgotten, so this has to
+/// be called from every device
+pub async fn forget_room_route(
+ db: DatabaseGuard,
+ body: Ruma<forget_room::v3::IncomingRequest>,
+) -> Result<forget_room::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ db.rooms.forget(&body.room_id, sender_user)?;
+
+ db.flush()?;
+
+ Ok(forget_room::v3::Response::new())
+}
+
+/// # `GET /_matrix/client/r0/joined_rooms`
+///
+/// Lists all rooms the user has joined.
+pub async fn joined_rooms_route(
+ db: DatabaseGuard,
+ body: Ruma<joined_rooms::v3::Request>,
+) -> Result<joined_rooms::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ Ok(joined_rooms::v3::Response {
+ joined_rooms: db
+ .rooms
+ .rooms_joined(sender_user)
+ .filter_map(|r| r.ok())
+ .collect(),
+ })
+}
+
+/// # `GET /_matrix/client/r0/rooms/{roomId}/members`
+///
+/// Lists all joined users in a room (TODO: at a specific point in time, with a specific membership).
+///
+/// - Only works if the user is currently joined
+pub async fn get_member_events_route(
+ db: DatabaseGuard,
+ body: Ruma<get_member_events::v3::IncomingRequest>,
+) -> Result<get_member_events::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ // TODO: check history visibility?
+ if !db.rooms.is_joined(sender_user, &body.room_id)? {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "You don't have permission to view this room.",
+ ));
+ }
+
+ Ok(get_member_events::v3::Response {
+ chunk: db
+ .rooms
+ .room_state_full(&body.room_id)
+ .await?
+ .iter()
+ .filter(|(key, _)| key.0 == StateEventType::RoomMember)
+ .map(|(_, pdu)| pdu.to_member_event().into())
+ .collect(),
+ })
+}
+
+/// # `GET /_matrix/client/r0/rooms/{roomId}/joined_members`
+///
+/// Lists all members of a room.
+///
+/// - The sender user must be in the room
+/// - TODO: An appservice just needs a puppet joined
+pub async fn joined_members_route(
+ db: DatabaseGuard,
+ body: Ruma<joined_members::v3::IncomingRequest>,
+) -> Result<joined_members::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ if !db.rooms.is_joined(sender_user, &body.room_id)? {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "You aren't a member of the room.",
+ ));
+ }
+
+ let mut joined = BTreeMap::new();
+ for user_id in db.rooms.room_members(&body.room_id).filter_map(|r| r.ok()) {
+ let display_name = db.users.displayname(&user_id)?;
+ let avatar_url = db.users.avatar_url(&user_id)?;
+
+ joined.insert(
+ user_id,
+ joined_members::v3::RoomMember {
+ display_name,
+ avatar_url,
+ },
+ );
+ }
+
+ Ok(joined_members::v3::Response { joined })
+}
+
+#[tracing::instrument(skip(db))]
+async fn join_room_by_id_helper(
+ db: &Database,
+ sender_user: Option<&UserId>,
+ room_id: &RoomId,
+ servers: &[Box<ServerName>],
+ _third_party_signed: Option<&IncomingThirdPartySigned>,
+) -> Result<join_room_by_id::v3::Response> {
+ let sender_user = sender_user.expect("user is authenticated");
+
+ let mutex_state = Arc::clone(
+ db.globals
+ .roomid_mutex_state
+ .write()
+ .unwrap()
+ .entry(room_id.to_owned())
+ .or_default(),
+ );
+ let state_lock = mutex_state.lock().await;
+
+ // Ask a remote server if we don't have this room
+ if !db.rooms.exists(room_id)? {
+ let mut make_join_response_and_server = Err(Error::BadServerResponse(
+ "No server available to assist in joining.",
+ ));
+
+ for remote_server in servers {
+ let make_join_response = db
+ .sending
+ .send_federation_request(
+ &db.globals,
+ remote_server,
+ federation::membership::prepare_join_event::v1::Request {
+ room_id,
+ user_id: sender_user,
+ ver: &db.globals.supported_room_versions(),
+ },
+ )
+ .await;
+
+ make_join_response_and_server = make_join_response.map(|r| (r, remote_server));
+
+ if make_join_response_and_server.is_ok() {
+ break;
+ }
+ }
+
+ let (make_join_response, remote_server) = make_join_response_and_server?;
+
+ let room_version = match make_join_response.room_version {
+ Some(room_version) if db.rooms.is_supported_version(&db, &room_version) => room_version,
+ _ => return Err(Error::BadServerResponse("Room version is not supported")),
+ };
+
+ let mut join_event_stub: CanonicalJsonObject =
+ serde_json::from_str(make_join_response.event.get()).map_err(|_| {
+ Error::BadServerResponse("Invalid make_join event json received from server.")
+ })?;
+
+ // TODO: Is origin needed?
+ join_event_stub.insert(
+ "origin".to_owned(),
+ CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()),
+ );
+ join_event_stub.insert(
+ "origin_server_ts".to_owned(),
+ CanonicalJsonValue::Integer(
+ utils::millis_since_unix_epoch()
+ .try_into()
+ .expect("Timestamp is valid js_int value"),
+ ),
+ );
+ join_event_stub.insert(
+ "content".to_owned(),
+ to_canonical_value(RoomMemberEventContent {
+ membership: MembershipState::Join,
+ displayname: db.users.displayname(sender_user)?,
+ avatar_url: db.users.avatar_url(sender_user)?,
+ is_direct: None,
+ third_party_invite: None,
+ blurhash: db.users.blurhash(sender_user)?,
+ reason: None,
+ join_authorized_via_users_server: None,
+ })
+ .expect("event is valid, we just created it"),
+ );
+
+ // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms
+ join_event_stub.remove("event_id");
+
+ // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present
+ ruma::signatures::hash_and_sign_event(
+ db.globals.server_name().as_str(),
+ db.globals.keypair(),
+ &mut join_event_stub,
+ &room_version,
+ )
+ .expect("event is valid, we just created it");
+
+ // Generate event id
+ let event_id = format!(
+ "${}",
+ ruma::signatures::reference_hash(&join_event_stub, &room_version)
+ .expect("ruma can calculate reference hashes")
+ );
+ let event_id = <&EventId>::try_from(event_id.as_str())
+ .expect("ruma's reference hashes are valid event ids");
+
+ // Add event_id back
+ join_event_stub.insert(
+ "event_id".to_owned(),
+ CanonicalJsonValue::String(event_id.as_str().to_owned()),
+ );
+
+ // It has enough fields to be called a proper event now
+ let join_event = join_event_stub;
+
+ let send_join_response = db
+ .sending
+ .send_federation_request(
+ &db.globals,
+ remote_server,
+ federation::membership::create_join_event::v2::Request {
+ room_id,
+ event_id,
+ pdu: &PduEvent::convert_to_outgoing_federation_event(join_event.clone()),
+ },
+ )
+ .await?;
+
+ db.rooms.get_or_create_shortroomid(room_id, &db.globals)?;
+
+ let parsed_pdu = PduEvent::from_id_val(event_id, join_event.clone())
+ .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?;
+
+ let mut state = HashMap::new();
+ let pub_key_map = RwLock::new(BTreeMap::new());
+
+ server_server::fetch_join_signing_keys(
+ &send_join_response,
+ &room_version,
+ &pub_key_map,
+ db,
+ )
+ .await?;
+
+ for result in send_join_response
+ .room_state
+ .state
+ .iter()
+ .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map, db))
+ {
+ let (event_id, value) = match result {
+ Ok(t) => t,
+ Err(_) => continue,
+ };
+
+ let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| {
+ warn!("{:?}: {}", value, e);
+ Error::BadServerResponse("Invalid PDU in send_join response.")
+ })?;
+
+ db.rooms.add_pdu_outlier(&event_id, &value)?;
+ if let Some(state_key) = &pdu.state_key {
+ let shortstatekey = db.rooms.get_or_create_shortstatekey(
+ &pdu.kind.to_string().into(),
+ state_key,
+ &db.globals,
+ )?;
+ state.insert(shortstatekey, pdu.event_id.clone());
+ }
+ }
+
+ let incoming_shortstatekey = db.rooms.get_or_create_shortstatekey(
+ &parsed_pdu.kind.to_string().into(),
+ parsed_pdu
+ .state_key
+ .as_ref()
+ .expect("Pdu is a membership state event"),
+ &db.globals,
+ )?;
+
+ state.insert(incoming_shortstatekey, parsed_pdu.event_id.clone());
+
+ let create_shortstatekey = db
+ .rooms
+ .get_shortstatekey(&StateEventType::RoomCreate, "")?
+ .expect("Room exists");
+
+ if state.get(&create_shortstatekey).is_none() {
+ return Err(Error::BadServerResponse("State contained no create event."));
+ }
+
+ db.rooms.force_state(
+ room_id,
+ state
+ .into_iter()
+ .map(|(k, id)| db.rooms.compress_state_event(k, &id, &db.globals))
+ .collect::<Result<_>>()?,
+ db,
+ )?;
+
+ for result in send_join_response
+ .room_state
+ .auth_chain
+ .iter()
+ .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map, db))
+ {
+ let (event_id, value) = match result {
+ Ok(t) => t,
+ Err(_) => continue,
+ };
+
+ db.rooms.add_pdu_outlier(&event_id, &value)?;
+ }
+
+ // We append to state before appending the pdu, so we never have a moment in time where the
+ // pdu exists without its state. This is okay because append_pdu can't fail.
+ let statehashid = db.rooms.append_to_state(&parsed_pdu, &db.globals)?;
+
+ db.rooms.append_pdu(
+ &parsed_pdu,
+ join_event,
+ iter::once(&*parsed_pdu.event_id),
+ db,
+ )?;
+
+ // We set the room state after inserting the pdu, so that we never have a moment in time
+ // where events in the current room state do not exist
+ db.rooms.set_room_state(room_id, statehashid)?;
+ } else {
+ let event = RoomMemberEventContent {
+ membership: MembershipState::Join,
+ displayname: db.users.displayname(sender_user)?,
+ avatar_url: db.users.avatar_url(sender_user)?,
+ is_direct: None,
+ third_party_invite: None,
+ blurhash: db.users.blurhash(sender_user)?,
+ reason: None,
+ join_authorized_via_users_server: None,
+ };
+
+ db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: RoomEventType::RoomMember,
+ content: to_raw_value(&event).expect("event is valid, we just created it"),
+ unsigned: None,
+ state_key: Some(sender_user.to_string()),
+ redacts: None,
+ },
+ sender_user,
+ room_id,
+ db,
+ &state_lock,
+ )?;
+ }
+
+ drop(state_lock);
+
+ db.flush()?;
+
+ Ok(join_room_by_id::v3::Response::new(room_id.to_owned()))
+}
+
+fn validate_and_add_event_id(
+ pdu: &RawJsonValue,
+ room_version: &RoomVersionId,
+ pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
+ db: &Database,
+) -> Result<(Box<EventId>, CanonicalJsonObject)> {
+ let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
+ error!("Invalid PDU in server response: {:?}: {:?}", pdu, e);
+ Error::BadServerResponse("Invalid PDU in server response")
+ })?;
+ let event_id = EventId::parse(format!(
+ "${}",
+ ruma::signatures::reference_hash(&value, room_version)
+ .expect("ruma can calculate reference hashes")
+ ))
+ .expect("ruma's reference hashes are valid event ids");
+
+ let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) {
+ Entry::Vacant(e) => {
+ e.insert((Instant::now(), 1));
+ }
+ Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
+ };
+
+ if let Some((time, tries)) = db
+ .globals
+ .bad_event_ratelimiter
+ .read()
+ .unwrap()
+ .get(&event_id)
+ {
+ // Exponential backoff
+ let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
+ if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
+ min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
+ }
+
+ if time.elapsed() < min_elapsed_duration {
+ debug!("Backing off from {}", event_id);
+ return Err(Error::BadServerResponse("bad event, still backing off"));
+ }
+ }
+
+ if let Err(e) = ruma::signatures::verify_event(
+ &*pub_key_map
+ .read()
+ .map_err(|_| Error::bad_database("RwLock is poisoned."))?,
+ &value,
+ room_version,
+ ) {
+ warn!("Event {} failed verification {:?} {}", event_id, pdu, e);
+ back_off(event_id);
+ return Err(Error::BadServerResponse("Event failed verification."));
+ }
+
+ value.insert(
+ "event_id".to_owned(),
+ CanonicalJsonValue::String(event_id.as_str().to_owned()),
+ );
+
+ Ok((event_id, value))
+}
+
+pub(crate) async fn invite_helper<'a>(
+ sender_user: &UserId,
+ user_id: &UserId,
+ room_id: &RoomId,
+ db: &Database,
+ is_direct: bool,
+) -> Result<()> {
+ if user_id.server_name() != db.globals.server_name() {
+ let (room_version_id, pdu_json, invite_room_state) = {
+ let mutex_state = Arc::clone(
+ db.globals
+ .roomid_mutex_state
+ .write()
+ .unwrap()
+ .entry(room_id.to_owned())
+ .or_default(),
+ );
+ let state_lock = mutex_state.lock().await;
+
+ let prev_events: Vec<_> = db
+ .rooms
+ .get_pdu_leaves(room_id)?
+ .into_iter()
+ .take(20)
+ .collect();
+
+ let create_event = db
+ .rooms
+ .room_state_get(room_id, &StateEventType::RoomCreate, "")?;
+
+ let create_event_content: Option<RoomCreateEventContent> = create_event
+ .as_ref()
+ .map(|create_event| {
+ serde_json::from_str(create_event.content.get()).map_err(|e| {
+ warn!("Invalid create event: {}", e);
+ Error::bad_database("Invalid create event in db.")
+ })
+ })
+ .transpose()?;
+
+ // If there was no create event yet, assume we are creating a room with the default
+ // version right now
+ let room_version_id = create_event_content
+ .map_or(db.globals.default_room_version(), |create_event| {
+ create_event.room_version
+ });
+ let room_version =
+ RoomVersion::new(&room_version_id).expect("room version is supported");
+
+ let content = to_raw_value(&RoomMemberEventContent {
+ avatar_url: None,
+ displayname: None,
+ is_direct: Some(is_direct),
+ membership: MembershipState::Invite,
+ third_party_invite: None,
+ blurhash: None,
+ reason: None,
+ join_authorized_via_users_server: None,
+ })
+ .expect("member event is valid value");
+
+ let state_key = user_id.to_string();
+ let kind = StateEventType::RoomMember;
+
+ let auth_events = db.rooms.get_auth_events(
+ room_id,
+ &kind.to_string().into(),
+ sender_user,
+ Some(&state_key),
+ &content,
+ )?;
+
+ // Our depth is the maximum depth of prev_events + 1
+ let depth = prev_events
+ .iter()
+ .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth))
+ .max()
+ .unwrap_or_else(|| uint!(0))
+ + uint!(1);
+
+ let mut unsigned = BTreeMap::new();
+
+ if let Some(prev_pdu) = db.rooms.room_state_get(room_id, &kind, &state_key)? {
+ unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone());
+ unsigned.insert(
+ "prev_sender".to_owned(),
+ to_raw_value(&prev_pdu.sender).expect("UserId is valid"),
+ );
+ }
+
+ let pdu = PduEvent {
+ event_id: ruma::event_id!("$thiswillbefilledinlater").into(),
+ room_id: room_id.to_owned(),
+ sender: sender_user.to_owned(),
+ origin_server_ts: utils::millis_since_unix_epoch()
+ .try_into()
+ .expect("time is valid"),
+ kind: kind.to_string().into(),
+ content,
+ state_key: Some(state_key),
+ prev_events,
+ depth,
+ auth_events: auth_events
+ .iter()
+ .map(|(_, pdu)| pdu.event_id.clone())
+ .collect(),
+ redacts: None,
+ unsigned: if unsigned.is_empty() {
+ None
+ } else {
+ Some(to_raw_value(&unsigned).expect("to_raw_value always works"))
+ },
+ hashes: EventHash {
+ sha256: "aaa".to_owned(),
+ },
+ signatures: None,
+ };
+
+ let auth_check = state_res::auth_check(
+ &room_version,
+ &pdu,
+ None::<PduEvent>, // TODO: third_party_invite
+ |k, s| auth_events.get(&(k.clone(), s.to_owned())),
+ )
+ .map_err(|e| {
+ error!("{:?}", e);
+ Error::bad_database("Auth check failed.")
+ })?;
+
+ if !auth_check {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "Event is not authorized.",
+ ));
+ }
+
+ // Hash and sign
+ let mut pdu_json =
+ utils::to_canonical_object(&pdu).expect("event is valid, we just created it");
+
+ pdu_json.remove("event_id");
+
+ // Add origin because synapse likes that (and it's required in the spec)
+ pdu_json.insert(
+ "origin".to_owned(),
+ to_canonical_value(db.globals.server_name())
+ .expect("server name is a valid CanonicalJsonValue"),
+ );
+
+ ruma::signatures::hash_and_sign_event(
+ db.globals.server_name().as_str(),
+ db.globals.keypair(),
+ &mut pdu_json,
+ &room_version_id,
+ )
+ .expect("event is valid, we just created it");
+
+ let invite_room_state = db.rooms.calculate_invite_state(&pdu)?;
+
+ drop(state_lock);
+
+ (room_version_id, pdu_json, invite_room_state)
+ };
+
+ // Generate event id
+ let expected_event_id = format!(
+ "${}",
+ ruma::signatures::reference_hash(&pdu_json, &room_version_id)
+ .expect("ruma can calculate reference hashes")
+ );
+ let expected_event_id = <&EventId>::try_from(expected_event_id.as_str())
+ .expect("ruma's reference hashes are valid event ids");
+
+ let response = db
+ .sending
+ .send_federation_request(
+ &db.globals,
+ user_id.server_name(),
+ create_invite::v2::Request {
+ room_id,
+ event_id: expected_event_id,
+ room_version: &room_version_id,
+ event: &PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()),
+ invite_room_state: &invite_room_state,
+ },
+ )
+ .await?;
+
+ let pub_key_map = RwLock::new(BTreeMap::new());
+
+ // We do not add the event_id field to the pdu here because of signature and hashes checks
+ let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(&response.event, &db)
+ {
+ Ok(t) => t,
+ Err(_) => {
+ // Event could not be converted to canonical json
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Could not convert event to canonical json.",
+ ));
+ }
+ };
+
+ if expected_event_id != event_id {
+ warn!("Server {} changed invite event, that's not allowed in the spec: ours: {:?}, theirs: {:?}", user_id.server_name(), pdu_json, value);
+ }
+
+ let origin: Box<ServerName> = serde_json::from_value(
+ serde_json::to_value(value.get("origin").ok_or(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Event needs an origin field.",
+ ))?)
+ .expect("CanonicalJson is valid json value"),
+ )
+ .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?;
+
+ let pdu_id = server_server::handle_incoming_pdu(
+ &origin,
+ &event_id,
+ room_id,
+ value,
+ true,
+ db,
+ &pub_key_map,
+ )
+ .await
+ .map_err(|_| {
+ Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Error while handling incoming PDU.",
+ )
+ })?
+ .ok_or(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Could not accept incoming PDU as timeline event.",
+ ))?;
+
+ let servers = db
+ .rooms
+ .room_servers(room_id)
+ .filter_map(|r| r.ok())
+ .filter(|server| &**server != db.globals.server_name());
+
+ db.sending.send_pdu(servers, &pdu_id)?;
+
+ return Ok(());
+ }
+
+ if !db.rooms.is_joined(sender_user, room_id)? {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "You don't have permission to view this room.",
+ ));
+ }
+
+ let mutex_state = Arc::clone(
+ db.globals
+ .roomid_mutex_state
+ .write()
+ .unwrap()
+ .entry(room_id.to_owned())
+ .or_default(),
+ );
+ let state_lock = mutex_state.lock().await;
+
+ db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: RoomEventType::RoomMember,
+ content: to_raw_value(&RoomMemberEventContent {
+ membership: MembershipState::Invite,
+ displayname: db.users.displayname(user_id)?,
+ avatar_url: db.users.avatar_url(user_id)?,
+ is_direct: Some(is_direct),
+ third_party_invite: None,
+ blurhash: db.users.blurhash(user_id)?,
+ reason: None,
+ join_authorized_via_users_server: None,
+ })
+ .expect("event is valid, we just created it"),
+ unsigned: None,
+ state_key: Some(user_id.to_string()),
+ redacts: None,
+ },
+ sender_user,
+ room_id,
+ db,
+ &state_lock,
+ )?;
+
+ drop(state_lock);
+
+ Ok(())
+}
+
+ // Make a user leave all their joined rooms
+ #[tracing::instrument(skip(self, db))]
+ pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> {
+ let all_rooms = db
+ .rooms
+ .rooms_joined(user_id)
+ .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r)))
+ .collect::<Vec<_>>();
+
+ for room_id in all_rooms {
+ let room_id = match room_id {
+ Ok(room_id) => room_id,
+ Err(_) => continue,
+ };
+
+ let _ = self.leave_room(user_id, &room_id, db).await;
+ }
+
+ Ok(())
+ }
+
+ #[tracing::instrument(skip(self, db))]
+ pub async fn leave_room(
+ &self,
+ user_id: &UserId,
+ room_id: &RoomId,
+ db: &Database,
+ ) -> Result<()> {
+ // Ask a remote server if we don't have this room
+ if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() {
+ if let Err(e) = self.remote_leave_room(user_id, room_id, db).await {
+ warn!("Failed to leave room {} remotely: {}", user_id, e);
+ // Don't tell the client about this error
+ }
+
+ let last_state = self
+ .invite_state(user_id, room_id)?
+ .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?;
+
+ // We always drop the invite; we can't rely on other servers
+ self.update_membership(
+ room_id,
+ user_id,
+ MembershipState::Leave,
+ user_id,
+ last_state,
+ db,
+ true,
+ )?;
+ } else {
+ let mutex_state = Arc::clone(
+ db.globals
+ .roomid_mutex_state
+ .write()
+ .unwrap()
+ .entry(room_id.to_owned())
+ .or_default(),
+ );
+ let state_lock = mutex_state.lock().await;
+
+ let mut event: RoomMemberEventContent = serde_json::from_str(
+ self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())?
+ .ok_or(Error::BadRequest(
+ ErrorKind::BadState,
+ "Cannot leave a room you are not a member of.",
+ ))?
+ .content
+ .get(),
+ )
+ .map_err(|_| Error::bad_database("Invalid member event in database."))?;
+
+ event.membership = MembershipState::Leave;
+
+ self.build_and_append_pdu(
+ PduBuilder {
+ event_type: RoomEventType::RoomMember,
+ content: to_raw_value(&event).expect("event is valid, we just created it"),
+ unsigned: None,
+ state_key: Some(user_id.to_string()),
+ redacts: None,
+ },
+ user_id,
+ room_id,
+ db,
+ &state_lock,
+ )?;
+ }
+
+ Ok(())
+ }
+
+ #[tracing::instrument(skip(self, db))]
+ async fn remote_leave_room(
+ &self,
+ user_id: &UserId,
+ room_id: &RoomId,
+ db: &Database,
+ ) -> Result<()> {
+ let mut make_leave_response_and_server = Err(Error::BadServerResponse(
+ "No server available to assist in leaving.",
+ ));
+
+ let invite_state = db
+ .rooms
+ .invite_state(user_id, room_id)?
+ .ok_or(Error::BadRequest(
+ ErrorKind::BadState,
+ "User is not invited.",
+ ))?;
+
+ let servers: HashSet<_> = invite_state
+ .iter()
+ .filter_map(|event| serde_json::from_str(event.json().get()).ok())
+ .filter_map(|event: serde_json::Value| event.get("sender").cloned())
+ .filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
+ .filter_map(|sender| UserId::parse(sender).ok())
+ .map(|user| user.server_name().to_owned())
+ .collect();
+
+ for remote_server in servers {
+ let make_leave_response = db
+ .sending
+ .send_federation_request(
+ &db.globals,
+ &remote_server,
+ federation::membership::prepare_leave_event::v1::Request { room_id, user_id },
+ )
+ .await;
+
+ make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server));
+
+ if make_leave_response_and_server.is_ok() {
+ break;
+ }
+ }
+
+ let (make_leave_response, remote_server) = make_leave_response_and_server?;
+
+ let room_version_id = match make_leave_response.room_version {
+ Some(version) if self.is_supported_version(&db, &version) => version,
+ _ => return Err(Error::BadServerResponse("Room version is not supported")),
+ };
+
+ let mut leave_event_stub =
+ serde_json::from_str::<CanonicalJsonObject>(make_leave_response.event.get()).map_err(
+ |_| Error::BadServerResponse("Invalid make_leave event json received from server."),
+ )?;
+
+ // TODO: Is origin needed?
+ leave_event_stub.insert(
+ "origin".to_owned(),
+ CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()),
+ );
+ leave_event_stub.insert(
+ "origin_server_ts".to_owned(),
+ CanonicalJsonValue::Integer(
+ utils::millis_since_unix_epoch()
+ .try_into()
+ .expect("Timestamp is valid js_int value"),
+ ),
+ );
+ // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms
+ leave_event_stub.remove("event_id");
+
+ // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present
+ ruma::signatures::hash_and_sign_event(
+ db.globals.server_name().as_str(),
+ db.globals.keypair(),
+ &mut leave_event_stub,
+ &room_version_id,
+ )
+ .expect("event is valid, we just created it");
+
+ // Generate event id
+ let event_id = EventId::parse(format!(
+ "${}",
+ ruma::signatures::reference_hash(&leave_event_stub, &room_version_id)
+ .expect("ruma can calculate reference hashes")
+ ))
+ .expect("ruma's reference hashes are valid event ids");
+
+ // Add event_id back
+ leave_event_stub.insert(
+ "event_id".to_owned(),
+ CanonicalJsonValue::String(event_id.as_str().to_owned()),
+ );
+
+ // It has enough fields to be called a proper event now
+ let leave_event = leave_event_stub;
+
+ db.sending
+ .send_federation_request(
+ &db.globals,
+ &remote_server,
+ federation::membership::create_leave_event::v2::Request {
+ room_id,
+ event_id: &event_id,
+ pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()),
+ },
+ )
+ .await?;
+
+ Ok(())
+ }
+
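
One detail worth isolating from validate_and_add_event_id above: events that fail verification are rate-limited with a quadratic backoff of 30 seconds times the square of the failure count, capped at one day. A self-contained sketch of just that schedule (backoff_for is an illustrative name; the base and cap match the code above):

    use std::time::Duration;

    // 30s * tries^2, capped at 24 hours, as in the bad_event_ratelimiter check.
    fn backoff_for(tries: u32) -> Duration {
        let raw = Duration::from_secs(30) * tries * tries;
        raw.min(Duration::from_secs(60 * 60 * 24))
    }

    fn main() {
        assert_eq!(backoff_for(1), Duration::from_secs(30));
        assert_eq!(backoff_for(10), Duration::from_secs(3000));
        // After enough failures the cap keeps the wait at one day.
        assert_eq!(backoff_for(2000), Duration::from_secs(86400));
    }
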
diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs
new file mode 100644
index 0000000..1348132
--- /dev/null
+++ b/src/api/client_server/message.rs
@@ -0,0 +1,246 @@
+use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, Error, Result, Ruma};
+use ruma::{
+ api::client::{
+ error::ErrorKind,
+ message::{get_message_events, send_message_event},
+ },
+ events::{RoomEventType, StateEventType},
+};
+use std::{
+ collections::{BTreeMap, HashSet},
+ sync::Arc,
+};
+
+/// # `PUT /_matrix/client/r0/rooms/{roomId}/send/{eventType}/{txnId}`
+///
+/// Send a message event into the room.
+///
+/// - Is a no-op if the txn id was already used before and returns the same event id again
+/// - The only requirement for the content is that it has to be valid json
+/// - Tries to send the event into the room, auth rules will determine if it is allowed
+pub async fn send_message_event_route(
+ db: DatabaseGuard,
+ body: Ruma<send_message_event::v3::IncomingRequest>,
+) -> Result<send_message_event::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+ let sender_device = body.sender_device.as_deref();
+
+ let mutex_state = Arc::clone(
+ db.globals
+ .roomid_mutex_state
+ .write()
+ .unwrap()
+ .entry(body.room_id.clone())
+ .or_default(),
+ );
+ let state_lock = mutex_state.lock().await;
+
+ // Forbid m.room.encrypted if encryption is disabled
+ if RoomEventType::RoomEncrypted == body.event_type.to_string().into()
+ && !db.globals.allow_encryption()
+ {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "Encryption has been disabled",
+ ));
+ }
+
+ // Check if this is a new transaction id
+ if let Some(response) =
+ db.transaction_ids
+ .existing_txnid(sender_user, sender_device, &body.txn_id)?
+ {
+ // The client might have sent a txnid for the /sendToDevice endpoint
+ // Such a txnid has no response associated with it
+ if response.is_empty() {
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Tried to use txn id already used for an incompatible endpoint.",
+ ));
+ }
+
+ let event_id = utils::string_from_bytes(&response)
+ .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?
+ .try_into()
+ .map_err(|_| Error::bad_database("Invalid event id in txnid data."))?;
+ return Ok(send_message_event::v3::Response { event_id });
+ }
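+
+ // Persisting the txn id -> event id mapping (below, after the PDU is
+ // appended) is what makes retries of this PUT idempotent: a client that
+ // resends after a timeout gets the original event id back instead of
+ // creating a duplicate event.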
+
+ let mut unsigned = BTreeMap::new();
+ unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into());
+
+ let event_id = db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: body.event_type.to_string().into(),
+ content: serde_json::from_str(body.body.body.json().get())
+ .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?,
+ unsigned: Some(unsigned),
+ state_key: None,
+ redacts: None,
+ },
+ sender_user,
+ &body.room_id,
+ &db,
+ &state_lock,
+ )?;
+
+ db.transaction_ids.add_txnid(
+ sender_user,
+ sender_device,
+ &body.txn_id,
+ event_id.as_bytes(),
+ )?;
+
+ drop(state_lock);
+
+ db.flush()?;
+
+ Ok(send_message_event::v3::Response::new(
+ (*event_id).to_owned(),
+ ))
+}
+
+/// # `GET /_matrix/client/r0/rooms/{roomId}/messages`
+///
+/// Allows paginating through room history.
+///
+/// - Only works if the user is joined (TODO: always allow, but only show events where the user was
+/// joined, depending on history_visibility)
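+///
+/// Pagination sketch: pass the `end` token of one response as `from` in the
+/// next request; when `end` is absent, there are no further events to fetch.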
+pub async fn get_message_events_route(
+ db: DatabaseGuard,
+ body: Ruma<get_message_events::v3::IncomingRequest>,
+) -> Result<get_message_events::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+ let sender_device = body.sender_device.as_ref().expect("user is authenticated");
+
+ if !db.rooms.is_joined(sender_user, &body.room_id)? {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "You don't have permission to view this room.",
+ ));
+ }
+
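+ // Without a `from` token, forward pagination starts at the oldest event
+ // (count 0) and backward pagination at the newest (u64::MAX).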
+ let from = match body.from.clone() {
+ Some(from) => from
+ .parse()
+ .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?,
+
+ None => match body.dir {
+ get_message_events::v3::Direction::Forward => 0,
+ get_message_events::v3::Direction::Backward => u64::MAX,
+ },
+ };
+
+ let to = body.to.as_ref().map(|t| t.parse());
+
+ db.rooms
+ .lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)?;
+
+ // Use the requested limit, falling back to 10 if it does not fit into a u32
+ let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize);
+
+ let next_token;
+
+ let mut resp = get_message_events::v3::Response::new();
+
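+ // Senders whose m.room.member events still need to be sent to this device
+ // (lazy-loaded room members).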
+ let mut lazy_loaded = HashSet::new();
+
+ match body.dir {
+ get_message_events::v3::Direction::Forward => {
+ let events_after: Vec<_> = db
+ .rooms
+ .pdus_after(sender_user, &body.room_id, from)?
+ .take(limit)
+ .filter_map(|r| r.ok()) // Filter out buggy events
+ .filter_map(|(pdu_id, pdu)| {
+ db.rooms
+ .pdu_count(&pdu_id)
+ .map(|pdu_count| (pdu_count, pdu))
+ .ok()
+ })
+ .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to`
+ .collect();
+
+ for (_, event) in &events_after {
+ if !db.rooms.lazy_load_was_sent_before(
+ sender_user,
+ sender_device,
+ &body.room_id,
+ &event.sender,
+ )? {
+ lazy_loaded.insert(event.sender.clone());
+ }
+ }
+
+ next_token = events_after.last().map(|(count, _)| count).copied();
+
+ let events_after: Vec<_> = events_after
+ .into_iter()
+ .map(|(_, pdu)| pdu.to_room_event())
+ .collect();
+
+ resp.start = from.to_string();
+ resp.end = next_token.map(|count| count.to_string());
+ resp.chunk = events_after;
+ }
+ get_message_events::v3::Direction::Backward => {
+ let events_before: Vec<_> = db
+ .rooms
+ .pdus_until(sender_user, &body.room_id, from)?
+ .take(limit)
+ .filter_map(|r| r.ok()) // Filter out buggy events
+ .filter_map(|(pdu_id, pdu)| {
+ db.rooms
+ .pdu_count(&pdu_id)
+ .map(|pdu_count| (pdu_count, pdu))
+ .ok()
+ })
+ .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to`
+ .collect();
+
+ for (_, event) in &events_before {
+ if !db.rooms.lazy_load_was_sent_before(
+ sender_user,
+ sender_device,
+ &body.room_id,
+ &event.sender,
+ )? {
+ lazy_loaded.insert(event.sender.clone());
+ }
+ }
+
+ next_token = events_before.last().map(|(count, _)| count).copied();
+
+ let events_before: Vec<_> = events_before
+ .into_iter()
+ .map(|(_, pdu)| pdu.to_room_event())
+ .collect();
+
+ resp.start = from.to_string();
+ resp.end = next_token.map(|count| count.to_string());
+ resp.chunk = events_before;
+ }
+ }
+
+ resp.state = Vec::new();
+ for ll_id in &lazy_loaded {
+ if let Some(member_event) =
+ db.rooms
+ .room_state_get(&body.room_id, &StateEventType::RoomMember, ll_id.as_str())?
+ {
+ resp.state.push(member_event.to_state_event());
+ }
+ }
+
+ if let Some(next_token) = next_token {
+ db.rooms.lazy_load_mark_sent(
+ sender_user,
+ sender_device,
+ &body.room_id,
+ lazy_loaded,
+ next_token,
+ );
+ }
+
+ Ok(resp)
+}
diff --git a/src/api/client_server/mod.rs b/src/api/client_server/mod.rs
new file mode 100644
index 0000000..65b7a10
--- /dev/null
+++ b/src/api/client_server/mod.rs
@@ -0,0 +1,68 @@
+mod account;
+mod alias;
+mod backup;
+mod capabilities;
+mod config;
+mod context;
+mod device;
+mod directory;
+mod filter;
+mod keys;
+mod media;
+mod membership;
+mod message;
+mod presence;
+mod profile;
+mod push;
+mod read_marker;
+mod redact;
+mod report;
+mod room;
+mod search;
+mod session;
+mod state;
+mod sync;
+mod tag;
+mod thirdparty;
+mod to_device;
+mod typing;
+mod unversioned;
+mod user_directory;
+mod voip;
+
+pub use account::*;
+pub use alias::*;
+pub use backup::*;
+pub use capabilities::*;
+pub use config::*;
+pub use context::*;
+pub use device::*;
+pub use directory::*;
+pub use filter::*;
+pub use keys::*;
+pub use media::*;
+pub use membership::*;
+pub use message::*;
+pub use presence::*;
+pub use profile::*;
+pub use push::*;
+pub use read_marker::*;
+pub use redact::*;
+pub use report::*;
+pub use room::*;
+pub use search::*;
+pub use session::*;
+pub use state::*;
+pub use sync::*;
+pub use tag::*;
+pub use thirdparty::*;
+pub use to_device::*;
+pub use typing::*;
+pub use unversioned::*;
+pub use user_directory::*;
+pub use voip::*;
+
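+// Lengths (in characters) of the randomly generated identifiers and secrets
+// used across the client-server handlers.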
+pub const DEVICE_ID_LENGTH: usize = 10;
+pub const TOKEN_LENGTH: usize = 256;
+pub const SESSION_ID_LENGTH: usize = 256;
+pub const AUTO_GEN_PASSWORD_LENGTH: usize = 15;
diff --git a/src/api/client_server/presence.rs b/src/api/client_server/presence.rs
new file mode 100644
index 0000000..773fef4
--- /dev/null
+++ b/src/api/client_server/presence.rs
@@ -0,0 +1,87 @@
+use crate::{database::DatabaseGuard, utils, Error, Result, Ruma};
+use ruma::api::client::{
+ error::ErrorKind,
+ presence::{get_presence, set_presence},
+};
+use std::time::Duration;
+
+/// # `PUT /_matrix/client/r0/presence/{userId}/status`
+///
+/// Sets the presence state of the sender user.
+pub async fn set_presence_route(
+ db: DatabaseGuard,
+ body: Ruma<set_presence::v3::IncomingRequest>,
+) -> Result<set_presence::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
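+ // Presence is stored per room: write an EDU into every room the sender is
+ // joined to so the members of each of those rooms see the update.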
+ for room_id in db.rooms.rooms_joined(sender_user) {
+ let room_id = room_id?;
+
+ db.rooms.edus.update_presence(
+ sender_user,
+ &room_id,
+ ruma::events::presence::PresenceEvent {
+ content: ruma::events::presence::PresenceEventContent {
+ avatar_url: db.users.avatar_url(sender_user)?,
+ currently_active: None,
+ displayname: db.users.displayname(sender_user)?,
+ last_active_ago: Some(
+ utils::millis_since_unix_epoch()
+ .try_into()
+ .expect("time is valid"),
+ ),
+ presence: body.presence.clone(),
+ status_msg: body.status_msg.clone(),
+ },
+ sender: sender_user.clone(),
+ },
+ &db.globals,
+ )?;
+ }
+
+ db.flush()?;
+
+ Ok(set_presence::v3::Response {})
+}
+
+/// # `GET /_matrix/client/r0/presence/{userId}/status`
+///
+/// Gets the presence state of the given user.
+///
+/// - Only works if you share a room with the user
+pub async fn get_presence_route(
+ db: DatabaseGuard,
+ body: Ruma<get_presence::v3::IncomingRequest>,
+) -> Result<get_presence::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let mut presence_event = None;
+
+ for room_id in db
+ .rooms
+ .get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])?
+ {
+ let room_id = room_id?;
+
+ if let Some(presence) = db
+ .rooms
+ .edus
+ .get_last_presence_event(sender_user, &room_id)?
+ {
+ presence_event = Some(presence);
+ break;
+ }
+ }
+
+ if let Some(presence) = presence_event {
+ Ok(get_presence::v3::Response {
+ // TODO: Should ruma just use the PresenceEventContent type here?
+ status_msg: presence.content.status_msg,
+ currently_active: presence.content.currently_active,
+ last_active_ago: presence
+ .content
+ .last_active_ago
+ .map(|millis| Duration::from_millis(millis.into())),
+ presence: presence.content.presence,
+ })
+ } else {
+ Err(Error::BadRequest(
+ ErrorKind::NotFound,
+ "Presence state for this user was not found",
+ ))
+ }
+}
diff --git a/src/api/client_server/profile.rs b/src/api/client_server/profile.rs
new file mode 100644
index 0000000..acea19f
--- /dev/null
+++ b/src/api/client_server/profile.rs
@@ -0,0 +1,321 @@
+use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, Error, Result, Ruma};
+use ruma::{
+ api::{
+ client::{
+ error::ErrorKind,
+ profile::{
+ get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name,
+ },
+ },
+ federation::{self, query::get_profile_information::v1::ProfileField},
+ },
+ events::{room::member::RoomMemberEventContent, RoomEventType, StateEventType},
+};
+use serde_json::value::to_raw_value;
+use std::sync::Arc;
+
+/// # `PUT /_matrix/client/r0/profile/{userId}/displayname`
+///
+/// Updates the displayname.
+///
+/// - Also makes sure other users receive the update using presence EDUs
+pub async fn set_displayname_route(
+ db: DatabaseGuard,
+ body: Ruma<set_display_name::v3::IncomingRequest>,
+) -> Result<set_display_name::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ db.users
+ .set_displayname(sender_user, body.displayname.clone())?;
+
+ // Send a new membership event and presence update into all joined rooms
+ let all_rooms_joined: Vec<_> = db
+ .rooms
+ .rooms_joined(sender_user)
+ .filter_map(|r| r.ok())
+ .map(|room_id| {
+ Ok::<_, Error>((
+ PduBuilder {
+ event_type: RoomEventType::RoomMember,
+ content: to_raw_value(&RoomMemberEventContent {
+ displayname: body.displayname.clone(),
+ ..serde_json::from_str(
+ db.rooms
+ .room_state_get(
+ &room_id,
+ &StateEventType::RoomMember,
+ sender_user.as_str(),
+ )?
+ .ok_or_else(|| {
+ Error::bad_database(
+ "Tried to send displayname update for user not in the \
+ room.",
+ )
+ })?
+ .content
+ .get(),
+ )
+ .map_err(|_| Error::bad_database("Database contains invalid PDU."))?
+ })
+ .expect("event is valid, we just created it"),
+ unsigned: None,
+ state_key: Some(sender_user.to_string()),
+ redacts: None,
+ },
+ room_id,
+ ))
+ })
+ .filter_map(|r| r.ok())
+ .collect();
+
+ for (pdu_builder, room_id) in all_rooms_joined {
+ let mutex_state = Arc::clone(
+ db.globals
+ .roomid_mutex_state
+ .write()
+ .unwrap()
+ .entry(room_id.clone())
+ .or_default(),
+ );
+ let state_lock = mutex_state.lock().await;
+
+ let _ = db
+ .rooms
+ .build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock);
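+
+ // The result is deliberately ignored: failing to update the member event
+ // in one room (e.g. due to power levels) should not abort the profile
+ // update in the remaining rooms.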
+
+ // Presence update
+ db.rooms.edus.update_presence(
+ sender_user,
+ &room_id,
+ ruma::events::presence::PresenceEvent {
+ content: ruma::events::presence::PresenceEventContent {
+ avatar_url: db.users.avatar_url(sender_user)?,
+ currently_active: None,
+ displayname: db.users.displayname(sender_user)?,
+ last_active_ago: Some(
+ utils::millis_since_unix_epoch()
+ .try_into()
+ .expect("time is valid"),
+ ),
+ presence: ruma::presence::PresenceState::Online,
+ status_msg: None,
+ },
+ sender: sender_user.clone(),
+ },
+ &db.globals,
+ )?;
+ }
+
+ db.flush()?;
+
+ Ok(set_display_name::v3::Response {})
+}
+
+/// # `GET /_matrix/client/r0/profile/{userId}/displayname`
+///
+/// Returns the displayname of the user.
+///
+/// - If user is on another server: Fetches displayname over federation
+pub async fn get_displayname_route(
+ db: DatabaseGuard,
+ body: Ruma<get_display_name::v3::IncomingRequest>,
+) -> Result<get_display_name::v3::Response> {
+ if body.user_id.server_name() != db.globals.server_name() {
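+ // The profile is owned by the user's homeserver, so for remote users it
+ // is fetched over federation instead of from the local database.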
+ let response = db
+ .sending
+ .send_federation_request(
+ &db.globals,
+ body.user_id.server_name(),
+ federation::query::get_profile_information::v1::Request {
+ user_id: &body.user_id,
+ field: Some(&ProfileField::DisplayName),
+ },
+ )
+ .await?;
+
+ return Ok(get_display_name::v3::Response {
+ displayname: response.displayname,
+ });
+ }
+
+ Ok(get_display_name::v3::Response {
+ displayname: db.users.displayname(&body.user_id)?,
+ })
+}
+
+/// # `PUT /_matrix/client/r0/profile/{userId}/avatar_url`
+///
+/// Updates the avatar_url and blurhash.
+///
+/// - Also makes sure other users receive the update using presence EDUs
+pub async fn set_avatar_url_route(
+ db: DatabaseGuard,
+ body: Ruma<set_avatar_url::v3::IncomingRequest>,
+) -> Result<set_avatar_url::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ db.users
+ .set_avatar_url(sender_user, body.avatar_url.clone())?;
+
+ db.users.set_blurhash(sender_user, body.blurhash.clone())?;
+
+ // Send a new membership event and presence update into all joined rooms
+ let all_joined_rooms: Vec<_> = db
+ .rooms
+ .rooms_joined(sender_user)
+ .filter_map(|r| r.ok())
+ .map(|room_id| {
+ Ok::<_, Error>((
+ PduBuilder {
+ event_type: RoomEventType::RoomMember,
+ content: to_raw_value(&RoomMemberEventContent {
+ avatar_url: body.avatar_url.clone(),
+ ..serde_json::from_str(
+ db.rooms
+ .room_state_get(
+ &room_id,
+ &StateEventType::RoomMember,
+ sender_user.as_str(),
+ )?
+ .ok_or_else(|| {
+ Error::bad_database(
+ "Tried to send displayname update for user not in the \
+ room.",
+ )
+ })?
+ .content
+ .get(),
+ )
+ .map_err(|_| Error::bad_database("Database contains invalid PDU."))?
+ })
+ .expect("event is valid, we just created it"),
+ unsigned: None,
+ state_key: Some(sender_user.to_string()),
+ redacts: None,
+ },
+ room_id,
+ ))
+ })
+ .filter_map(|r| r.ok())
+ .collect();
+
+ for (pdu_builder, room_id) in all_joined_rooms {
+ let mutex_state = Arc::clone(
+ db.globals
+ .roomid_mutex_state
+ .write()
+ .unwrap()
+ .entry(room_id.clone())
+ .or_default(),
+ );
+ let state_lock = mutex_state.lock().await;
+
+ let _ = db
+ .rooms
+ .build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock);
+
+ // Presence update
+ db.rooms.edus.update_presence(
+ sender_user,
+ &room_id,
+ ruma::events::presence::PresenceEvent {
+ content: ruma::events::presence::PresenceEventContent {
+ avatar_url: db.users.avatar_url(sender_user)?,
+ currently_active: None,
+ displayname: db.users.displayname(sender_user)?,
+ last_active_ago: Some(
+ utils::millis_since_unix_epoch()
+ .try_into()
+ .expect("time is valid"),
+ ),
+ presence: ruma::presence::PresenceState::Online,
+ status_msg: None,
+ },
+ sender: sender_user.clone(),
+ },
+ &db.globals,
+ )?;
+ }
+
+ db.flush()?;
+
+ Ok(set_avatar_url::v3::Response {})
+}
+
+/// # `GET /_matrix/client/r0/profile/{userId}/avatar_url`
+///
+/// Returns the avatar_url and blurhash of the user.
+///
+/// - If user is on another server: Fetches avatar_url and blurhash over federation
+pub async fn get_avatar_url_route(
+ db: DatabaseGuard,
+ body: Ruma<get_avatar_url::v3::IncomingRequest>,
+) -> Result<get_avatar_url::v3::Response> {
+ if body.user_id.server_name() != db.globals.server_name() {
+ let response = db
+ .sending
+ .send_federation_request(
+ &db.globals,
+ body.user_id.server_name(),
+ federation::query::get_profile_information::v1::Request {
+ user_id: &body.user_id,
+ field: Some(&ProfileField::AvatarUrl),
+ },
+ )
+ .await?;
+
+ return Ok(get_avatar_url::v3::Response {
+ avatar_url: response.avatar_url,
+ blurhash: response.blurhash,
+ });
+ }
+
+ Ok(get_avatar_url::v3::Response {
+ avatar_url: db.users.avatar_url(&body.user_id)?,
+ blurhash: db.users.blurhash(&body.user_id)?,
+ })
+}
+
+/// # `GET /_matrix/client/r0/profile/{userId}`
+///
+/// Returns the displayname, avatar_url and blurhash of the user.
+///
+/// - If user is on another server: Fetches profile over federation
+pub async fn get_profile_route(
+ db: DatabaseGuard,
+ body: Ruma<get_profile::v3::IncomingRequest>,
+) -> Result<get_profile::v3::Response> {
+ if body.user_id.server_name() != db.globals.server_name() {
+ let response = db
+ .sending
+ .send_federation_request(
+ &db.globals,
+ body.user_id.server_name(),
+ federation::query::get_profile_information::v1::Request {
+ user_id: &body.user_id,
+ field: None,
+ },
+ )
+ .await?;
+
+ return Ok(get_profile::v3::Response {
+ displayname: response.displayname,
+ avatar_url: response.avatar_url,
+ blurhash: response.blurhash,
+ });
+ }
+
+ if !db.users.exists(&body.user_id)? {
+ // Return 404 if this user doesn't exist
+ return Err(Error::BadRequest(
+ ErrorKind::NotFound,
+ "Profile was not found.",
+ ));
+ }
+
+ Ok(get_profile::v3::Response {
+ avatar_url: db.users.avatar_url(&body.user_id)?,
+ blurhash: db.users.blurhash(&body.user_id)?,
+ displayname: db.users.displayname(&body.user_id)?,
+ })
+}
diff --git a/src/api/client_server/push.rs b/src/api/client_server/push.rs
new file mode 100644
index 0000000..dc45ea0
--- /dev/null
+++ b/src/api/client_server/push.rs
@@ -0,0 +1,584 @@
+use crate::{database::DatabaseGuard, Error, Result, Ruma};
+use ruma::{
+ api::client::{
+ error::ErrorKind,
+ push::{
+ delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled,
+ get_pushrules_all, set_pusher, set_pushrule, set_pushrule_actions,
+ set_pushrule_enabled, RuleKind,
+ },
+ },
+ events::{push_rules::PushRulesEvent, GlobalAccountDataEventType},
+ push::{ConditionalPushRuleInit, PatternedPushRuleInit, SimplePushRuleInit},
+};
+
+/// # `GET /_matrix/client/r0/pushrules`
+///
+/// Retrieves the push rules event for this user.
+pub async fn get_pushrules_all_route(
+ db: DatabaseGuard,
+ body: Ruma<get_pushrules_all::v3::Request>,
+) -> Result<get_pushrules_all::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let event: PushRulesEvent = db
+ .account_data
+ .get(
+ None,
+ sender_user,
+ GlobalAccountDataEventType::PushRules.to_string().into(),
+ )?
+ .ok_or(Error::BadRequest(
+ ErrorKind::NotFound,
+ "PushRules event not found.",
+ ))?;
+
+ Ok(get_pushrules_all::v3::Response {
+ global: event.content.global,
+ })
+}
+
+/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
+///
+/// Retrieves a single specified push rule for this user.
+pub async fn get_pushrule_route(
+ db: DatabaseGuard,
+ body: Ruma<get_pushrule::v3::IncomingRequest>,
+) -> Result<get_pushrule::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let event: PushRulesEvent = db
+ .account_data
+ .get(
+ None,
+ sender_user,
+ GlobalAccountDataEventType::PushRules.to_string().into(),
+ )?
+ .ok_or(Error::BadRequest(
+ ErrorKind::NotFound,
+ "PushRules event not found.",
+ ))?;
+
+ let global = event.content.global;
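+ // Look the rule up in the collection matching its kind; unknown kinds
+ // fall through to `None`.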
+ let rule = match body.kind {
+ RuleKind::Override => global
+ .override_
+ .get(body.rule_id.as_str())
+ .map(|rule| rule.clone().into()),
+ RuleKind::Underride => global
+ .underride
+ .get(body.rule_id.as_str())
+ .map(|rule| rule.clone().into()),
+ RuleKind::Sender => global
+ .sender
+ .get(body.rule_id.as_str())
+ .map(|rule| rule.clone().into()),
+ RuleKind::Room => global
+ .room
+ .get(body.rule_id.as_str())
+ .map(|rule| rule.clone().into()),
+ RuleKind::Content => global
+ .content
+ .get(body.rule_id.as_str())
+ .map(|rule| rule.clone().into()),
+ _ => None,
+ };
+
+ if let Some(rule) = rule {
+ Ok(get_pushrule::v3::Response { rule })
+ } else {
+ Err(Error::BadRequest(
+ ErrorKind::NotFound,
+ "Push rule not found.",
+ ))
+ }
+}
+
+/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
+///
+/// Creates a single specified push rule for this user.
+pub async fn set_pushrule_route(
+ db: DatabaseGuard,
+ body: Ruma<set_pushrule::v3::IncomingRequest>,
+) -> Result<set_pushrule::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+ let body = body.body;
+
+ if body.scope != "global" {
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Scopes other than 'global' are not supported.",
+ ));
+ }
+
+ let mut event: PushRulesEvent = db
+ .account_data
+ .get(
+ None,
+ sender_user,
+ GlobalAccountDataEventType::PushRules.to_string().into(),
+ )?
+ .ok_or(Error::BadRequest(
+ ErrorKind::NotFound,
+ "PushRules event not found.",
+ ))?;
+
+ let global = &mut event.content.global;
+ match body.kind {
+ RuleKind::Override => {
+ global.override_.replace(
+ ConditionalPushRuleInit {
+ actions: body.actions,
+ default: false,
+ enabled: true,
+ rule_id: body.rule_id,
+ conditions: body.conditions,
+ }
+ .into(),
+ );
+ }
+ RuleKind::Underride => {
+ global.underride.replace(
+ ConditionalPushRuleInit {
+ actions: body.actions,
+ default: false,
+ enabled: true,
+ rule_id: body.rule_id,
+ conditions: body.conditions,
+ }
+ .into(),
+ );
+ }
+ RuleKind::Sender => {
+ global.sender.replace(
+ SimplePushRuleInit {
+ actions: body.actions,
+ default: false,
+ enabled: true,
+ rule_id: body.rule_id,
+ }
+ .into(),
+ );
+ }
+ RuleKind::Room => {
+ global.room.replace(
+ SimplePushRuleInit {
+ actions: body.actions,
+ default: false,
+ enabled: true,
+ rule_id: body.rule_id,
+ }
+ .into(),
+ );
+ }
+ RuleKind::Content => {
+ global.content.replace(
+ PatternedPushRuleInit {
+ actions: body.actions,
+ default: false,
+ enabled: true,
+ rule_id: body.rule_id,
+ pattern: body.pattern.unwrap_or_default(),
+ }
+ .into(),
+ );
+ }
+ _ => {}
+ }
+
+ db.account_data.update(
+ None,
+ sender_user,
+ GlobalAccountDataEventType::PushRules.to_string().into(),
+ &event,
+ &db.globals,
+ )?;
+
+ db.flush()?;
+
+ Ok(set_pushrule::v3::Response {})
+}
+
+/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions`
+///
+/// Gets the actions of a single specified push rule for this user.
+pub async fn get_pushrule_actions_route(
+ db: DatabaseGuard,
+ body: Ruma<get_pushrule_actions::v3::IncomingRequest>,
+) -> Result<get_pushrule_actions::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ if body.scope != "global" {
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Scopes other than 'global' are not supported.",
+ ));
+ }
+
+ let mut event: PushRulesEvent = db
+ .account_data
+ .get(
+ None,
+ sender_user,
+ GlobalAccountDataEventType::PushRules.to_string().into(),
+ )?
+ .ok_or(Error::BadRequest(
+ ErrorKind::NotFound,
+ "PushRules event not found.",
+ ))?;
+
+ let global = &mut event.content.global;
+ let actions = match body.kind {
+ RuleKind::Override => global
+ .override_
+ .get(body.rule_id.as_str())
+ .map(|rule| rule.actions.clone()),
+ RuleKind::Underride => global
+ .underride
+ .get(body.rule_id.as_str())
+ .map(|rule| rule.actions.clone()),
+ RuleKind::Sender => global
+ .sender
+ .get(body.rule_id.as_str())
+ .map(|rule| rule.actions.clone()),
+ RuleKind::Room => global
+ .room
+ .get(body.rule_id.as_str())
+ .map(|rule| rule.actions.clone()),
+ RuleKind::Content => global
+ .content
+ .get(body.rule_id.as_str())
+ .map(|rule| rule.actions.clone()),
+ _ => None,
+ };
+
+ db.flush()?;
+
+ Ok(get_pushrule_actions::v3::Response {
+ actions: actions.unwrap_or_default(),
+ })
+}
+
+/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions`
+///
+/// Sets the actions of a single specified push rule for this user.
+pub async fn set_pushrule_actions_route(
+ db: DatabaseGuard,
+ body: Ruma<set_pushrule_actions::v3::IncomingRequest>,
+) -> Result<set_pushrule_actions::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ if body.scope != "global" {
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Scopes other than 'global' are not supported.",
+ ));
+ }
+
+ let mut event: PushRulesEvent = db
+ .account_data
+ .get(
+ None,
+ sender_user,
+ GlobalAccountDataEventType::PushRules.to_string().into(),
+ )?
+ .ok_or(Error::BadRequest(
+ ErrorKind::NotFound,
+ "PushRules event not found.",
+ ))?;
+
+ let global = &mut event.content.global;
+ match body.kind {
+ RuleKind::Override => {
+ if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() {
+ rule.actions = body.actions.clone();
+ global.override_.replace(rule);
+ }
+ }
+ RuleKind::Underride => {
+ if let Some(mut rule) = global.underride.get(body.rule_id.as_str()).cloned() {
+ rule.actions = body.actions.clone();
+ global.underride.replace(rule);
+ }
+ }
+ RuleKind::Sender => {
+ if let Some(mut rule) = global.sender.get(body.rule_id.as_str()).cloned() {
+ rule.actions = body.actions.clone();
+ global.sender.replace(rule);
+ }
+ }
+ RuleKind::Room => {
+ if let Some(mut rule) = global.room.get(body.rule_id.as_str()).cloned() {
+ rule.actions = body.actions.clone();
+ global.room.replace(rule);
+ }
+ }
+ RuleKind::Content => {
+ if let Some(mut rule) = global.content.get(body.rule_id.as_str()).cloned() {
+ rule.actions = body.actions.clone();
+ global.content.replace(rule);
+ }
+ }
+ _ => {}
+ };
+
+ db.account_data.update(
+ None,
+ sender_user,
+ GlobalAccountDataEventType::PushRules.to_string().into(),
+ &event,
+ &db.globals,
+ )?;
+
+ db.flush()?;
+
+ Ok(set_pushrule_actions::v3::Response {})
+}
+
+/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled`
+///
+/// Gets the enabled status of a single specified push rule for this user.
+pub async fn get_pushrule_enabled_route(
+ db: DatabaseGuard,
+ body: Ruma<get_pushrule_enabled::v3::IncomingRequest>,
+) -> Result<get_pushrule_enabled::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ if body.scope != "global" {
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Scopes other than 'global' are not supported.",
+ ));
+ }
+
+ let mut event: PushRulesEvent = db
+ .account_data
+ .get(
+ None,
+ sender_user,
+ GlobalAccountDataEventType::PushRules.to_string().into(),
+ )?
+ .ok_or(Error::BadRequest(
+ ErrorKind::NotFound,
+ "PushRules event not found.",
+ ))?;
+
+ let global = &mut event.content.global;
+ let enabled = match body.kind {
+ RuleKind::Override => global
+ .override_
+ .iter()
+ .find(|rule| rule.rule_id == body.rule_id)
+ .map_or(false, |rule| rule.enabled),
+ RuleKind::Underride => global
+ .underride
+ .iter()
+ .find(|rule| rule.rule_id == body.rule_id)
+ .map_or(false, |rule| rule.enabled),
+ RuleKind::Sender => global
+ .sender
+ .iter()
+ .find(|rule| rule.rule_id == body.rule_id)
+ .map_or(false, |rule| rule.enabled),
+ RuleKind::Room => global
+ .room
+ .iter()
+ .find(|rule| rule.rule_id == body.rule_id)
+ .map_or(false, |rule| rule.enabled),
+ RuleKind::Content => global
+ .content
+ .iter()
+ .find(|rule| rule.rule_id == body.rule_id)
+ .map_or(false, |rule| rule.enabled),
+ _ => false,
+ };
+
+ db.flush()?;
+
+ Ok(get_pushrule_enabled::v3::Response { enabled })
+}
+
+/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled`
+///
+/// Sets the enabled status of a single specified push rule for this user.
+pub async fn set_pushrule_enabled_route(
+ db: DatabaseGuard,
+ body: Ruma<set_pushrule_enabled::v3::IncomingRequest>,
+) -> Result<set_pushrule_enabled::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ if body.scope != "global" {
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Scopes other than 'global' are not supported.",
+ ));
+ }
+
+ let mut event: PushRulesEvent = db
+ .account_data
+ .get(
+ None,
+ sender_user,
+ GlobalAccountDataEventType::PushRules.to_string().into(),
+ )?
+ .ok_or(Error::BadRequest(
+ ErrorKind::NotFound,
+ "PushRules event not found.",
+ ))?;
+
+ let global = &mut event.content.global;
+ match body.kind {
+ RuleKind::Override => {
+ if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() {
+ global.override_.remove(&rule);
+ rule.enabled = body.enabled;
+ global.override_.insert(rule);
+ }
+ }
+ RuleKind::Underride => {
+ if let Some(mut rule) = global.underride.get(body.rule_id.as_str()).cloned() {
+ global.underride.remove(&rule);
+ rule.enabled = body.enabled;
+ global.underride.insert(rule);
+ }
+ }
+ RuleKind::Sender => {
+ if let Some(mut rule) = global.sender.get(body.rule_id.as_str()).cloned() {
+ global.sender.remove(&rule);
+ rule.enabled = body.enabled;
+ global.sender.insert(rule);
+ }
+ }
+ RuleKind::Room => {
+ if let Some(mut rule) = global.room.get(body.rule_id.as_str()).cloned() {
+ global.room.remove(&rule);
+ rule.enabled = body.enabled;
+ global.room.insert(rule);
+ }
+ }
+ RuleKind::Content => {
+ if let Some(mut rule) = global.content.get(body.rule_id.as_str()).cloned() {
+ global.content.remove(&rule);
+ rule.enabled = body.enabled;
+ global.content.insert(rule);
+ }
+ }
+ _ => {}
+ }
+
+ db.account_data.update(
+ None,
+ sender_user,
+ GlobalAccountDataEventType::PushRules.to_string().into(),
+ &event,
+ &db.globals,
+ )?;
+
+ db.flush()?;
+
+ Ok(set_pushrule_enabled::v3::Response {})
+}
+
+/// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
+///
+/// Deletes a single specified push rule for this user.
+pub async fn delete_pushrule_route(
+ db: DatabaseGuard,
+ body: Ruma<delete_pushrule::v3::IncomingRequest>,
+) -> Result<delete_pushrule::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ if body.scope != "global" {
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Scopes other than 'global' are not supported.",
+ ));
+ }
+
+ let mut event: PushRulesEvent = db
+ .account_data
+ .get(
+ None,
+ sender_user,
+ GlobalAccountDataEventType::PushRules.to_string().into(),
+ )?
+ .ok_or(Error::BadRequest(
+ ErrorKind::NotFound,
+ "PushRules event not found.",
+ ))?;
+
+ let global = &mut event.content.global;
+ match body.kind {
+ RuleKind::Override => {
+ if let Some(rule) = global.override_.get(body.rule_id.as_str()).cloned() {
+ global.override_.remove(&rule);
+ }
+ }
+ RuleKind::Underride => {
+ if let Some(rule) = global.underride.get(body.rule_id.as_str()).cloned() {
+ global.underride.remove(&rule);
+ }
+ }
+ RuleKind::Sender => {
+ if let Some(rule) = global.sender.get(body.rule_id.as_str()).cloned() {
+ global.sender.remove(&rule);
+ }
+ }
+ RuleKind::Room => {
+ if let Some(rule) = global.room.get(body.rule_id.as_str()).cloned() {
+ global.room.remove(&rule);
+ }
+ }
+ RuleKind::Content => {
+ if let Some(rule) = global.content.get(body.rule_id.as_str()).cloned() {
+ global.content.remove(&rule);
+ }
+ }
+ _ => {}
+ }
+
+ db.account_data.update(
+ None,
+ sender_user,
+ GlobalAccountDataEventType::PushRules.to_string().into(),
+ &event,
+ &db.globals,
+ )?;
+
+ db.flush()?;
+
+ Ok(delete_pushrule::v3::Response {})
+}
+
+/// # `GET /_matrix/client/r0/pushers`
+///
+/// Gets all currently active pushers for the sender user.
+pub async fn get_pushers_route(
+ db: DatabaseGuard,
+ body: Ruma<get_pushers::v3::Request>,
+) -> Result<get_pushers::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ Ok(get_pushers::v3::Response {
+ pushers: db.pusher.get_pushers(sender_user)?,
+ })
+}
+
+/// # `POST /_matrix/client/r0/pushers/set`
+///
+/// Adds a pusher for the sender user.
+///
+/// - TODO: Handle `append`
+pub async fn set_pushers_route(
+ db: DatabaseGuard,
+ body: Ruma<set_pusher::v3::Request>,
+) -> Result<set_pusher::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+ let pusher = body.pusher.clone();
+
+ db.pusher.set_pusher(sender_user, pusher)?;
+
+ db.flush()?;
+
+ Ok(set_pusher::v3::Response::default())
+}
diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs
new file mode 100644
index 0000000..91988a4
--- /dev/null
+++ b/src/api/client_server/read_marker.rs
@@ -0,0 +1,127 @@
+use crate::{database::DatabaseGuard, Error, Result, Ruma};
+use ruma::{
+ api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt},
+ events::RoomAccountDataEventType,
+ receipt::ReceiptType,
+ MilliSecondsSinceUnixEpoch,
+};
+use std::collections::BTreeMap;
+
+/// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers`
+///
+/// Sets different types of read markers.
+///
+/// - Updates fully-read account data event to `fully_read`
+/// - If `read_receipt` is set: Update private marker and public read receipt EDU
+pub async fn set_read_marker_route(
+ db: DatabaseGuard,
+ body: Ruma<set_read_marker::v3::IncomingRequest>,
+) -> Result<set_read_marker::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let fully_read_event = ruma::events::fully_read::FullyReadEvent {
+ content: ruma::events::fully_read::FullyReadEventContent {
+ event_id: body.fully_read.clone(),
+ },
+ };
+ db.account_data.update(
+ Some(&body.room_id),
+ sender_user,
+ RoomAccountDataEventType::FullyRead,
+ &fully_read_event,
+ &db.globals,
+ )?;
+
+ if let Some(event) = &body.read_receipt {
+ db.rooms.edus.private_read_set(
+ &body.room_id,
+ sender_user,
+ db.rooms.get_pdu_count(event)?.ok_or(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Event does not exist.",
+ ))?,
+ &db.globals,
+ )?;
+ db.rooms
+ .reset_notification_counts(sender_user, &body.room_id)?;
+
+ let mut user_receipts = BTreeMap::new();
+ user_receipts.insert(
+ sender_user.clone(),
+ ruma::events::receipt::Receipt {
+ ts: Some(MilliSecondsSinceUnixEpoch::now()),
+ },
+ );
+
+ let mut receipts = BTreeMap::new();
+ receipts.insert(ReceiptType::Read, user_receipts);
+
+ let mut receipt_content = BTreeMap::new();
+ receipt_content.insert(event.to_owned(), receipts);
+
+ db.rooms.edus.readreceipt_update(
+ sender_user,
+ &body.room_id,
+ ruma::events::receipt::ReceiptEvent {
+ content: ruma::events::receipt::ReceiptEventContent(receipt_content),
+ room_id: body.room_id.clone(),
+ },
+ &db.globals,
+ )?;
+ }
+
+ db.flush()?;
+
+ Ok(set_read_marker::v3::Response {})
+}
+
+/// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}`
+///
+/// Sets private read marker and public read receipt EDU.
+pub async fn create_receipt_route(
+ db: DatabaseGuard,
+ body: Ruma<create_receipt::v3::IncomingRequest>,
+) -> Result<create_receipt::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ db.rooms.edus.private_read_set(
+ &body.room_id,
+ sender_user,
+ db.rooms
+ .get_pdu_count(&body.event_id)?
+ .ok_or(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Event does not exist.",
+ ))?,
+ &db.globals,
+ )?;
+ db.rooms
+ .reset_notification_counts(sender_user, &body.room_id)?;
+
+ let mut user_receipts = BTreeMap::new();
+ user_receipts.insert(
+ sender_user.clone(),
+ ruma::events::receipt::Receipt {
+ ts: Some(MilliSecondsSinceUnixEpoch::now()),
+ },
+ );
+ let mut receipts = BTreeMap::new();
+ receipts.insert(ReceiptType::Read, user_receipts);
+
+ let mut receipt_content = BTreeMap::new();
+ receipt_content.insert(body.event_id.to_owned(), receipts);
+
+ db.rooms.edus.readreceipt_update(
+ sender_user,
+ &body.room_id,
+ ruma::events::receipt::ReceiptEvent {
+ content: ruma::events::receipt::ReceiptEventContent(receipt_content),
+ room_id: body.room_id.clone(),
+ },
+ &db.globals,
+ )?;
+
+ db.flush()?;
+
+ Ok(create_receipt::v3::Response {})
+}
diff --git a/src/api/client_server/redact.rs b/src/api/client_server/redact.rs
new file mode 100644
index 0000000..059e0f5
--- /dev/null
+++ b/src/api/client_server/redact.rs
@@ -0,0 +1,56 @@
+use std::sync::Arc;
+
+use crate::{database::DatabaseGuard, pdu::PduBuilder, Result, Ruma};
+use ruma::{
+ api::client::redact::redact_event,
+ events::{room::redaction::RoomRedactionEventContent, RoomEventType},
+};
+
+use serde_json::value::to_raw_value;
+
+/// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}`
+///
+/// Tries to send a redaction event into the room.
+///
+/// - TODO: Handle txn id
+pub async fn redact_event_route(
+ db: DatabaseGuard,
+ body: Ruma<redact_event::v3::IncomingRequest>,
+) -> Result<redact_event::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+ let body = body.body;
+
+ let mutex_state = Arc::clone(
+ db.globals
+ .roomid_mutex_state
+ .write()
+ .unwrap()
+ .entry(body.room_id.clone())
+ .or_default(),
+ );
+ let state_lock = mutex_state.lock().await;
+
+ let event_id = db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: RoomEventType::RoomRedaction,
+ content: to_raw_value(&RoomRedactionEventContent {
+ reason: body.reason.clone(),
+ })
+ .expect("event is valid, we just created it"),
+ unsigned: None,
+ state_key: None,
+ redacts: Some(body.event_id.into()),
+ },
+ sender_user,
+ &body.room_id,
+ &db,
+ &state_lock,
+ )?;
+
+ drop(state_lock);
+
+ db.flush()?;
+
+ let event_id = (*event_id).to_owned();
+ Ok(redact_event::v3::Response { event_id })
+}
diff --git a/src/api/client_server/report.rs b/src/api/client_server/report.rs
new file mode 100644
index 0000000..14768e1
--- /dev/null
+++ b/src/api/client_server/report.rs
@@ -0,0 +1,72 @@
+use crate::{database::DatabaseGuard, utils::HtmlEscape, Error, Result, Ruma};
+use ruma::{
+ api::client::{error::ErrorKind, room::report_content},
+ events::room::message,
+ int,
+};
+
+/// # `POST /_matrix/client/r0/rooms/{roomId}/report/{eventId}`
+///
+/// Reports an inappropriate event to the homeserver admins.
+pub async fn report_event_route(
+ db: DatabaseGuard,
+ body: Ruma<report_content::v3::IncomingRequest>,
+) -> Result<report_content::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let pdu = match db.rooms.get_pdu(&body.event_id)? {
+ Some(pdu) => pdu,
+ _ => {
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Invalid Event ID",
+ ))
+ }
+ };
+
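+ // Scores range from -100 (most offensive) to 0 (inoffensive).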
+ if let Some(true) = body.score.map(|s| s > int!(0) || s < int!(-100)) {
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Invalid score, must be within 0 to -100",
+ ));
+ };
+
+ if let Some(true) = body.reason.clone().map(|s| s.chars().count() > 250) {
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Reason too long, should be 250 characters or fewer",
+ ));
+ };
+
+ db.admin
+ .send_message(message::RoomMessageEventContent::text_html(
+ format!(
+ "Report received from: {}\n\n\
+ Event ID: {:?}\n\
+ Room ID: {:?}\n\
+ Sent By: {:?}\n\n\
+ Report Score: {:?}\n\
+ Report Reason: {:?}",
+ sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason
+ ),
+ format!(
+ "<details><summary>Report received from: <a href=\"https://matrix.to/#/{0:?}\">{0:?}\
+ </a></summary><ul><li>Event Info<ul><li>Event ID: <code>{1:?}</code>\
+ <a href=\"https://matrix.to/#/{2:?}/{1:?}\">🔗</a></li><li>Room ID: <code>{2:?}</code>\
+ </li><li>Sent By: <a href=\"https://matrix.to/#/{3:?}\">{3:?}</a></li></ul></li><li>\
+ Report Info<ul><li>Report Score: {4:?}</li><li>Report Reason: {5}</li></ul></li>\
+ </ul></details>",
+ sender_user,
+ pdu.event_id,
+ pdu.room_id,
+ pdu.sender,
+ body.score,
+ HtmlEscape(body.reason.as_deref().unwrap_or(""))
+ ),
+ ));
+
+ db.flush()?;
+
+ Ok(report_content::v3::Response {})
+}
diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs
new file mode 100644
index 0000000..5ae7224
--- /dev/null
+++ b/src/api/client_server/room.rs
@@ -0,0 +1,730 @@
+use crate::{
+ client_server::invite_helper, database::DatabaseGuard, pdu::PduBuilder, Error, Result, Ruma,
+};
+use ruma::{
+ api::client::{
+ error::ErrorKind,
+ room::{self, aliases, create_room, get_room_event, upgrade_room},
+ },
+ events::{
+ room::{
+ canonical_alias::RoomCanonicalAliasEventContent,
+ create::RoomCreateEventContent,
+ guest_access::{GuestAccess, RoomGuestAccessEventContent},
+ history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
+ join_rules::{JoinRule, RoomJoinRulesEventContent},
+ member::{MembershipState, RoomMemberEventContent},
+ name::RoomNameEventContent,
+ power_levels::RoomPowerLevelsEventContent,
+ tombstone::RoomTombstoneEventContent,
+ topic::RoomTopicEventContent,
+ },
+ RoomEventType, StateEventType,
+ },
+ int,
+ serde::{CanonicalJsonObject, JsonObject},
+ RoomAliasId, RoomId,
+};
+use serde_json::{json, value::to_raw_value};
+use std::{cmp::max, collections::BTreeMap, sync::Arc};
+use tracing::{info, warn};
+
+/// # `POST /_matrix/client/r0/createRoom`
+///
+/// Creates a new room.
+///
+/// - Room ID is randomly generated
+/// - Create alias if room_alias_name is set
+/// - Send create event
+/// - Join sender user
+/// - Send power levels event
+/// - Send canonical room alias
+/// - Send join rules
+/// - Send history visibility
+/// - Send guest access
+/// - Send events listed in initial state
+/// - Send events implied by `name` and `topic`
+/// - Send invite events
+pub async fn create_room_route(
+ db: DatabaseGuard,
+ body: Ruma<create_room::v3::IncomingRequest>,
+) -> Result<create_room::v3::Response> {
+ use create_room::v3::RoomPreset;
+
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let room_id = RoomId::new(db.globals.server_name());
+
+ db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?;
+
+ let mutex_state = Arc::clone(
+ db.globals
+ .roomid_mutex_state
+ .write()
+ .unwrap()
+ .entry(room_id.clone())
+ .or_default(),
+ );
+ let state_lock = mutex_state.lock().await;
+
+ if !db.globals.allow_room_creation()
+ && !body.from_appservice
+ && !db.users.is_admin(sender_user, &db.rooms, &db.globals)?
+ {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "Room creation has been disabled.",
+ ));
+ }
+
+ let alias: Option<Box<RoomAliasId>> =
+ body.room_alias_name
+ .as_ref()
+ .map_or(Ok(None), |localpart| {
+ // TODO: Check for invalid characters and maximum length
+ let alias =
+ RoomAliasId::parse(format!("#{}:{}", localpart, db.globals.server_name()))
+ .map_err(|_| {
+ Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias.")
+ })?;
+
+ if db.rooms.id_from_alias(&alias)?.is_some() {
+ Err(Error::BadRequest(
+ ErrorKind::RoomInUse,
+ "Room alias already exists.",
+ ))
+ } else {
+ Ok(Some(alias))
+ }
+ })?;
+
+ let room_version = match body.room_version.clone() {
+ Some(room_version) => {
+ if db.rooms.is_supported_version(&db, &room_version) {
+ room_version
+ } else {
+ return Err(Error::BadRequest(
+ ErrorKind::UnsupportedRoomVersion,
+ "This server does not support that room version.",
+ ));
+ }
+ }
+ None => db.globals.default_room_version(),
+ };
+
+ let content = match &body.creation_content {
+ Some(content) => {
+ let mut content = content
+ .deserialize_as::<CanonicalJsonObject>()
+ .expect("Invalid creation content");
+ content.insert(
+ "creator".into(),
+ json!(&sender_user).try_into().map_err(|_| {
+ Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
+ })?,
+ );
+ content.insert(
+ "room_version".into(),
+ json!(room_version.as_str()).try_into().map_err(|_| {
+ Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
+ })?,
+ );
+ content
+ }
+ None => {
+ let mut content = serde_json::from_str::<CanonicalJsonObject>(
+ to_raw_value(&RoomCreateEventContent::new(sender_user.clone()))
+ .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))?
+ .get(),
+ )
+ .unwrap();
+ content.insert(
+ "room_version".into(),
+ json!(room_version.as_str()).try_into().map_err(|_| {
+ Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
+ })?,
+ );
+ content
+ }
+ };
+
+ // Validate creation content
+ let de_result = serde_json::from_str::<CanonicalJsonObject>(
+ to_raw_value(&content)
+ .expect("Invalid creation content")
+ .get(),
+ );
+
+ if de_result.is_err() {
+ return Err(Error::BadRequest(
+ ErrorKind::BadJson,
+ "Invalid creation content",
+ ));
+ }
+
+ // 1. The room create event
+ db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: RoomEventType::RoomCreate,
+ content: to_raw_value(&content).expect("event is valid, we just created it"),
+ unsigned: None,
+ state_key: Some("".to_owned()),
+ redacts: None,
+ },
+ sender_user,
+ &room_id,
+ &db,
+ &state_lock,
+ )?;
+
+ // 2. Let the room creator join
+ db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: RoomEventType::RoomMember,
+ content: to_raw_value(&RoomMemberEventContent {
+ membership: MembershipState::Join,
+ displayname: db.users.displayname(sender_user)?,
+ avatar_url: db.users.avatar_url(sender_user)?,
+ is_direct: Some(body.is_direct),
+ third_party_invite: None,
+ blurhash: db.users.blurhash(sender_user)?,
+ reason: None,
+ join_authorized_via_users_server: None,
+ })
+ .expect("event is valid, we just created it"),
+ unsigned: None,
+ state_key: Some(sender_user.to_string()),
+ redacts: None,
+ },
+ sender_user,
+ &room_id,
+ &db,
+ &state_lock,
+ )?;
+
+ // 3. Power levels
+
+ // Figure out preset. We need it for preset specific events
+ let preset = body
+ .preset
+ .clone()
+ .unwrap_or_else(|| match &body.visibility {
+ room::Visibility::Private => RoomPreset::PrivateChat,
+ room::Visibility::Public => RoomPreset::PublicChat,
+ _ => RoomPreset::PrivateChat, // Room visibility should not be custom
+ });
+
+ let mut users = BTreeMap::new();
+ users.insert(sender_user.clone(), int!(100));
+
+ if preset == RoomPreset::TrustedPrivateChat {
+ for invite_ in &body.invite {
+ users.insert(invite_.clone(), int!(100));
+ }
+ }
+
+ let mut power_levels_content = serde_json::to_value(RoomPowerLevelsEventContent {
+ users,
+ ..Default::default()
+ })
+ .expect("event is valid, we just created it");
+
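+ // Shallow-merge the client's power_level_content_override on top of the
+ // defaults, one top-level key at a time.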
+ if let Some(power_level_content_override) = &body.power_level_content_override {
+ let json: JsonObject = serde_json::from_str(power_level_content_override.json().get())
+ .map_err(|_| {
+ Error::BadRequest(ErrorKind::BadJson, "Invalid power_level_content_override.")
+ })?;
+
+ for (key, value) in json {
+ power_levels_content[key] = value;
+ }
+ }
+
+ db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: RoomEventType::RoomPowerLevels,
+ content: to_raw_value(&power_levels_content)
+ .expect("to_raw_value always works on serde_json::Value"),
+ unsigned: None,
+ state_key: Some("".to_owned()),
+ redacts: None,
+ },
+ sender_user,
+ &room_id,
+ &db,
+ &state_lock,
+ )?;
+
+ // 4. Canonical room alias
+ if let Some(room_alias_id) = &alias {
+ db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: RoomEventType::RoomCanonicalAlias,
+ content: to_raw_value(&RoomCanonicalAliasEventContent {
+ alias: Some(room_alias_id.to_owned()),
+ alt_aliases: vec![],
+ })
+ .expect("We checked that alias earlier, it must be fine"),
+ unsigned: None,
+ state_key: Some("".to_owned()),
+ redacts: None,
+ },
+ sender_user,
+ &room_id,
+ &db,
+ &state_lock,
+ )?;
+ }
+
+ // 5. Events set by preset
+
+ // 5.1 Join Rules
+ db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: RoomEventType::RoomJoinRules,
+ content: to_raw_value(&RoomJoinRulesEventContent::new(match preset {
+ RoomPreset::PublicChat => JoinRule::Public,
+ // according to spec "invite" is the default
+ _ => JoinRule::Invite,
+ }))
+ .expect("event is valid, we just created it"),
+ unsigned: None,
+ state_key: Some("".to_owned()),
+ redacts: None,
+ },
+ sender_user,
+ &room_id,
+ &db,
+ &state_lock,
+ )?;
+
+ // 5.2 History Visibility
+ db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: RoomEventType::RoomHistoryVisibility,
+ content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
+ HistoryVisibility::Shared,
+ ))
+ .expect("event is valid, we just created it"),
+ unsigned: None,
+ state_key: Some("".to_owned()),
+ redacts: None,
+ },
+ sender_user,
+ &room_id,
+ &db,
+ &state_lock,
+ )?;
+
+ // 5.3 Guest Access
+ db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: RoomEventType::RoomGuestAccess,
+ content: to_raw_value(&RoomGuestAccessEventContent::new(match preset {
+ RoomPreset::PublicChat => GuestAccess::Forbidden,
+ _ => GuestAccess::CanJoin,
+ }))
+ .expect("event is valid, we just created it"),
+ unsigned: None,
+ state_key: Some("".to_owned()),
+ redacts: None,
+ },
+ sender_user,
+ &room_id,
+ &db,
+ &state_lock,
+ )?;
+
+ // 6. Events listed in initial_state
+ for event in &body.initial_state {
+ let mut pdu_builder = event.deserialize_as::<PduBuilder>().map_err(|e| {
+ warn!("Invalid initial state event: {:?}", e);
+ Error::BadRequest(ErrorKind::InvalidParam, "Invalid initial state event.")
+ })?;
+
+ // Implicit state key defaults to ""
+ pdu_builder.state_key.get_or_insert_with(|| "".to_owned());
+
+ // Silently skip encryption events if they are not allowed
+ if pdu_builder.event_type == RoomEventType::RoomEncryption && !db.globals.allow_encryption()
+ {
+ continue;
+ }
+
+ db.rooms
+ .build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock)?;
+ }
+
+ // 7. Events implied by name and topic
+ if let Some(name) = &body.name {
+ db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: RoomEventType::RoomName,
+ content: to_raw_value(&RoomNameEventContent::new(Some(name.clone())))
+ .expect("event is valid, we just created it"),
+ unsigned: None,
+ state_key: Some("".to_owned()),
+ redacts: None,
+ },
+ sender_user,
+ &room_id,
+ &db,
+ &state_lock,
+ )?;
+ }
+
+ if let Some(topic) = &body.topic {
+ db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: RoomEventType::RoomTopic,
+ content: to_raw_value(&RoomTopicEventContent {
+ topic: topic.clone(),
+ })
+ .expect("event is valid, we just created it"),
+ unsigned: None,
+ state_key: Some("".to_owned()),
+ redacts: None,
+ },
+ sender_user,
+ &room_id,
+ &db,
+ &state_lock,
+ )?;
+ }
+
+ // 8. Events implied by invite (and TODO: invite_3pid)
+ drop(state_lock);
+ for user_id in &body.invite {
+ let _ = invite_helper(sender_user, user_id, &room_id, &db, body.is_direct).await;
+ }
+
+ // Homeserver specific stuff
+ if let Some(alias) = alias {
+ db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?;
+ }
+
+ if body.visibility == room::Visibility::Public {
+ db.rooms.set_public(&room_id, true)?;
+ }
+
+ info!("{} created a room", sender_user);
+
+ db.flush()?;
+
+ Ok(create_room::v3::Response::new(room_id))
+}
+
+/// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}`
+///
+/// Gets a single event.
+///
+/// - You have to currently be joined to the room (TODO: Respect history visibility)
+pub async fn get_room_event_route(
+ db: DatabaseGuard,
+ body: Ruma<get_room_event::v3::IncomingRequest>,
+) -> Result<get_room_event::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ if !db.rooms.is_joined(sender_user, &body.room_id)? {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "You don't have permission to view this room.",
+ ));
+ }
+
+ Ok(get_room_event::v3::Response {
+ event: db
+ .rooms
+ .get_pdu(&body.event_id)?
+ .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?
+ .to_room_event(),
+ })
+}
+
+/// # `GET /_matrix/client/r0/rooms/{roomId}/aliases`
+///
+/// Lists all aliases of the room.
+///
+/// - Only users joined to the room are allowed to call this (TODO: allow any user to call it if history_visibility is world readable)
+pub async fn get_room_aliases_route(
+ db: DatabaseGuard,
+ body: Ruma<aliases::v3::IncomingRequest>,
+) -> Result<aliases::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ if !db.rooms.is_joined(sender_user, &body.room_id)? {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "You don't have permission to view this room.",
+ ));
+ }
+
+ Ok(aliases::v3::Response {
+ aliases: db
+ .rooms
+ .room_aliases(&body.room_id)
+ .filter_map(|a| a.ok())
+ .collect(),
+ })
+}
+
+/// # `POST /_matrix/client/r0/rooms/{roomId}/upgrade`
+///
+/// Upgrades the room.
+///
+/// - Creates a replacement room
+/// - Sends a tombstone event into the current room
+/// - Sender user joins the room
+/// - Transfers some state events
+/// - Moves local aliases
+/// - Modifies old room power levels to prevent users from speaking
+pub async fn upgrade_room_route(
+ db: DatabaseGuard,
+ body: Ruma<upgrade_room::v3::IncomingRequest>,
+) -> Result<upgrade_room::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ if !db.rooms.is_supported_version(&db, &body.new_version) {
+ return Err(Error::BadRequest(
+ ErrorKind::UnsupportedRoomVersion,
+ "This server does not support that room version.",
+ ));
+ }
+
+ // Create a replacement room
+ let replacement_room = RoomId::new(db.globals.server_name());
+ db.rooms
+ .get_or_create_shortroomid(&replacement_room, &db.globals)?;
+
+ let mutex_state = Arc::clone(
+ db.globals
+ .roomid_mutex_state
+ .write()
+ .unwrap()
+ .entry(body.room_id.clone())
+ .or_default(),
+ );
+ let state_lock = mutex_state.lock().await;
+
+ // Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further
+ // Fail if the sender does not have the required permissions
+ let tombstone_event_id = db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: RoomEventType::RoomTombstone,
+ content: to_raw_value(&RoomTombstoneEventContent {
+ body: "This room has been replaced".to_owned(),
+ replacement_room: replacement_room.clone(),
+ })
+ .expect("event is valid, we just created it"),
+ unsigned: None,
+ state_key: Some("".to_owned()),
+ redacts: None,
+ },
+ sender_user,
+ &body.room_id,
+ &db,
+ &state_lock,
+ )?;
+
+ // Change lock to replacement room
+ drop(state_lock);
+ let mutex_state = Arc::clone(
+ db.globals
+ .roomid_mutex_state
+ .write()
+ .unwrap()
+ .entry(replacement_room.clone())
+ .or_default(),
+ );
+ let state_lock = mutex_state.lock().await;
+
+ // Get the old room creation event
+ let mut create_event_content = serde_json::from_str::<CanonicalJsonObject>(
+ db.rooms
+ .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")?
+ .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
+ .content
+ .get(),
+ )
+ .map_err(|_| Error::bad_database("Invalid room event in database."))?;
+
+ // Use the m.room.tombstone event as the predecessor
+ let predecessor = Some(ruma::events::room::create::PreviousRoom::new(
+ body.room_id.clone(),
+ (*tombstone_event_id).to_owned(),
+ ));
+
+ // Send a m.room.create event containing a predecessor field and the applicable room_version
+ create_event_content.insert(
+ "creator".into(),
+ json!(&sender_user)
+ .try_into()
+ .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?,
+ );
+ create_event_content.insert(
+ "room_version".into(),
+ json!(&body.new_version)
+ .try_into()
+ .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?,
+ );
+ create_event_content.insert(
+ "predecessor".into(),
+ json!(predecessor)
+ .try_into()
+ .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?,
+ );
+
+ // Validate creation event content
+ let de_result = serde_json::from_str::<CanonicalJsonObject>(
+ to_raw_value(&create_event_content)
+ .expect("Error forming creation event")
+ .get(),
+ );
+
+ if de_result.is_err() {
+ return Err(Error::BadRequest(
+ ErrorKind::BadJson,
+ "Error forming creation event",
+ ));
+ }
+
+ db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: RoomEventType::RoomCreate,
+ content: to_raw_value(&create_event_content)
+ .expect("event is valid, we just created it"),
+ unsigned: None,
+ state_key: Some("".to_owned()),
+ redacts: None,
+ },
+ sender_user,
+ &replacement_room,
+ &db,
+ &state_lock,
+ )?;
+
+ // Join the new room
+ db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: RoomEventType::RoomMember,
+ content: to_raw_value(&RoomMemberEventContent {
+ membership: MembershipState::Join,
+ displayname: db.users.displayname(sender_user)?,
+ avatar_url: db.users.avatar_url(sender_user)?,
+ is_direct: None,
+ third_party_invite: None,
+ blurhash: db.users.blurhash(sender_user)?,
+ reason: None,
+ join_authorized_via_users_server: None,
+ })
+ .expect("event is valid, we just created it"),
+ unsigned: None,
+ state_key: Some(sender_user.to_string()),
+ redacts: None,
+ },
+ sender_user,
+ &replacement_room,
+ &db,
+ &state_lock,
+ )?;
+
+ // Recommended transferable state events list from the specs
+ let transferable_state_events = vec![
+ StateEventType::RoomServerAcl,
+ StateEventType::RoomEncryption,
+ StateEventType::RoomName,
+ StateEventType::RoomAvatar,
+ StateEventType::RoomTopic,
+ StateEventType::RoomGuestAccess,
+ StateEventType::RoomHistoryVisibility,
+ StateEventType::RoomJoinRules,
+ StateEventType::RoomPowerLevels,
+ ];
+
+ // Replicate transferable state events to the new room
+ for event_type in transferable_state_events {
+ let event_content = match db.rooms.room_state_get(&body.room_id, &event_type, "")? {
+ Some(v) => v.content.clone(),
+ None => continue, // Skipping missing events.
+ };
+
+ db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: event_type.to_string().into(),
+ content: event_content,
+ unsigned: None,
+ state_key: Some("".to_owned()),
+ redacts: None,
+ },
+ sender_user,
+ &replacement_room,
+ &db,
+ &state_lock,
+ )?;
+ }
+
+ // Moves any local aliases to the new room
+ for alias in db.rooms.room_aliases(&body.room_id).filter_map(|r| r.ok()) {
+ db.rooms
+ .set_alias(&alias, Some(&replacement_room), &db.globals)?;
+ }
+
+ // Get the old room power levels
+ let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str(
+ db.rooms
+ .room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "")?
+ .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
+ .content
+ .get(),
+ )
+ .map_err(|_| Error::bad_database("Invalid room event in database."))?;
+
+ // Set events_default and invite to the greater of 50 and users_default + 1
+ let new_level = max(int!(50), power_levels_event_content.users_default + int!(1));
+ power_levels_event_content.events_default = new_level;
+ power_levels_event_content.invite = new_level;
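+ // e.g. users_default = 0 yields new_level = 50 (users at power 50+ can still
+ // act), while users_default = 100 yields new_level = 101, locking out
+ // everyone at or below the old default.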
+
+ // Modify the power levels in the old room to prevent sending of events and inviting new users
+ let _ = db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: RoomEventType::RoomPowerLevels,
+ content: to_raw_value(&power_levels_event_content)
+ .expect("event is valid, we just created it"),
+ unsigned: None,
+ state_key: Some("".to_owned()),
+ redacts: None,
+ },
+ sender_user,
+ &body.room_id,
+ &db,
+ &state_lock,
+ )?;
+
+ drop(state_lock);
+
+ db.flush()?;
+
+ // Return the replacement room id
+ Ok(upgrade_room::v3::Response { replacement_room })
+}
+
+ /// Returns the room's version.
+ #[tracing::instrument(skip(self))]
+ pub fn get_room_version(&self, room_id: &RoomId) -> Result<RoomVersionId> {
+ let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?;
+
+ let create_event_content: Option<RoomCreateEventContent> = create_event
+ .as_ref()
+ .map(|create_event| {
+ serde_json::from_str(create_event.content.get()).map_err(|e| {
+ warn!("Invalid create event: {}", e);
+ Error::bad_database("Invalid create event in db.")
+ })
+ })
+ .transpose()?;
+ let room_version = create_event_content
+ .map(|create_event| create_event.room_version)
+ .ok_or_else(|| Error::BadDatabase("Invalid room version"))?;
+ Ok(room_version)
+ }
+
diff --git a/src/api/client_server/search.rs b/src/api/client_server/search.rs
new file mode 100644
index 0000000..686e3b5
--- /dev/null
+++ b/src/api/client_server/search.rs
@@ -0,0 +1,119 @@
+use crate::{database::DatabaseGuard, Error, Result, Ruma};
+use ruma::api::client::{
+ error::ErrorKind,
+ search::search_events::{
+ self,
+ v3::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult},
+ },
+};
+
+use std::collections::BTreeMap;
+
+/// # `POST /_matrix/client/r0/search`
+///
+/// Searches rooms for messages.
+///
+/// - Only works if the user is currently joined to the room (TODO: Respect history visibility)
+pub async fn search_events_route(
+ db: DatabaseGuard,
+ body: Ruma<search_events::v3::IncomingRequest>,
+) -> Result<search_events::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let search_criteria = body
+ .search_categories
+ .room_events
+ .as_ref()
+ .ok_or(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Missing search_categories.room_events.",
+ ))?;
+ let filter = &search_criteria.filter;
+
+ let room_ids = filter.rooms.clone().unwrap_or_else(|| {
+ db.rooms
+ .rooms_joined(sender_user)
+ .filter_map(|r| r.ok())
+ .collect()
+ });
+
+ let limit = filter.limit.map_or(10, |l| u64::from(l) as usize);
+
+ let mut searches = Vec::new();
+
+ for room_id in room_ids {
+ if !db.rooms.is_joined(sender_user, &room_id)? {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "You don't have permission to view this room.",
+ ));
+ }
+
+ if let Some(search) = db
+ .rooms
+ .search_pdus(&room_id, &search_criteria.search_term)?
+ {
+ searches.push(search.0.peekable());
+ }
+ }
+
+ let skip = match body.next_batch.as_ref().map(|s| s.parse()) {
+ Some(Ok(s)) => s,
+ Some(Err(_)) => {
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Invalid next_batch token.",
+ ))
+ }
+ None => 0, // Default to the start
+ };
+
+ let mut results = Vec::new();
+ for _ in 0..skip + limit {
+ if let Some(s) = searches
+ .iter_mut()
+ .map(|s| (s.peek().cloned(), s))
+ .max_by_key(|(peek, _)| peek.clone())
+ .and_then(|(_, i)| i.next())
+ {
+ results.push(s);
+ }
+ }
+
+ let results: Vec<_> = results
+ .iter()
+ .map(|result| {
+ Ok::<_, Error>(SearchResult {
+ context: EventContextResult {
+ end: None,
+ events_after: Vec::new(),
+ events_before: Vec::new(),
+ profile_info: BTreeMap::new(),
+ start: None,
+ },
+ rank: None,
+ result: db
+ .rooms
+ .get_pdu_from_id(result)?
+ .map(|pdu| pdu.to_room_event()),
+ })
+ })
+ .filter_map(|r| r.ok())
+ .skip(skip)
+ .take(limit)
+ .collect();
+
+ let next_batch = if results.len() < limit as usize {
+ None
+ } else {
+ Some((skip + limit).to_string())
+ };
+
+ Ok(search_events::v3::Response::new(ResultCategories {
+ room_events: ResultRoomEvents {
+ count: Some((results.len() as u32).into()), // TODO: set this to none. Element shouldn't depend on it
+ groups: BTreeMap::new(), // TODO
+ next_batch,
+ results,
+ state: BTreeMap::new(), // TODO
+ highlights: search_criteria
+ .search_term
+ .split_terminator(|c: char| !c.is_alphanumeric())
+ .map(str::to_lowercase)
+ .collect(),
+ },
+ }))
+}
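+
+// A minimal sketch (not part of the original code) of the merge strategy used
+// above: each room search yields PDU ids in descending order, and repeatedly
+// advancing the iterator with the largest peeked head produces one globally
+// descending stream. The byte values below are made-up stand-ins for PDU ids.
+#[cfg(test)]
+mod merge_sketch {
+ #[test]
+ fn merge_by_largest_peeked_head() {
+ let mut iters: Vec<_> = vec![vec![9u8, 5, 1], vec![8, 4], vec![7, 6]]
+ .into_iter()
+ .map(|v| v.into_iter().peekable())
+ .collect();
+ let mut merged = Vec::new();
+ // Same shape as the loop in search_events_route: peek every head and
+ // advance the iterator holding the largest one.
+ while let Some((_, iter)) = iters
+ .iter_mut()
+ .map(|i| (i.peek().copied(), i))
+ .filter(|(peek, _)| peek.is_some())
+ .max_by_key(|(peek, _)| *peek)
+ {
+ merged.push(iter.next().unwrap());
+ }
+ assert_eq!(merged, vec![9, 8, 7, 6, 5, 4, 1]);
+ }
+}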
diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs
new file mode 100644
index 0000000..c2a79ca
--- /dev/null
+++ b/src/api/client_server/session.rs
@@ -0,0 +1,200 @@
+use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH};
+use crate::{database::DatabaseGuard, utils, Error, Result, Ruma};
+use ruma::{
+ api::client::{
+ error::ErrorKind,
+ session::{get_login_types, login, logout, logout_all},
+ uiaa::IncomingUserIdentifier,
+ },
+ UserId,
+};
+use serde::Deserialize;
+use tracing::info;
+
+#[derive(Debug, Deserialize)]
+struct Claims {
+ sub: String,
+ //exp: usize,
+}
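+
+// A minimal sketch (not part of the original code) of the token login flow
+// below: the server decodes a JWT with its configured key and uses the `sub`
+// claim as the localpart. Secret and claim values here are made up, and the
+// exact jsonwebtoken API may differ between crate versions.
+#[cfg(test)]
+mod jwt_sketch {
+ use super::Claims;
+
+ #[test]
+ fn sub_claim_is_the_localpart() {
+ let token = jsonwebtoken::encode(
+ &jsonwebtoken::Header::default(),
+ &serde_json::json!({ "sub": "alice", "exp": 4102444800u64 }),
+ &jsonwebtoken::EncodingKey::from_secret(b"example-secret"),
+ )
+ .unwrap();
+ let decoded = jsonwebtoken::decode::<Claims>(
+ &token,
+ &jsonwebtoken::DecodingKey::from_secret(b"example-secret"),
+ &jsonwebtoken::Validation::default(),
+ )
+ .unwrap();
+ assert_eq!(decoded.claims.sub, "alice");
+ }
+}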
+
+/// # `GET /_matrix/client/r0/login`
+///
+/// Get the supported login types of this server. One of these should be used as the `type` field
+/// when logging in.
+pub async fn get_login_types_route(
+ _body: Ruma<get_login_types::v3::IncomingRequest>,
+) -> Result<get_login_types::v3::Response> {
+ Ok(get_login_types::v3::Response::new(vec![
+ get_login_types::v3::LoginType::Password(Default::default()),
+ ]))
+}
+
+/// # `POST /_matrix/client/r0/login`
+///
+/// Authenticates the user and returns an access token the client can use in subsequent requests.
+///
+/// - The user needs to authenticate using their password (or if enabled using a json web token)
+/// - If `device_id` is known: invalidates old access token of that device
+/// - If `device_id` is unknown: creates a new device
+/// - Returns access token that is associated with the user and device
+///
+/// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_login_types_route.html) to see
+/// supported login types.
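+///
+/// A hypothetical password-login request body (all values made up):
+///
+/// ```json
+/// {
+///   "type": "m.login.password",
+///   "identifier": { "type": "m.id.user", "user": "alice" },
+///   "password": "correct horse battery staple",
+///   "initial_device_display_name": "Laptop"
+/// }
+/// ```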
+pub async fn login_route(
+ db: DatabaseGuard,
+ body: Ruma<login::v3::IncomingRequest>,
+) -> Result<login::v3::Response> {
+ // Validate login method
+ // TODO: Other login methods
+ let user_id = match &body.login_info {
+ login::v3::IncomingLoginInfo::Password(login::v3::IncomingPassword {
+ identifier,
+ password,
+ }) => {
+ let username = if let IncomingUserIdentifier::UserIdOrLocalpart(user_id) = identifier {
+ user_id.to_lowercase()
+ } else {
+ return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
+ };
+ let user_id =
+ UserId::parse_with_server_name(username.to_owned(), db.globals.server_name())
+ .map_err(|_| {
+ Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
+ })?;
+ let hash = db.users.password_hash(&user_id)?.ok_or(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "Wrong username or password.",
+ ))?;
+
+ if hash.is_empty() {
+ return Err(Error::BadRequest(
+ ErrorKind::UserDeactivated,
+ "The user has been deactivated",
+ ));
+ }
+
+ let hash_matches = argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false);
+
+ if !hash_matches {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "Wrong username or password.",
+ ));
+ }
+
+ user_id
+ }
+ login::v3::IncomingLoginInfo::Token(login::v3::IncomingToken { token }) => {
+ if let Some(jwt_decoding_key) = db.globals.jwt_decoding_key() {
+ let token = jsonwebtoken::decode::<Claims>(
+ token,
+ jwt_decoding_key,
+ &jsonwebtoken::Validation::default(),
+ )
+ .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid."))?;
+ let username = token.claims.sub;
+ UserId::parse_with_server_name(username, db.globals.server_name()).map_err(
+ |_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."),
+ )?
+ } else {
+ return Err(Error::BadRequest(
+ ErrorKind::Unknown,
+ "Token login is not supported (server has no jwt decoding key).",
+ ));
+ }
+ }
+ _ => {
+ return Err(Error::BadRequest(
+ ErrorKind::Unknown,
+ "Unsupported login type.",
+ ));
+ }
+ };
+
+ // Generate new device id if the user didn't specify one
+ let device_id = body
+ .device_id
+ .clone()
+ .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());
+
+ // Generate a new token for the device
+ let token = utils::random_string(TOKEN_LENGTH);
+
+ // Determine if device_id was provided and exists in the db for this user
+ let device_exists = body.device_id.as_ref().map_or(false, |device_id| {
+ db.users
+ .all_device_ids(&user_id)
+ .any(|x| x.as_ref().map_or(false, |v| v == device_id))
+ });
+
+ if device_exists {
+ db.users.set_token(&user_id, &device_id, &token)?;
+ } else {
+ db.users.create_device(
+ &user_id,
+ &device_id,
+ &token,
+ body.initial_device_display_name.clone(),
+ )?;
+ }
+
+ info!("{} logged in", user_id);
+
+ db.flush()?;
+
+ Ok(login::v3::Response {
+ user_id,
+ access_token: token,
+ home_server: Some(db.globals.server_name().to_owned()),
+ device_id,
+ well_known: None,
+ })
+}
+
+/// # `POST /_matrix/client/r0/logout`
+///
+/// Log out the current device.
+///
+/// - Invalidates access token
+/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
+/// - Forgets to-device events
+/// - Triggers device list updates
+pub async fn logout_route(
+ db: DatabaseGuard,
+ body: Ruma<logout::v3::Request>,
+) -> Result<logout::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+ let sender_device = body.sender_device.as_ref().expect("user is authenticated");
+
+ db.users.remove_device(sender_user, sender_device)?;
+
+ db.flush()?;
+
+ Ok(logout::v3::Response::new())
+}
+
+/// # `POST /_matrix/client/r0/logout/all`
+///
+/// Log out all devices of this user.
+///
+/// - Invalidates all access tokens
+/// - Deletes all device metadata (device id, device display name, last seen ip, last seen ts)
+/// - Forgets all to-device events
+/// - Triggers device list updates
+///
+/// Note: This is equivalent to calling [`POST /_matrix/client/r0/logout`](fn.logout_route.html)
+/// from each device of this user.
+pub async fn logout_all_route(
+ db: DatabaseGuard,
+ body: Ruma<logout_all::v3::Request>,
+) -> Result<logout_all::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ for device_id in db.users.all_device_ids(sender_user).flatten() {
+ db.users.remove_device(sender_user, &device_id)?;
+ }
+
+ db.flush()?;
+
+ Ok(logout_all::v3::Response::new())
+}
diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs
new file mode 100644
index 0000000..4df953c
--- /dev/null
+++ b/src/api/client_server/state.rs
@@ -0,0 +1,299 @@
+use std::sync::Arc;
+
+use crate::{
+ database::DatabaseGuard, pdu::PduBuilder, Database, Error, Result, Ruma, RumaResponse,
+};
+use ruma::{
+ api::client::{
+ error::ErrorKind,
+ state::{get_state_events, get_state_events_for_key, send_state_event},
+ },
+ events::{
+ room::{
+ canonical_alias::RoomCanonicalAliasEventContent,
+ history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
+ },
+ AnyStateEventContent, StateEventType,
+ },
+ serde::Raw,
+ EventId, RoomId, UserId,
+};
+
+/// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}`
+///
+/// Sends a state event into the room.
+///
+/// - The only requirement for the content is that it has to be valid JSON
+/// - Tries to send the event into the room, auth rules will determine if it is allowed
+/// - If the event sets a new canonical_alias: rejects if the alias is incorrect
+pub async fn send_state_event_for_key_route(
+ db: DatabaseGuard,
+ body: Ruma<send_state_event::v3::IncomingRequest>,
+) -> Result<send_state_event::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let event_id = send_state_event_for_key_helper(
+ &db,
+ sender_user,
+ &body.room_id,
+ &body.event_type,
+ &body.body.body, // Yes, I hate it too
+ body.state_key.to_owned(),
+ )
+ .await?;
+
+ db.flush()?;
+
+ let event_id = (*event_id).to_owned();
+ Ok(send_state_event::v3::Response { event_id })
+}
+
+/// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}`
+///
+/// Sends a state event into the room.
+///
+/// - The only requirement for the content is that it has to be valid JSON
+/// - Tries to send the event into the room, auth rules will determine if it is allowed
+/// - If the event sets a new canonical_alias: rejects if the alias is incorrect
+pub async fn send_state_event_for_empty_key_route(
+ db: DatabaseGuard,
+ body: Ruma<send_state_event::v3::IncomingRequest>,
+) -> Result<RumaResponse<send_state_event::v3::Response>> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ // Forbid m.room.encryption if encryption is disabled
+ if body.event_type == StateEventType::RoomEncryption && !db.globals.allow_encryption() {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "Encryption has been disabled",
+ ));
+ }
+
+ let event_id = send_state_event_for_key_helper(
+ &db,
+ sender_user,
+ &body.room_id,
+ &body.event_type.to_string().into(),
+ &body.body.body,
+ body.state_key.to_owned(),
+ )
+ .await?;
+
+ db.flush()?;
+
+ let event_id = (*event_id).to_owned();
+ Ok(send_state_event::v3::Response { event_id }.into())
+}
+
+/// # `GET /_matrix/client/r0/rooms/{roomid}/state`
+///
+/// Get all state events for a room.
+///
+/// - If not joined: Only works if current room history visibility is world readable
+pub async fn get_state_events_route(
+ db: DatabaseGuard,
+ body: Ruma<get_state_events::v3::IncomingRequest>,
+) -> Result<get_state_events::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ #[allow(clippy::blocks_in_if_conditions)]
+ // Users not in the room should not be able to access the state unless history_visibility is
+ // WorldReadable
+ if !db.rooms.is_joined(sender_user, &body.room_id)?
+ && !matches!(
+ db.rooms
+ .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")?
+ .map(|event| {
+ serde_json::from_str(event.content.get())
+ .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility)
+ .map_err(|_| {
+ Error::bad_database(
+ "Invalid room history visibility event in database.",
+ )
+ })
+ }),
+ Some(Ok(HistoryVisibility::WorldReadable))
+ )
+ {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "You don't have permission to view the room state.",
+ ));
+ }
+
+ Ok(get_state_events::v3::Response {
+ room_state: db
+ .rooms
+ .room_state_full(&body.room_id)
+ .await?
+ .values()
+ .map(|pdu| pdu.to_state_event())
+ .collect(),
+ })
+}
+
+/// # `GET /_matrix/client/r0/rooms/{roomid}/state/{eventType}/{stateKey}`
+///
+/// Get single state event of a room.
+///
+/// - If not joined: Only works if current room history visibility is world readable
+pub async fn get_state_events_for_key_route(
+ db: DatabaseGuard,
+ body: Ruma<get_state_events_for_key::v3::IncomingRequest>,
+) -> Result<get_state_events_for_key::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ #[allow(clippy::blocks_in_if_conditions)]
+ // Users not in the room should not be able to access the state unless history_visibility is
+ // WorldReadable
+ if !db.rooms.is_joined(sender_user, &body.room_id)?
+ && !matches!(
+ db.rooms
+ .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")?
+ .map(|event| {
+ serde_json::from_str(event.content.get())
+ .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility)
+ .map_err(|_| {
+ Error::bad_database(
+ "Invalid room history visibility event in database.",
+ )
+ })
+ }),
+ Some(Ok(HistoryVisibility::WorldReadable))
+ )
+ {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "You don't have permission to view the room state.",
+ ));
+ }
+
+ let event = db
+ .rooms
+ .room_state_get(&body.room_id, &body.event_type, &body.state_key)?
+ .ok_or(Error::BadRequest(
+ ErrorKind::NotFound,
+ "State event not found.",
+ ))?;
+
+ Ok(get_state_events_for_key::v3::Response {
+ content: serde_json::from_str(event.content.get())
+ .map_err(|_| Error::bad_database("Invalid event content in database"))?,
+ })
+}
+
+/// # `GET /_matrix/client/r0/rooms/{roomid}/state/{eventType}`
+///
+/// Get single state event of a room.
+///
+/// - If not joined: Only works if current room history visibility is world readable
+pub async fn get_state_events_for_empty_key_route(
+ db: DatabaseGuard,
+ body: Ruma<get_state_events_for_key::v3::IncomingRequest>,
+) -> Result<RumaResponse<get_state_events_for_key::v3::Response>> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ #[allow(clippy::blocks_in_if_conditions)]
+ // Users not in the room should not be able to access the state unless history_visibility is
+ // WorldReadable
+ if !db.rooms.is_joined(sender_user, &body.room_id)?
+ && !matches!(
+ db.rooms
+ .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")?
+ .map(|event| {
+ serde_json::from_str(event.content.get())
+ .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility)
+ .map_err(|_| {
+ Error::bad_database(
+ "Invalid room history visibility event in database.",
+ )
+ })
+ }),
+ Some(Ok(HistoryVisibility::WorldReadable))
+ )
+ {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "You don't have permission to view the room state.",
+ ));
+ }
+
+ let event = db
+ .rooms
+ .room_state_get(&body.room_id, &body.event_type, "")?
+ .ok_or(Error::BadRequest(
+ ErrorKind::NotFound,
+ "State event not found.",
+ ))?;
+
+ Ok(get_state_events_for_key::v3::Response {
+ content: serde_json::from_str(event.content.get())
+ .map_err(|_| Error::bad_database("Invalid event content in database"))?,
+ }
+ .into())
+}
+
+async fn send_state_event_for_key_helper(
+ db: &Database,
+ sender: &UserId,
+ room_id: &RoomId,
+ event_type: &StateEventType,
+ json: &Raw<AnyStateEventContent>,
+ state_key: String,
+) -> Result<Arc<EventId>> {
+ let sender_user = sender;
+
+ // TODO: Review this check, error if event is unparsable, use event type, allow alias if it
+ // previously existed
+ if let Ok(canonical_alias) =
+ serde_json::from_str::<RoomCanonicalAliasEventContent>(json.json().get())
+ {
+ let mut aliases = canonical_alias.alt_aliases.clone();
+
+ if let Some(alias) = canonical_alias.alias {
+ aliases.push(alias);
+ }
+
+ for alias in aliases {
+ if alias.server_name() != db.globals.server_name()
+ || db
+ .rooms
+ .id_from_alias(&alias)?
+ .filter(|room| room == room_id) // Make sure it's the right room
+ .is_none()
+ {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "You are only allowed to send canonical_alias \
+ events when it's aliases already exists",
+ ));
+ }
+ }
+ }
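+ // e.g. a client must first create the mapping with
+ // `PUT /_matrix/client/r0/directory/room/%23myroom%3Aexample.com` before
+ // `#myroom:example.com` may appear in this room's m.room.canonical_alias
+ // (alias and server name are illustrative).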
+
+ let mutex_state = Arc::clone(
+ db.globals
+ .roomid_mutex_state
+ .write()
+ .unwrap()
+ .entry(room_id.to_owned())
+ .or_default(),
+ );
+ let state_lock = mutex_state.lock().await;
+
+ let event_id = db.rooms.build_and_append_pdu(
+ PduBuilder {
+ event_type: event_type.to_string().into(),
+ content: serde_json::from_str(json.json().get()).expect("content is valid json"),
+ unsigned: None,
+ state_key: Some(state_key),
+ redacts: None,
+ },
+ sender_user,
+ room_id,
+ db,
+ &state_lock,
+ )?;
+
+ Ok(event_id)
+}
diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs
new file mode 100644
index 0000000..0c294b7
--- /dev/null
+++ b/src/api/client_server/sync.rs
@@ -0,0 +1,952 @@
+use crate::{database::DatabaseGuard, Database, Error, Result, Ruma, RumaResponse};
+use ruma::{
+ api::client::{
+ filter::{IncomingFilterDefinition, LazyLoadOptions},
+ sync::sync_events,
+ uiaa::UiaaResponse,
+ },
+ events::{
+ room::member::{MembershipState, RoomMemberEventContent},
+ RoomEventType, StateEventType,
+ },
+ serde::Raw,
+ DeviceId, RoomId, UserId,
+};
+use std::{
+ collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
+ sync::Arc,
+ time::Duration,
+};
+use tokio::sync::watch::Sender;
+use tracing::error;
+
+/// # `GET /_matrix/client/r0/sync`
+///
+/// Synchronize the client's state with the latest state on the server.
+///
+/// - This endpoint takes a `since` parameter which should be the `next_batch` value from a
+/// previous request for incremental syncs.
+///
+/// Calling this endpoint without a `since` parameter returns:
+/// - Some of the most recent events of each timeline
+/// - Notification counts for each room
+/// - Joined and invited member counts, heroes
+/// - All state events
+///
+/// Calling this endpoint with a `since` parameter from a previous `next_batch` returns:
+/// For joined rooms:
+/// - Some of the most recent events of each timeline that happened after `since`
+/// - If the user joined the room after `since`: All state events (unless lazy loading is activated) and
+/// all device list updates in that room
+/// - If the user was already in the room: A list of all events that are in the state now, but were
+/// not in the state at `since`
+/// - If the state we send contains a member event: Joined and invited member counts, heroes
+/// - Device list updates that happened after `since`
+/// - If there are events in the timeline we send or the user updated their read marker: Notification counts
+/// - EDUs that are active now (read receipts, typing updates, presence)
+/// - TODO: Allow multiple sync streams to support Pantalaimon
+///
+/// For invited rooms:
+/// - If the user was invited after `since`: A subset of the state of the room at the point of the invite
+///
+/// For left rooms:
+/// - If the user left after `since`: prev_batch token, empty state (TODO: subset of the state at the point of the leave)
+///
+/// - Sync is handled in an async task, multiple requests from the same device with the same
+/// `since` will be cached
+pub async fn sync_events_route(
+ db: DatabaseGuard,
+ body: Ruma<sync_events::v3::IncomingRequest>,
+) -> Result<sync_events::v3::Response, RumaResponse<UiaaResponse>> {
+ let sender_user = body.sender_user.expect("user is authenticated");
+ let sender_device = body.sender_device.expect("user is authenticated");
+ let body = body.body;
+
+ let arc_db = Arc::new(db);
+
+ let mut rx = match arc_db
+ .globals
+ .sync_receivers
+ .write()
+ .unwrap()
+ .entry((sender_user.clone(), sender_device.clone()))
+ {
+ Entry::Vacant(v) => {
+ let (tx, rx) = tokio::sync::watch::channel(None);
+
+ v.insert((body.since.to_owned(), rx.clone()));
+
+ tokio::spawn(sync_helper_wrapper(
+ Arc::clone(&arc_db),
+ sender_user.clone(),
+ sender_device.clone(),
+ body,
+ tx,
+ ));
+
+ rx
+ }
+ Entry::Occupied(mut o) => {
+ if o.get().0 != body.since {
+ let (tx, rx) = tokio::sync::watch::channel(None);
+
+ o.insert((body.since.clone(), rx.clone()));
+
+ tokio::spawn(sync_helper_wrapper(
+ Arc::clone(&arc_db),
+ sender_user.clone(),
+ sender_device.clone(),
+ body,
+ tx,
+ ));
+
+ rx
+ } else {
+ o.get().1.clone()
+ }
+ }
+ };
+
+ let we_have_to_wait = rx.borrow().is_none();
+ if we_have_to_wait {
+ if let Err(e) = rx.changed().await {
+ error!("Error waiting for sync: {}", e);
+ }
+ }
+
+ let result = match rx
+ .borrow()
+ .as_ref()
+ .expect("When sync channel changes it's always set to some")
+ {
+ Ok(response) => Ok(response.clone()),
+ Err(error) => Err(error.to_response()),
+ };
+
+ result
+}
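+
+// A rough illustration (not part of the original code) of the deduplication
+// above, with made-up tokens: concurrent /sync requests from one device that
+// carry the same `since` share a single watch channel, so sync_helper runs
+// only once for them.
+//
+//   GET /sync?since=s72594  -> Entry::Vacant                  -> spawn helper
+//   GET /sync?since=s72594  -> Entry::Occupied, same `since`  -> reuse receiver
+//   GET /sync?since=s72595  -> Entry::Occupied, new `since`   -> spawn helper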
+
+async fn sync_helper_wrapper(
+ db: Arc<DatabaseGuard>,
+ sender_user: Box<UserId>,
+ sender_device: Box<DeviceId>,
+ body: sync_events::v3::IncomingRequest,
+ tx: Sender<Option<Result<sync_events::v3::Response>>>,
+) {
+ let since = body.since.clone();
+
+ let r = sync_helper(
+ Arc::clone(&db),
+ sender_user.clone(),
+ sender_device.clone(),
+ body,
+ )
+ .await;
+
+ if let Ok((_, caching_allowed)) = r {
+ if !caching_allowed {
+ match db
+ .globals
+ .sync_receivers
+ .write()
+ .unwrap()
+ .entry((sender_user, sender_device))
+ {
+ Entry::Occupied(o) => {
+ // Only remove if the device didn't start a different /sync already
+ if o.get().0 == since {
+ o.remove();
+ }
+ }
+ Entry::Vacant(_) => {}
+ }
+ }
+ }
+
+ drop(db);
+
+ let _ = tx.send(Some(r.map(|(r, _)| r)));
+}
+
+async fn sync_helper(
+ db: Arc<DatabaseGuard>,
+ sender_user: Box<UserId>,
+ sender_device: Box<DeviceId>,
+ body: sync_events::v3::IncomingRequest,
+ // bool = caching allowed
+) -> Result<(sync_events::v3::Response, bool), Error> {
+ use sync_events::v3::{
+ DeviceLists, Ephemeral, GlobalAccountData, IncomingFilter, InviteState, InvitedRoom,
+ JoinedRoom, LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline,
+ ToDevice, UnreadNotificationsCount,
+ };
+
+ // TODO: match body.set_presence {
+ db.rooms.edus.ping_presence(&sender_user)?;
+
+ // Setup watchers, so if there's no response, we can wait for them
+ let watcher = db.watch(&sender_user, &sender_device);
+
+ let next_batch = db.globals.current_count()?;
+ let next_batch_string = next_batch.to_string();
+
+ // Load filter
+ let filter = match body.filter {
+ None => IncomingFilterDefinition::default(),
+ Some(IncomingFilter::FilterDefinition(filter)) => filter,
+ Some(IncomingFilter::FilterId(filter_id)) => db
+ .users
+ .get_filter(&sender_user, &filter_id)?
+ .unwrap_or_default(),
+ };
+
+ let (lazy_load_enabled, lazy_load_send_redundant) = match filter.room.state.lazy_load_options {
+ LazyLoadOptions::Enabled {
+ include_redundant_members: redundant,
+ } => (true, redundant),
+ _ => (false, false),
+ };
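+ // e.g. a filter of {"room":{"state":{"lazy_load_members":true}}} enables lazy
+ // loading, and adding "include_redundant_members":true also resends member
+ // events this device has already received.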
+
+ let mut joined_rooms = BTreeMap::new();
+ let since = body
+ .since
+ .clone()
+ .and_then(|string| string.parse().ok())
+ .unwrap_or(0);
+
+ let mut presence_updates = HashMap::new();
+ let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in
+ let mut device_list_updates = HashSet::new();
+ let mut device_list_left = HashSet::new();
+
+ // Look for device list updates of this account
+ device_list_updates.extend(
+ db.users
+ .keys_changed(&sender_user.to_string(), since, None)
+ .filter_map(|r| r.ok()),
+ );
+
+ let all_joined_rooms = db.rooms.rooms_joined(&sender_user).collect::<Vec<_>>();
+ for room_id in all_joined_rooms {
+ let room_id = room_id?;
+
+ {
+ // Get and drop the lock to wait for remaining operations to finish
+ // This will make sure we have all events until next_batch
+ let mutex_insert = Arc::clone(
+ db.globals
+ .roomid_mutex_insert
+ .write()
+ .unwrap()
+ .entry(room_id.clone())
+ .or_default(),
+ );
+ let insert_lock = mutex_insert.lock().unwrap();
+ drop(insert_lock);
+ }
+
+ let timeline_pdus;
+ let limited;
+ if db.rooms.last_timeline_count(&sender_user, &room_id)? > since {
+ let mut non_timeline_pdus = db
+ .rooms
+ .pdus_until(&sender_user, &room_id, u64::MAX)?
+ .filter_map(|r| {
+ // Filter out buggy events
+ if r.is_err() {
+ error!("Bad pdu in pdus_since: {:?}", r);
+ }
+ r.ok()
+ })
+ .take_while(|(pduid, _)| {
+ db.rooms
+ .pdu_count(pduid)
+ .map_or(false, |count| count > since)
+ });
+
+ // Take the last 10 events for the timeline
+ timeline_pdus = non_timeline_pdus
+ .by_ref()
+ .take(10)
+ .collect::<Vec<_>>()
+ .into_iter()
+ .rev()
+ .collect::<Vec<_>>();
+
+ // The /sync response doesn't always return all messages, so we say the output is
+ // limited when there are leftover events in non_timeline_pdus
+ limited = non_timeline_pdus.next().is_some();
+ } else {
+ timeline_pdus = Vec::new();
+ limited = false;
+ }
+
+ let send_notification_counts = !timeline_pdus.is_empty()
+ || db
+ .rooms
+ .edus
+ .last_privateread_update(&sender_user, &room_id)?
+ > since;
+
+ let mut timeline_users = HashSet::new();
+ for (_, event) in &timeline_pdus {
+ timeline_users.insert(event.sender.as_str().to_owned());
+ }
+
+ db.rooms
+ .lazy_load_confirm_delivery(&sender_user, &sender_device, &room_id, since)?;
+
+ // Database queries:
+
+ let current_shortstatehash = if let Some(s) = db.rooms.current_shortstatehash(&room_id)? {
+ s
+ } else {
+ error!("Room {} has no state", room_id);
+ continue;
+ };
+
+ let since_shortstatehash = db.rooms.get_token_shortstatehash(&room_id, since)?;
+
+ // Calculates joined_member_count, invited_member_count and heroes
+ let calculate_counts = || {
+ let joined_member_count = db.rooms.room_joined_count(&room_id)?.unwrap_or(0);
+ let invited_member_count = db.rooms.room_invited_count(&room_id)?.unwrap_or(0);
+
+ // Recalculate heroes (first 5 members)
+ let mut heroes = Vec::new();
+
+ if joined_member_count + invited_member_count <= 5 {
+ // Go through all PDUs and for each member event, check if the user is still joined or
+ // invited until we have 5 or we reach the end
+
+ for hero in db
+ .rooms
+ .all_pdus(&sender_user, &room_id)?
+ .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
+ .filter(|(_, pdu)| pdu.kind == RoomEventType::RoomMember)
+ .map(|(_, pdu)| {
+ let content: RoomMemberEventContent =
+ serde_json::from_str(pdu.content.get()).map_err(|_| {
+ Error::bad_database("Invalid member event in database.")
+ })?;
+
+ if let Some(state_key) = &pdu.state_key {
+ let user_id = UserId::parse(state_key.clone()).map_err(|_| {
+ Error::bad_database("Invalid UserId in member PDU.")
+ })?;
+
+ // The membership was and still is invite or join
+ if matches!(
+ content.membership,
+ MembershipState::Join | MembershipState::Invite
+ ) && (db.rooms.is_joined(&user_id, &room_id)?
+ || db.rooms.is_invited(&user_id, &room_id)?)
+ {
+ Ok::<_, Error>(Some(state_key.clone()))
+ } else {
+ Ok(None)
+ }
+ } else {
+ Ok(None)
+ }
+ })
+ // Filter out buggy users
+ .filter_map(|u| u.ok())
+ // Filter for possible heroes
+ .flatten()
+ {
+ if heroes.contains(&hero) || hero == sender_user.as_str() {
+ continue;
+ }
+
+ heroes.push(hero);
+ }
+ }
+
+ Ok::<_, Error>((
+ Some(joined_member_count),
+ Some(invited_member_count),
+ heroes,
+ ))
+ };
+
+ let (
+ heroes,
+ joined_member_count,
+ invited_member_count,
+ joined_since_last_sync,
+ state_events,
+ ) = if since_shortstatehash.is_none() {
+ // Probably since = 0, we will do an initial sync
+
+ let (joined_member_count, invited_member_count, heroes) = calculate_counts()?;
+
+ let current_state_ids = db.rooms.state_full_ids(current_shortstatehash).await?;
+
+ let mut state_events = Vec::new();
+ let mut lazy_loaded = HashSet::new();
+
+ let mut i = 0;
+ for (shortstatekey, id) in current_state_ids {
+ let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?;
+
+ if event_type != StateEventType::RoomMember {
+ let pdu = match db.rooms.get_pdu(&id)? {
+ Some(pdu) => pdu,
+ None => {
+ error!("Pdu in state not found: {}", id);
+ continue;
+ }
+ };
+ state_events.push(pdu);
+
+ i += 1;
+ if i % 100 == 0 {
+ tokio::task::yield_now().await;
+ }
+ } else if !lazy_load_enabled
+ || body.full_state
+ || timeline_users.contains(&state_key)
+ {
+ let pdu = match db.rooms.get_pdu(&id)? {
+ Some(pdu) => pdu,
+ None => {
+ error!("Pdu in state not found: {}", id);
+ continue;
+ }
+ };
+
+ // This check is in case a bad user ID made it into the database
+ if let Ok(uid) = UserId::parse(state_key.as_ref()) {
+ lazy_loaded.insert(uid);
+ }
+ state_events.push(pdu);
+
+ i += 1;
+ if i % 100 == 0 {
+ tokio::task::yield_now().await;
+ }
+ }
+ }
+
+ // Reset lazy loading because this is an initial sync
+ db.rooms
+ .lazy_load_reset(&sender_user, &sender_device, &room_id)?;
+
+ // The state_events above should contain all timeline_users, let's mark them as lazy
+ // loaded.
+ db.rooms.lazy_load_mark_sent(
+ &sender_user,
+ &sender_device,
+ &room_id,
+ lazy_loaded,
+ next_batch,
+ );
+
+ (
+ heroes,
+ joined_member_count,
+ invited_member_count,
+ true,
+ state_events,
+ )
+ } else if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) {
+ // No state changes
+ (Vec::new(), None, None, false, Vec::new())
+ } else {
+ // Incremental /sync
+ let since_shortstatehash = since_shortstatehash.unwrap();
+
+ let since_sender_member: Option<RoomMemberEventContent> = db
+ .rooms
+ .state_get(
+ since_shortstatehash,
+ &StateEventType::RoomMember,
+ sender_user.as_str(),
+ )?
+ .and_then(|pdu| {
+ serde_json::from_str(pdu.content.get())
+ .map_err(|_| Error::bad_database("Invalid PDU in database."))
+ .ok()
+ });
+
+ let joined_since_last_sync = since_sender_member
+ .map_or(true, |member| member.membership != MembershipState::Join);
+
+ let mut state_events = Vec::new();
+ let mut lazy_loaded = HashSet::new();
+
+ if since_shortstatehash != current_shortstatehash {
+ let current_state_ids = db.rooms.state_full_ids(current_shortstatehash).await?;
+ let since_state_ids = db.rooms.state_full_ids(since_shortstatehash).await?;
+
+ for (key, id) in current_state_ids {
+ if body.full_state || since_state_ids.get(&key) != Some(&id) {
+ let pdu = match db.rooms.get_pdu(&id)? {
+ Some(pdu) => pdu,
+ None => {
+ error!("Pdu in state not found: {}", id);
+ continue;
+ }
+ };
+
+ if pdu.kind == RoomEventType::RoomMember {
+ match UserId::parse(
+ pdu.state_key
+ .as_ref()
+ .expect("State event has state key")
+ .clone(),
+ ) {
+ Ok(state_key_userid) => {
+ lazy_loaded.insert(state_key_userid);
+ }
+ Err(e) => error!("Invalid state key for member event: {}", e),
+ }
+ }
+
+ state_events.push(pdu);
+ tokio::task::yield_now().await;
+ }
+ }
+ }
+
+ for (_, event) in &timeline_pdus {
+ if lazy_loaded.contains(&event.sender) {
+ continue;
+ }
+
+ if !db.rooms.lazy_load_was_sent_before(
+ &sender_user,
+ &sender_device,
+ &room_id,
+ &event.sender,
+ )? || lazy_load_send_redundant
+ {
+ if let Some(member_event) = db.rooms.room_state_get(
+ &room_id,
+ &StateEventType::RoomMember,
+ event.sender.as_str(),
+ )? {
+ lazy_loaded.insert(event.sender.clone());
+ state_events.push(member_event);
+ }
+ }
+ }
+
+ db.rooms.lazy_load_mark_sent(
+ &sender_user,
+ &sender_device,
+ &room_id,
+ lazy_loaded,
+ next_batch,
+ );
+
+ let encrypted_room = db
+ .rooms
+ .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")?
+ .is_some();
+
+ let since_encryption =
+ db.rooms
+ .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "")?;
+
+ // Calculations:
+ let new_encrypted_room = encrypted_room && since_encryption.is_none();
+
+ let send_member_count = state_events
+ .iter()
+ .any(|event| event.kind == RoomEventType::RoomMember);
+
+ if encrypted_room {
+ for state_event in &state_events {
+ if state_event.kind != RoomEventType::RoomMember {
+ continue;
+ }
+
+ if let Some(state_key) = &state_event.state_key {
+ let user_id = UserId::parse(state_key.clone())
+ .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;
+
+ if user_id == sender_user {
+ continue;
+ }
+
+ let new_membership = serde_json::from_str::<RoomMemberEventContent>(
+ state_event.content.get(),
+ )
+ .map_err(|_| Error::bad_database("Invalid PDU in database."))?
+ .membership;
+
+ match new_membership {
+ MembershipState::Join => {
+ // A new user joined an encrypted room
+ if !share_encrypted_room(&db, &sender_user, &user_id, &room_id)? {
+ device_list_updates.insert(user_id);
+ }
+ }
+ MembershipState::Leave => {
+ // Write down users that have left encrypted rooms we are in
+ left_encrypted_users.insert(user_id);
+ }
+ _ => {}
+ }
+ }
+ }
+ }
+
+ if joined_since_last_sync && encrypted_room || new_encrypted_room {
+ // If the user is in a new encrypted room, give them all joined users
+ device_list_updates.extend(
+ db.rooms
+ .room_members(&room_id)
+ .flatten()
+ .filter(|user_id| {
+ // Don't send key updates from the sender to the sender
+ &sender_user != user_id
+ })
+ .filter(|user_id| {
+ // Only send keys if the sender doesn't share an encrypted room with the target already
+ !share_encrypted_room(&db, &sender_user, user_id, &room_id)
+ .unwrap_or(false)
+ }),
+ );
+ }
+
+ let (joined_member_count, invited_member_count, heroes) = if send_member_count {
+ calculate_counts()?
+ } else {
+ (None, None, Vec::new())
+ };
+
+ (
+ heroes,
+ joined_member_count,
+ invited_member_count,
+ joined_since_last_sync,
+ state_events,
+ )
+ };
+
+ // Look for device list updates in this room
+ device_list_updates.extend(
+ db.users
+ .keys_changed(&room_id.to_string(), since, None)
+ .filter_map(|r| r.ok()),
+ );
+
+ let notification_count = if send_notification_counts {
+ Some(
+ db.rooms
+ .notification_count(&sender_user, &room_id)?
+ .try_into()
+ .expect("notification count can't go that high"),
+ )
+ } else {
+ None
+ };
+
+ let highlight_count = if send_notification_counts {
+ Some(
+ db.rooms
+ .highlight_count(&sender_user, &room_id)?
+ .try_into()
+ .expect("highlight count can't go that high"),
+ )
+ } else {
+ None
+ };
+
+ let prev_batch = timeline_pdus
+ .first()
+ .map_or(Ok::<_, Error>(None), |(pdu_id, _)| {
+ Ok(Some(db.rooms.pdu_count(pdu_id)?.to_string()))
+ })?;
+
+ let room_events: Vec<_> = timeline_pdus
+ .iter()
+ .map(|(_, pdu)| pdu.to_sync_room_event())
+ .collect();
+
+ let mut edus: Vec<_> = db
+ .rooms
+ .edus
+ .readreceipts_since(&room_id, since)
+ .filter_map(|r| r.ok()) // Filter out buggy events
+ .map(|(_, _, v)| v)
+ .collect();
+
+ if db.rooms.edus.last_typing_update(&room_id, &db.globals)? > since {
+ edus.push(
+ serde_json::from_str(
+ &serde_json::to_string(&db.rooms.edus.typings_all(&room_id)?)
+ .expect("event is valid, we just created it"),
+ )
+ .expect("event is valid, we just created it"),
+ );
+ }
+
+ // Save the state after this sync so we can send the correct state diff next sync
+ db.rooms
+ .associate_token_shortstatehash(&room_id, next_batch, current_shortstatehash)?;
+
+ let joined_room = JoinedRoom {
+ account_data: RoomAccountData {
+ events: db
+ .account_data
+ .changes_since(Some(&room_id), &sender_user, since)?
+ .into_iter()
+ .filter_map(|(_, v)| {
+ serde_json::from_str(v.json().get())
+ .map_err(|_| Error::bad_database("Invalid account event in database."))
+ .ok()
+ })
+ .collect(),
+ },
+ summary: RoomSummary {
+ heroes,
+ joined_member_count: joined_member_count.map(|n| (n as u32).into()),
+ invited_member_count: invited_member_count.map(|n| (n as u32).into()),
+ },
+ unread_notifications: UnreadNotificationsCount {
+ highlight_count,
+ notification_count,
+ },
+ timeline: Timeline {
+ limited: limited || joined_since_last_sync,
+ prev_batch,
+ events: room_events,
+ },
+ state: State {
+ events: state_events
+ .iter()
+ .map(|pdu| pdu.to_sync_state_event())
+ .collect(),
+ },
+ ephemeral: Ephemeral { events: edus },
+ };
+
+ if !joined_room.is_empty() {
+ joined_rooms.insert(room_id.clone(), joined_room);
+ }
+
+ // Take presence updates from this room
+ for (user_id, presence) in
+ db.rooms
+ .edus
+ .presence_since(&room_id, since, &db.rooms, &db.globals)?
+ {
+ match presence_updates.entry(user_id) {
+ Entry::Vacant(v) => {
+ v.insert(presence);
+ }
+ Entry::Occupied(mut o) => {
+ let p = o.get_mut();
+
+ // Update existing presence event with more info
+ p.content.presence = presence.content.presence;
+ if let Some(status_msg) = presence.content.status_msg {
+ p.content.status_msg = Some(status_msg);
+ }
+ if let Some(last_active_ago) = presence.content.last_active_ago {
+ p.content.last_active_ago = Some(last_active_ago);
+ }
+ if let Some(displayname) = presence.content.displayname {
+ p.content.displayname = Some(displayname);
+ }
+ if let Some(avatar_url) = presence.content.avatar_url {
+ p.content.avatar_url = Some(avatar_url);
+ }
+ if let Some(currently_active) = presence.content.currently_active {
+ p.content.currently_active = Some(currently_active);
+ }
+ }
+ }
+ }
+ }
+
+ let mut left_rooms = BTreeMap::new();
+ let all_left_rooms: Vec<_> = db.rooms.rooms_left(&sender_user).collect();
+ for result in all_left_rooms {
+ let (room_id, left_state_events) = result?;
+
+ {
+ // Get and drop the lock to wait for remaining operations to finish
+ let mutex_insert = Arc::clone(
+ db.globals
+ .roomid_mutex_insert
+ .write()
+ .unwrap()
+ .entry(room_id.clone())
+ .or_default(),
+ );
+ let insert_lock = mutex_insert.lock().unwrap();
+ drop(insert_lock);
+ }
+
+ let left_count = db.rooms.get_left_count(&room_id, &sender_user)?;
+
+ // Left before last sync
+ if Some(since) >= left_count {
+ continue;
+ }
+
+ left_rooms.insert(
+ room_id.clone(),
+ LeftRoom {
+ account_data: RoomAccountData { events: Vec::new() },
+ timeline: Timeline {
+ limited: false,
+ prev_batch: Some(next_batch_string.clone()),
+ events: Vec::new(),
+ },
+ state: State {
+ events: left_state_events,
+ },
+ },
+ );
+ }
+
+ let mut invited_rooms = BTreeMap::new();
+ let all_invited_rooms: Vec<_> = db.rooms.rooms_invited(&sender_user).collect();
+ for result in all_invited_rooms {
+ let (room_id, invite_state_events) = result?;
+
+ {
+ // Get and drop the lock to wait for remaining operations to finish
+ let mutex_insert = Arc::clone(
+ db.globals
+ .roomid_mutex_insert
+ .write()
+ .unwrap()
+ .entry(room_id.clone())
+ .or_default(),
+ );
+ let insert_lock = mutex_insert.lock().unwrap();
+ drop(insert_lock);
+ }
+
+ let invite_count = db.rooms.get_invite_count(&room_id, &sender_user)?;
+
+ // Invited before last sync
+ if Some(since) >= invite_count {
+ continue;
+ }
+
+ invited_rooms.insert(
+ room_id.clone(),
+ InvitedRoom {
+ invite_state: InviteState {
+ events: invite_state_events,
+ },
+ },
+ );
+ }
+
+ for user_id in left_encrypted_users {
+ let no_longer_share_encrypted_room = db
+ .rooms
+ .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])?
+ .filter_map(|r| r.ok())
+ .filter_map(|other_room_id| {
+ Some(
+ db.rooms
+ .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "")
+ .ok()?
+ .is_some(),
+ )
+ })
+ .all(|encrypted| !encrypted);
+ // If the user doesn't share an encrypted room with the target anymore, we need to tell
+ // them
+ if no_longer_share_encrypted_room {
+ device_list_left.insert(user_id);
+ }
+ }
+
+ // Remove all to-device events the device received *last time*
+ db.users
+ .remove_to_device_events(&sender_user, &sender_device, since)?;
+
+ let response = sync_events::v3::Response {
+ next_batch: next_batch_string,
+ rooms: Rooms {
+ leave: left_rooms,
+ join: joined_rooms,
+ invite: invited_rooms,
+ knock: BTreeMap::new(), // TODO
+ },
+ presence: Presence {
+ events: presence_updates
+ .into_iter()
+ .map(|(_, v)| Raw::new(&v).expect("PresenceEvent always serializes successfully"))
+ .collect(),
+ },
+ account_data: GlobalAccountData {
+ events: db
+ .account_data
+ .changes_since(None, &sender_user, since)?
+ .into_iter()
+ .filter_map(|(_, v)| {
+ serde_json::from_str(v.json().get())
+ .map_err(|_| Error::bad_database("Invalid account event in database."))
+ .ok()
+ })
+ .collect(),
+ },
+ device_lists: DeviceLists {
+ changed: device_list_updates.into_iter().collect(),
+ left: device_list_left.into_iter().collect(),
+ },
+ device_one_time_keys_count: db.users.count_one_time_keys(&sender_user, &sender_device)?,
+ to_device: ToDevice {
+ events: db
+ .users
+ .get_to_device_events(&sender_user, &sender_device)?,
+ },
+ // Fallback keys are not yet supported
+ device_unused_fallback_key_types: None,
+ };
+
+ // TODO: Retry the endpoint instead of returning (waiting for #118)
+ if !body.full_state
+ && response.rooms.is_empty()
+ && response.presence.is_empty()
+ && response.account_data.is_empty()
+ && response.device_lists.is_empty()
+ && response.to_device.is_empty()
+ {
+ // Hang a few seconds so requests are not spammed
+ // Stop hanging if new info arrives
+ let mut duration = body.timeout.unwrap_or_default();
+ if duration.as_secs() > 30 {
+ duration = Duration::from_secs(30);
+ }
+ let _ = tokio::time::timeout(duration, watcher).await;
+ Ok((response, false))
+ } else {
+ Ok((response, since != next_batch)) // Only cache if we made progress
+ }
+}
+
+#[tracing::instrument(skip(db))]
+fn share_encrypted_room(
+ db: &Database,
+ sender_user: &UserId,
+ user_id: &UserId,
+ ignore_room: &RoomId,
+) -> Result<bool> {
+ Ok(db
+ .rooms
+ .get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])?
+ .filter_map(|r| r.ok())
+ .filter(|room_id| room_id != ignore_room)
+ .filter_map(|other_room_id| {
+ Some(
+ db.rooms
+ .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "")
+ .ok()?
+ .is_some(),
+ )
+ })
+ .any(|encrypted| encrypted))
+}
diff --git a/src/api/client_server/tag.rs b/src/api/client_server/tag.rs
new file mode 100644
index 0000000..98d895c
--- /dev/null
+++ b/src/api/client_server/tag.rs
@@ -0,0 +1,117 @@
+use crate::{database::DatabaseGuard, Result, Ruma};
+use ruma::{
+ api::client::tag::{create_tag, delete_tag, get_tags},
+ events::{
+ tag::{TagEvent, TagEventContent},
+ RoomAccountDataEventType,
+ },
+};
+use std::collections::BTreeMap;
+
+/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
+///
+/// Adds a tag to the room.
+///
+/// - Inserts the tag into the tag event of the room account data.
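+///
+/// A hypothetical resulting `m.tag` account-data content (tag and order made up):
+///
+/// ```json
+/// { "tags": { "u.work": { "order": 0.9 } } }
+/// ```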
+pub async fn update_tag_route(
+ db: DatabaseGuard,
+ body: Ruma<create_tag::v3::IncomingRequest>,
+) -> Result<create_tag::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let mut tags_event = db
+ .account_data
+ .get(
+ Some(&body.room_id),
+ sender_user,
+ RoomAccountDataEventType::Tag,
+ )?
+ .unwrap_or_else(|| TagEvent {
+ content: TagEventContent {
+ tags: BTreeMap::new(),
+ },
+ });
+ tags_event
+ .content
+ .tags
+ .insert(body.tag.clone().into(), body.tag_info.clone());
+
+ db.account_data.update(
+ Some(&body.room_id),
+ sender_user,
+ RoomAccountDataEventType::Tag,
+ &tags_event,
+ &db.globals,
+ )?;
+
+ db.flush()?;
+
+ Ok(create_tag::v3::Response {})
+}
+
+/// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
+///
+/// Deletes a tag from the room.
+///
+/// - Removes the tag from the tag event of the room account data.
+pub async fn delete_tag_route(
+ db: DatabaseGuard,
+ body: Ruma<delete_tag::v3::IncomingRequest>,
+) -> Result<delete_tag::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let mut tags_event = db
+ .account_data
+ .get(
+ Some(&body.room_id),
+ sender_user,
+ RoomAccountDataEventType::Tag,
+ )?
+ .unwrap_or_else(|| TagEvent {
+ content: TagEventContent {
+ tags: BTreeMap::new(),
+ },
+ });
+ tags_event.content.tags.remove(&body.tag.clone().into());
+
+ db.account_data.update(
+ Some(&body.room_id),
+ sender_user,
+ RoomAccountDataEventType::Tag,
+ &tags_event,
+ &db.globals,
+ )?;
+
+ db.flush()?;
+
+ Ok(delete_tag::v3::Response {})
+}
+
+/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags`
+///
+/// Returns tags on the room.
+///
+/// - Gets the tag event of the room account data.
+pub async fn get_tags_route(
+ db: DatabaseGuard,
+ body: Ruma<get_tags::v3::IncomingRequest>,
+) -> Result<get_tags::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ Ok(get_tags::v3::Response {
+ tags: db
+ .account_data
+ .get(
+ Some(&body.room_id),
+ sender_user,
+ RoomAccountDataEventType::Tag,
+ )?
+ .unwrap_or_else(|| TagEvent {
+ content: TagEventContent {
+ tags: BTreeMap::new(),
+ },
+ })
+ .content
+ .tags,
+ })
+}
diff --git a/src/api/client_server/thirdparty.rs b/src/api/client_server/thirdparty.rs
new file mode 100644
index 0000000..5665ad6
--- /dev/null
+++ b/src/api/client_server/thirdparty.rs
@@ -0,0 +1,16 @@
+use crate::{Result, Ruma};
+use ruma::api::client::thirdparty::get_protocols;
+
+use std::collections::BTreeMap;
+
+/// # `GET /_matrix/client/r0/thirdparty/protocols`
+///
+/// TODO: Fetches all metadata about protocols supported by the homeserver.
+pub async fn get_protocols_route(
+ _body: Ruma<get_protocols::v3::IncomingRequest>,
+) -> Result<get_protocols::v3::Response> {
+ // TODO
+ Ok(get_protocols::v3::Response {
+ protocols: BTreeMap::new(),
+ })
+}
diff --git a/src/api/client_server/to_device.rs b/src/api/client_server/to_device.rs
new file mode 100644
index 0000000..51441dd
--- /dev/null
+++ b/src/api/client_server/to_device.rs
@@ -0,0 +1,94 @@
+use ruma::events::ToDeviceEventType;
+use std::collections::BTreeMap;
+
+use crate::{database::DatabaseGuard, Error, Result, Ruma};
+use ruma::{
+ api::{
+ client::{error::ErrorKind, to_device::send_event_to_device},
+ federation::{self, transactions::edu::DirectDeviceContent},
+ },
+ to_device::DeviceIdOrAllDevices,
+};
+
+/// # `PUT /_matrix/client/r0/sendToDevice/{eventType}/{txnId}`
+///
+/// Send a to-device event to a set of client devices.
+pub async fn send_event_to_device_route(
+ db: DatabaseGuard,
+ body: Ruma<send_event_to_device::v3::IncomingRequest>,
+) -> Result<send_event_to_device::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+ let sender_device = body.sender_device.as_deref();
+
+ // Check if this is a new transaction id
+ if db
+ .transaction_ids
+ .existing_txnid(sender_user, sender_device, &body.txn_id)?
+ .is_some()
+ {
+ return Ok(send_event_to_device::v3::Response {});
+ }
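+ // e.g. a client retrying `PUT .../sendToDevice/m.room.encrypted/txn123` after
+ // a timeout hits the branch above and receives an empty 200 response instead
+ // of delivering the event twice (event type and txn id are illustrative).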
+
+ for (target_user_id, map) in &body.messages {
+ for (target_device_id_maybe, event) in map {
+ if target_user_id.server_name() != db.globals.server_name() {
+ let mut map = BTreeMap::new();
+ map.insert(target_device_id_maybe.clone(), event.clone());
+ let mut messages = BTreeMap::new();
+ messages.insert(target_user_id.clone(), map);
+
+ db.sending.send_reliable_edu(
+ target_user_id.server_name(),
+ serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice(
+ DirectDeviceContent {
+ sender: sender_user.clone(),
+ ev_type: ToDeviceEventType::from(&*body.event_type),
+ message_id: body.txn_id.to_owned(),
+ messages,
+ },
+ ))
+ .expect("DirectToDevice EDU can be serialized"),
+ db.globals.next_count()?,
+ )?;
+
+ continue;
+ }
+
+ match target_device_id_maybe {
+ DeviceIdOrAllDevices::DeviceId(target_device_id) => db.users.add_to_device_event(
+ sender_user,
+ target_user_id,
+ &target_device_id,
+ &body.event_type,
+ event.deserialize_as().map_err(|_| {
+ Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid")
+ })?,
+ &db.globals,
+ )?,
+
+ DeviceIdOrAllDevices::AllDevices => {
+ for target_device_id in db.users.all_device_ids(target_user_id) {
+ db.users.add_to_device_event(
+ sender_user,
+ target_user_id,
+ &target_device_id?,
+ &body.event_type,
+ event.deserialize_as().map_err(|_| {
+ Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid")
+ })?,
+ &db.globals,
+ )?;
+ }
+ }
+ }
+ }
+ }
+
+ // Save transaction id with empty data
+ db.transaction_ids
+ .add_txnid(sender_user, sender_device, &body.txn_id, &[])?;
+
+ db.flush()?;
+
+ Ok(send_event_to_device::v3::Response {})
+}
diff --git a/src/api/client_server/typing.rs b/src/api/client_server/typing.rs
new file mode 100644
index 0000000..cac5a5f
--- /dev/null
+++ b/src/api/client_server/typing.rs
@@ -0,0 +1,36 @@
+use crate::{database::DatabaseGuard, utils, Error, Result, Ruma};
+use ruma::api::client::{error::ErrorKind, typing::create_typing_event};
+
+/// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}`
+///
+/// Sets the typing state of the sender user.
+pub async fn create_typing_event_route(
+ db: DatabaseGuard,
+ body: Ruma<create_typing_event::v3::IncomingRequest>,
+) -> Result<create_typing_event::v3::Response> {
+ use create_typing_event::v3::Typing;
+
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ if !db.rooms.is_joined(sender_user, &body.room_id)? {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "You are not in this room.",
+ ));
+ }
+
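+ // The typing EDU stores an absolute expiry: now plus the client-requested
+ // timeout, e.g. Typing::Yes(30s) received at t = 1_000 ms expires at t = 31_000 ms.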
+ if let Typing::Yes(duration) = body.state {
+ db.rooms.edus.typing_add(
+ sender_user,
+ &body.room_id,
+ duration.as_millis() as u64 + utils::millis_since_unix_epoch(),
+ &db.globals,
+ )?;
+ } else {
+ db.rooms
+ .edus
+ .typing_remove(sender_user, &body.room_id, &db.globals)?;
+ }
+
+ Ok(create_typing_event::v3::Response {})
+}
diff --git a/src/api/client_server/unversioned.rs b/src/api/client_server/unversioned.rs
new file mode 100644
index 0000000..8a5c3d2
--- /dev/null
+++ b/src/api/client_server/unversioned.rs
@@ -0,0 +1,31 @@
+use std::{collections::BTreeMap, iter::FromIterator};
+
+use ruma::api::client::discovery::get_supported_versions;
+
+use crate::{Result, Ruma};
+
+/// # `GET /_matrix/client/versions`
+///
+/// Get the versions of the specification and unstable features supported by this server.
+///
+/// - Versions take the form MAJOR.MINOR.PATCH
+/// - Only the latest PATCH release will be reported for each MAJOR.MINOR value
+/// - Unstable features are namespaced and may include version information in their name
+///
+/// Note: Unstable features are used while developing new features. Clients should avoid using
+/// unstable features in their stable releases
+pub async fn get_supported_versions_route(
+ _body: Ruma<get_supported_versions::IncomingRequest>,
+) -> Result<get_supported_versions::Response> {
+ let resp = get_supported_versions::Response {
+ versions: vec![
+ "r0.5.0".to_owned(),
+ "r0.6.0".to_owned(),
+ "v1.1".to_owned(),
+ "v1.2".to_owned(),
+ ],
+ unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
+ };
+
+ Ok(resp)
+}
diff --git a/src/api/client_server/user_directory.rs b/src/api/client_server/user_directory.rs
new file mode 100644
index 0000000..349c139
--- /dev/null
+++ b/src/api/client_server/user_directory.rs
@@ -0,0 +1,91 @@
+use crate::{database::DatabaseGuard, Result, Ruma};
+use ruma::{
+ api::client::user_directory::search_users,
+ events::{
+ room::join_rules::{JoinRule, RoomJoinRulesEventContent},
+ StateEventType,
+ },
+};
+
+/// # `POST /_matrix/client/r0/user_directory/search`
+///
+/// Searches all known users for a match.
+///
+/// - Hides any local users that aren't in any public rooms (i.e. those that have the join rule set to public)
+/// and don't share a room with the sender
+pub async fn search_users_route(
+ db: DatabaseGuard,
+ body: Ruma<search_users::v3::IncomingRequest>,
+) -> Result<search_users::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+ let limit = u64::from(body.limit) as usize;
+
+ let mut users = db.users.iter().filter_map(|user_id| {
+ // Filter out buggy users (they should not exist, but you never know...)
+ let user_id = user_id.ok()?;
+
+ let user = search_users::v3::User {
+ user_id: user_id.clone(),
+ display_name: db.users.displayname(&user_id).ok()?,
+ avatar_url: db.users.avatar_url(&user_id).ok()?,
+ };
+
+ let user_id_matches = user
+ .user_id
+ .to_string()
+ .to_lowercase()
+ .contains(&body.search_term.to_lowercase());
+
+ let user_displayname_matches = user
+ .display_name
+ .as_ref()
+ .filter(|name| {
+ name.to_lowercase()
+ .contains(&body.search_term.to_lowercase())
+ })
+ .is_some();
+
+ if !user_id_matches && !user_displayname_matches {
+ return None;
+ }
+
+ let user_is_in_public_rooms =
+ db.rooms
+ .rooms_joined(&user_id)
+ .filter_map(|r| r.ok())
+ .any(|room| {
+ db.rooms
+ .room_state_get(&room, &StateEventType::RoomJoinRules, "")
+ .map_or(false, |event| {
+ event.map_or(false, |event| {
+ serde_json::from_str(event.content.get())
+ .map_or(false, |r: RoomJoinRulesEventContent| {
+ r.join_rule == JoinRule::Public
+ })
+ })
+ })
+ });
+
+ if user_is_in_public_rooms {
+ return Some(user);
+ }
+
+ let user_is_in_shared_rooms = db
+ .rooms
+ .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])
+ .ok()?
+ .next()
+ .is_some();
+
+ if user_is_in_shared_rooms {
+ return Some(user);
+ }
+
+ None
+ });
+
+ let results = users.by_ref().take(limit).collect();
+ let limited = users.next().is_some();
+
+ Ok(search_users::v3::Response { results, limited })
+}
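+
+// A minimal sketch (not part of the original code) of the matching rule above:
+// user id and display name are both compared lowercased, so searching "ali"
+// matches "@Alice:example.com". The names are made up.
+#[cfg(test)]
+mod match_sketch {
+ #[test]
+ fn search_is_case_insensitive() {
+ let search_term = "ali";
+ assert!("@Alice:example.com"
+ .to_lowercase()
+ .contains(&search_term.to_lowercase()));
+ assert!("Alice Margatroid"
+ .to_lowercase()
+ .contains(&search_term.to_lowercase()));
+ }
+}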
diff --git a/src/api/client_server/voip.rs b/src/api/client_server/voip.rs
new file mode 100644
index 0000000..7e9de31
--- /dev/null
+++ b/src/api/client_server/voip.rs
@@ -0,0 +1,48 @@
+use crate::{database::DatabaseGuard, Result, Ruma};
+use hmac::{Hmac, Mac, NewMac};
+use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch};
+use sha1::Sha1;
+use std::time::{Duration, SystemTime};
+
+type HmacSha1 = Hmac<Sha1>;
+
+/// # `GET /_matrix/client/r0/voip/turnServer`
+///
+/// Returns information about the recommended TURN server.
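+///
+/// When a TURN shared secret is configured, ephemeral credentials are derived
+/// using the scheme from the TURN REST API draft (as used by e.g. coturn's
+/// `use-auth-secret` mode): the username is `"{expiry_unix_ts}:{user_id}"` and
+/// the password is `base64(HMAC-SHA1(secret, username))`. A sketch with
+/// made-up values:
+///
+/// ```text
+/// username = "1700000000:@alice:example.org"
+/// password = base64(hmac_sha1(b"turnsecret", username.as_bytes()))
+/// ```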
+pub async fn turn_server_route(
+ db: DatabaseGuard,
+ body: Ruma<get_turn_server_info::v3::IncomingRequest>,
+) -> Result<get_turn_server_info::v3::Response> {
+ let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+ let turn_secret = db.globals.turn_secret();
+
+ let (username, password) = if !turn_secret.is_empty() {
+ let expiry = SecondsSinceUnixEpoch::from_system_time(
+ SystemTime::now() + Duration::from_secs(db.globals.turn_ttl()),
+ )
+ .expect("time is valid");
+
+ let username: String = format!("{}:{}", expiry.get(), sender_user);
+
+ let mut mac = HmacSha1::new_from_slice(turn_secret.as_bytes())
+ .expect("HMAC can take key of any size");
+ mac.update(username.as_bytes());
+
+ let password: String = base64::encode_config(mac.finalize().into_bytes(), base64::STANDARD);
+
+ (username, password)
+ } else {
+ (
+ db.globals.turn_username().clone(),
+ db.globals.turn_password().clone(),
+ )
+ };
+
+ Ok(get_turn_server_info::v3::Response {
+ username,
+ password,
+ uris: db.globals.turn_uris().to_vec(),
+ ttl: Duration::from_secs(db.globals.turn_ttl()),
+ })
+}
diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs
new file mode 100644
index 0000000..45e9d9a
--- /dev/null
+++ b/src/api/ruma_wrapper/axum.rs
@@ -0,0 +1,367 @@
+use std::{collections::BTreeMap, iter::FromIterator, str};
+
+use axum::{
+ async_trait,
+ body::{Full, HttpBody},
+ extract::{
+ rejection::TypedHeaderRejectionReason, FromRequest, Path, RequestParts, TypedHeader,
+ },
+ headers::{
+ authorization::{Bearer, Credentials},
+ Authorization,
+ },
+ response::{IntoResponse, Response},
+ BoxError,
+};
+use bytes::{BufMut, Bytes, BytesMut};
+use http::StatusCode;
+use ruma::{
+ api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse},
+ signatures::CanonicalJsonValue,
+ DeviceId, ServerName, UserId,
+};
+use serde::Deserialize;
+use tracing::{debug, error, warn};
+
+use super::{Ruma, RumaResponse};
+use crate::{database::DatabaseGuard, server_server, Error, Result};
+
+#[async_trait]
+impl<T, B> FromRequest<B> for Ruma<T>
+where
+ T: IncomingRequest,
+ B: HttpBody + Send,
+ B::Data: Send,
+ B::Error: Into<BoxError>,
+{
+ type Rejection = Error;
+
+ async fn from_request(req: &mut RequestParts<B>) -> Result<Self, Self::Rejection> {
+ #[derive(Deserialize)]
+ struct QueryParams {
+ access_token: Option<String>,
+ user_id: Option<String>,
+ }
+
+ let metadata = T::METADATA;
+ let db = DatabaseGuard::from_request(req).await?;
+ let auth_header = Option::<TypedHeader<Authorization<Bearer>>>::from_request(req).await?;
+ let path_params = Path::<Vec<String>>::from_request(req).await?;
+
+ let query = req.uri().query().unwrap_or_default();
+ let query_params: QueryParams = match ruma::serde::urlencoded::from_str(query) {
+ Ok(params) => params,
+ Err(e) => {
+ error!(%query, "Failed to deserialize query parameters: {}", e);
+ return Err(Error::BadRequest(
+ ErrorKind::Unknown,
+ "Failed to read query parameters",
+ ));
+ }
+ };
+
+ let token = match &auth_header {
+ Some(TypedHeader(Authorization(bearer))) => Some(bearer.token()),
+ None => query_params.access_token.as_deref(),
+ };
+
+ let mut body = Bytes::from_request(req)
+ .await
+            .map_err(|_| Error::BadRequest(ErrorKind::Unknown, "Failed to read request body."))?;
+
+ let mut json_body = serde_json::from_slice::<CanonicalJsonValue>(&body).ok();
+
+ let appservices = db.appservice.all().unwrap();
+ let appservice_registration = appservices.iter().find(|(_id, registration)| {
+ registration
+ .get("as_token")
+ .and_then(|as_token| as_token.as_str())
+ .map_or(false, |as_token| token == Some(as_token))
+ });
+
+ let (sender_user, sender_device, sender_servername, from_appservice) =
+ if let Some((_id, registration)) = appservice_registration {
+ match metadata.authentication {
+ AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => {
+ let user_id = query_params.user_id.map_or_else(
+ || {
+ UserId::parse_with_server_name(
+ registration
+ .get("sender_localpart")
+ .unwrap()
+ .as_str()
+ .unwrap(),
+ db.globals.server_name(),
+ )
+ .unwrap()
+ },
+ |s| UserId::parse(s).unwrap(),
+ );
+
+ if !db.users.exists(&user_id).unwrap() {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "User does not exist.",
+ ));
+ }
+
+ // TODO: Check if appservice is allowed to be that user
+ (Some(user_id), None, None, true)
+ }
+ AuthScheme::ServerSignatures => (None, None, None, true),
+ AuthScheme::None => (None, None, None, true),
+ }
+ } else {
+ match metadata.authentication {
+ AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => {
+ let token = match token {
+ Some(token) => token,
+ _ => {
+ return Err(Error::BadRequest(
+ ErrorKind::MissingToken,
+ "Missing access token.",
+ ))
+ }
+ };
+
+ match db.users.find_from_token(token).unwrap() {
+ None => {
+ return Err(Error::BadRequest(
+ ErrorKind::UnknownToken { soft_logout: false },
+ "Unknown access token.",
+ ))
+ }
+ Some((user_id, device_id)) => (
+ Some(user_id),
+ Some(Box::<DeviceId>::from(device_id)),
+ None,
+ false,
+ ),
+ }
+ }
+ AuthScheme::ServerSignatures => {
+ let TypedHeader(Authorization(x_matrix)) =
+ TypedHeader::<Authorization<XMatrix>>::from_request(req)
+ .await
+ .map_err(|e| {
+ warn!("Missing or invalid Authorization header: {}", e);
+
+ let msg = match e.reason() {
+ TypedHeaderRejectionReason::Missing => {
+ "Missing Authorization header."
+ }
+ TypedHeaderRejectionReason::Error(_) => {
+ "Invalid X-Matrix signatures."
+ }
+                                        _ => "Unknown header-related error.",
+ };
+
+ Error::BadRequest(ErrorKind::Forbidden, msg)
+ })?;
+
+ let origin_signatures = BTreeMap::from_iter([(
+ x_matrix.key.clone(),
+ CanonicalJsonValue::String(x_matrix.sig),
+ )]);
+
+ let signatures = BTreeMap::from_iter([(
+ x_matrix.origin.as_str().to_owned(),
+ CanonicalJsonValue::Object(origin_signatures),
+ )]);
+
+ let mut request_map = BTreeMap::from_iter([
+ (
+ "method".to_owned(),
+ CanonicalJsonValue::String(req.method().to_string()),
+ ),
+ (
+ "uri".to_owned(),
+ CanonicalJsonValue::String(req.uri().to_string()),
+ ),
+ (
+ "origin".to_owned(),
+ CanonicalJsonValue::String(x_matrix.origin.as_str().to_owned()),
+ ),
+ (
+ "destination".to_owned(),
+ CanonicalJsonValue::String(
+ db.globals.server_name().as_str().to_owned(),
+ ),
+ ),
+ (
+ "signatures".to_owned(),
+ CanonicalJsonValue::Object(signatures),
+ ),
+ ]);
+
+ if let Some(json_body) = &json_body {
+ request_map.insert("content".to_owned(), json_body.clone());
+ };
+
+ let keys_result = server_server::fetch_signing_keys(
+ &db,
+ &x_matrix.origin,
+ vec![x_matrix.key.to_owned()],
+ )
+ .await;
+
+ let keys = match keys_result {
+ Ok(b) => b,
+ Err(e) => {
+ warn!("Failed to fetch signing keys: {}", e);
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "Failed to fetch signing keys.",
+ ));
+ }
+ };
+
+ let pub_key_map =
+ BTreeMap::from_iter([(x_matrix.origin.as_str().to_owned(), keys)]);
+
+ match ruma::signatures::verify_json(&pub_key_map, &request_map) {
+ Ok(()) => (None, None, Some(x_matrix.origin), false),
+ Err(e) => {
+ warn!(
+ "Failed to verify json request from {}: {}\n{:?}",
+ x_matrix.origin, e, request_map
+ );
+
+ if req.uri().to_string().contains('@') {
+ warn!(
+ "Request uri contained '@' character. Make sure your \
+ reverse proxy gives Conduit the raw uri (apache: use \
+ nocanon)"
+ );
+ }
+
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "Failed to verify X-Matrix signatures.",
+ ));
+ }
+ }
+ }
+ AuthScheme::None => (None, None, None, false),
+ }
+ };
+
+ let mut http_request = http::Request::builder().uri(req.uri()).method(req.method());
+ *http_request.headers_mut().unwrap() = req.headers().clone();
+
+ if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body {
+ let user_id = sender_user.clone().unwrap_or_else(|| {
+ UserId::parse_with_server_name("", db.globals.server_name())
+ .expect("we know this is valid")
+ });
+
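+            // If this is a retry of an in-flight UIAA session, merge the
+            // previously stored request body back in so handlers see the
+            // complete original request, not just the `auth` object.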
+ let uiaa_request = json_body
+ .get("auth")
+ .and_then(|auth| auth.as_object())
+ .and_then(|auth| auth.get("session"))
+ .and_then(|session| session.as_str())
+ .and_then(|session| {
+ db.uiaa.get_uiaa_request(
+ &user_id,
+ &sender_device.clone().unwrap_or_else(|| "".into()),
+ session,
+ )
+ });
+
+ if let Some(CanonicalJsonValue::Object(initial_request)) = uiaa_request {
+ for (key, value) in initial_request {
+ json_body.entry(key).or_insert(value);
+ }
+ }
+
+ let mut buf = BytesMut::new().writer();
+ serde_json::to_writer(&mut buf, json_body).expect("value serialization can't fail");
+ body = buf.into_inner().freeze();
+ }
+
+ let http_request = http_request.body(&*body).unwrap();
+
+ debug!("{:?}", http_request);
+
+ let body = T::try_from_http_request(http_request, &path_params).map_err(|e| {
+ warn!("{:?}", e);
+ Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.")
+ })?;
+
+ Ok(Ruma {
+ body,
+ sender_user,
+ sender_device,
+ sender_servername,
+ from_appservice,
+ json_body,
+ })
+ }
+}
+
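+/// Parsed contents of an `X-Matrix` Authorization header, as sent by other
+/// homeservers on federation requests. Illustrative header (values are made
+/// up):
+///
+/// ```text
+/// Authorization: X-Matrix origin=example.org,key="ed25519:abc123",sig="<base64 signature>"
+/// ```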
+struct XMatrix {
+ origin: Box<ServerName>,
+ key: String, // KeyName?
+ sig: String,
+}
+
+impl Credentials for XMatrix {
+ const SCHEME: &'static str = "X-Matrix";
+
+ fn decode(value: &http::HeaderValue) -> Option<Self> {
+ debug_assert!(
+ value.as_bytes().starts_with(b"X-Matrix "),
+ "HeaderValue to decode should start with \"X-Matrix ..\", received = {:?}",
+ value,
+ );
+
+ let parameters = str::from_utf8(&value.as_bytes()["X-Matrix ".len()..])
+ .ok()?
+ .trim_start();
+
+ let mut origin = None;
+ let mut key = None;
+ let mut sig = None;
+
+ for entry in parameters.split_terminator(',') {
+ let (name, value) = entry.split_once('=')?;
+
+            // The spec is unclear about which fields are quoted and which are
+            // not, so simply accept either form for every field.
+ let value = value
+ .strip_prefix('"')
+ .and_then(|rest| rest.strip_suffix('"'))
+ .unwrap_or(value);
+
+ // FIXME: Catch multiple fields of the same name
+ match name {
+ "origin" => origin = Some(value.try_into().ok()?),
+ "key" => key = Some(value.to_owned()),
+ "sig" => sig = Some(value.to_owned()),
+ _ => debug!(
+ "Unexpected field `{}` in X-Matrix Authorization header",
+ name
+ ),
+ }
+ }
+
+ Some(Self {
+ origin: origin?,
+ key: key?,
+ sig: sig?,
+ })
+ }
+
+ fn encode(&self) -> http::HeaderValue {
+ todo!()
+ }
+}
+
+impl<T: OutgoingResponse> IntoResponse for RumaResponse<T> {
+ fn into_response(self) -> Response {
+ match self.0.try_into_http_response::<BytesMut>() {
+ Ok(res) => res.map(BytesMut::freeze).map(Full::new).into_response(),
+ Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
+ }
+ }
+}
diff --git a/src/api/ruma_wrapper/mod.rs b/src/api/ruma_wrapper/mod.rs
new file mode 100644
index 0000000..15360e5
--- /dev/null
+++ b/src/api/ruma_wrapper/mod.rs
@@ -0,0 +1,42 @@
+use crate::Error;
+use ruma::{
+ api::client::uiaa::UiaaResponse, signatures::CanonicalJsonValue, DeviceId, ServerName, UserId,
+};
+use std::ops::Deref;
+
+#[cfg(feature = "conduit_bin")]
+mod axum;
+
+/// Extractor for Ruma request structs
+pub struct Ruma<T> {
+ pub body: T,
+ pub sender_user: Option<Box<UserId>>,
+ pub sender_device: Option<Box<DeviceId>>,
+ pub sender_servername: Option<Box<ServerName>>,
+ // This is None when body is not a valid string
+ pub json_body: Option<CanonicalJsonValue>,
+ pub from_appservice: bool,
+}
+
+impl<T> Deref for Ruma<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ &self.body
+ }
+}
+
+#[derive(Clone)]
+pub struct RumaResponse<T>(pub T);
+
+impl<T> From<T> for RumaResponse<T> {
+ fn from(t: T) -> Self {
+ Self(t)
+ }
+}
+
+impl From<Error> for RumaResponse<UiaaResponse> {
+ fn from(t: Error) -> Self {
+ t.to_response()
+ }
+}
diff --git a/src/api/server_server.rs b/src/api/server_server.rs
new file mode 100644
index 0000000..6fa83e4
--- /dev/null
+++ b/src/api/server_server.rs
@@ -0,0 +1,3644 @@
+use crate::{
+ client_server::{self, claim_keys_helper, get_keys_helper},
+ database::{rooms::CompressedStateEvent, DatabaseGuard},
+ pdu::EventHash,
+ utils, Database, Error, PduEvent, Result, Ruma,
+};
+use axum::{response::IntoResponse, Json};
+use futures_util::{stream::FuturesUnordered, StreamExt};
+use get_profile_information::v1::ProfileField;
+use http::header::{HeaderValue, AUTHORIZATION};
+use regex::Regex;
+use ruma::{
+ api::{
+ client::error::{Error as RumaError, ErrorKind},
+ federation::{
+ authorization::get_event_authorization,
+ device::get_devices::{self, v1::UserDevice},
+ directory::{get_public_rooms, get_public_rooms_filtered},
+ discovery::{
+ get_remote_server_keys, get_remote_server_keys_batch,
+ get_remote_server_keys_batch::v2::QueryCriteria, get_server_keys,
+ get_server_version, ServerSigningKeys, VerifyKey,
+ },
+ event::{get_event, get_missing_events, get_room_state, get_room_state_ids},
+ keys::{claim_keys, get_keys},
+ membership::{
+ create_invite,
+ create_join_event::{self, RoomState},
+ prepare_join_event,
+ },
+ query::{get_profile_information, get_room_information},
+ transactions::{
+ edu::{DeviceListUpdateContent, DirectDeviceContent, Edu, SigningKeyUpdateContent},
+ send_transaction_message,
+ },
+ },
+ EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, OutgoingResponse,
+ SendAccessToken,
+ },
+ directory::{IncomingFilter, IncomingRoomNetwork},
+ events::{
+ receipt::{ReceiptEvent, ReceiptEventContent},
+ room::{
+ create::RoomCreateEventContent,
+ join_rules::{JoinRule, RoomJoinRulesEventContent},
+ member::{MembershipState, RoomMemberEventContent},
+ server_acl::RoomServerAclEventContent,
+ },
+ RoomEventType, StateEventType,
+ },
+ int,
+ receipt::ReceiptType,
+ serde::{Base64, JsonObject, Raw},
+ signatures::{CanonicalJsonObject, CanonicalJsonValue},
+ state_res::{self, RoomVersion, StateMap},
+ to_device::DeviceIdOrAllDevices,
+ uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName,
+ ServerSigningKeyId,
+};
+use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
+use std::{
+ collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet},
+ fmt::Debug,
+ future::Future,
+ mem,
+ net::{IpAddr, SocketAddr},
+ ops::Deref,
+ pin::Pin,
+ sync::{Arc, RwLock, RwLockWriteGuard},
+ time::{Duration, Instant, SystemTime},
+};
+use tokio::sync::{MutexGuard, Semaphore};
+use tracing::{debug, error, info, trace, warn};
+
+/// Wraps either a literal IP address plus port, or a hostname plus an optional
+/// port (including the leading colon, if one was specified).
+///
+/// Note: A `FedDest::Named` might contain an IP address in string form if there
+/// was no port specified to construct a SocketAddr with.
+///
+/// # Examples:
+/// ```rust
+/// # use conduit::server_server::FedDest;
+/// # fn main() -> Result<(), std::net::AddrParseError> {
+/// FedDest::Literal("198.51.100.3:8448".parse()?);
+/// FedDest::Literal("[2001:db8::4:5]:443".parse()?);
+/// FedDest::Named("matrix.example.org".to_owned(), "".to_owned());
+/// FedDest::Named("matrix.example.org".to_owned(), ":8448".to_owned());
+/// FedDest::Named("198.51.100.5".to_owned(), "".to_owned());
+/// # Ok(())
+/// # }
+/// ```
+#[derive(Clone, Debug, PartialEq)]
+pub enum FedDest {
+ Literal(SocketAddr),
+ Named(String, String),
+}
+
+impl FedDest {
+ fn into_https_string(self) -> String {
+ match self {
+ Self::Literal(addr) => format!("https://{}", addr),
+ Self::Named(host, port) => format!("https://{}{}", host, port),
+ }
+ }
+
+ fn into_uri_string(self) -> String {
+ match self {
+ Self::Literal(addr) => addr.to_string(),
+ Self::Named(host, ref port) => host + port,
+ }
+ }
+
+ fn hostname(&self) -> String {
+ match &self {
+ Self::Literal(addr) => addr.ip().to_string(),
+ Self::Named(host, _) => host.clone(),
+ }
+ }
+
+ fn port(&self) -> Option<u16> {
+ match &self {
+ Self::Literal(addr) => Some(addr.port()),
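+            // Stored ports include the leading ':', so skip it before parsing.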
+ Self::Named(_, port) => port[1..].parse().ok(),
+ }
+ }
+}
+
+#[tracing::instrument(skip(globals, request))]
+pub(crate) async fn send_request<T: OutgoingRequest>(
+ globals: &crate::database::globals::Globals,
+ destination: &ServerName,
+ request: T,
+) -> Result<T::IncomingResponse>
+where
+ T: Debug,
+{
+ if !globals.allow_federation() {
+ return Err(Error::bad_config("Federation is disabled."));
+ }
+
+ let mut write_destination_to_cache = false;
+
+ let cached_result = globals
+ .actual_destination_cache
+ .read()
+ .unwrap()
+ .get(destination)
+ .cloned();
+
+ let (actual_destination, host) = if let Some(result) = cached_result {
+ result
+ } else {
+ write_destination_to_cache = true;
+
+ let result = find_actual_destination(globals, destination).await;
+
+ (result.0, result.1.into_uri_string())
+ };
+
+ let actual_destination_str = actual_destination.clone().into_https_string();
+
+ let mut http_request = request
+ .try_into_http_request::<Vec<u8>>(
+ &actual_destination_str,
+ SendAccessToken::IfRequired(""),
+ &[MatrixVersion::V1_0],
+ )
+ .map_err(|e| {
+ warn!(
+ "Failed to find destination {}: {}",
+ actual_destination_str, e
+ );
+ Error::BadServerResponse("Invalid destination")
+ })?;
+
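+    // Federation requests are authenticated by signing a canonical JSON
+    // description of the request, not the raw bytes. Illustrative shape
+    // (values are examples):
+    //
+    //   {
+    //     "method": "GET",
+    //     "uri": "/_matrix/federation/v1/version",
+    //     "origin": "this.server.name",
+    //     "destination": "other.server.name",
+    //     "content": { ... }  // only present for non-empty bodies
+    //   }
+    //
+    // The resulting signatures are then sent in `Authorization: X-Matrix ...`
+    // headers below.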
+ let mut request_map = serde_json::Map::new();
+
+ if !http_request.body().is_empty() {
+ request_map.insert(
+ "content".to_owned(),
+ serde_json::from_slice(http_request.body())
+ .expect("body is valid json, we just created it"),
+ );
+ };
+
+ request_map.insert("method".to_owned(), T::METADATA.method.to_string().into());
+ request_map.insert(
+ "uri".to_owned(),
+ http_request
+ .uri()
+ .path_and_query()
+ .expect("all requests have a path")
+ .to_string()
+ .into(),
+ );
+ request_map.insert("origin".to_owned(), globals.server_name().as_str().into());
+ request_map.insert("destination".to_owned(), destination.as_str().into());
+
+ let mut request_json =
+ serde_json::from_value(request_map.into()).expect("valid JSON is valid BTreeMap");
+
+ ruma::signatures::sign_json(
+ globals.server_name().as_str(),
+ globals.keypair(),
+ &mut request_json,
+ )
+ .expect("our request json is what ruma expects");
+
+ let request_json: serde_json::Map<String, serde_json::Value> =
+ serde_json::from_slice(&serde_json::to_vec(&request_json).unwrap()).unwrap();
+
+ let signatures = request_json["signatures"]
+ .as_object()
+ .unwrap()
+ .values()
+ .map(|v| {
+ v.as_object()
+ .unwrap()
+ .iter()
+ .map(|(k, v)| (k, v.as_str().unwrap()))
+ });
+
+ for signature_server in signatures {
+ for s in signature_server {
+ http_request.headers_mut().insert(
+ AUTHORIZATION,
+ HeaderValue::from_str(&format!(
+ "X-Matrix origin={},key=\"{}\",sig=\"{}\"",
+ globals.server_name(),
+ s.0,
+ s.1
+ ))
+ .unwrap(),
+ );
+ }
+ }
+
+ let reqwest_request = reqwest::Request::try_from(http_request)
+ .expect("all http requests are valid reqwest requests");
+
+ let url = reqwest_request.url().clone();
+
+ let response = globals.federation_client().execute(reqwest_request).await;
+
+ match response {
+ Ok(mut response) => {
+ // reqwest::Response -> http::Response conversion
+ let status = response.status();
+ let mut http_response_builder = http::Response::builder()
+ .status(status)
+ .version(response.version());
+ mem::swap(
+ response.headers_mut(),
+ http_response_builder
+ .headers_mut()
+ .expect("http::response::Builder is usable"),
+ );
+
+ let body = response.bytes().await.unwrap_or_else(|e| {
+ warn!("server error {}", e);
+ Vec::new().into()
+ }); // TODO: handle timeout
+
+ if status != 200 {
+ warn!(
+ "{} {}: {}",
+ url,
+ status,
+ String::from_utf8_lossy(&body)
+ .lines()
+ .collect::<Vec<_>>()
+ .join(" ")
+ );
+ }
+
+ let http_response = http_response_builder
+ .body(body)
+ .expect("reqwest body is valid http body");
+
+ if status == 200 {
+ let response = T::IncomingResponse::try_from_http_response(http_response);
+ if response.is_ok() && write_destination_to_cache {
+ globals.actual_destination_cache.write().unwrap().insert(
+ Box::<ServerName>::from(destination),
+ (actual_destination, host),
+ );
+ }
+
+ response.map_err(|e| {
+ warn!(
+ "Invalid 200 response from {} on: {} {}",
+ &destination, url, e
+ );
+ Error::BadServerResponse("Server returned bad 200 response.")
+ })
+ } else {
+ Err(Error::FederationError(
+ destination.to_owned(),
+ RumaError::try_from_http_response(http_response).map_err(|e| {
+ warn!(
+ "Invalid {} response from {} on: {} {}",
+ status, &destination, url, e
+ );
+ Error::BadServerResponse("Server returned bad error response.")
+ })?,
+ ))
+ }
+ }
+ Err(e) => Err(e.into()),
+ }
+}
+
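+/// Parses an IP literal with or without an explicit port; the default
+/// federation port 8448 is assumed when none is given. Illustrative mapping:
+///
+/// ```text
+/// "198.51.100.3:8448"  -> Some(FedDest::Literal(198.51.100.3:8448))
+/// "198.51.100.3"       -> Some(FedDest::Literal(198.51.100.3:8448))
+/// "matrix.example.org" -> None (not an IP literal)
+/// ```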
+fn get_ip_with_port(destination_str: &str) -> Option<FedDest> {
+ if let Ok(destination) = destination_str.parse::<SocketAddr>() {
+ Some(FedDest::Literal(destination))
+ } else if let Ok(ip_addr) = destination_str.parse::<IpAddr>() {
+ Some(FedDest::Literal(SocketAddr::new(ip_addr, 8448)))
+ } else {
+ None
+ }
+}
+
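+/// Appends the default federation port `:8448` to a hostname that lacks one.
+/// Illustrative mapping:
+///
+/// ```text
+/// "example.org"      -> FedDest::Named("example.org", ":8448")
+/// "example.org:1234" -> FedDest::Named("example.org", ":1234")
+/// ```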
+fn add_port_to_hostname(destination_str: &str) -> FedDest {
+ let (host, port) = match destination_str.find(':') {
+ None => (destination_str, ":8448"),
+ Some(pos) => destination_str.split_at(pos),
+ };
+ FedDest::Named(host.to_owned(), port.to_owned())
+}
+
+/// Returns the actual destination and the Host header to use.
+///
+/// Implemented according to the specification at
+/// <https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names>.
+/// Numbers in the comments below refer to the bullet points in the linked
+/// section of the specification.
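+///
+/// As a simple illustration (hostname is made up): resolving `example.org`
+/// when it serves no `/.well-known/matrix/server` file and has no SRV record
+/// falls through to step 5 and yields
+/// `(FedDest::Named("example.org", ":8448"), FedDest::Named("example.org", ":8448"))`.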
+#[tracing::instrument(skip(globals))]
+async fn find_actual_destination(
+ globals: &crate::database::globals::Globals,
+ destination: &'_ ServerName,
+) -> (FedDest, FedDest) {
+ let destination_str = destination.as_str().to_owned();
+ let mut hostname = destination_str.clone();
+ let actual_destination = match get_ip_with_port(&destination_str) {
+ Some(host_port) => {
+ // 1: IP literal with provided or default port
+ host_port
+ }
+ None => {
+ if let Some(pos) = destination_str.find(':') {
+ // 2: Hostname with included port
+ let (host, port) = destination_str.split_at(pos);
+ FedDest::Named(host.to_owned(), port.to_owned())
+ } else {
+ match request_well_known(globals, destination.as_str()).await {
+ // 3: A .well-known file is available
+ Some(delegated_hostname) => {
+ hostname = add_port_to_hostname(&delegated_hostname).into_uri_string();
+ match get_ip_with_port(&delegated_hostname) {
+ Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file
+ None => {
+ if let Some(pos) = delegated_hostname.find(':') {
+ // 3.2: Hostname with port in .well-known file
+ let (host, port) = delegated_hostname.split_at(pos);
+ FedDest::Named(host.to_owned(), port.to_owned())
+ } else {
+ // Delegated hostname has no port in this branch
+ if let Some(hostname_override) =
+ query_srv_record(globals, &delegated_hostname).await
+ {
+ // 3.3: SRV lookup successful
+ let force_port = hostname_override.port();
+
+ if let Ok(override_ip) = globals
+ .dns_resolver()
+ .lookup_ip(hostname_override.hostname())
+ .await
+ {
+ globals.tls_name_override.write().unwrap().insert(
+ delegated_hostname.clone(),
+ (
+ override_ip.iter().collect(),
+ force_port.unwrap_or(8448),
+ ),
+ );
+ } else {
+ warn!("Using SRV record, but could not resolve to IP");
+ }
+
+ if let Some(port) = force_port {
+ FedDest::Named(delegated_hostname, format!(":{}", port))
+ } else {
+ add_port_to_hostname(&delegated_hostname)
+ }
+ } else {
+ // 3.4: No SRV records, just use the hostname from .well-known
+ add_port_to_hostname(&delegated_hostname)
+ }
+ }
+ }
+ }
+ }
+                    // 4: No .well-known or an error occurred
+ None => {
+ match query_srv_record(globals, &destination_str).await {
+ // 4: SRV record found
+ Some(hostname_override) => {
+ let force_port = hostname_override.port();
+
+ if let Ok(override_ip) = globals
+ .dns_resolver()
+ .lookup_ip(hostname_override.hostname())
+ .await
+ {
+ globals.tls_name_override.write().unwrap().insert(
+ hostname.clone(),
+ (override_ip.iter().collect(), force_port.unwrap_or(8448)),
+ );
+ } else {
+ warn!("Using SRV record, but could not resolve to IP");
+ }
+
+ if let Some(port) = force_port {
+ FedDest::Named(hostname.clone(), format!(":{}", port))
+ } else {
+ add_port_to_hostname(&hostname)
+ }
+ }
+ // 5: No SRV record found
+ None => add_port_to_hostname(&destination_str),
+ }
+ }
+ }
+ }
+ }
+ };
+
+ // Can't use get_ip_with_port here because we don't want to add a port
+ // to an IP address if it wasn't specified
+ let hostname = if let Ok(addr) = hostname.parse::<SocketAddr>() {
+ FedDest::Literal(addr)
+ } else if let Ok(addr) = hostname.parse::<IpAddr>() {
+ FedDest::Named(addr.to_string(), ":8448".to_owned())
+ } else if let Some(pos) = hostname.find(':') {
+ let (host, port) = hostname.split_at(pos);
+ FedDest::Named(host.to_owned(), port.to_owned())
+ } else {
+ FedDest::Named(hostname, ":8448".to_owned())
+ };
+ (actual_destination, hostname)
+}
+
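+/// Looks up the `_matrix._tcp.<hostname>` SRV record and maps the first
+/// answer to a `FedDest`. Illustrative record and result:
+///
+/// ```text
+/// _matrix._tcp.example.org. 3600 IN SRV 10 5 8448 matrix.example.org.
+///     -> FedDest::Named("matrix.example.org", ":8448")
+/// ```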
+#[tracing::instrument(skip(globals))]
+async fn query_srv_record(
+ globals: &crate::database::globals::Globals,
+ hostname: &'_ str,
+) -> Option<FedDest> {
+ if let Ok(Some(host_port)) = globals
+ .dns_resolver()
+ .srv_lookup(format!("_matrix._tcp.{}", hostname))
+ .await
+ .map(|srv| {
+ srv.iter().next().map(|result| {
+ FedDest::Named(
+ result.target().to_string().trim_end_matches('.').to_owned(),
+ format!(":{}", result.port()),
+ )
+ })
+ })
+ {
+ Some(host_port)
+ } else {
+ None
+ }
+}
+
+#[tracing::instrument(skip(globals))]
+async fn request_well_known(
+ globals: &crate::database::globals::Globals,
+ destination: &str,
+) -> Option<String> {
+ let body: serde_json::Value = serde_json::from_str(
+ &globals
+ .default_client()
+ .get(&format!(
+ "https://{}/.well-known/matrix/server",
+ destination
+ ))
+ .send()
+ .await
+ .ok()?
+ .text()
+ .await
+ .ok()?,
+ )
+ .ok()?;
+ Some(body.get("m.server")?.as_str()?.to_owned())
+}
+
+/// # `GET /_matrix/federation/v1/version`
+///
+/// Get version information on this server.
+pub async fn get_server_version_route(
+ db: DatabaseGuard,
+ _body: Ruma<get_server_version::v1::Request>,
+) -> Result<get_server_version::v1::Response> {
+ if !db.globals.allow_federation() {
+ return Err(Error::bad_config("Federation is disabled."));
+ }
+
+ Ok(get_server_version::v1::Response {
+ server: Some(get_server_version::v1::Server {
+ name: Some("Conduit".to_owned()),
+ version: Some(env!("CARGO_PKG_VERSION").to_owned()),
+ }),
+ })
+}
+
+/// # `GET /_matrix/key/v2/server`
+///
+/// Gets the public signing keys of this server.
+///
+/// - Matrix does not support invalidating public keys, so the key returned by this will be valid
+/// forever.
+// Response type for this endpoint is Json because we need to calculate a signature for the response
+pub async fn get_server_keys_route(db: DatabaseGuard) -> Result<impl IntoResponse> {
+ if !db.globals.allow_federation() {
+ return Err(Error::bad_config("Federation is disabled."));
+ }
+
+ let mut verify_keys: BTreeMap<Box<ServerSigningKeyId>, VerifyKey> = BTreeMap::new();
+ verify_keys.insert(
+ format!("ed25519:{}", db.globals.keypair().version())
+ .try_into()
+ .expect("found invalid server signing keys in DB"),
+ VerifyKey {
+ key: Base64::new(db.globals.keypair().public_key().to_vec()),
+ },
+ );
+ let mut response = serde_json::from_slice(
+ get_server_keys::v2::Response {
+ server_key: Raw::new(&ServerSigningKeys {
+ server_name: db.globals.server_name().to_owned(),
+ verify_keys,
+ old_verify_keys: BTreeMap::new(),
+ signatures: BTreeMap::new(),
+ valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time(
+ SystemTime::now() + Duration::from_secs(86400 * 7),
+ )
+ .expect("time is valid"),
+ })
+ .expect("static conversion, no errors"),
+ }
+ .try_into_http_response::<Vec<u8>>()
+ .unwrap()
+ .body(),
+ )
+ .unwrap();
+
+ ruma::signatures::sign_json(
+ db.globals.server_name().as_str(),
+ db.globals.keypair(),
+ &mut response,
+ )
+ .unwrap();
+
+ Ok(Json(response))
+}
+
+/// # `GET /_matrix/key/v2/server/{keyId}`
+///
+/// Gets the public signing keys of this server.
+///
+/// - Matrix does not support invalidating public keys, so the key returned by this will be valid
+/// forever.
+pub async fn get_server_keys_deprecated_route(db: DatabaseGuard) -> impl IntoResponse {
+ get_server_keys_route(db).await
+}
+
+/// # `POST /_matrix/federation/v1/publicRooms`
+///
+/// Lists the public rooms on this server.
+pub async fn get_public_rooms_filtered_route(
+ db: DatabaseGuard,
+ body: Ruma<get_public_rooms_filtered::v1::IncomingRequest>,
+) -> Result<get_public_rooms_filtered::v1::Response> {
+ if !db.globals.allow_federation() {
+ return Err(Error::bad_config("Federation is disabled."));
+ }
+
+ let response = client_server::get_public_rooms_filtered_helper(
+ &db,
+ None,
+ body.limit,
+ body.since.as_deref(),
+ &body.filter,
+ &body.room_network,
+ )
+ .await?;
+
+ Ok(get_public_rooms_filtered::v1::Response {
+ chunk: response.chunk,
+ prev_batch: response.prev_batch,
+ next_batch: response.next_batch,
+ total_room_count_estimate: response.total_room_count_estimate,
+ })
+}
+
+/// # `GET /_matrix/federation/v1/publicRooms`
+///
+/// Lists the public rooms on this server.
+pub async fn get_public_rooms_route(
+ db: DatabaseGuard,
+ body: Ruma<get_public_rooms::v1::IncomingRequest>,
+) -> Result<get_public_rooms::v1::Response> {
+ if !db.globals.allow_federation() {
+ return Err(Error::bad_config("Federation is disabled."));
+ }
+
+ let response = client_server::get_public_rooms_filtered_helper(
+ &db,
+ None,
+ body.limit,
+ body.since.as_deref(),
+ &IncomingFilter::default(),
+ &IncomingRoomNetwork::Matrix,
+ )
+ .await?;
+
+ Ok(get_public_rooms::v1::Response {
+ chunk: response.chunk,
+ prev_batch: response.prev_batch,
+ next_batch: response.next_batch,
+ total_room_count_estimate: response.total_room_count_estimate,
+ })
+}
+
+/// # `PUT /_matrix/federation/v1/send/{txnId}`
+///
+/// Push EDUs and PDUs to this server.
+pub async fn send_transaction_message_route(
+ db: DatabaseGuard,
+ body: Ruma<send_transaction_message::v1::IncomingRequest>,
+) -> Result<send_transaction_message::v1::Response> {
+ if !db.globals.allow_federation() {
+ return Err(Error::bad_config("Federation is disabled."));
+ }
+
+ let sender_servername = body
+ .sender_servername
+ .as_ref()
+ .expect("server is authenticated");
+
+ let mut resolved_map = BTreeMap::new();
+
+ let pub_key_map = RwLock::new(BTreeMap::new());
+
+ // This is all the auth_events that have been recursively fetched so they don't have to be
+ // deserialized over and over again.
+ // TODO: make this persist across requests but not in a DB Tree (in globals?)
+ // TODO: This could potentially also be some sort of trie (suffix tree) like structure so
+ // that once an auth event is known it would know (using indexes maybe) all of the auth
+ // events that it references.
+ // let mut auth_cache = EventMap::new();
+
+ for pdu in &body.pdus {
+ // We do not add the event_id field to the pdu here because of signature and hashes checks
+ let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu, &db) {
+ Ok(t) => t,
+ Err(_) => {
+ // Event could not be converted to canonical json
+ continue;
+ }
+ };
+
+ // 0. Check the server is in the room
+ let room_id = match value
+ .get("room_id")
+ .and_then(|id| RoomId::parse(id.as_str()?).ok())
+ {
+ Some(id) => id,
+ None => {
+ // Event is invalid
+ resolved_map.insert(event_id, Err("Event needs a valid RoomId.".to_owned()));
+ continue;
+ }
+ };
+
+ acl_check(&sender_servername, &room_id, &db)?;
+
+ let mutex = Arc::clone(
+ db.globals
+ .roomid_mutex_federation
+ .write()
+ .unwrap()
+ .entry(room_id.to_owned())
+ .or_default(),
+ );
+ let mutex_lock = mutex.lock().await;
+ let start_time = Instant::now();
+ resolved_map.insert(
+ event_id.clone(),
+ handle_incoming_pdu(
+ &sender_servername,
+ &event_id,
+ &room_id,
+ value,
+ true,
+ &db,
+ &pub_key_map,
+ )
+ .await
+ .map(|_| ()),
+ );
+ drop(mutex_lock);
+
+ let elapsed = start_time.elapsed();
+ warn!(
+ "Handling transaction of event {} took {}m{}s",
+ event_id,
+ elapsed.as_secs() / 60,
+ elapsed.as_secs() % 60
+ );
+ }
+
+ for pdu in &resolved_map {
+ if let Err(e) = pdu.1 {
+ if e != "Room is unknown to this server." {
+ warn!("Incoming PDU failed {:?}", pdu);
+ }
+ }
+ }
+
+ for edu in body
+ .edus
+ .iter()
+ .filter_map(|edu| serde_json::from_str::<Edu>(edu.json().get()).ok())
+ {
+ match edu {
+ Edu::Presence(_) => {}
+ Edu::Receipt(receipt) => {
+ for (room_id, room_updates) in receipt.receipts {
+ for (user_id, user_updates) in room_updates.read {
+ if let Some((event_id, _)) = user_updates
+ .event_ids
+ .iter()
+ .filter_map(|id| {
+ db.rooms.get_pdu_count(id).ok().flatten().map(|r| (id, r))
+ })
+ .max_by_key(|(_, count)| *count)
+ {
+ let mut user_receipts = BTreeMap::new();
+ user_receipts.insert(user_id.clone(), user_updates.data);
+
+ let mut receipts = BTreeMap::new();
+ receipts.insert(ReceiptType::Read, user_receipts);
+
+ let mut receipt_content = BTreeMap::new();
+ receipt_content.insert(event_id.to_owned(), receipts);
+
+ let event = ReceiptEvent {
+ content: ReceiptEventContent(receipt_content),
+ room_id: room_id.clone(),
+ };
+ db.rooms.edus.readreceipt_update(
+ &user_id,
+ &room_id,
+ event,
+ &db.globals,
+ )?;
+ } else {
+ // TODO fetch missing events
+ info!("No known event ids in read receipt: {:?}", user_updates);
+ }
+ }
+ }
+ }
+ Edu::Typing(typing) => {
+ if db.rooms.is_joined(&typing.user_id, &typing.room_id)? {
+ if typing.typing {
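+                        // Let the typing indicator expire 3 seconds from now.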
+ db.rooms.edus.typing_add(
+ &typing.user_id,
+ &typing.room_id,
+ 3000 + utils::millis_since_unix_epoch(),
+ &db.globals,
+ )?;
+ } else {
+ db.rooms.edus.typing_remove(
+ &typing.user_id,
+ &typing.room_id,
+ &db.globals,
+ )?;
+ }
+ }
+ }
+ Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, .. }) => {
+ db.users
+ .mark_device_key_update(&user_id, &db.rooms, &db.globals)?;
+ }
+ Edu::DirectToDevice(DirectDeviceContent {
+ sender,
+ ev_type,
+ message_id,
+ messages,
+ }) => {
+ // Check if this is a new transaction id
+ if db
+ .transaction_ids
+ .existing_txnid(&sender, None, &message_id)?
+ .is_some()
+ {
+ continue;
+ }
+
+ for (target_user_id, map) in &messages {
+ for (target_device_id_maybe, event) in map {
+ match target_device_id_maybe {
+ DeviceIdOrAllDevices::DeviceId(target_device_id) => {
+ db.users.add_to_device_event(
+ &sender,
+ target_user_id,
+ target_device_id,
+ &ev_type.to_string(),
+ event.deserialize_as().map_err(|_| {
+ Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Event is invalid",
+ )
+ })?,
+ &db.globals,
+ )?
+ }
+
+ DeviceIdOrAllDevices::AllDevices => {
+ for target_device_id in db.users.all_device_ids(target_user_id) {
+ db.users.add_to_device_event(
+ &sender,
+ target_user_id,
+ &target_device_id?,
+ &ev_type.to_string(),
+ event.deserialize_as().map_err(|_| {
+ Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Event is invalid",
+ )
+ })?,
+ &db.globals,
+ )?;
+ }
+ }
+ }
+ }
+ }
+
+ // Save transaction id with empty data
+ db.transaction_ids
+ .add_txnid(&sender, None, &message_id, &[])?;
+ }
+ Edu::SigningKeyUpdate(SigningKeyUpdateContent {
+ user_id,
+ master_key,
+ self_signing_key,
+ }) => {
+ if user_id.server_name() != sender_servername {
+ continue;
+ }
+ if let Some(master_key) = master_key {
+ db.users.add_cross_signing_keys(
+ &user_id,
+ &master_key,
+ &self_signing_key,
+ &None,
+ &db.rooms,
+ &db.globals,
+ )?;
+ }
+ }
+ Edu::_Custom(_) => {}
+ }
+ }
+
+ db.flush()?;
+
+ Ok(send_transaction_message::v1::Response { pdus: resolved_map })
+}
+
+/// An async function that can recursively call itself.
+type AsyncRecursiveType<'a, T> = Pin<Box<dyn Future<Output = T> + 'a + Send>>;
+
+/// When receiving an event one needs to:
+/// 0. Check the server is in the room
+/// 1. Skip the PDU if we already know about it
+/// 2. Check signatures, otherwise drop
+/// 3. Check content hash, redact if doesn't match
+/// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not
+/// timeline events
+/// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are
+/// also rejected "due to auth events"
+/// 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events
+/// 7. Persist this event as an outlier
+/// 8. If not timeline event: stop
+/// 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline
+/// events
+/// 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities
+/// doing all the checks in this list starting at 1. These are not timeline events
+/// 11. Check the auth of the event passes based on the state of the event
+/// 12. Ensure that the state is derived from the previous current state (i.e. we calculated by
+/// doing state res where one of the inputs was a previously trusted set of state, don't just
+/// trust a set of state we got from a remote)
+/// 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail"
+/// it
+/// 14. Use state resolution to find new room state
+// We use some AsyncRecursiveType hacks here so we can call this async function recursively.
+#[tracing::instrument(skip(value, is_timeline_event, db, pub_key_map))]
+pub(crate) async fn handle_incoming_pdu<'a>(
+ origin: &'a ServerName,
+ event_id: &'a EventId,
+ room_id: &'a RoomId,
+ value: BTreeMap<String, CanonicalJsonValue>,
+ is_timeline_event: bool,
+ db: &'a Database,
+ pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
+) -> Result<Option<Vec<u8>>, String> {
+ match db.rooms.exists(room_id) {
+ Ok(true) => {}
+ _ => {
+ return Err("Room is unknown to this server.".to_owned());
+ }
+ }
+
+ match db.rooms.is_disabled(room_id) {
+ Ok(false) => {}
+ _ => {
+ return Err("Federation of this room is currently disabled on this server.".to_owned());
+ }
+ }
+
+ // 1. Skip the PDU if we already have it as a timeline event
+ if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(event_id) {
+ return Ok(Some(pdu_id.to_vec()));
+ }
+
+ let create_event = db
+ .rooms
+ .room_state_get(room_id, &StateEventType::RoomCreate, "")
+ .map_err(|_| "Failed to ask database for event.".to_owned())?
+ .ok_or_else(|| "Failed to find create event in db.".to_owned())?;
+
+ let first_pdu_in_room = db
+ .rooms
+ .first_pdu_in_room(room_id)
+ .map_err(|_| "Error loading first room event.".to_owned())?
+ .expect("Room exists");
+
+ let (incoming_pdu, val) = handle_outlier_pdu(
+ origin,
+ &create_event,
+ event_id,
+ room_id,
+ value,
+ db,
+ pub_key_map,
+ )
+ .await?;
+
+ // 8. if not timeline event: stop
+ if !is_timeline_event {
+ return Ok(None);
+ }
+
+ if incoming_pdu.origin_server_ts < first_pdu_in_room.origin_server_ts {
+ return Ok(None);
+ }
+
+ // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events
+ let mut graph: HashMap<Arc<EventId>, _> = HashMap::new();
+ let mut eventid_info = HashMap::new();
+ let mut todo_outlier_stack: Vec<Arc<EventId>> = incoming_pdu.prev_events.clone();
+
+ let mut amount = 0;
+
+ while let Some(prev_event_id) = todo_outlier_stack.pop() {
+ if let Some((pdu, json_opt)) = fetch_and_handle_outliers(
+ db,
+ origin,
+ &[prev_event_id.clone()],
+ &create_event,
+ room_id,
+ pub_key_map,
+ )
+ .await
+ .pop()
+ {
+ if amount > 100 {
+ // Max limit reached
+ warn!("Max prev event limit reached!");
+ graph.insert(prev_event_id.clone(), HashSet::new());
+ continue;
+ }
+
+ if let Some(json) =
+ json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten())
+ {
+ if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts {
+ amount += 1;
+ for prev_prev in &pdu.prev_events {
+ if !graph.contains_key(prev_prev) {
+                            todo_outlier_stack.push(prev_prev.clone());
+ }
+ }
+
+ graph.insert(
+ prev_event_id.clone(),
+ pdu.prev_events.iter().cloned().collect(),
+ );
+ } else {
+ // Time based check failed
+ graph.insert(prev_event_id.clone(), HashSet::new());
+ }
+
+ eventid_info.insert(prev_event_id.clone(), (pdu, json));
+ } else {
+ // Get json failed
+ graph.insert(prev_event_id.clone(), HashSet::new());
+ }
+ } else {
+ // Fetch and handle failed
+ graph.insert(prev_event_id.clone(), HashSet::new());
+ }
+ }
+
+    let sorted = state_res::lexicographical_topological_sort(&graph, |event_id| {
+ // This return value is the key used for sorting events,
+ // events are then sorted by power level, time,
+ // and lexically by event_id.
+ Ok((
+ int!(0),
+ MilliSecondsSinceUnixEpoch(
+ eventid_info
+ .get(event_id)
+ .map_or_else(|| uint!(0), |info| info.0.origin_server_ts),
+ ),
+ ))
+ })
+ .map_err(|_| "Error sorting prev events".to_owned())?;
+
+ let mut errors = 0;
+    for prev_id in sorted {
+ match db.rooms.is_disabled(room_id) {
+ Ok(false) => {}
+ _ => {
+ return Err(
+ "Federation of this room is currently disabled on this server.".to_owned(),
+ );
+ }
+ }
+
+ if let Some((time, tries)) = db
+ .globals
+ .bad_event_ratelimiter
+ .read()
+ .unwrap()
+ .get(&*prev_id)
+ {
+ // Exponential backoff
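+            // (5 minutes * tries * tries: 5m, 20m, 45m, ..., capped at 24h)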
+ let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries);
+ if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
+ min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
+ }
+
+ if time.elapsed() < min_elapsed_duration {
+ info!("Backing off from {}", prev_id);
+ continue;
+ }
+ }
+
+ if errors >= 5 {
+ break;
+ }
+ if let Some((pdu, json)) = eventid_info.remove(&*prev_id) {
+ if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts {
+ continue;
+ }
+
+ let start_time = Instant::now();
+ db.globals
+ .roomid_federationhandletime
+ .write()
+ .unwrap()
+ .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time));
+ if let Err(e) = upgrade_outlier_to_timeline_pdu(
+ pdu,
+ json,
+ &create_event,
+ origin,
+ db,
+ room_id,
+ pub_key_map,
+ )
+ .await
+ {
+ errors += 1;
+ warn!("Prev event {} failed: {}", prev_id, e);
+ match db
+ .globals
+ .bad_event_ratelimiter
+ .write()
+ .unwrap()
+ .entry((*prev_id).to_owned())
+ {
+ hash_map::Entry::Vacant(e) => {
+ e.insert((Instant::now(), 1));
+ }
+ hash_map::Entry::Occupied(mut e) => {
+ *e.get_mut() = (Instant::now(), e.get().1 + 1)
+ }
+ }
+ }
+ let elapsed = start_time.elapsed();
+ db.globals
+ .roomid_federationhandletime
+ .write()
+ .unwrap()
+ .remove(&room_id.to_owned());
+ warn!(
+ "Handling prev event {} took {}m{}s",
+ prev_id,
+ elapsed.as_secs() / 60,
+ elapsed.as_secs() % 60
+ );
+ }
+ }
+
+ let start_time = Instant::now();
+ db.globals
+ .roomid_federationhandletime
+ .write()
+ .unwrap()
+ .insert(room_id.to_owned(), (event_id.to_owned(), start_time));
+ let r = upgrade_outlier_to_timeline_pdu(
+ incoming_pdu,
+ val,
+ &create_event,
+ origin,
+ db,
+ room_id,
+ pub_key_map,
+ )
+ .await;
+ db.globals
+ .roomid_federationhandletime
+ .write()
+ .unwrap()
+ .remove(&room_id.to_owned());
+
+ r
+}
+
+#[tracing::instrument(skip(create_event, value, db, pub_key_map))]
+fn handle_outlier_pdu<'a>(
+ origin: &'a ServerName,
+ create_event: &'a PduEvent,
+ event_id: &'a EventId,
+ room_id: &'a RoomId,
+ value: BTreeMap<String, CanonicalJsonValue>,
+ db: &'a Database,
+ pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
+) -> AsyncRecursiveType<'a, Result<(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>), String>> {
+ Box::pin(async move {
+        // TODO: For RoomVersion6 we must check that Raw<..> is canonical. Do we do that anywhere? See https://matrix.org/docs/spec/rooms/v6#canonical-json
+
+ // We go through all the signatures we see on the value and fetch the corresponding signing
+ // keys
+ fetch_required_signing_keys(&value, pub_key_map, db)
+ .await
+ .map_err(|e| e.to_string())?;
+
+ // 2. Check signatures, otherwise drop
+ // 3. check content hash, redact if doesn't match
+
+ let create_event_content: RoomCreateEventContent =
+ serde_json::from_str(create_event.content.get()).map_err(|e| {
+ warn!("Invalid create event: {}", e);
+ "Invalid create event in db.".to_owned()
+ })?;
+
+ let room_version_id = &create_event_content.room_version;
+ let room_version = RoomVersion::new(room_version_id).expect("room version is supported");
+
+ let mut val = match ruma::signatures::verify_event(
+ &*pub_key_map.read().map_err(|_| "RwLock is poisoned.")?,
+ &value,
+ room_version_id,
+ ) {
+ Err(e) => {
+ // Drop
+ warn!("Dropping bad event {}: {}", event_id, e);
+ return Err("Signature verification failed".to_owned());
+ }
+ Ok(ruma::signatures::Verified::Signatures) => {
+ // Redact
+ warn!("Calculated hash does not match: {}", event_id);
+ match ruma::signatures::redact(&value, room_version_id) {
+ Ok(obj) => obj,
+ Err(_) => return Err("Redaction failed".to_owned()),
+ }
+ }
+ Ok(ruma::signatures::Verified::All) => value,
+ };
+
+ // Now that we have checked the signature and hashes we can add the eventID and convert
+ // to our PduEvent type
+ val.insert(
+ "event_id".to_owned(),
+ CanonicalJsonValue::String(event_id.as_str().to_owned()),
+ );
+ let incoming_pdu = serde_json::from_value::<PduEvent>(
+ serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"),
+ )
+ .map_err(|_| "Event is not a valid PDU.".to_owned())?;
+
+ // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events
+ // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events"
+ // EDIT: Step 5 is not applied anymore because it failed too often
+ warn!("Fetching auth events for {}", incoming_pdu.event_id);
+ fetch_and_handle_outliers(
+ db,
+ origin,
+ &incoming_pdu
+ .auth_events
+ .iter()
+ .map(|x| Arc::from(&**x))
+ .collect::<Vec<_>>(),
+ create_event,
+ room_id,
+ pub_key_map,
+ )
+ .await;
+
+ // 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events
+ info!(
+ "Auth check for {} based on auth events",
+ incoming_pdu.event_id
+ );
+
+ // Build map of auth events
+ let mut auth_events = HashMap::new();
+ for id in &incoming_pdu.auth_events {
+ let auth_event = match db.rooms.get_pdu(id).map_err(|e| e.to_string())? {
+ Some(e) => e,
+ None => {
+ warn!("Could not find auth event {}", id);
+ continue;
+ }
+ };
+
+ match auth_events.entry((
+ auth_event.kind.to_string().into(),
+ auth_event
+ .state_key
+ .clone()
+ .expect("all auth events have state keys"),
+ )) {
+ hash_map::Entry::Vacant(v) => {
+ v.insert(auth_event);
+ }
+ hash_map::Entry::Occupied(_) => {
+ return Err(
+ "Auth event's type and state_key combination exists multiple times."
+ .to_owned(),
+ )
+ }
+ }
+ }
+
+ // The original create event must be in the auth events
+ if auth_events
+ .get(&(StateEventType::RoomCreate, "".to_owned()))
+ .map(|a| a.as_ref())
+ != Some(create_event)
+ {
+ return Err("Incoming event refers to wrong create event.".to_owned());
+ }
+
+ if !state_res::event_auth::auth_check(
+ &room_version,
+ &incoming_pdu,
+ None::<PduEvent>, // TODO: third party invite
+ |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())),
+ )
+ .map_err(|_e| "Auth check failed".to_owned())?
+ {
+ return Err("Event has failed auth check with auth events.".to_owned());
+ }
+
+ info!("Validation successful.");
+
+ // 7. Persist the event as an outlier.
+ db.rooms
+ .add_pdu_outlier(&incoming_pdu.event_id, &val)
+ .map_err(|_| "Failed to add pdu as outlier.".to_owned())?;
+ info!("Added pdu as outlier.");
+
+ Ok((Arc::new(incoming_pdu), val))
+ })
+}
+
+#[tracing::instrument(skip(incoming_pdu, val, create_event, db, pub_key_map))]
+async fn upgrade_outlier_to_timeline_pdu(
+ incoming_pdu: Arc<PduEvent>,
+ val: BTreeMap<String, CanonicalJsonValue>,
+ create_event: &PduEvent,
+ origin: &ServerName,
+ db: &Database,
+ room_id: &RoomId,
+ pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
+) -> Result<Option<Vec<u8>>, String> {
+ if let Ok(Some(pduid)) = db.rooms.get_pdu_id(&incoming_pdu.event_id) {
+ return Ok(Some(pduid));
+ }
+
+ if db
+ .rooms
+ .is_event_soft_failed(&incoming_pdu.event_id)
+ .map_err(|_| "Failed to ask db for soft fail".to_owned())?
+ {
+ return Err("Event has been soft failed".into());
+ }
+
+ info!("Upgrading {} to timeline pdu", incoming_pdu.event_id);
+
+ let create_event_content: RoomCreateEventContent =
+ serde_json::from_str(create_event.content.get()).map_err(|e| {
+ warn!("Invalid create event: {}", e);
+ "Invalid create event in db.".to_owned()
+ })?;
+
+ let room_version_id = &create_event_content.room_version;
+ let room_version = RoomVersion::new(room_version_id).expect("room version is supported");
+
+ // 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities
+ // doing all the checks in this list starting at 1. These are not timeline events.
+
+ // TODO: if we know the prev_events of the incoming event we can avoid the request and build
+ // the state from a known point and resolve if > 1 prev_event
+
+ info!("Requesting state at event");
+ let mut state_at_incoming_event = None;
+
+ if incoming_pdu.prev_events.len() == 1 {
+ let prev_event = &*incoming_pdu.prev_events[0];
+ let prev_event_sstatehash = db
+ .rooms
+ .pdu_shortstatehash(prev_event)
+ .map_err(|_| "Failed talking to db".to_owned())?;
+
+ let state = if let Some(shortstatehash) = prev_event_sstatehash {
+ Some(db.rooms.state_full_ids(shortstatehash).await)
+ } else {
+ None
+ };
+
+ if let Some(Ok(mut state)) = state {
+ info!("Using cached state");
+ let prev_pdu =
+ db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| {
+ "Could not find prev event, but we know the state.".to_owned()
+ })?;
+
+ if let Some(state_key) = &prev_pdu.state_key {
+ let shortstatekey = db
+ .rooms
+ .get_or_create_shortstatekey(
+ &prev_pdu.kind.to_string().into(),
+ state_key,
+ &db.globals,
+ )
+ .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
+
+ state.insert(shortstatekey, Arc::from(prev_event));
+ // Now it's the state after the pdu
+ }
+
+ state_at_incoming_event = Some(state);
+ }
+ } else {
+ info!("Calculating state at event using state res");
+ let mut extremity_sstatehashes = HashMap::new();
+
+ let mut okay = true;
+ for prev_eventid in &incoming_pdu.prev_events {
+ let prev_event = if let Ok(Some(pdu)) = db.rooms.get_pdu(prev_eventid) {
+ pdu
+ } else {
+ okay = false;
+ break;
+ };
+
+ let sstatehash = if let Ok(Some(s)) = db.rooms.pdu_shortstatehash(prev_eventid) {
+ s
+ } else {
+ okay = false;
+ break;
+ };
+
+ extremity_sstatehashes.insert(sstatehash, prev_event);
+ }
+
+ if okay {
+ let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len());
+ let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len());
+
+ for (sstatehash, prev_event) in extremity_sstatehashes {
+ let mut leaf_state: BTreeMap<_, _> = db
+ .rooms
+ .state_full_ids(sstatehash)
+ .await
+ .map_err(|_| "Failed to ask db for room state.".to_owned())?;
+
+ if let Some(state_key) = &prev_event.state_key {
+ let shortstatekey = db
+ .rooms
+ .get_or_create_shortstatekey(
+ &prev_event.kind.to_string().into(),
+ state_key,
+ &db.globals,
+ )
+ .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
+ leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id));
+ // Now it's the state after the pdu
+ }
+
+ let mut state = StateMap::with_capacity(leaf_state.len());
+ let mut starting_events = Vec::with_capacity(leaf_state.len());
+
+ for (k, id) in leaf_state {
+ if let Ok((ty, st_key)) = db.rooms.get_statekey_from_short(k) {
+ // FIXME: Undo .to_string().into() when StateMap
+ // is updated to use StateEventType
+ state.insert((ty.to_string().into(), st_key), id.clone());
+ } else {
+ warn!("Failed to get_statekey_from_short.");
+ }
+ starting_events.push(id);
+ }
+
+ auth_chain_sets.push(
+ get_auth_chain(room_id, starting_events, db)
+ .await
+ .map_err(|_| "Failed to load auth chain.".to_owned())?
+ .collect(),
+ );
+
+ fork_states.push(state);
+ }
+
+ let lock = db.globals.stateres_mutex.lock();
+
+ let result = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| {
+ let res = db.rooms.get_pdu(id);
+ if let Err(e) = &res {
+                    error!("Failed to fetch event: {}", e);
+ }
+ res.ok().flatten()
+ });
+ drop(lock);
+
+ state_at_incoming_event = match result {
+ Ok(new_state) => Some(
+ new_state
+ .into_iter()
+ .map(|((event_type, state_key), event_id)| {
+ let shortstatekey = db
+ .rooms
+ .get_or_create_shortstatekey(
+ &event_type.to_string().into(),
+ &state_key,
+ &db.globals,
+ )
+ .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?;
+ Ok((shortstatekey, event_id))
+ })
+ .collect::<Result<_, String>>()?,
+ ),
+ Err(e) => {
+                warn!("State resolution on prev events failed: either an event could not be found or it failed to deserialize: {}", e);
+ None
+ }
+ }
+ }
+ }
+
+ if state_at_incoming_event.is_none() {
+ info!("Calling /state_ids");
+        // Call /state_ids to find out what the state at this pdu is. We trust the server's
+        // response to some extent, but we still do a lot of checks on the events
+ match db
+ .sending
+ .send_federation_request(
+ &db.globals,
+ origin,
+ get_room_state_ids::v1::Request {
+ room_id,
+ event_id: &incoming_pdu.event_id,
+ },
+ )
+ .await
+ {
+ Ok(res) => {
+ info!("Fetching state events at event.");
+ let state_vec = fetch_and_handle_outliers(
+ db,
+ origin,
+ &res.pdu_ids
+ .iter()
+ .map(|x| Arc::from(&**x))
+ .collect::<Vec<_>>(),
+ create_event,
+ room_id,
+ pub_key_map,
+ )
+ .await;
+
+ let mut state: BTreeMap<_, Arc<EventId>> = BTreeMap::new();
+ for (pdu, _) in state_vec {
+ let state_key = pdu
+ .state_key
+ .clone()
+ .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?;
+
+ let shortstatekey = db
+ .rooms
+ .get_or_create_shortstatekey(
+ &pdu.kind.to_string().into(),
+ &state_key,
+ &db.globals,
+ )
+ .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
+
+ match state.entry(shortstatekey) {
+ btree_map::Entry::Vacant(v) => {
+ v.insert(Arc::from(&*pdu.event_id));
+ }
+ btree_map::Entry::Occupied(_) => return Err(
+ "State event's type and state_key combination exists multiple times."
+ .to_owned(),
+ ),
+ }
+ }
+
+ // The original create event must still be in the state
+ let create_shortstatekey = db
+ .rooms
+ .get_shortstatekey(&StateEventType::RoomCreate, "")
+ .map_err(|_| "Failed to talk to db.")?
+ .expect("Room exists");
+
+ if state.get(&create_shortstatekey).map(|id| id.as_ref())
+ != Some(&create_event.event_id)
+ {
+ return Err("Incoming event refers to wrong create event.".to_owned());
+ }
+
+ state_at_incoming_event = Some(state);
+ }
+ Err(e) => {
+ warn!("Fetching state for event failed: {}", e);
+ return Err("Fetching state for event failed".into());
+ }
+ };
+ }
+
+ let state_at_incoming_event =
+ state_at_incoming_event.expect("we always set this to some above");
+
+ info!("Starting auth check");
+ // 11. Check the auth of the event passes based on the state of the event
+ let check_result = state_res::event_auth::auth_check(
+ &room_version,
+ &incoming_pdu,
+ None::<PduEvent>, // TODO: third party invite
+ |k, s| {
+ db.rooms
+ .get_shortstatekey(&k.to_string().into(), s)
+ .ok()
+ .flatten()
+ .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey))
+ .and_then(|event_id| db.rooms.get_pdu(event_id).ok().flatten())
+ },
+ )
+ .map_err(|_e| "Auth check failed.".to_owned())?;
+
+ if !check_result {
+ return Err("Event has failed auth check with state at the event.".into());
+ }
+ info!("Auth check succeeded");
+
+ // We start looking at current room state now, so lets lock the room
+
+ let mutex_state = Arc::clone(
+ db.globals
+ .roomid_mutex_state
+ .write()
+ .unwrap()
+ .entry(room_id.to_owned())
+ .or_default(),
+ );
+ let state_lock = mutex_state.lock().await;
+
+ // Now we calculate the set of extremities this room has after the incoming event has been
+ // applied. We start with the previous extremities (aka leaves)
+ info!("Calculating extremities");
+ let mut extremities = db
+ .rooms
+ .get_pdu_leaves(room_id)
+ .map_err(|_| "Failed to load room leaves".to_owned())?;
+
+ // Remove any forward extremities that are referenced by this incoming event's prev_events
+ for prev_event in &incoming_pdu.prev_events {
+ if extremities.contains(prev_event) {
+ extremities.remove(prev_event);
+ }
+ }
+
+    // Only keep those extremities that have not been referenced yet
+ extremities.retain(|id| !matches!(db.rooms.is_event_referenced(room_id, id), Ok(true)));
+
+ info!("Compressing state at event");
+ let state_ids_compressed = state_at_incoming_event
+ .iter()
+ .map(|(shortstatekey, id)| {
+ db.rooms
+ .compress_state_event(*shortstatekey, id, &db.globals)
+ .map_err(|_| "Failed to compress_state_event".to_owned())
+ })
+ .collect::<Result<_, _>>()?;
+
+ // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it
+ info!("Starting soft fail auth check");
+
+ let auth_events = db
+ .rooms
+ .get_auth_events(
+ room_id,
+ &incoming_pdu.kind,
+ &incoming_pdu.sender,
+ incoming_pdu.state_key.as_deref(),
+ &incoming_pdu.content,
+ )
+ .map_err(|_| "Failed to get_auth_events.".to_owned())?;
+
+ let soft_fail = !state_res::event_auth::auth_check(
+ &room_version,
+ &incoming_pdu,
+ None::<PduEvent>,
+ |k, s| auth_events.get(&(k.clone(), s.to_owned())),
+ )
+ .map_err(|_e| "Auth check failed.".to_owned())?;
+
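+ // A soft-failed event passed auth against its own state but not against the
+ // room's current state: we still store it (and other events may reference it),
+ // but it never becomes a forward extremity and is not sent to clients.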
+ if soft_fail {
+ append_incoming_pdu(
+ db,
+ &incoming_pdu,
+ val,
+ extremities.iter().map(Deref::deref),
+ state_ids_compressed,
+ soft_fail,
+ &state_lock,
+ )
+ .map_err(|e| {
+ warn!("Failed to add pdu to db: {}", e);
+ "Failed to add pdu to db.".to_owned()
+ })?;
+
+ // Soft fail, we keep the event as an outlier but don't add it to the timeline
+ warn!("Event was soft failed: {:?}", incoming_pdu);
+ db.rooms
+ .mark_event_soft_failed(&incoming_pdu.event_id)
+ .map_err(|_| "Failed to set soft failed flag".to_owned())?;
+ return Err("Event has been soft failed".into());
+ }
+
+ if incoming_pdu.state_key.is_some() {
+ info!("Loading current room state ids");
+ let current_sstatehash = db
+ .rooms
+ .current_shortstatehash(room_id)
+ .map_err(|_| "Failed to load current state hash.".to_owned())?
+ .expect("every room has state");
+
+ let current_state_ids = db
+ .rooms
+ .state_full_ids(current_sstatehash)
+ .await
+ .map_err(|_| "Failed to load room state.")?;
+
+ info!("Preparing for stateres to derive new room state");
+ let mut extremity_sstatehashes = HashMap::new();
+
+ info!("Loading extremities");
+ for id in &extremities {
+ match db
+ .rooms
+ .get_pdu(id)
+ .map_err(|_| "Failed to ask db for pdu.".to_owned())?
+ {
+ Some(leaf_pdu) => {
+ extremity_sstatehashes.insert(
+ db.rooms
+ .pdu_shortstatehash(&leaf_pdu.event_id)
+ .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())?
+ .ok_or_else(|| {
+ error!(
+ "Found extremity pdu with no statehash in db: {:?}",
+ leaf_pdu
+ );
+ "Found pdu with no statehash in db.".to_owned()
+ })?,
+ leaf_pdu,
+ );
+ }
+ _ => {
+ error!("Missing state snapshot for {:?}", id);
+ return Err("Missing state snapshot.".to_owned());
+ }
+ }
+ }
+
+ let mut fork_states = Vec::new();
+
+ // 12. Ensure that the state is derived from the previous current state (i.e. we
+ // calculate it by doing state res where one of the inputs is a previously trusted
+ // set of state; we don't just trust a set of state we got from a remote).
+
+ // We do this by adding the current state to the list of fork states
+ extremity_sstatehashes.remove(&current_sstatehash);
+ fork_states.push(current_state_ids);
+
+ // We also add state after incoming event to the fork states
+ let mut state_after = state_at_incoming_event.clone();
+ if let Some(state_key) = &incoming_pdu.state_key {
+ let shortstatekey = db
+ .rooms
+ .get_or_create_shortstatekey(
+ &incoming_pdu.kind.to_string().into(),
+ state_key,
+ &db.globals,
+ )
+ .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
+
+ state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id));
+ }
+ fork_states.push(state_after);
+
+ let mut update_state = false;
+ // 14. Use state resolution to find new room state
+ let new_room_state = if fork_states.is_empty() {
+ return Err("State is empty.".to_owned());
+ } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) {
+ info!("State resolution trivial");
+ // There was only one state, so it has to be the room's current state (because that is
+ // always included)
+ fork_states[0]
+ .iter()
+ .map(|(k, id)| {
+ db.rooms
+ .compress_state_event(*k, id, &db.globals)
+ .map_err(|_| "Failed to compress_state_event.".to_owned())
+ })
+ .collect::<Result<_, _>>()?
+ } else {
+ info!("Loading auth chains");
+ // We do need to force an update to this room's state
+ update_state = true;
+
+ let mut auth_chain_sets = Vec::new();
+ for state in &fork_states {
+ auth_chain_sets.push(
+ get_auth_chain(
+ room_id,
+ state.iter().map(|(_, id)| id.clone()).collect(),
+ db,
+ )
+ .await
+ .map_err(|_| "Failed to load auth chain.".to_owned())?
+ .collect(),
+ );
+ }
+
+ info!("Loading fork states");
+
+ let fork_states: Vec<_> = fork_states
+ .into_iter()
+ .map(|map| {
+ map.into_iter()
+ .filter_map(|(k, id)| {
+ db.rooms
+ .get_statekey_from_short(k)
+ // FIXME: Undo .to_string().into() when StateMap
+ // is updated to use StateEventType
+ .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id))
+ .map_err(|e| warn!("Failed to get_statekey_from_short: {}", e))
+ .ok()
+ })
+ .collect::<StateMap<_>>()
+ })
+ .collect();
+
+ info!("Resolving state");
+
+ let lock = db.globals.stateres_mutex.lock();
+ let state = match state_res::resolve(
+ room_version_id,
+ &fork_states,
+ auth_chain_sets,
+ |id| {
+ let res = db.rooms.get_pdu(id);
+ if let Err(e) = &res {
+ error!("LOOK AT ME Failed to fetch event: {}", e);
+ }
+ res.ok().flatten()
+ },
+ ) {
+ Ok(new_state) => new_state,
+ Err(_) => {
+ return Err("State resolution failed, either an event could not be found or deserialization".into());
+ }
+ };
+
+ drop(lock);
+
+ info!("State resolution done. Compressing state");
+
+ state
+ .into_iter()
+ .map(|((event_type, state_key), event_id)| {
+ let shortstatekey = db
+ .rooms
+ .get_or_create_shortstatekey(
+ &event_type.to_string().into(),
+ &state_key,
+ &db.globals,
+ )
+ .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?;
+ db.rooms
+ .compress_state_event(shortstatekey, &event_id, &db.globals)
+ .map_err(|_| "Failed to compress state event".to_owned())
+ })
+ .collect::<Result<_, _>>()?
+ };
+
+ // Set the new room state to the resolved state
+ if update_state {
+ info!("Forcing new room state");
+ db.rooms
+ .force_state(room_id, new_room_state, db)
+ .map_err(|_| "Failed to set new room state.".to_owned())?;
+ }
+ }
+
+ info!("Appending pdu to timeline");
+ extremities.insert(incoming_pdu.event_id.clone());
+
+ // Now that the event has passed all auth it is added into the timeline.
+ // We use the `state_at_event` instead of `state_after` so we accurately
+ // represent the state for this event.
+
+ let pdu_id = append_incoming_pdu(
+ db,
+ &incoming_pdu,
+ val,
+ extremities.iter().map(Deref::deref),
+ state_ids_compressed,
+ soft_fail,
+ &state_lock,
+ )
+ .map_err(|e| {
+ warn!("Failed to add pdu to db: {}", e);
+ "Failed to add pdu to db.".to_owned()
+ })?;
+
+ info!("Appended incoming pdu");
+
+ // Event has passed all auth/stateres checks
+ drop(state_lock);
+ Ok(pdu_id)
+}
+
+/// Find the event and auth it. Once the event is validated (steps 1 - 8)
+/// it is appended to the outliers tree.
+///
+/// Returns the pdu and, if we fetched it over federation, the raw json.
+///
+/// a. Look in the main timeline (pduid_pdu tree)
+/// b. Look at outlier pdu tree
+/// c. Ask origin server over federation
+/// d. TODO: Ask other servers over federation?
+#[tracing::instrument(skip_all)]
+pub(crate) fn fetch_and_handle_outliers<'a>(
+ db: &'a Database,
+ origin: &'a ServerName,
+ events: &'a [Arc<EventId>],
+ create_event: &'a PduEvent,
+ room_id: &'a RoomId,
+ pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
+) -> AsyncRecursiveType<'a, Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)>> {
+ Box::pin(async move {
+ let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) {
+ hash_map::Entry::Vacant(e) => {
+ e.insert((Instant::now(), 1));
+ }
+ hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
+ };
+
+ let mut pdus = vec![];
+ for id in events {
+ if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&**id)
+ {
+ // Exponential backoff
+ let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries);
+ if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
+ min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
+ }
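+ // e.g. tries = 3 waits 5 min * 9 = 45 min; tries >= 17 hits the 24 h cap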
+
+ if time.elapsed() < min_elapsed_duration {
+ info!("Backing off from {}", id);
+ continue;
+ }
+ }
+
+ // a. Look in the main timeline (pduid_pdu tree)
+ // b. Look at outlier pdu tree
+ // (get_pdu_json checks both)
+ if let Ok(Some(local_pdu)) = db.rooms.get_pdu(id) {
+ trace!("Found {} in db", id);
+ pdus.push((local_pdu, None));
+ continue;
+ }
+
+ // c. Ask origin server over federation
+ // We also handle its auth chain here so we don't get a stack overflow in
+ // handle_outlier_pdu.
+ let mut todo_auth_events = vec![Arc::clone(id)];
+ let mut events_in_reverse_order = Vec::new();
+ let mut events_all = HashSet::new();
+ let mut i = 0;
+ while let Some(next_id) = todo_auth_events.pop() {
+ if events_all.contains(&next_id) {
+ continue;
+ }
+
+ i += 1;
+ if i % 100 == 0 {
+ tokio::task::yield_now().await;
+ }
+
+ if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) {
+ trace!("Found {} in db", id);
+ continue;
+ }
+
+ info!("Fetching {} over federation.", next_id);
+ match db
+ .sending
+ .send_federation_request(
+ &db.globals,
+ origin,
+ get_event::v1::Request { event_id: &next_id },
+ )
+ .await
+ {
+ Ok(res) => {
+ info!("Got {} over federation", next_id);
+ let (calculated_event_id, value) =
+ match crate::pdu::gen_event_id_canonical_json(&res.pdu, &db) {
+ Ok(t) => t,
+ Err(_) => {
+ back_off((*next_id).to_owned());
+ continue;
+ }
+ };
+
+ if calculated_event_id != *next_id {
+ warn!("Server didn't return event id we requested: requested: {}, we got {}. Event: {:?}",
+ next_id, calculated_event_id, &res.pdu);
+ }
+
+ if let Some(auth_events) =
+ value.get("auth_events").and_then(|c| c.as_array())
+ {
+ for auth_event in auth_events {
+ if let Ok(auth_event) =
+ serde_json::from_value(auth_event.clone().into())
+ {
+ let a: Arc<EventId> = auth_event;
+ todo_auth_events.push(a);
+ } else {
+ warn!("Auth event id is not valid");
+ }
+ }
+ } else {
+ warn!("Auth event list invalid");
+ }
+
+ events_in_reverse_order.push((next_id.clone(), value));
+ events_all.insert(next_id);
+ }
+ Err(_) => {
+ warn!("Failed to fetch event: {}", next_id);
+ back_off((*next_id).to_owned());
+ }
+ }
+ }
+
+ for (next_id, value) in events_in_reverse_order.iter().rev() {
+ match handle_outlier_pdu(
+ origin,
+ create_event,
+ next_id,
+ room_id,
+ value.clone(),
+ db,
+ pub_key_map,
+ )
+ .await
+ {
+ Ok((pdu, json)) => {
+ if next_id == id {
+ pdus.push((pdu, Some(json)));
+ }
+ }
+ Err(e) => {
+ warn!("Authentication of event {} failed: {:?}", next_id, e);
+ back_off((**next_id).to_owned());
+ }
+ }
+ }
+ }
+ pdus
+ })
+}
+
+/// Search the DB for the signing keys of the given server, if we don't have them
+/// fetch them from the server and save to our DB.
+#[tracing::instrument(skip_all)]
+pub(crate) async fn fetch_signing_keys(
+ db: &Database,
+ origin: &ServerName,
+ signature_ids: Vec<String>,
+) -> Result<BTreeMap<String, Base64>> {
+ let contains_all_ids =
+ |keys: &BTreeMap<String, Base64>| signature_ids.iter().all(|id| keys.contains_key(id));
+
+ let permit = db
+ .globals
+ .servername_ratelimiter
+ .read()
+ .unwrap()
+ .get(origin)
+ .map(|s| Arc::clone(s).acquire_owned());
+
+ let permit = match permit {
+ Some(p) => p,
+ None => {
+ let mut write = db.globals.servername_ratelimiter.write().unwrap();
+ let s = Arc::clone(
+ write
+ .entry(origin.to_owned())
+ .or_insert_with(|| Arc::new(Semaphore::new(1))),
+ );
+
+ s.acquire_owned()
+ }
+ }
+ .await;
+
+ let back_off = |id| match db
+ .globals
+ .bad_signature_ratelimiter
+ .write()
+ .unwrap()
+ .entry(id)
+ {
+ hash_map::Entry::Vacant(e) => {
+ e.insert((Instant::now(), 1));
+ }
+ hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
+ };
+
+ if let Some((time, tries)) = db
+ .globals
+ .bad_signature_ratelimiter
+ .read()
+ .unwrap()
+ .get(&signature_ids)
+ {
+ // Exponential backoff
+ let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
+ if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
+ min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
+ }
+
+ if time.elapsed() < min_elapsed_duration {
+ debug!("Backing off from {:?}", signature_ids);
+ return Err(Error::BadServerResponse("bad signature, still backing off"));
+ }
+ }
+
+ trace!("Loading signing keys for {}", origin);
+
+ let mut result: BTreeMap<_, _> = db
+ .globals
+ .signing_keys_for(origin)?
+ .into_iter()
+ .map(|(k, v)| (k.to_string(), v.key))
+ .collect();
+
+ if contains_all_ids(&result) {
+ return Ok(result);
+ }
+
+ debug!("Fetching signing keys for {} over federation", origin);
+
+ if let Some(server_key) = db
+ .sending
+ .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new())
+ .await
+ .ok()
+ .and_then(|resp| resp.server_key.deserialize().ok())
+ {
+ db.globals.add_signing_key(origin, server_key.clone())?;
+
+ result.extend(
+ server_key
+ .verify_keys
+ .into_iter()
+ .map(|(k, v)| (k.to_string(), v.key)),
+ );
+ result.extend(
+ server_key
+ .old_verify_keys
+ .into_iter()
+ .map(|(k, v)| (k.to_string(), v.key)),
+ );
+
+ if contains_all_ids(&result) {
+ return Ok(result);
+ }
+ }
+
+ for server in db.globals.trusted_servers() {
+ debug!("Asking {} for {}'s signing key", server, origin);
+ if let Some(server_keys) = db
+ .sending
+ .send_federation_request(
+ &db.globals,
+ server,
+ get_remote_server_keys::v2::Request::new(
+ origin,
+ MilliSecondsSinceUnixEpoch::from_system_time(
+ SystemTime::now()
+ .checked_add(Duration::from_secs(3600))
+ .expect("SystemTime to large"),
+ )
+ .expect("time is valid"),
+ ),
+ )
+ .await
+ .ok()
+ .map(|resp| {
+ resp.server_keys
+ .into_iter()
+ .filter_map(|e| e.deserialize().ok())
+ .collect::<Vec<_>>()
+ })
+ {
+ trace!("Got signing keys: {:?}", server_keys);
+ for k in server_keys {
+ db.globals.add_signing_key(origin, k.clone())?;
+ result.extend(
+ k.verify_keys
+ .into_iter()
+ .map(|(k, v)| (k.to_string(), v.key)),
+ );
+ result.extend(
+ k.old_verify_keys
+ .into_iter()
+ .map(|(k, v)| (k.to_string(), v.key)),
+ );
+ }
+
+ if contains_all_ids(&result) {
+ return Ok(result);
+ }
+ }
+ }
+
+ drop(permit);
+
+ back_off(signature_ids);
+
+ warn!("Failed to find public key for server: {}", origin);
+ Err(Error::BadServerResponse(
+ "Failed to find public key for server",
+ ))
+}
+
+/// Append the incoming event, setting the state snapshot to the state from the
+/// server that sent the event.
+#[tracing::instrument(skip_all)]
+fn append_incoming_pdu<'a>(
+ db: &Database,
+ pdu: &PduEvent,
+ pdu_json: CanonicalJsonObject,
+ new_room_leaves: impl IntoIterator<Item = &'a EventId> + Clone + Debug,
+ state_ids_compressed: HashSet<CompressedStateEvent>,
+ soft_fail: bool,
+ _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex
+) -> Result<Option<Vec<u8>>> {
+ // We append to state before appending the pdu, so we don't have a moment in time with the
+ // pdu without its state. This is okay because append_pdu can't fail.
+ db.rooms.set_event_state(
+ &pdu.event_id,
+ &pdu.room_id,
+ state_ids_compressed,
+ &db.globals,
+ )?;
+
+ if soft_fail {
+ db.rooms
+ .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?;
+ db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?;
+ return Ok(None);
+ }
+
+ let pdu_id = db.rooms.append_pdu(pdu, pdu_json, new_room_leaves, db)?;
+
+ for appservice in db.appservice.all()? {
+ if db.rooms.appservice_in_room(&pdu.room_id, &appservice, db)? {
+ db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?;
+ continue;
+ }
+
+ if let Some(namespaces) = appservice.1.get("namespaces") {
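+ // `namespaces` comes from the appservice registration YAML; illustrative shape:
+ //   namespaces:
+ //     users:
+ //       - exclusive: true
+ //         regex: "@_bridge_.*:example.org"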
+ let users = namespaces
+ .get("users")
+ .and_then(|users| users.as_sequence())
+ .map_or_else(Vec::new, |users| {
+ users
+ .iter()
+ .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok())
+ .collect::<Vec<_>>()
+ });
+ let aliases = namespaces
+ .get("aliases")
+ .and_then(|aliases| aliases.as_sequence())
+ .map_or_else(Vec::new, |aliases| {
+ aliases
+ .iter()
+ .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok())
+ .collect::<Vec<_>>()
+ });
+ let rooms = namespaces
+ .get("rooms")
+ .and_then(|rooms| rooms.as_sequence());
+
+ let matching_users = |users: &Regex| {
+ users.is_match(pdu.sender.as_str())
+ || pdu.kind == RoomEventType::RoomMember
+ && pdu
+ .state_key
+ .as_ref()
+ .map_or(false, |state_key| users.is_match(state_key))
+ };
+ let matching_aliases = |aliases: &Regex| {
+ db.rooms
+ .room_aliases(&pdu.room_id)
+ .filter_map(|r| r.ok())
+ .any(|room_alias| aliases.is_match(room_alias.as_str()))
+ };
+
+ if aliases.iter().any(matching_aliases)
+ || rooms.map_or(false, |rooms| rooms.contains(&pdu.room_id.as_str().into()))
+ || users.iter().any(matching_users)
+ {
+ db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?;
+ }
+ }
+ }
+
+ Ok(Some(pdu_id))
+}
+
+#[tracing::instrument(skip(starting_events, db))]
+pub(crate) async fn get_auth_chain<'a>(
+ room_id: &RoomId,
+ starting_events: Vec<Arc<EventId>>,
+ db: &'a Database,
+) -> Result<impl Iterator<Item = Arc<EventId>> + 'a> {
+ const NUM_BUCKETS: usize = 50;
+
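+ // Group the starting events by `shorteventid % NUM_BUCKETS` so cached auth
+ // chains can be looked up and stored per chunk instead of per event.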
+ let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS];
+
+ let mut i = 0;
+ for id in starting_events {
+ let short = db.rooms.get_or_create_shorteventid(&id, &db.globals)?;
+ let bucket_id = (short % NUM_BUCKETS as u64) as usize;
+ buckets[bucket_id].insert((short, id.clone()));
+ i += 1;
+ if i % 100 == 0 {
+ tokio::task::yield_now().await;
+ }
+ }
+
+ let mut full_auth_chain = HashSet::new();
+
+ let mut hits = 0;
+ let mut misses = 0;
+ for chunk in buckets {
+ if chunk.is_empty() {
+ continue;
+ }
+
+ let chunk_key: Vec<u64> = chunk.iter().map(|(short, _)| short).copied().collect();
+ if let Some(cached) = db.rooms.get_auth_chain_from_cache(&chunk_key)? {
+ hits += 1;
+ full_auth_chain.extend(cached.iter().copied());
+ continue;
+ }
+ misses += 1;
+
+ let mut chunk_cache = HashSet::new();
+ let mut hits2 = 0;
+ let mut misses2 = 0;
+ let mut i = 0;
+ for (sevent_id, event_id) in chunk {
+ if let Some(cached) = db.rooms.get_auth_chain_from_cache(&[sevent_id])? {
+ hits2 += 1;
+ chunk_cache.extend(cached.iter().copied());
+ } else {
+ misses2 += 1;
+ let auth_chain = Arc::new(get_auth_chain_inner(room_id, &event_id, db)?);
+ db.rooms
+ .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?;
+ debug!(
+ "cache missed event {} with auth chain len {}",
+ event_id,
+ auth_chain.len()
+ );
+ chunk_cache.extend(auth_chain.iter());
+
+ i += 1;
+ if i % 100 == 0 {
+ tokio::task::yield_now().await;
+ }
+ };
+ }
+ debug!(
+ "chunk missed with len {}, event hits2: {}, misses2: {}",
+ chunk_cache.len(),
+ hits2,
+ misses2
+ );
+ let chunk_cache = Arc::new(chunk_cache);
+ db.rooms
+ .cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?;
+ full_auth_chain.extend(chunk_cache.iter());
+ }
+
+ debug!(
+ "total: {}, chunk hits: {}, misses: {}",
+ full_auth_chain.len(),
+ hits,
+ misses
+ );
+
+ Ok(full_auth_chain
+ .into_iter()
+ .filter_map(move |sid| db.rooms.get_eventid_from_short(sid).ok()))
+}
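+
+// A minimal usage sketch, mirroring the federation routes below:
+//
+//     let auth_chain_ids =
+//         get_auth_chain(room_id, vec![Arc::from(&*event_id)], &db).await?;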
+
+#[tracing::instrument(skip(event_id, db))]
+fn get_auth_chain_inner(
+ room_id: &RoomId,
+ event_id: &EventId,
+ db: &Database,
+) -> Result<HashSet<u64>> {
+ let mut todo = vec![Arc::from(event_id)];
+ let mut found = HashSet::new();
+
+ while let Some(event_id) = todo.pop() {
+ match db.rooms.get_pdu(&event_id) {
+ Ok(Some(pdu)) => {
+ if pdu.room_id != room_id {
+ return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db"));
+ }
+ for auth_event in &pdu.auth_events {
+ let sauthevent = db
+ .rooms
+ .get_or_create_shorteventid(auth_event, &db.globals)?;
+
+ if !found.contains(&sauthevent) {
+ found.insert(sauthevent);
+ todo.push(auth_event.clone());
+ }
+ }
+ }
+ Ok(None) => {
+ warn!("Could not find pdu mentioned in auth events: {}", event_id);
+ }
+ Err(e) => {
+ warn!("Could not load event in auth chain: {} {}", event_id, e);
+ }
+ }
+ }
+
+ Ok(found)
+}
+
+/// # `GET /_matrix/federation/v1/event/{eventId}`
+///
+/// Retrieves a single event from the server.
+///
+/// - Only works if a user of this server is currently invited to or joined the room
+pub async fn get_event_route(
+ db: DatabaseGuard,
+ body: Ruma<get_event::v1::IncomingRequest>,
+) -> Result<get_event::v1::Response> {
+ if !db.globals.allow_federation() {
+ return Err(Error::bad_config("Federation is disabled."));
+ }
+
+ let sender_servername = body
+ .sender_servername
+ .as_ref()
+ .expect("server is authenticated");
+
+ let event = db
+ .rooms
+ .get_pdu_json(&body.event_id)?
+ .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?;
+
+ let room_id_str = event
+ .get("room_id")
+ .and_then(|val| val.as_str())
+ .ok_or_else(|| Error::bad_database("Invalid event in database"))?;
+
+ let room_id = <&RoomId>::try_from(room_id_str)
+ .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
+
+ if !db.rooms.server_in_room(sender_servername, room_id)? {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "Server is not in room",
+ ));
+ }
+
+ Ok(get_event::v1::Response {
+ origin: db.globals.server_name().to_owned(),
+ origin_server_ts: MilliSecondsSinceUnixEpoch::now(),
+ pdu: PduEvent::convert_to_outgoing_federation_event(event),
+ })
+}
+
+/// # `POST /_matrix/federation/v1/get_missing_events/{roomId}`
+///
+/// Retrieves events that the sender is missing.
+pub async fn get_missing_events_route(
+ db: DatabaseGuard,
+ body: Ruma<get_missing_events::v1::IncomingRequest>,
+) -> Result<get_missing_events::v1::Response> {
+ if !db.globals.allow_federation() {
+ return Err(Error::bad_config("Federation is disabled."));
+ }
+
+ let sender_servername = body
+ .sender_servername
+ .as_ref()
+ .expect("server is authenticated");
+
+ if !db.rooms.server_in_room(sender_servername, &body.room_id)? {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "Server is not in room",
+ ));
+ }
+
+ acl_check(sender_servername, &body.room_id, &db)?;
+
+ let mut queued_events = body.latest_events.clone();
+ let mut events = Vec::new();
+
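+ // Walk backwards through prev_events, starting from the sender's latest_events
+ // and stopping at earliest_events or once the limit is reached.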
+ let mut i = 0;
+ while i < queued_events.len() && events.len() < u64::from(body.limit) as usize {
+ if let Some(pdu) = db.rooms.get_pdu_json(&queued_events[i])? {
+ let room_id_str = pdu
+ .get("room_id")
+ .and_then(|val| val.as_str())
+ .ok_or_else(|| Error::bad_database("Invalid event in database"))?;
+
+ let event_room_id = <&RoomId>::try_from(room_id_str)
+ .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
+
+ if event_room_id != body.room_id {
+ warn!(
+ "Evil event detected: Event {} found while searching in room {}",
+ queued_events[i], body.room_id
+ );
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Evil event detected",
+ ));
+ }
+
+ if body.earliest_events.contains(&queued_events[i]) {
+ i += 1;
+ continue;
+ }
+ queued_events.extend_from_slice(
+ &serde_json::from_value::<Vec<Box<EventId>>>(
+ serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| {
+ Error::bad_database("Event in db has no prev_events field.")
+ })?)
+ .expect("canonical json is valid json value"),
+ )
+ .map_err(|_| Error::bad_database("Invalid prev_events content in pdu in db."))?,
+ );
+ events.push(PduEvent::convert_to_outgoing_federation_event(pdu));
+ }
+ i += 1;
+ }
+
+ Ok(get_missing_events::v1::Response { events })
+}
+
+/// # `GET /_matrix/federation/v1/event_auth/{roomId}/{eventId}`
+///
+/// Retrieves the auth chain for a given event.
+///
+/// - This does not include the event itself
+pub async fn get_event_authorization_route(
+ db: DatabaseGuard,
+ body: Ruma<get_event_authorization::v1::IncomingRequest>,
+) -> Result<get_event_authorization::v1::Response> {
+ if !db.globals.allow_federation() {
+ return Err(Error::bad_config("Federation is disabled."));
+ }
+
+ let sender_servername = body
+ .sender_servername
+ .as_ref()
+ .expect("server is authenticated");
+
+ if !db.rooms.server_in_room(sender_servername, &body.room_id)? {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "Server is not in room.",
+ ));
+ }
+
+ acl_check(sender_servername, &body.room_id, &db)?;
+
+ let event = db
+ .rooms
+ .get_pdu_json(&body.event_id)?
+ .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?;
+
+ let room_id_str = event
+ .get("room_id")
+ .and_then(|val| val.as_str())
+ .ok_or_else(|| Error::bad_database("Invalid event in database"))?;
+
+ let room_id = <&RoomId>::try_from(room_id_str)
+ .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
+
+ let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db).await?;
+
+ Ok(get_event_authorization::v1::Response {
+ auth_chain: auth_chain_ids
+ .filter_map(|id| db.rooms.get_pdu_json(&id).ok()?)
+ .map(PduEvent::convert_to_outgoing_federation_event)
+ .collect(),
+ })
+}
+
+/// # `GET /_matrix/federation/v1/state/{roomId}`
+///
+/// Retrieves a snapshot of the room's state at the given event.
+pub async fn get_room_state_route(
+ db: DatabaseGuard,
+ body: Ruma<get_room_state::v1::IncomingRequest>,
+) -> Result<get_room_state::v1::Response> {
+ if !db.globals.allow_federation() {
+ return Err(Error::bad_config("Federation is disabled."));
+ }
+
+ let sender_servername = body
+ .sender_servername
+ .as_ref()
+ .expect("server is authenticated");
+
+ if !db.rooms.server_in_room(sender_servername, &body.room_id)? {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "Server is not in room.",
+ ));
+ }
+
+ acl_check(sender_servername, &body.room_id, &db)?;
+
+ let shortstatehash = db
+ .rooms
+ .pdu_shortstatehash(&body.event_id)?
+ .ok_or(Error::BadRequest(
+ ErrorKind::NotFound,
+ "Pdu state not found.",
+ ))?;
+
+ let pdus = db
+ .rooms
+ .state_full_ids(shortstatehash)
+ .await?
+ .into_iter()
+ .map(|(_, id)| {
+ PduEvent::convert_to_outgoing_federation_event(
+ db.rooms.get_pdu_json(&id).unwrap().unwrap(),
+ )
+ })
+ .collect();
+
+ let auth_chain_ids =
+ get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?;
+
+ Ok(get_room_state::v1::Response {
+ auth_chain: auth_chain_ids
+ .map(|id| {
+ db.rooms.get_pdu_json(&id).map(|maybe_json| {
+ PduEvent::convert_to_outgoing_federation_event(maybe_json.unwrap())
+ })
+ })
+ .filter_map(|r| r.ok())
+ .collect(),
+ pdus,
+ })
+}
+
+/// # `GET /_matrix/federation/v1/state_ids/{roomId}`
+///
+/// Retrieves a snapshot of the room's state at the given event, as event ids.
+pub async fn get_room_state_ids_route(
+ db: DatabaseGuard,
+ body: Ruma<get_room_state_ids::v1::IncomingRequest>,
+) -> Result<get_room_state_ids::v1::Response> {
+ if !db.globals.allow_federation() {
+ return Err(Error::bad_config("Federation is disabled."));
+ }
+
+ let sender_servername = body
+ .sender_servername
+ .as_ref()
+ .expect("server is authenticated");
+
+ if !db.rooms.server_in_room(sender_servername, &body.room_id)? {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "Server is not in room.",
+ ));
+ }
+
+ acl_check(sender_servername, &body.room_id, &db)?;
+
+ let shortstatehash = db
+ .rooms
+ .pdu_shortstatehash(&body.event_id)?
+ .ok_or(Error::BadRequest(
+ ErrorKind::NotFound,
+ "Pdu state not found.",
+ ))?;
+
+ let pdu_ids = db
+ .rooms
+ .state_full_ids(shortstatehash)
+ .await?
+ .into_iter()
+ .map(|(_, id)| (*id).to_owned())
+ .collect();
+
+ let auth_chain_ids =
+ get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?;
+
+ Ok(get_room_state_ids::v1::Response {
+ auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(),
+ pdu_ids,
+ })
+}
+
+/// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}`
+///
+/// Creates a join template.
+pub async fn create_join_event_template_route(
+ db: DatabaseGuard,
+ body: Ruma<prepare_join_event::v1::IncomingRequest>,
+) -> Result<prepare_join_event::v1::Response> {
+ if !db.globals.allow_federation() {
+ return Err(Error::bad_config("Federation is disabled."));
+ }
+
+ if !db.rooms.exists(&body.room_id)? {
+ return Err(Error::BadRequest(
+ ErrorKind::NotFound,
+ "Room is unknown to this server.",
+ ));
+ }
+
+ let sender_servername = body
+ .sender_servername
+ .as_ref()
+ .expect("server is authenticated");
+
+ acl_check(sender_servername, &body.room_id, &db)?;
+
+ // TODO: Conduit does not implement restricted join rules yet, we always reject
+ let join_rules_event =
+ db.rooms
+ .room_state_get(&body.room_id, &StateEventType::RoomJoinRules, "")?;
+
+ let join_rules_event_content: Option<RoomJoinRulesEventContent> = join_rules_event
+ .as_ref()
+ .map(|join_rules_event| {
+ serde_json::from_str(join_rules_event.content.get()).map_err(|e| {
+ warn!("Invalid join rules event: {}", e);
+ Error::bad_database("Invalid join rules event in db.")
+ })
+ })
+ .transpose()?;
+
+ if let Some(join_rules_event_content) = join_rules_event_content {
+ if matches!(
+ join_rules_event_content.join_rule,
+ JoinRule::Restricted { .. }
+ ) {
+ return Err(Error::BadRequest(
+ ErrorKind::Unknown,
+ "Conduit does not support restricted rooms yet.",
+ ));
+ }
+ }
+
+ let prev_events: Vec<_> = db
+ .rooms
+ .get_pdu_leaves(&body.room_id)?
+ .into_iter()
+ .take(20)
+ .collect();
+
+ let create_event = db
+ .rooms
+ .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")?;
+
+ let create_event_content: Option<RoomCreateEventContent> = create_event
+ .as_ref()
+ .map(|create_event| {
+ serde_json::from_str(create_event.content.get()).map_err(|e| {
+ warn!("Invalid create event: {}", e);
+ Error::bad_database("Invalid create event in db.")
+ })
+ })
+ .transpose()?;
+
+ // If there was no create event yet, assume we are creating a room with the default version
+ // right now
+ let room_version_id = create_event_content
+ .map_or(db.globals.default_room_version(), |create_event| {
+ create_event.room_version
+ });
+ let room_version = RoomVersion::new(&room_version_id).expect("room version is supported");
+
+ if !body.ver.contains(&room_version_id) {
+ return Err(Error::BadRequest(
+ ErrorKind::IncompatibleRoomVersion {
+ room_version: room_version_id,
+ },
+ "Room version not supported.",
+ ));
+ }
+
+ let content = to_raw_value(&RoomMemberEventContent {
+ avatar_url: None,
+ blurhash: None,
+ displayname: None,
+ is_direct: None,
+ membership: MembershipState::Join,
+ third_party_invite: None,
+ reason: None,
+ join_authorized_via_users_server: None,
+ })
+ .expect("member event is valid value");
+
+ let state_key = body.user_id.to_string();
+ let kind = StateEventType::RoomMember;
+
+ let auth_events = db.rooms.get_auth_events(
+ &body.room_id,
+ &kind.to_string().into(),
+ &body.user_id,
+ Some(&state_key),
+ &content,
+ )?;
+
+ // Our depth is the maximum depth of prev_events + 1
+ let depth = prev_events
+ .iter()
+ .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth))
+ .max()
+ .unwrap_or_else(|| uint!(0))
+ + uint!(1);
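+ // e.g. prev_events with depths [4, 7, 7] give the new event depth 8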
+
+ let mut unsigned = BTreeMap::new();
+
+ if let Some(prev_pdu) = db.rooms.room_state_get(&body.room_id, &kind, &state_key)? {
+ unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone());
+ unsigned.insert(
+ "prev_sender".to_owned(),
+ to_raw_value(&prev_pdu.sender).expect("UserId is valid"),
+ );
+ }
+
+ let pdu = PduEvent {
+ event_id: ruma::event_id!("$thiswillbefilledinlater").into(),
+ room_id: body.room_id.clone(),
+ sender: body.user_id.clone(),
+ origin_server_ts: utils::millis_since_unix_epoch()
+ .try_into()
+ .expect("time is valid"),
+ kind: kind.to_string().into(),
+ content,
+ state_key: Some(state_key),
+ prev_events,
+ depth,
+ auth_events: auth_events
+ .iter()
+ .map(|(_, pdu)| pdu.event_id.clone())
+ .collect(),
+ redacts: None,
+ unsigned: if unsigned.is_empty() {
+ None
+ } else {
+ Some(to_raw_value(&unsigned).expect("to_raw_value always works"))
+ },
+ hashes: EventHash {
+ sha256: "aaa".to_owned(),
+ },
+ signatures: None,
+ };
+
+ let auth_check = state_res::auth_check(
+ &room_version,
+ &pdu,
+ None::<PduEvent>, // TODO: third_party_invite
+ |k, s| auth_events.get(&(k.clone(), s.to_owned())),
+ )
+ .map_err(|e| {
+ error!("{:?}", e);
+ Error::bad_database("Auth check failed.")
+ })?;
+
+ if !auth_check {
+ return Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "Event is not authorized.",
+ ));
+ }
+
+ // Hash and sign
+ let mut pdu_json =
+ utils::to_canonical_object(&pdu).expect("event is valid, we just created it");
+
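+ // The placeholder id must not be part of the hashed and signed JSON; in room
+ // v3+ the real event id is derived from the reference hash by the receiver.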
+ pdu_json.remove("event_id");
+
+ // Add origin because synapse likes that (and it's required in the spec)
+ pdu_json.insert(
+ "origin".to_owned(),
+ CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()),
+ );
+
+ Ok(prepare_join_event::v1::Response {
+ room_version: Some(room_version_id),
+ event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"),
+ })
+}
+
+async fn create_join_event(
+ db: &DatabaseGuard,
+ sender_servername: &ServerName,
+ room_id: &RoomId,
+ pdu: &RawJsonValue,
+) -> Result<RoomState> {
+ if !db.globals.allow_federation() {
+ return Err(Error::bad_config("Federation is disabled."));
+ }
+
+ if !db.rooms.exists(room_id)? {
+ return Err(Error::BadRequest(
+ ErrorKind::NotFound,
+ "Room is unknown to this server.",
+ ));
+ }
+
+ acl_check(sender_servername, room_id, db)?;
+
+ // TODO: Conduit does not implement restricted join rules yet, we always reject
+ let join_rules_event = db
+ .rooms
+ .room_state_get(room_id, &StateEventType::RoomJoinRules, "")?;
+
+ let join_rules_event_content: Option<RoomJoinRulesEventContent> = join_rules_event
+ .as_ref()
+ .map(|join_rules_event| {
+ serde_json::from_str(join_rules_event.content.get()).map_err(|e| {
+ warn!("Invalid join rules event: {}", e);
+ Error::bad_database("Invalid join rules event in db.")
+ })
+ })
+ .transpose()?;
+
+ if let Some(join_rules_event_content) = join_rules_event_content {
+ if matches!(
+ join_rules_event_content.join_rule,
+ JoinRule::Restricted { .. }
+ ) {
+ return Err(Error::BadRequest(
+ ErrorKind::Unknown,
+ "Conduit does not support restricted rooms yet.",
+ ));
+ }
+ }
+
+ // We need to return the state prior to joining, let's keep a reference to that here
+ let shortstatehash = db
+ .rooms
+ .current_shortstatehash(room_id)?
+ .ok_or(Error::BadRequest(
+ ErrorKind::NotFound,
+ "Pdu state not found.",
+ ))?;
+
+ let pub_key_map = RwLock::new(BTreeMap::new());
+
+ // We do not add the event_id field to the pdu here because of signature and hash checks
+ let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu, &db) {
+ Ok(t) => t,
+ Err(_) => {
+ // Event could not be converted to canonical json
+ return Err(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Could not convert event to canonical json.",
+ ));
+ }
+ };
+
+ let origin: Box<ServerName> = serde_json::from_value(
+ serde_json::to_value(value.get("origin").ok_or(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Event needs an origin field.",
+ ))?)
+ .expect("CanonicalJson is valid json value"),
+ )
+ .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?;
+
+ let mutex = Arc::clone(
+ db.globals
+ .roomid_mutex_federation
+ .write()
+ .unwrap()
+ .entry(room_id.to_owned())
+ .or_default(),
+ );
+ let mutex_lock = mutex.lock().await;
+ let pdu_id = handle_incoming_pdu(&origin, &event_id, room_id, value, true, db, &pub_key_map)
+ .await
+ .map_err(|e| {
+ warn!("Error while handling incoming send join PDU: {}", e);
+ Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Error while handling incoming PDU.",
+ )
+ })?
+ .ok_or(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Could not accept incoming PDU as timeline event.",
+ ))?;
+ drop(mutex_lock);
+
+ let state_ids = db.rooms.state_full_ids(shortstatehash).await?;
+ let auth_chain_ids = get_auth_chain(
+ room_id,
+ state_ids.iter().map(|(_, id)| id.clone()).collect(),
+ db,
+ )
+ .await?;
+
+ let servers = db
+ .rooms
+ .room_servers(room_id)
+ .filter_map(|r| r.ok())
+ .filter(|server| &**server != db.globals.server_name());
+
+ db.sending.send_pdu(servers, &pdu_id)?;
+
+ db.flush()?;
+
+ Ok(RoomState {
+ auth_chain: auth_chain_ids
+ .filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten())
+ .map(PduEvent::convert_to_outgoing_federation_event)
+ .collect(),
+ state: state_ids
+ .iter()
+ .filter_map(|(_, id)| db.rooms.get_pdu_json(id).ok().flatten())
+ .map(PduEvent::convert_to_outgoing_federation_event)
+ .collect(),
+ })
+}
+
+/// # `PUT /_matrix/federation/v1/send_join/{roomId}/{eventId}`
+///
+/// Submits a signed join event.
+pub async fn create_join_event_v1_route(
+ db: DatabaseGuard,
+ body: Ruma<create_join_event::v1::IncomingRequest>,
+) -> Result<create_join_event::v1::Response> {
+ let sender_servername = body
+ .sender_servername
+ .as_ref()
+ .expect("server is authenticated");
+
+ let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?;
+
+ Ok(create_join_event::v1::Response { room_state })
+}
+
+/// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}`
+///
+/// Submits a signed join event.
+pub async fn create_join_event_v2_route(
+ db: DatabaseGuard,
+ body: Ruma<create_join_event::v2::IncomingRequest>,
+) -> Result<create_join_event::v2::Response> {
+ let sender_servername = body
+ .sender_servername
+ .as_ref()
+ .expect("server is authenticated");
+
+ let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?;
+
+ Ok(create_join_event::v2::Response { room_state })
+}
+
+/// # `PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`
+///
+/// Invites a remote user to a room.
+pub async fn create_invite_route(
+ db: DatabaseGuard,
+ body: Ruma<create_invite::v2::IncomingRequest>,
+) -> Result<create_invite::v2::Response> {
+ if !db.globals.allow_federation() {
+ return Err(Error::bad_config("Federation is disabled."));
+ }
+
+ let sender_servername = body
+ .sender_servername
+ .as_ref()
+ .expect("server is authenticated");
+
+ acl_check(sender_servername, &body.room_id, &db)?;
+
+ if !db.rooms.is_supported_version(&db, &body.room_version) {
+ return Err(Error::BadRequest(
+ ErrorKind::IncompatibleRoomVersion {
+ room_version: body.room_version.clone(),
+ },
+ "Server does not support this room version.",
+ ));
+ }
+
+ let mut signed_event = utils::to_canonical_object(&body.event)
+ .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invite event is invalid."))?;
+
+ ruma::signatures::hash_and_sign_event(
+ db.globals.server_name().as_str(),
+ db.globals.keypair(),
+ &mut signed_event,
+ &body.room_version,
+ )
+ .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?;
+
+ // Generate event id
+ let event_id = EventId::parse(format!(
+ "${}",
+ ruma::signatures::reference_hash(&signed_event, &body.room_version)
+ .expect("ruma can calculate reference hashes")
+ ))
+ .expect("ruma's reference hashes are valid event ids");
+
+ // Add event_id back
+ signed_event.insert(
+ "event_id".to_owned(),
+ CanonicalJsonValue::String(event_id.into()),
+ );
+
+ let sender: Box<_> = serde_json::from_value(
+ signed_event
+ .get("sender")
+ .ok_or(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Event had no sender field.",
+ ))?
+ .clone()
+ .into(),
+ )
+ .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user id."))?;
+
+ let invited_user: Box<_> = serde_json::from_value(
+ signed_event
+ .get("state_key")
+ .ok_or(Error::BadRequest(
+ ErrorKind::InvalidParam,
+ "Event had no state_key field.",
+ ))?
+ .clone()
+ .into(),
+ )
+ .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "state_key is not a user id."))?;
+
+ let mut invite_state = body.invite_room_state.clone();
+
+ let mut event: JsonObject = serde_json::from_str(body.event.get())
+ .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event bytes."))?;
+
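+ // PduEvent deserialization requires an event_id field, but its value is
+ // irrelevant for the stripped state event built below, so a dummy suffices.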
+ event.insert("event_id".to_owned(), "$dummy".into());
+
+ let pdu: PduEvent = serde_json::from_value(event.into()).map_err(|e| {
+ warn!("Invalid invite event: {}", e);
+ Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event.")
+ })?;
+
+ invite_state.push(pdu.to_stripped_state_event());
+
+ // If the room already exists, the remote server will notify us about the invite via /send
+ if !db.rooms.exists(&pdu.room_id)? {
+ db.rooms.update_membership(
+ &body.room_id,
+ &invited_user,
+ MembershipState::Invite,
+ &sender,
+ Some(invite_state),
+ &db,
+ true,
+ )?;
+ }
+
+ db.flush()?;
+
+ Ok(create_invite::v2::Response {
+ event: PduEvent::convert_to_outgoing_federation_event(signed_event),
+ })
+}
+
+/// # `GET /_matrix/federation/v1/user/devices/{userId}`
+///
+/// Gets information on all devices of the user.
+pub async fn get_devices_route(
+ db: DatabaseGuard,
+ body: Ruma<get_devices::v1::IncomingRequest>,
+) -> Result<get_devices::v1::Response> {
+ if !db.globals.allow_federation() {
+ return Err(Error::bad_config("Federation is disabled."));
+ }
+
+ let sender_servername = body
+ .sender_servername
+ .as_ref()
+ .expect("server is authenticated");
+
+ Ok(get_devices::v1::Response {
+ user_id: body.user_id.clone(),
+ stream_id: db
+ .users
+ .get_devicelist_version(&body.user_id)?
+ .unwrap_or(0)
+ .try_into()
+ .expect("version will not grow that large"),
+ devices: db
+ .users
+ .all_devices_metadata(&body.user_id)
+ .filter_map(|r| r.ok())
+ .filter_map(|metadata| {
+ Some(UserDevice {
+ keys: db
+ .users
+ .get_device_keys(&body.user_id, &metadata.device_id)
+ .ok()??,
+ device_id: metadata.device_id,
+ device_display_name: metadata.display_name,
+ })
+ })
+ .collect(),
+ master_key: db
+ .users
+ .get_master_key(&body.user_id, |u| u.server_name() == sender_servername)?,
+ self_signing_key: db
+ .users
+ .get_self_signing_key(&body.user_id, |u| u.server_name() == sender_servername)?,
+ })
+}
+
+/// # `GET /_matrix/federation/v1/query/directory`
+///
+/// Resolve a room alias to a room id.
+pub async fn get_room_information_route(
+ db: DatabaseGuard,
+ body: Ruma<get_room_information::v1::IncomingRequest>,
+) -> Result<get_room_information::v1::Response> {
+ if !db.globals.allow_federation() {
+ return Err(Error::bad_config("Federation is disabled."));
+ }
+
+ let room_id = db
+ .rooms
+ .id_from_alias(&body.room_alias)?
+ .ok_or(Error::BadRequest(
+ ErrorKind::NotFound,
+ "Room alias not found.",
+ ))?;
+
+ Ok(get_room_information::v1::Response {
+ room_id,
+ servers: vec![db.globals.server_name().to_owned()],
+ })
+}
+
+/// # `GET /_matrix/federation/v1/query/profile`
+///
+/// Gets information on a profile.
+pub async fn get_profile_information_route(
+ db: DatabaseGuard,
+ body: Ruma<get_profile_information::v1::IncomingRequest>,
+) -> Result<get_profile_information::v1::Response> {
+ if !db.globals.allow_federation() {
+ return Err(Error::bad_config("Federation is disabled."));
+ }
+
+ let mut displayname = None;
+ let mut avatar_url = None;
+ let mut blurhash = None;
+
+ match &body.field {
+ Some(ProfileField::DisplayName) => displayname = db.users.displayname(&body.user_id)?,
+ Some(ProfileField::AvatarUrl) => {
+ avatar_url = db.users.avatar_url(&body.user_id)?;
+ blurhash = db.users.blurhash(&body.user_id)?
+ }
+ // TODO: what to do with custom
+ Some(_) => {}
+ None => {
+ displayname = db.users.displayname(&body.user_id)?;
+ avatar_url = db.users.avatar_url(&body.user_id)?;
+ blurhash = db.users.blurhash(&body.user_id)?;
+ }
+ }
+
+ Ok(get_profile_information::v1::Response {
+ blurhash,
+ displayname,
+ avatar_url,
+ })
+}
+
+/// # `POST /_matrix/federation/v1/user/keys/query`
+///
+/// Gets devices and identity keys for the given users.
+pub async fn get_keys_route(
+ db: DatabaseGuard,
+ body: Ruma<get_keys::v1::Request>,
+) -> Result<get_keys::v1::Response> {
+ if !db.globals.allow_federation() {
+ return Err(Error::bad_config("Federation is disabled."));
+ }
+
+ let result = get_keys_helper(
+ None,
+ &body.device_keys,
+ |u| Some(u.server_name()) == body.sender_servername.as_deref(),
+ &db,
+ )
+ .await?;
+
+ db.flush()?;
+
+ Ok(get_keys::v1::Response {
+ device_keys: result.device_keys,
+ master_keys: result.master_keys,
+ self_signing_keys: result.self_signing_keys,
+ })
+}
+
+/// # `POST /_matrix/federation/v1/user/keys/claim`
+///
+/// Claims one-time keys.
+pub async fn claim_keys_route(
+ db: DatabaseGuard,
+ body: Ruma<claim_keys::v1::Request>,
+) -> Result<claim_keys::v1::Response> {
+ if !db.globals.allow_federation() {
+ return Err(Error::bad_config("Federation is disabled."));
+ }
+
+ let result = claim_keys_helper(&body.one_time_keys, &db).await?;
+
+ db.flush()?;
+
+ Ok(claim_keys::v1::Response {
+ one_time_keys: result.one_time_keys,
+ })
+}
+
+#[tracing::instrument(skip_all)]
+pub(crate) async fn fetch_required_signing_keys(
+ event: &BTreeMap<String, CanonicalJsonValue>,
+ pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
+ db: &Database,
+) -> Result<()> {
+ let signatures = event
+ .get("signatures")
+ .ok_or(Error::BadServerResponse(
+ "No signatures in server response pdu.",
+ ))?
+ .as_object()
+ .ok_or(Error::BadServerResponse(
+ "Invalid signatures object in server response pdu.",
+ ))?;
+
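+ // The signatures object has this shape (values illustrative):
+ //   { "example.org": { "ed25519:abc123": "<base64 signature>" } }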
+ // We go through all the signatures we see on the value and fetch the corresponding signing
+ // keys
+ for (signature_server, signature) in signatures {
+ let signature_object = signature.as_object().ok_or(Error::BadServerResponse(
+ "Invalid signatures content object in server response pdu.",
+ ))?;
+
+ let signature_ids = signature_object.keys().cloned().collect::<Vec<_>>();
+
+ let fetch_res = fetch_signing_keys(
+ db,
+ signature_server.as_str().try_into().map_err(|_| {
+ Error::BadServerResponse("Invalid servername in signatures of server response pdu.")
+ })?,
+ signature_ids,
+ )
+ .await;
+
+ let keys = match fetch_res {
+ Ok(keys) => keys,
+ Err(_) => {
+ warn!("Signature verification failed: Could not fetch signing key.",);
+ continue;
+ }
+ };
+
+ pub_key_map
+ .write()
+ .map_err(|_| Error::bad_database("RwLock is poisoned."))?
+ .insert(signature_server.clone(), keys);
+ }
+
+ Ok(())
+}
+
+// Gets a list of servers for which we don't have the signing key yet. We go over
+// the PDUs and either cache the key or add it to the list that needs to be retrieved.
+fn get_server_keys_from_cache(
+ pdu: &RawJsonValue,
+ servers: &mut BTreeMap<Box<ServerName>, BTreeMap<Box<ServerSigningKeyId>, QueryCriteria>>,
+ room_version: &RoomVersionId,
+ pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap<String, BTreeMap<String, Base64>>>,
+ db: &Database,
+) -> Result<()> {
+ let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
+ error!("Invalid PDU in server response: {:?}: {:?}", pdu, e);
+ Error::BadServerResponse("Invalid PDU in server response")
+ })?;
+
+ let event_id = format!(
+ "${}",
+ ruma::signatures::reference_hash(&value, room_version)
+ .expect("ruma can calculate reference hashes")
+ );
+ let event_id = <&EventId>::try_from(event_id.as_str())
+ .expect("ruma's reference hashes are valid event ids");
+
+ if let Some((time, tries)) = db
+ .globals
+ .bad_event_ratelimiter
+ .read()
+ .unwrap()
+ .get(event_id)
+ {
+ // Exponential backoff
+ let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
+ if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
+ min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
+ }
+
+ if time.elapsed() < min_elapsed_duration {
+ debug!("Backing off from {}", event_id);
+ return Err(Error::BadServerResponse("bad event, still backing off"));
+ }
+ }
+
+ let signatures = value
+ .get("signatures")
+ .ok_or(Error::BadServerResponse(
+ "No signatures in server response pdu.",
+ ))?
+ .as_object()
+ .ok_or(Error::BadServerResponse(
+ "Invalid signatures object in server response pdu.",
+ ))?;
+
+ for (signature_server, signature) in signatures {
+ let signature_object = signature.as_object().ok_or(Error::BadServerResponse(
+ "Invalid signatures content object in server response pdu.",
+ ))?;
+
+ let signature_ids = signature_object.keys().cloned().collect::<Vec<_>>();
+
+ let contains_all_ids =
+ |keys: &BTreeMap<String, Base64>| signature_ids.iter().all(|id| keys.contains_key(id));
+
+ let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| {
+ Error::BadServerResponse("Invalid servername in signatures of server response pdu.")
+ })?;
+
+ if servers.contains_key(origin) || pub_key_map.contains_key(origin.as_str()) {
+ continue;
+ }
+
+ trace!("Loading signing keys for {}", origin);
+
+ let result: BTreeMap<_, _> = db
+ .globals
+ .signing_keys_for(origin)?
+ .into_iter()
+ .map(|(k, v)| (k.to_string(), v.key))
+ .collect();
+
+ if !contains_all_ids(&result) {
+ trace!("Signing key not loaded for {}", origin);
+ servers.insert(origin.to_owned(), BTreeMap::new());
+ }
+
+ pub_key_map.insert(origin.to_string(), result);
+ }
+
+ Ok(())
+}
+
+pub(crate) async fn fetch_join_signing_keys(
+ event: &create_join_event::v2::Response,
+ room_version: &RoomVersionId,
+ pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
+ db: &Database,
+) -> Result<()> {
+ let mut servers: BTreeMap<Box<ServerName>, BTreeMap<Box<ServerSigningKeyId>, QueryCriteria>> =
+ BTreeMap::new();
+
+ {
+ let mut pkm = pub_key_map
+ .write()
+ .map_err(|_| Error::bad_database("RwLock is poisoned."))?;
+
+ // Try to fetch keys, failure is okay
+ // Servers we couldn't find in the cache will be added to `servers`
+ for pdu in &event.room_state.state {
+ let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db);
+ }
+ for pdu in &event.room_state.auth_chain {
+ let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db);
+ }
+
+ drop(pkm);
+ }
+
+ if servers.is_empty() {
+ // We had all keys locally
+ return Ok(());
+ }
+
+ for server in db.globals.trusted_servers() {
+ trace!("Asking batch signing keys from trusted server {}", server);
+ if let Ok(keys) = db
+ .sending
+ .send_federation_request(
+ &db.globals,
+ server,
+ get_remote_server_keys_batch::v2::Request {
+ server_keys: servers.clone(),
+ },
+ )
+ .await
+ {
+ trace!("Got signing keys: {:?}", keys);
+ let mut pkm = pub_key_map
+ .write()
+ .map_err(|_| Error::bad_database("RwLock is poisoned."))?;
+ for k in keys.server_keys {
+ let k = match k.deserialize() {
+ Ok(key) => key,
+ Err(e) => {
+ warn!("Invalid server key from trusted server: {}", e);
+ continue;
+ }
+ };
+
+ // TODO: Check signature from trusted server?
+ servers.remove(&k.server_name);
+
+ let result = db
+ .globals
+ .add_signing_key(&k.server_name, k.clone())?
+ .into_iter()
+ .map(|(k, v)| (k.to_string(), v.key))
+ .collect::<BTreeMap<_, _>>();
+
+ pkm.insert(k.server_name.to_string(), result);
+ }
+ }
+
+ if servers.is_empty() {
+ return Ok(());
+ }
+ }
+
+ let mut futures: FuturesUnordered<_> = servers
+ .into_iter()
+ .map(|(server, _)| async move {
+ (
+ db.sending
+ .send_federation_request(
+ &db.globals,
+ &server,
+ get_server_keys::v2::Request::new(),
+ )
+ .await,
+ server,
+ )
+ })
+ .collect();
+
+ while let Some(result) = futures.next().await {
+ if let (Ok(get_keys_response), origin) = result {
+ let server_key = match get_keys_response.server_key.deserialize() {
+ Ok(key) => key,
+ Err(_) => continue,
+ };
+
+ let result: BTreeMap<_, _> = db
+ .globals
+ .add_signing_key(&origin, server_key)?
+ .into_iter()
+ .map(|(k, v)| (k.to_string(), v.key))
+ .collect();
+
+ pub_key_map
+ .write()
+ .map_err(|_| Error::bad_database("RwLock is poisoned."))?
+ .insert(origin.to_string(), result);
+ }
+ }
+
+ Ok(())
+}
+
+/// Returns Ok if the acl allows the server
+fn acl_check(server_name: &ServerName, room_id: &RoomId, db: &Database) -> Result<()> {
+ let acl_event = match db
+ .rooms
+ .room_state_get(room_id, &StateEventType::RoomServerAcl, "")?
+ {
+ Some(acl) => acl,
+ None => return Ok(()),
+ };
+
+ let acl_event_content: RoomServerAclEventContent =
+ match serde_json::from_str(acl_event.content.get()) {
+ Ok(content) => content,
+ Err(_) => {
+ warn!("Invalid ACL event");
+ return Ok(());
+ }
+ };
+
+ if acl_event_content.is_allowed(server_name) {
+ Ok(())
+ } else {
+ Err(Error::BadRequest(
+ ErrorKind::Forbidden,
+ "Server was denied by ACL",
+ ))
+ }
+}
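+
+// For context, an m.room.server_acl content looks like (illustrative):
+//   { "allow": ["*"], "deny": ["evil.example.org"], "allow_ip_literals": false }
+// `is_allowed` returns false for servers matching a deny pattern or not
+// matching any allow pattern.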
+
+#[cfg(test)]
+mod tests {
+ use super::{add_port_to_hostname, get_ip_with_port, FedDest};
+
+ #[test]
+ fn ips_get_default_ports() {
+ assert_eq!(
+ get_ip_with_port("1.1.1.1"),
+ Some(FedDest::Literal("1.1.1.1:8448".parse().unwrap()))
+ );
+ assert_eq!(
+ get_ip_with_port("dead:beef::"),
+ Some(FedDest::Literal("[dead:beef::]:8448".parse().unwrap()))
+ );
+ }
+
+ #[test]
+ fn ips_keep_custom_ports() {
+ assert_eq!(
+ get_ip_with_port("1.1.1.1:1234"),
+ Some(FedDest::Literal("1.1.1.1:1234".parse().unwrap()))
+ );
+ assert_eq!(
+ get_ip_with_port("[dead::beef]:8933"),
+ Some(FedDest::Literal("[dead::beef]:8933".parse().unwrap()))
+ );
+ }
+
+ #[test]
+ fn hostnames_get_default_ports() {
+ assert_eq!(
+ add_port_to_hostname("example.com"),
+ FedDest::Named(String::from("example.com"), String::from(":8448"))
+ )
+ }
+
+ #[test]
+ fn hostnames_keep_custom_ports() {
+ assert_eq!(
+ add_port_to_hostname("example.com:1337"),
+ FedDest::Named(String::from("example.com"), String::from(":1337"))
+ )
+ }
+}