diff --git a/.gitignore b/.gitignore
index 8eb4eda73d71..3bd6b1a08c57 100644
--- a/.gitignore
+++ b/.gitignore
@@ -52,5 +52,5 @@ __pycache__/
book/
# complement
-/complement-master
+/complement-*
/master.tar.gz
diff --git a/CHANGES.md b/CHANGES.md
index f91109f88570..ced1dcc0db80 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,8 +1,53 @@
-Synapse 1.50.0rc1 (2022-01-05)
-==============================
+Synapse 1.50.1 (2022-01-18)
+===========================
+
+This release fixes a bug in Synapse 1.50.0 that could prevent clients from being able to connect to Synapse if the `webclient` resource was enabled. Further details are available in [this issue](https://github.com/matrix-org/synapse/issues/11763).
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.50.0rc1 that could cause Matrix clients to be unable to connect to Synapse instances with the `webclient` resource enabled. ([\#11764](https://github.com/matrix-org/synapse/issues/11764))
+
+
+Synapse 1.50.0 (2022-01-18)
+===========================
+
+**This release contains a critical bug that may prevent clients from being able to connect.
+As such, it is not recommended to upgrade to 1.50.0. Instead, please upgrade straight
+to 1.50.1. Further details are available in [this issue](https://github.com/matrix-org/synapse/issues/11763).**
Please note that we now only support Python 3.7+ and PostgreSQL 10+ (if applicable), because Python 3.6 and PostgreSQL 9.6 have reached end-of-life.
+No significant changes since 1.50.0rc2.
+
+
+Synapse 1.50.0rc2 (2022-01-14)
+==============================
+
+This release candidate fixes a federation-breaking regression introduced in Synapse 1.50.0rc1.
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse v1.0.0 whereby some device list updates would not be sent to remote homeservers if there were too many to send at once. ([\#11729](https://github.com/matrix-org/synapse/issues/11729))
+- Fix a bug introduced in Synapse v1.50.0rc1 whereby outbound federation could fail because too many EDUs were produced for device updates. ([\#11730](https://github.com/matrix-org/synapse/issues/11730))
+
+
+Improved Documentation
+----------------------
+
+- Document that the minimum supported PostgreSQL version is now 10. ([\#11725](https://github.com/matrix-org/synapse/issues/11725))
+
+
+Internal Changes
+----------------
+
+- Fix a typechecker problem related to our (ab)use of `nacl.signing.SigningKey`s. ([\#11714](https://github.com/matrix-org/synapse/issues/11714))
+
+
+Synapse 1.50.0rc1 (2022-01-05)
+==============================
+
Features
--------
diff --git a/changelog.d/11561.feature b/changelog.d/11561.feature
index 19dada883bb2..3d4f2159c0b3 100644
--- a/changelog.d/11561.feature
+++ b/changelog.d/11561.feature
@@ -1 +1 @@
-Add `track_puppeted_user_ips` config flag to track puppeted user IP addresses. This also includes them in monthly active user counts.
+Add `track_puppeted_user_ips` config flag to record client IP addresses against puppeted users, and include the puppeted users in monthly active user counts.
diff --git a/changelog.d/11576.feature b/changelog.d/11576.feature
new file mode 100644
index 000000000000..5be836ae022d
--- /dev/null
+++ b/changelog.d/11576.feature
@@ -0,0 +1 @@
+Remove the `"password_hash"` field from the response dictionaries of the [Users Admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html).
\ No newline at end of file
diff --git a/changelog.d/11577.feature b/changelog.d/11577.feature
new file mode 100644
index 000000000000..f9c8a0d5f40a
--- /dev/null
+++ b/changelog.d/11577.feature
@@ -0,0 +1 @@
+Include whether the requesting user has participated in a thread when generating a summary for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440).
diff --git a/changelog.d/11669.bugfix b/changelog.d/11669.bugfix
new file mode 100644
index 000000000000..10d913aaceed
--- /dev/null
+++ b/changelog.d/11669.bugfix
@@ -0,0 +1 @@
+Fix preview of some GIF URLs (like tenor.com). Contributed by Philippe Daouadi.
diff --git a/changelog.d/11675.feature b/changelog.d/11675.feature
new file mode 100644
index 000000000000..9a276f9542cf
--- /dev/null
+++ b/changelog.d/11675.feature
@@ -0,0 +1 @@
+Add a flag to the `synapse_review_recent_signups` script to filter out appservice users.
diff --git a/changelog.d/11686.doc b/changelog.d/11686.doc
new file mode 100644
index 000000000000..41bc7799d46a
--- /dev/null
+++ b/changelog.d/11686.doc
@@ -0,0 +1 @@
+Warn against using a Let's Encrypt certificate for TLS/DTLS TURN server client connections, and suggest using a ZeroSSL certificate instead. This bypasses client-side connectivity errors caused by WebRTC libraries that reject Let's Encrypt certificates. Contributed by @AndrewFerr.
diff --git a/changelog.d/11724.misc b/changelog.d/11724.misc
new file mode 100644
index 000000000000..e9d5dae857b2
--- /dev/null
+++ b/changelog.d/11724.misc
@@ -0,0 +1 @@
+Improve accuracy of the `python_twisted_reactor_tick_time` Prometheus metric.
diff --git a/changelog.d/11724.removal b/changelog.d/11724.removal
new file mode 100644
index 000000000000..088c3ff31ff8
--- /dev/null
+++ b/changelog.d/11724.removal
@@ -0,0 +1 @@
+Remove the `python_twisted_reactor_pending_calls` Prometheus metric.
diff --git a/changelog.d/11737.bugfix b/changelog.d/11737.bugfix
new file mode 100644
index 000000000000..a293d1cfec0c
--- /dev/null
+++ b/changelog.d/11737.bugfix
@@ -0,0 +1 @@
+Make the list rooms admin API sort stable. Contributed by Daniël Sonck.
\ No newline at end of file
diff --git a/changelog.d/11742.misc b/changelog.d/11742.misc
new file mode 100644
index 000000000000..f65ccdf30a5f
--- /dev/null
+++ b/changelog.d/11742.misc
@@ -0,0 +1 @@
+Minor efficiency improvements when inserting many values into the database.
diff --git a/changelog.d/11749.feature b/changelog.d/11749.feature
new file mode 100644
index 000000000000..3d4f2159c0b3
--- /dev/null
+++ b/changelog.d/11749.feature
@@ -0,0 +1 @@
+Add `track_puppeted_user_ips` config flag to record client IP addresses against puppeted users, and include the puppeted users in monthly active user counts.
diff --git a/changelog.d/11755.doc b/changelog.d/11755.doc
new file mode 100644
index 000000000000..5dd8feea63e7
--- /dev/null
+++ b/changelog.d/11755.doc
@@ -0,0 +1 @@
+Update documentation for configuring login with Facebook.
diff --git a/changelog.d/11757.feature b/changelog.d/11757.feature
new file mode 100644
index 000000000000..3d4f2159c0b3
--- /dev/null
+++ b/changelog.d/11757.feature
@@ -0,0 +1 @@
+Add `track_puppeted_user_ips` config flag to record client IP addresses against puppeted users, and include the puppeted users in monthly active user counts.
diff --git a/changelog.d/11760.misc b/changelog.d/11760.misc
new file mode 100644
index 000000000000..6cb1b5dd49ac
--- /dev/null
+++ b/changelog.d/11760.misc
@@ -0,0 +1 @@
+Add optional debugging to investigate [issue 8631](https://github.com/matrix-org/synapse/issues/8631).
\ No newline at end of file
diff --git a/changelog.d/11761.misc b/changelog.d/11761.misc
new file mode 100644
index 000000000000..d4d997a7b9bc
--- /dev/null
+++ b/changelog.d/11761.misc
@@ -0,0 +1 @@
+Remove `log_function` utility function and its uses.
diff --git a/changelog.d/11765.misc b/changelog.d/11765.misc
new file mode 100644
index 000000000000..a6c946e45237
--- /dev/null
+++ b/changelog.d/11765.misc
@@ -0,0 +1 @@
+Add a unit test that checks both `client` and `webclient` resources will function when simultaneously enabled.
\ No newline at end of file
diff --git a/changelog.d/11766.misc b/changelog.d/11766.misc
new file mode 100644
index 000000000000..3c9e5f95ffcc
--- /dev/null
+++ b/changelog.d/11766.misc
@@ -0,0 +1 @@
+Allow overriding the Complement commit using `COMPLEMENT_REF`.
diff --git a/changelog.d/11768.misc b/changelog.d/11768.misc
new file mode 100644
index 000000000000..1cac1f7446f8
--- /dev/null
+++ b/changelog.d/11768.misc
@@ -0,0 +1 @@
+Use `auto_attribs` and native type hints for attrs classes.
\ No newline at end of file
diff --git a/changelog.d/11770.feature b/changelog.d/11770.feature
new file mode 100644
index 000000000000..72777075cb33
--- /dev/null
+++ b/changelog.d/11770.feature
@@ -0,0 +1 @@
+Add a flag to the `synapse_review_recent_signups` script to filter out appservice users.
\ No newline at end of file
diff --git a/changelog.d/11771.misc b/changelog.d/11771.misc
new file mode 100644
index 000000000000..e9d5dae857b2
--- /dev/null
+++ b/changelog.d/11771.misc
@@ -0,0 +1 @@
+Improve accuracy of the `python_twisted_reactor_tick_time` Prometheus metric.
diff --git a/changelog.d/11774.misc b/changelog.d/11774.misc
new file mode 100644
index 000000000000..136ba57f9410
--- /dev/null
+++ b/changelog.d/11774.misc
@@ -0,0 +1 @@
+Deprecate support for `webclient` listeners and non-HTTP(S) `web_client_location` configuration.
diff --git a/changelog.d/11775.bugfix b/changelog.d/11775.bugfix
new file mode 100644
index 000000000000..2c548dbf3096
--- /dev/null
+++ b/changelog.d/11775.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug where space hierarchy over federation would only work correctly some of the time.
diff --git a/changelog.d/11776.misc b/changelog.d/11776.misc
new file mode 100644
index 000000000000..572ccda84741
--- /dev/null
+++ b/changelog.d/11776.misc
@@ -0,0 +1 @@
+Add some comments and type annotations for `_update_outliers_txn`.
diff --git a/changelog.d/11781.doc b/changelog.d/11781.doc
new file mode 100644
index 000000000000..b68e861d6772
--- /dev/null
+++ b/changelog.d/11781.doc
@@ -0,0 +1 @@
+Update installation instructions to note that Python 3.6 is no longer supported.
diff --git a/changelog.d/11783.misc b/changelog.d/11783.misc
new file mode 100644
index 000000000000..136ba57f9410
--- /dev/null
+++ b/changelog.d/11783.misc
@@ -0,0 +1 @@
+Deprecate support for `webclient` listeners and non-HTTP(S) `web_client_location` configuration.
diff --git a/contrib/prometheus/consoles/synapse.html b/contrib/prometheus/consoles/synapse.html
index cd9ad15231fc..d17c8a08d9e3 100644
--- a/contrib/prometheus/consoles/synapse.html
+++ b/contrib/prometheus/consoles/synapse.html
@@ -92,22 +92,6 @@
Average reactor tick time
})
-<h3>Pending calls per tick</h3>
-<div id="reactor_pending_calls"></div>
-<script>
-</script>
<h3>Storage</h3>
<h4>Queries</h4>
diff --git a/debian/changelog b/debian/changelog
index b54c0ff34878..18983f5da672 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,21 @@
+matrix-synapse-py3 (1.50.1) stable; urgency=medium
+
+ * New synapse release 1.50.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Jan 2022 16:06:26 +0000
+
+matrix-synapse-py3 (1.50.0) stable; urgency=medium
+
+ * New synapse release 1.50.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Jan 2022 10:40:38 +0000
+
+matrix-synapse-py3 (1.50.0~rc2) stable; urgency=medium
+
+ * New synapse release 1.50.0~rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 14 Jan 2022 11:18:06 +0000
+
matrix-synapse-py3 (1.50.0~rc1) stable; urgency=medium
* New synapse release 1.50.0~rc1.
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index 74933d2fcf0e..c514cadb9dae 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -15,9 +15,10 @@ server admin: [Admin API](../usage/administration/admin_api)
It returns a JSON body like the following:
-```json
+```jsonc
{
- "displayname": "User",
+ "name": "@user:example.com",
+ "displayname": "User", // can be null if not set
"threepids": [
{
"medium": "email",
@@ -32,11 +33,11 @@ It returns a JSON body like the following:
"validated_at": 1586458409743
}
],
- "avatar_url": "",
+ "avatar_url": "", // can be null if not set
+ "is_guest": 0,
"admin": 0,
"deactivated": 0,
"shadow_banned": 0,
- "password_hash": "$2b$12$p9B4GkqYdRTPGD",
"creation_ts": 1560432506,
"appservice_id": null,
"consent_server_notice_sent": null,
diff --git a/docs/development/url_previews.md b/docs/development/url_previews.md
index aff38136091d..154b9a5e12f4 100644
--- a/docs/development/url_previews.md
+++ b/docs/development/url_previews.md
@@ -35,7 +35,12 @@ When Synapse is asked to preview a URL it does the following:
5. If the media is HTML:
1. Decodes the HTML via the stored file.
2. Generates an Open Graph response from the HTML.
- 3. If an image exists in the Open Graph response:
+ 3. If a JSON oEmbed URL was found in the HTML via autodiscovery:
+ 1. Downloads the URL and stores it into a file via the media storage provider
+ and saves the local media metadata.
+      2. Converts the oEmbed response to an Open Graph response.
+      3. Overrides any Open Graph data from the HTML with data from oEmbed.
+ 4. If an image exists in the Open Graph response:
1. Downloads the URL and stores it into a file via the media storage
provider and saves the local media metadata.
2. Generates thumbnails.
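The oEmbed override in step 5.3 amounts to a dictionary merge in which oEmbed-derived values win. A toy sketch (the names here are illustrative, not Synapse's internals):

```python
# Illustrative merge for step 5.3: oEmbed-derived Open Graph data overrides
# the values scraped from the HTML's meta tags.
def merge_open_graph(from_html: dict, from_oembed: dict) -> dict:
    merged = dict(from_html)
    merged.update(from_oembed)  # oEmbed values take precedence
    return merged

og = merge_open_graph(
    {"og:title": "Scraped title", "og:description": "From the HTML"},
    {"og:title": "Title from oEmbed"},
)
print(og)  # {'og:title': 'Title from oEmbed', 'og:description': 'From the HTML'}
```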
diff --git a/docs/openid.md b/docs/openid.md
index ff9de9d5b8bf..171ea3b7128b 100644
--- a/docs/openid.md
+++ b/docs/openid.md
@@ -390,9 +390,6 @@ oidc_providers:
### Facebook
-Like Github, Facebook provide a custom OAuth2 API rather than an OIDC-compliant
-one so requires a little more configuration.
-
0. You will need a Facebook developer account. You can register for one
[here](https://developers.facebook.com/async/registration/).
1. On the [apps](https://developers.facebook.com/apps/) page of the developer
@@ -412,24 +409,28 @@ Synapse config:
idp_name: Facebook
idp_brand: "facebook" # optional: styling hint for clients
discover: false
- issuer: "https://facebook.com"
+ issuer: "https://www.facebook.com"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
scopes: ["openid", "email"]
- authorization_endpoint: https://facebook.com/dialog/oauth
- token_endpoint: https://graph.facebook.com/v9.0/oauth/access_token
- user_profile_method: "userinfo_endpoint"
- userinfo_endpoint: "https://graph.facebook.com/v9.0/me?fields=id,name,email,picture"
+ authorization_endpoint: "https://facebook.com/dialog/oauth"
+ token_endpoint: "https://graph.facebook.com/v9.0/oauth/access_token"
+ jwks_uri: "https://www.facebook.com/.well-known/oauth/openid/jwks/"
user_mapping_provider:
config:
- subject_claim: "id"
display_name_template: "{{ user.name }}"
+  email_template: "{{ user.email }}"
```
Relevant documents:
- * https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow
- * Using Facebook's Graph API: https://developers.facebook.com/docs/graph-api/using-graph-api/
- * Reference to the User endpoint: https://developers.facebook.com/docs/graph-api/reference/user
+ * [Manually Build a Login Flow](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow)
+ * [Using Facebook's Graph API](https://developers.facebook.com/docs/graph-api/using-graph-api/)
+ * [Reference to the User endpoint](https://developers.facebook.com/docs/graph-api/reference/user)
+
+Facebook do have an [OIDC discovery endpoint](https://www.facebook.com/.well-known/openid-configuration),
+but it has a `response_types_supported` which excludes "code" (which we rely on, and
+is even mentioned in their [documentation](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow#login)),
+so we have to disable discovery and configure the URIs manually.
### Gitea
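The discovery problem described above is easy to verify for yourself; a standard-library sketch (not part of Synapse):

```python
# Show why discovery is disabled for Facebook: their discovery document's
# response_types_supported omits "code", which Synapse's OAuth2 flow needs.
import json
from urllib.request import urlopen

with urlopen("https://www.facebook.com/.well-known/openid-configuration") as resp:
    discovery = json.load(resp)

print("code" in discovery.get("response_types_supported", []))  # expected: False
```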
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 907e067e5163..1b86d0295d73 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -74,13 +74,7 @@ server_name: "SERVERNAME"
#
pid_file: DATADIR/homeserver.pid
-# The absolute URL to the web client which /_matrix/client will redirect
-# to if 'webclient' is configured under the 'listeners' configuration.
-#
-# This option can be also set to the filesystem path to the web client
-# which will be served at /_matrix/client/ if 'webclient' is configured
-# under the 'listeners' configuration, however this is a security risk:
-# https://github.com/matrix-org/synapse#security-note
+# The absolute URL to the web client which / will redirect to.
#
#web_client_location: https://riot.example.com/
@@ -310,8 +304,6 @@ presence:
# static: static resources under synapse/static (/_matrix/static). (Mostly
# useful for 'fallback authentication'.)
#
-# webclient: A web client. Requires web_client_location to be set.
-#
listeners:
# TLS-enabled listener: for when matrix traffic is sent directly to synapse.
#
@@ -1503,11 +1495,20 @@ room_prejoin_state:
#additional_event_types:
# - org.example.custom.event.type
-# If enabled, puppeted user IP's can also be tracked. By default when
-# puppeting another user, the user who has created the access token
-# for puppeting is tracked. If this is enabled, both requests are tracked.
-# Implicitly enables MAU tracking for puppeted users.
-#track_puppeted_user_ips: false
+# We record the IP address of clients used to access the API for various
+# reasons, including displaying it to the user in the "Where you're signed in"
+# dialog.
+#
+# By default, when puppeting another user via the admin API, the client IP
+# address is recorded against the user who created the access token (ie, the
+# admin user), and *not* the puppeted user.
+#
+# Uncomment the following to also record the IP address against the puppeted
+# user. (This also means that the puppeted user will count as an "active" user
+# for the purpose of monthly active user tracking - see 'limit_usage_by_mau' etc
+# above.)
+#
+#track_puppeted_user_ips: true
# A list of application service config files to use
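As a toy model of the behaviour described in the new comment block (this is not Synapse's implementation), the flag controls whether the puppeted user is recorded alongside the token's creator:

```python
# Toy model only: which user(s) a client IP is recorded against when an
# admin puppets another user via the admin API.
def users_to_record_ip_against(
    token_creator: str, puppeted_user: str, track_puppeted_user_ips: bool
) -> list:
    users = [token_creator]  # default: only the admin who created the token
    if track_puppeted_user_ips:
        # With the flag on, the puppeted user is recorded too, and so also
        # counts towards monthly active user tracking.
        users.append(puppeted_user)
    return users

assert users_to_record_ip_against("@admin:hs", "@alice:hs", False) == ["@admin:hs"]
assert users_to_record_ip_against("@admin:hs", "@alice:hs", True) == ["@admin:hs", "@alice:hs"]
```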
@@ -1876,10 +1877,13 @@ saml2_config:
# Defaults to false. Avoid this in production.
#
# user_profile_method: Whether to fetch the user profile from the userinfo
-# endpoint. Valid values are: 'auto' or 'userinfo_endpoint'.
+# endpoint, or to rely on the data returned in the id_token from the
+# token_endpoint.
+#
+# Valid values are: 'auto' or 'userinfo_endpoint'.
#
-# Defaults to 'auto', which fetches the userinfo endpoint if 'openid' is
-# included in 'scopes'. Set to 'userinfo_endpoint' to always fetch the
+# Defaults to 'auto', which uses the userinfo endpoint if 'openid' is
+# not included in 'scopes'. Set to 'userinfo_endpoint' to always use the
# userinfo endpoint.
#
# allow_existing_users: set to 'true' to allow a user logging in via OIDC to
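The corrected `auto` rule can be condensed to a few lines; a sketch of the documented behaviour, not the OIDC handler's actual code:

```python
# Sketch of the documented user_profile_method rule.
def uses_userinfo_endpoint(user_profile_method: str, scopes: list) -> bool:
    if user_profile_method == "userinfo_endpoint":
        return True
    # "auto": with the "openid" scope the id_token from the token_endpoint
    # already carries the profile, so userinfo is only needed without it.
    return "openid" not in scopes

assert uses_userinfo_endpoint("auto", ["openid", "email"]) is False
assert uses_userinfo_endpoint("auto", ["email"]) is True
assert uses_userinfo_endpoint("userinfo_endpoint", ["openid"]) is True
```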
diff --git a/docs/setup/installation.md b/docs/setup/installation.md
index 69ade036c39d..fe657a15dfa6 100644
--- a/docs/setup/installation.md
+++ b/docs/setup/installation.md
@@ -194,7 +194,7 @@ When following this route please make sure that the [Platform-specific prerequis
System requirements:
- POSIX-compliant system (tested on Linux & OS X)
-- Python 3.7 or later, up to Python 3.10.
+- Python 3.7 or later, up to Python 3.9.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
To install the Synapse homeserver run:
diff --git a/docs/turn-howto.md b/docs/turn-howto.md
index e32aaa1850d6..eba7ca6124a5 100644
--- a/docs/turn-howto.md
+++ b/docs/turn-howto.md
@@ -137,6 +137,10 @@ This will install and start a systemd service called `coturn`.
# TLS private key file
pkey=/path/to/privkey.pem
+
+ # Ensure the configuration lines that disable TLS/DTLS are commented-out or removed
+ #no-tls
+ #no-dtls
```
In this case, replace the `turn:` schemes in the `turn_uris` settings below
@@ -145,6 +149,14 @@ This will install and start a systemd service called `coturn`.
We recommend that you only try to set up TLS/DTLS once you have set up a
basic installation and got it working.
+ NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
+ not work with any Matrix client that uses Chromium's WebRTC library. This
+ currently includes Element Android & iOS; for more details, see their
+ [respective](https://github.com/vector-im/element-android/issues/1533)
+ [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
+ [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
+ Consider using a ZeroSSL certificate for your TURN server as a working alternative.
+
1. Ensure your firewall allows traffic into the TURN server on the ports
you've configured it to listen on (By default: 3478 and 5349 for TURN
traffic (remember to allow both TCP and UDP traffic), and ports 49152-65535
@@ -250,6 +262,10 @@ Here are a few things to try:
* Check that you have opened your firewall to allow UDP traffic to the UDP
relay ports (49152-65535 by default).
+  * Try disabling `coturn`'s TLS/DTLS listeners and enabling only its (unencrypted)
+ TCP/UDP listeners. (This will only leave signaling traffic unencrypted;
+ voice & video WebRTC traffic is always encrypted.)
+
* Some WebRTC implementations (notably, that of Google Chrome) appear to get
confused by TURN servers which are reachable over IPv6 (this appears to be
an unexpected side-effect of its handling of multiple IP addresses as
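To check whether a given TURN server is affected by the Let's Encrypt issue above, one can inspect its certificate issuer; a standard-library sketch (host and port are placeholders for your own deployment):

```python
# Print the CA that issued a TURN server's TLS certificate; "Let's Encrypt"
# indicates that clients on Chromium's WebRTC stack will fail as described.
import socket
import ssl

host, port = "turn.example.com", 5349  # placeholders
ctx = ssl.create_default_context()
with socket.create_connection((host, port)) as sock:
    with ctx.wrap_socket(sock, server_hostname=host) as tls:
        # getpeercert()["issuer"] is a tuple of RDNs, each a tuple of pairs.
        issuer = dict(rdn[0] for rdn in tls.getpeercert()["issuer"])

print(issuer.get("organizationName"))
```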
diff --git a/docs/upgrade.md b/docs/upgrade.md
index 30bb0dcd9cfd..f455d257babf 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -85,6 +85,17 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
+# Upgrading to v1.51.0
+
+## Deprecation of `webclient` listeners and non-HTTP(S) `web_client_location`
+
+Listeners of type `webclient` are deprecated and scheduled to be removed in
+Synapse v1.53.0.
+
+Similarly, a non-HTTP(S) `web_client_location` configuration is deprecated and
+will become a configuration error in Synapse v1.53.0.
+
+
# Upgrading to v1.50.0
## Dropping support for old Python and Postgres versions
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index 67a22d3ed3e7..e08ffedaf33a 100755
--- a/scripts-dev/complement.sh
+++ b/scripts-dev/complement.sh
@@ -8,7 +8,8 @@
# By default the script will fetch the latest Complement master branch and
# run tests with that. This can be overridden to use a custom Complement
# checkout by setting the COMPLEMENT_DIR environment variable to the
-# filepath of a local Complement checkout.
+# filepath of a local Complement checkout or by setting the COMPLEMENT_REF
+# environment variable to pull a different branch or commit.
#
# By default Synapse is run in monolith mode. This can be overridden by
# setting the WORKERS environment variable.
@@ -31,11 +32,12 @@ cd "$(dirname $0)/.."
# Check for a user-specified Complement checkout
if [[ -z "$COMPLEMENT_DIR" ]]; then
- echo "COMPLEMENT_DIR not set. Fetching the latest Complement checkout..."
- wget -Nq https://github.com/matrix-org/complement/archive/master.tar.gz
- tar -xzf master.tar.gz
- COMPLEMENT_DIR=complement-master
- echo "Checkout available at 'complement-master'"
+ COMPLEMENT_REF=${COMPLEMENT_REF:-master}
+ echo "COMPLEMENT_DIR not set. Fetching Complement checkout from ${COMPLEMENT_REF}..."
+ wget -Nq https://github.com/matrix-org/complement/archive/${COMPLEMENT_REF}.tar.gz
+ tar -xzf ${COMPLEMENT_REF}.tar.gz
+ COMPLEMENT_DIR=complement-${COMPLEMENT_REF}
+ echo "Checkout available at 'complement-${COMPLEMENT_REF}'"
fi
# Build the base Synapse image from the local checkout
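With this change, a specific Complement revision can be tested by running, for example, `COMPLEMENT_REF=<branch-or-commit> ./scripts-dev/complement.sh` (the ref name here is a placeholder); leaving both variables unset keeps the old behaviour of fetching `master`.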
diff --git a/synapse/__init__.py b/synapse/__init__.py
index e0273a543841..b99c65933e60 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -47,7 +47,7 @@
except ImportError:
pass
-__version__ = "1.50.0rc1"
+__version__ = "1.50.1"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when
diff --git a/synapse/_scripts/review_recent_signups.py b/synapse/_scripts/review_recent_signups.py
index 093af4327ae0..e207f154f3aa 100644
--- a/synapse/_scripts/review_recent_signups.py
+++ b/synapse/_scripts/review_recent_signups.py
@@ -46,7 +46,9 @@ class UserInfo:
ips: List[str] = attr.Factory(list)
-def get_recent_users(txn: LoggingTransaction, since_ms: int) -> List[UserInfo]:
+def get_recent_users(
+ txn: LoggingTransaction, since_ms: int, exclude_app_service: bool
+) -> List[UserInfo]:
"""Fetches recently registered users and some info on them."""
sql = """
@@ -56,6 +58,9 @@ def get_recent_users(txn: LoggingTransaction, since_ms: int) -> List[UserInfo]:
AND deactivated = 0
"""
+ if exclude_app_service:
+ sql += " AND appservice_id IS NULL"
+
txn.execute(sql, (since_ms / 1000,))
user_infos = [UserInfo(user_id, creation_ts) for user_id, creation_ts in txn]
@@ -113,7 +118,7 @@ def main() -> None:
"-e",
"--exclude-emails",
action="store_true",
- help="Exclude users that have validated email addresses",
+ help="Exclude users that have validated email addresses.",
)
parser.add_argument(
"-u",
@@ -121,6 +126,12 @@ def main() -> None:
action="store_true",
help="Only print user IDs that match.",
)
+ parser.add_argument(
+ "-a",
+ "--exclude-app-service",
+ help="Exclude appservice users.",
+ action="store_true",
+ )
config = ReviewConfig()
@@ -133,6 +144,7 @@ def main() -> None:
since_ms = time.time() * 1000 - Config.parse_duration(config_args.since)
exclude_users_with_email = config_args.exclude_emails
+ exclude_users_with_appservice = config_args.exclude_app_service
include_context = not config_args.only_users
for database_config in config.database.databases:
@@ -143,7 +155,7 @@ def main() -> None:
with make_conn(database_config, engine, "review_recent_signups") as db_conn:
# This generates a type of Cursor, not LoggingTransaction.
- user_infos = get_recent_users(db_conn.cursor(), since_ms) # type: ignore[arg-type]
+ user_infos = get_recent_users(db_conn.cursor(), since_ms, exclude_users_with_appservice) # type: ignore[arg-type]
for user_info in user_infos:
if exclude_users_with_email and user_info.emails:
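Assuming the script's usual config-file invocation, the new flag would be used as, for example, `synapse_review_recent_signups -c homeserver.yaml -a` (or `--exclude-app-service`) to drop appservice-registered users from the report.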
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 177ce040e857..efedcc88894b 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -27,7 +27,6 @@
import synapse.config.logger
from synapse import events
from synapse.api.urls import (
- CLIENT_API_PREFIX,
FEDERATION_PREFIX,
LEGACY_MEDIA_PREFIX,
MEDIA_R0_PREFIX,
@@ -132,9 +131,18 @@ def _listener_http(
resources.update(self._module_web_resources)
self._module_web_resources_consumed = True
- # try to find something useful to redirect '/' to
- if WEB_CLIENT_PREFIX in resources:
- root_resource: Resource = RootOptionsRedirectResource(WEB_CLIENT_PREFIX)
+ # Try to find something useful to serve at '/':
+ #
+ # 1. Redirect to the web client if it is an HTTP(S) URL.
+ # 2. Redirect to the web client served via Synapse.
+ # 3. Redirect to the static "Synapse is running" page.
+ # 4. Do not redirect and use a blank resource.
+ if self.config.server.web_client_location_is_redirect:
+ root_resource: Resource = RootOptionsRedirectResource(
+ self.config.server.web_client_location
+ )
+ elif WEB_CLIENT_PREFIX in resources:
+ root_resource = RootOptionsRedirectResource(WEB_CLIENT_PREFIX)
elif STATIC_PREFIX in resources:
root_resource = RootOptionsRedirectResource(STATIC_PREFIX)
else:
@@ -193,7 +201,13 @@ def _configure_named_resource(
resources.update(
{
- CLIENT_API_PREFIX: client_resource,
+ "/_matrix/client/api/v1": client_resource,
+ "/_matrix/client/r0": client_resource,
+ "/_matrix/client/v1": client_resource,
+ "/_matrix/client/v3": client_resource,
+ "/_matrix/client/unstable": client_resource,
+ "/_matrix/client/v2_alpha": client_resource,
+ "/_matrix/client/versions": client_resource,
"/.well-known": well_known_resource(self),
"/_synapse/admin": AdminRestResource(self),
**build_synapse_client_resource_tree(self),
@@ -257,15 +271,15 @@ def _configure_named_resource(
resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
if name == "webclient":
+            # webclient listeners are deprecated as of Synapse v1.51.0 and are
+            # to be removed in Synapse > v1.53.0.
webclient_loc = self.config.server.web_client_location
if webclient_loc is None:
logger.warning(
"Not enabling webclient resource, as web_client_location is unset."
)
- elif webclient_loc.startswith("http://") or webclient_loc.startswith(
- "https://"
- ):
+ elif self.config.server.web_client_location_is_redirect:
resources[WEB_CLIENT_PREFIX] = RootRedirect(webclient_loc)
else:
logger.warning(
diff --git a/synapse/config/api.py b/synapse/config/api.py
index bdbe9f0fa280..8133b6b62402 100644
--- a/synapse/config/api.py
+++ b/synapse/config/api.py
@@ -61,11 +61,20 @@ def generate_config_section(cls, **kwargs) -> str:
#additional_event_types:
# - org.example.custom.event.type
- # If enabled, puppeted user IP's can also be tracked. By default when
- # puppeting another user, the user who has created the access token
- # for puppeting is tracked. If this is enabled, both requests are tracked.
- # Implicitly enables MAU tracking for puppeted users.
- #track_puppeted_user_ips: false
+ # We record the IP address of clients used to access the API for various
+ # reasons, including displaying it to the user in the "Where you're signed in"
+ # dialog.
+ #
+ # By default, when puppeting another user via the admin API, the client IP
+ # address is recorded against the user who created the access token (ie, the
+ # admin user), and *not* the puppeted user.
+ #
+ # Uncomment the following to also record the IP address against the puppeted
+ # user. (This also means that the puppeted user will count as an "active" user
+ # for the purpose of monthly active user tracking - see 'limit_usage_by_mau' etc
+ # above.)
+ #
+ #track_puppeted_user_ips: true
""" % {
"formatted_default_state_types": formatted_default_state_types
}
diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py
index 79c400fe30b8..e783b1131501 100644
--- a/synapse/config/oidc.py
+++ b/synapse/config/oidc.py
@@ -148,10 +148,13 @@ def generate_config_section(self, config_dir_path, server_name, **kwargs) -> str
# Defaults to false. Avoid this in production.
#
# user_profile_method: Whether to fetch the user profile from the userinfo
- # endpoint. Valid values are: 'auto' or 'userinfo_endpoint'.
+ # endpoint, or to rely on the data returned in the id_token from the
+ # token_endpoint.
#
- # Defaults to 'auto', which fetches the userinfo endpoint if 'openid' is
- # included in 'scopes'. Set to 'userinfo_endpoint' to always fetch the
+ # Valid values are: 'auto' or 'userinfo_endpoint'.
+ #
+ # Defaults to 'auto', which uses the userinfo endpoint if 'openid' is
+ # not included in 'scopes'. Set to 'userinfo_endpoint' to always use the
# userinfo endpoint.
#
# allow_existing_users: set to 'true' to allow a user logging in via OIDC to
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 5010266b6983..f200d0c1f1cf 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -259,7 +259,6 @@ def read_config(self, config, **kwargs):
raise ConfigError(str(e))
self.pid_file = self.abspath(config.get("pid_file"))
- self.web_client_location = config.get("web_client_location", None)
self.soft_file_limit = config.get("soft_file_limit", 0)
self.daemonize = config.get("daemonize")
self.print_pidfile = config.get("print_pidfile")
@@ -506,8 +505,17 @@ def read_config(self, config, **kwargs):
l2.append(listener)
self.listeners = l2
- if not self.web_client_location:
- _warn_if_webclient_configured(self.listeners)
+ self.web_client_location = config.get("web_client_location", None)
+ self.web_client_location_is_redirect = self.web_client_location and (
+ self.web_client_location.startswith("http://")
+ or self.web_client_location.startswith("https://")
+ )
+ # A non-HTTP(S) web client location is deprecated.
+ if self.web_client_location and not self.web_client_location_is_redirect:
+ logger.warning(NO_MORE_NONE_HTTP_WEB_CLIENT_LOCATION_WARNING)
+
+ # Warn if webclient is configured for a worker.
+ _warn_if_webclient_configured(self.listeners)
self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
self.gc_seconds = self.read_gc_intervals(config.get("gc_min_interval", None))
@@ -793,13 +801,7 @@ def generate_config_section(
#
pid_file: %(pid_file)s
- # The absolute URL to the web client which /_matrix/client will redirect
- # to if 'webclient' is configured under the 'listeners' configuration.
- #
- # This option can be also set to the filesystem path to the web client
- # which will be served at /_matrix/client/ if 'webclient' is configured
- # under the 'listeners' configuration, however this is a security risk:
- # https://github.com/matrix-org/synapse#security-note
+ # The absolute URL to the web client which / will redirect to.
#
#web_client_location: https://riot.example.com/
@@ -1011,8 +1013,6 @@ def generate_config_section(
# static: static resources under synapse/static (/_matrix/static). (Mostly
# useful for 'fallback authentication'.)
#
- # webclient: A web client. Requires web_client_location to be set.
- #
listeners:
# TLS-enabled listener: for when matrix traffic is sent directly to synapse.
#
@@ -1349,9 +1349,15 @@ def parse_listener_def(listener: Any) -> ListenerConfig:
return ListenerConfig(port, bind_addresses, listener_type, tls, http_config)
+NO_MORE_NONE_HTTP_WEB_CLIENT_LOCATION_WARNING = """
+Synapse no longer supports serving a web client. To remove this warning,
+configure 'web_client_location' with an HTTP(S) URL.
+"""
+
+
NO_MORE_WEB_CLIENT_WARNING = """
-Synapse no longer includes a web client. To enable a web client, configure
-web_client_location. To remove this warning, remove 'webclient' from the 'listeners'
+Synapse no longer includes a web client. To redirect the root resource to a web client, configure
+'web_client_location'. To remove this warning, remove 'webclient' from the 'listeners'
configuration.
"""
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 6ea4edfc71f8..74f17aa4daa3 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -56,7 +56,6 @@
from synapse.events import EventBase, builder
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.federation.transport.client import SendJoinResponse
-from synapse.logging.utils import log_function
from synapse.types import JsonDict, get_domain_from_id
from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.expiringcache import ExpiringCache
@@ -119,7 +118,8 @@ def __init__(self, hs: "HomeServer"):
# It is a map of (room ID, suggested-only) -> the response of
# get_room_hierarchy.
self._get_room_hierarchy_cache: ExpiringCache[
- Tuple[str, bool], Tuple[JsonDict, Sequence[JsonDict], Sequence[str]]
+ Tuple[str, bool],
+ Tuple[JsonDict, Sequence[JsonDict], Sequence[JsonDict], Sequence[str]],
] = ExpiringCache(
cache_name="get_room_hierarchy_cache",
clock=self._clock,
@@ -144,7 +144,6 @@ def _clear_tried_cache(self) -> None:
if destination_dict:
self.pdu_destination_tried[event_id] = destination_dict
- @log_function
async def make_query(
self,
destination: str,
@@ -178,7 +177,6 @@ async def make_query(
ignore_backoff=ignore_backoff,
)
- @log_function
async def query_client_keys(
self, destination: str, content: JsonDict, timeout: int
) -> JsonDict:
@@ -196,7 +194,6 @@ async def query_client_keys(
destination, content, timeout
)
- @log_function
async def query_user_devices(
self, destination: str, user_id: str, timeout: int = 30000
) -> JsonDict:
@@ -208,7 +205,6 @@ async def query_user_devices(
destination, user_id, timeout
)
- @log_function
async def claim_client_keys(
self, destination: str, content: JsonDict, timeout: int
) -> JsonDict:
@@ -1338,7 +1334,7 @@ async def get_room_hierarchy(
destinations: Iterable[str],
room_id: str,
suggested_only: bool,
- ) -> Tuple[JsonDict, Sequence[JsonDict], Sequence[str]]:
+ ) -> Tuple[JsonDict, Sequence[JsonDict], Sequence[JsonDict], Sequence[str]]:
"""
Call other servers to get a hierarchy of the given room.
@@ -1353,7 +1349,8 @@ async def get_room_hierarchy(
Returns:
A tuple of:
- The room as a JSON dictionary.
+ The room as a JSON dictionary, without a "children_state" key.
+ A list of `m.space.child` state events.
A list of children rooms, as JSON dictionaries.
A list of inaccessible children room IDs.
@@ -1368,7 +1365,7 @@ async def get_room_hierarchy(
async def send_request(
destination: str,
- ) -> Tuple[JsonDict, Sequence[JsonDict], Sequence[str]]:
+ ) -> Tuple[JsonDict, Sequence[JsonDict], Sequence[JsonDict], Sequence[str]]:
try:
res = await self.transport_layer.get_room_hierarchy(
destination=destination,
@@ -1397,7 +1394,7 @@ async def send_request(
raise InvalidResponseError("'room' must be a dict")
# Validate children_state of the room.
- children_state = room.get("children_state", [])
+ children_state = room.pop("children_state", [])
if not isinstance(children_state, Sequence):
raise InvalidResponseError("'room.children_state' must be a list")
if any(not isinstance(e, dict) for e in children_state):
@@ -1426,7 +1423,7 @@ async def send_request(
"Invalid room ID in 'inaccessible_children' list"
)
- return room, children, inaccessible_children
+ return room, children_state, children, inaccessible_children
try:
result = await self._try_destination_list(
@@ -1474,8 +1471,6 @@ async def send_request(
if event.room_id == room_id:
children_events.append(event.data)
children_room_ids.add(event.state_key)
- # And add them under the requested room.
- requested_room["children_state"] = children_events
# Find the children rooms.
children = []
@@ -1485,7 +1480,7 @@ async def send_request(
# It isn't clear from the response whether some of the rooms are
# not accessible.
- result = (requested_room, children, ())
+ result = (requested_room, children_events, children, ())
# Cache the result to avoid fetching data over federation every time.
self._get_room_hierarchy_cache[(room_id, suggested_only)] = result
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index ee71f289c81c..af9cb98f67b1 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -58,7 +58,6 @@
run_in_background,
)
from synapse.logging.opentracing import log_kv, start_active_span_from_edu, trace
-from synapse.logging.utils import log_function
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.replication.http.federation import (
ReplicationFederationSendEduRestServlet,
@@ -859,7 +858,6 @@ async def on_event_auth(
res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
return 200, res
- @log_function
async def on_query_client_keys(
self, origin: str, content: Dict[str, str]
) -> Tuple[int, Dict[str, Any]]:
@@ -940,7 +938,6 @@ async def on_get_missing_events(
return {"events": [ev.get_pdu_json(time_now) for ev in missing_events]}
- @log_function
async def on_openid_userinfo(self, token: str) -> Optional[str]:
ts_now_ms = self._clock.time_msec()
return await self.store.get_user_id_for_open_id_token(token, ts_now_ms)
diff --git a/synapse/federation/persistence.py b/synapse/federation/persistence.py
index 523ab1c51ed1..60e2e6cf019f 100644
--- a/synapse/federation/persistence.py
+++ b/synapse/federation/persistence.py
@@ -23,7 +23,6 @@
from typing import Optional, Tuple
from synapse.federation.units import Transaction
-from synapse.logging.utils import log_function
from synapse.storage.databases.main import DataStore
from synapse.types import JsonDict
@@ -36,7 +35,6 @@ class TransactionActions:
def __init__(self, datastore: DataStore):
self.store = datastore
- @log_function
async def have_responded(
self, origin: str, transaction: Transaction
) -> Optional[Tuple[int, JsonDict]]:
@@ -53,7 +51,6 @@ async def have_responded(
return await self.store.get_received_txn_response(transaction_id, origin)
- @log_function
async def set_response(
self, origin: str, transaction: Transaction, code: int, response: JsonDict
) -> None:
diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py
index ab935e5a7eda..742ee572558d 100644
--- a/synapse/federation/sender/transaction_manager.py
+++ b/synapse/federation/sender/transaction_manager.py
@@ -35,6 +35,7 @@
import synapse.server
logger = logging.getLogger(__name__)
+issue_8631_logger = logging.getLogger("synapse.8631_debug")
last_pdu_ts_metric = Gauge(
"synapse_federation_last_sent_pdu_time",
@@ -124,6 +125,17 @@ async def send_new_transaction(
len(pdus),
len(edus),
)
+ if issue_8631_logger.isEnabledFor(logging.DEBUG):
+ DEVICE_UPDATE_EDUS = {"m.device_list_update", "m.signing_key_update"}
+ device_list_updates = [
+ edu.content for edu in edus if edu.edu_type in DEVICE_UPDATE_EDUS
+ ]
+ if device_list_updates:
+ issue_8631_logger.debug(
+ "about to send txn [%s] including device list updates: %s",
+ transaction.transaction_id,
+ device_list_updates,
+ )
# Actually send the transaction
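Since the logging above is guarded by `isEnabledFor(logging.DEBUG)`, the dedicated logger has to be enabled explicitly. A minimal sketch using the standard library (a real deployment would normally do this in its logging config instead):

```python
# Enable the dedicated issue-8631 debug logger so the device-list EDU
# logging above actually fires.
import logging

logging.basicConfig()  # bare-bones handler for this sketch
logging.getLogger("synapse.8631_debug").setLevel(logging.DEBUG)
```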
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 9fc4c31c93f6..8782586cd6b4 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -44,7 +44,6 @@
from synapse.events import EventBase, make_event_from_dict
from synapse.federation.units import Transaction
from synapse.http.matrixfederationclient import ByteParser
-from synapse.logging.utils import log_function
from synapse.types import JsonDict
logger = logging.getLogger(__name__)
@@ -62,7 +61,6 @@ def __init__(self, hs):
self.server_name = hs.hostname
self.client = hs.get_federation_http_client()
- @log_function
async def get_room_state_ids(
self, destination: str, room_id: str, event_id: str
) -> JsonDict:
@@ -88,7 +86,6 @@ async def get_room_state_ids(
try_trailing_slash_on_400=True,
)
- @log_function
async def get_event(
self, destination: str, event_id: str, timeout: Optional[int] = None
) -> JsonDict:
@@ -111,7 +108,6 @@ async def get_event(
destination, path=path, timeout=timeout, try_trailing_slash_on_400=True
)
- @log_function
async def backfill(
self, destination: str, room_id: str, event_tuples: Collection[str], limit: int
) -> Optional[JsonDict]:
@@ -149,7 +145,6 @@ async def backfill(
destination, path=path, args=args, try_trailing_slash_on_400=True
)
- @log_function
async def timestamp_to_event(
self, destination: str, room_id: str, timestamp: int, direction: str
) -> Union[JsonDict, List]:
@@ -185,7 +180,6 @@ async def timestamp_to_event(
return remote_response
- @log_function
async def send_transaction(
self,
transaction: Transaction,
@@ -234,7 +228,6 @@ async def send_transaction(
try_trailing_slash_on_400=True,
)
- @log_function
async def make_query(
self,
destination: str,
@@ -254,7 +247,6 @@ async def make_query(
ignore_backoff=ignore_backoff,
)
- @log_function
async def make_membership_event(
self,
destination: str,
@@ -317,7 +309,6 @@ async def make_membership_event(
ignore_backoff=ignore_backoff,
)
- @log_function
async def send_join_v1(
self,
room_version: RoomVersion,
@@ -336,7 +327,6 @@ async def send_join_v1(
max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN,
)
- @log_function
async def send_join_v2(
self,
room_version: RoomVersion,
@@ -355,7 +345,6 @@ async def send_join_v2(
max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN,
)
- @log_function
async def send_leave_v1(
self, destination: str, room_id: str, event_id: str, content: JsonDict
) -> Tuple[int, JsonDict]:
@@ -372,7 +361,6 @@ async def send_leave_v1(
ignore_backoff=True,
)
- @log_function
async def send_leave_v2(
self, destination: str, room_id: str, event_id: str, content: JsonDict
) -> JsonDict:
@@ -389,7 +377,6 @@ async def send_leave_v2(
ignore_backoff=True,
)
- @log_function
async def send_knock_v1(
self,
destination: str,
@@ -423,7 +410,6 @@ async def send_knock_v1(
destination=destination, path=path, data=content
)
- @log_function
async def send_invite_v1(
self, destination: str, room_id: str, event_id: str, content: JsonDict
) -> Tuple[int, JsonDict]:
@@ -433,7 +419,6 @@ async def send_invite_v1(
destination=destination, path=path, data=content, ignore_backoff=True
)
- @log_function
async def send_invite_v2(
self, destination: str, room_id: str, event_id: str, content: JsonDict
) -> JsonDict:
@@ -443,7 +428,6 @@ async def send_invite_v2(
destination=destination, path=path, data=content, ignore_backoff=True
)
- @log_function
async def get_public_rooms(
self,
remote_server: str,
@@ -516,7 +500,6 @@ async def get_public_rooms(
return response
- @log_function
async def exchange_third_party_invite(
self, destination: str, room_id: str, event_dict: JsonDict
) -> JsonDict:
@@ -526,7 +509,6 @@ async def exchange_third_party_invite(
destination=destination, path=path, data=event_dict
)
- @log_function
async def get_event_auth(
self, destination: str, room_id: str, event_id: str
) -> JsonDict:
@@ -534,7 +516,6 @@ async def get_event_auth(
return await self.client.get_json(destination=destination, path=path)
- @log_function
async def query_client_keys(
self, destination: str, query_content: JsonDict, timeout: int
) -> JsonDict:
@@ -576,7 +557,6 @@ async def query_client_keys(
destination=destination, path=path, data=query_content, timeout=timeout
)
- @log_function
async def query_user_devices(
self, destination: str, user_id: str, timeout: int
) -> JsonDict:
@@ -616,7 +596,6 @@ async def query_user_devices(
destination=destination, path=path, timeout=timeout
)
- @log_function
async def claim_client_keys(
self, destination: str, query_content: JsonDict, timeout: int
) -> JsonDict:
@@ -655,7 +634,6 @@ async def claim_client_keys(
destination=destination, path=path, data=query_content, timeout=timeout
)
- @log_function
async def get_missing_events(
self,
destination: str,
@@ -680,7 +658,6 @@ async def get_missing_events(
timeout=timeout,
)
- @log_function
async def get_group_profile(
self, destination: str, group_id: str, requester_user_id: str
) -> JsonDict:
@@ -694,7 +671,6 @@ async def get_group_profile(
ignore_backoff=True,
)
- @log_function
async def update_group_profile(
self, destination: str, group_id: str, requester_user_id: str, content: JsonDict
) -> JsonDict:
@@ -716,7 +692,6 @@ async def update_group_profile(
ignore_backoff=True,
)
- @log_function
async def get_group_summary(
self, destination: str, group_id: str, requester_user_id: str
) -> JsonDict:
@@ -730,7 +705,6 @@ async def get_group_summary(
ignore_backoff=True,
)
- @log_function
async def get_rooms_in_group(
self, destination: str, group_id: str, requester_user_id: str
) -> JsonDict:
@@ -798,7 +772,6 @@ async def remove_room_from_group(
ignore_backoff=True,
)
- @log_function
async def get_users_in_group(
self, destination: str, group_id: str, requester_user_id: str
) -> JsonDict:
@@ -812,7 +785,6 @@ async def get_users_in_group(
ignore_backoff=True,
)
- @log_function
async def get_invited_users_in_group(
self, destination: str, group_id: str, requester_user_id: str
) -> JsonDict:
@@ -826,7 +798,6 @@ async def get_invited_users_in_group(
ignore_backoff=True,
)
- @log_function
async def accept_group_invite(
self, destination: str, group_id: str, user_id: str, content: JsonDict
) -> JsonDict:
@@ -837,7 +808,6 @@ async def accept_group_invite(
destination=destination, path=path, data=content, ignore_backoff=True
)
- @log_function
def join_group(
self, destination: str, group_id: str, user_id: str, content: JsonDict
) -> Awaitable[JsonDict]:
@@ -848,7 +818,6 @@ def join_group(
destination=destination, path=path, data=content, ignore_backoff=True
)
- @log_function
async def invite_to_group(
self,
destination: str,
@@ -868,7 +837,6 @@ async def invite_to_group(
ignore_backoff=True,
)
- @log_function
async def invite_to_group_notification(
self, destination: str, group_id: str, user_id: str, content: JsonDict
) -> JsonDict:
@@ -882,7 +850,6 @@ async def invite_to_group_notification(
destination=destination, path=path, data=content, ignore_backoff=True
)
- @log_function
async def remove_user_from_group(
self,
destination: str,
@@ -902,7 +869,6 @@ async def remove_user_from_group(
ignore_backoff=True,
)
- @log_function
async def remove_user_from_group_notification(
self, destination: str, group_id: str, user_id: str, content: JsonDict
) -> JsonDict:
@@ -916,7 +882,6 @@ async def remove_user_from_group_notification(
destination=destination, path=path, data=content, ignore_backoff=True
)
- @log_function
async def renew_group_attestation(
self, destination: str, group_id: str, user_id: str, content: JsonDict
) -> JsonDict:
@@ -930,7 +895,6 @@ async def renew_group_attestation(
destination=destination, path=path, data=content, ignore_backoff=True
)
- @log_function
async def update_group_summary_room(
self,
destination: str,
@@ -959,7 +923,6 @@ async def update_group_summary_room(
ignore_backoff=True,
)
- @log_function
async def delete_group_summary_room(
self,
destination: str,
@@ -986,7 +949,6 @@ async def delete_group_summary_room(
ignore_backoff=True,
)
- @log_function
async def get_group_categories(
self, destination: str, group_id: str, requester_user_id: str
) -> JsonDict:
@@ -1000,7 +962,6 @@ async def get_group_categories(
ignore_backoff=True,
)
- @log_function
async def get_group_category(
self, destination: str, group_id: str, requester_user_id: str, category_id: str
) -> JsonDict:
@@ -1014,7 +975,6 @@ async def get_group_category(
ignore_backoff=True,
)
- @log_function
async def update_group_category(
self,
destination: str,
@@ -1034,7 +994,6 @@ async def update_group_category(
ignore_backoff=True,
)
- @log_function
async def delete_group_category(
self, destination: str, group_id: str, requester_user_id: str, category_id: str
) -> JsonDict:
@@ -1048,7 +1007,6 @@ async def delete_group_category(
ignore_backoff=True,
)
- @log_function
async def get_group_roles(
self, destination: str, group_id: str, requester_user_id: str
) -> JsonDict:
@@ -1062,7 +1020,6 @@ async def get_group_roles(
ignore_backoff=True,
)
- @log_function
async def get_group_role(
self, destination: str, group_id: str, requester_user_id: str, role_id: str
) -> JsonDict:
@@ -1076,7 +1033,6 @@ async def get_group_role(
ignore_backoff=True,
)
- @log_function
async def update_group_role(
self,
destination: str,
@@ -1096,7 +1052,6 @@ async def update_group_role(
ignore_backoff=True,
)
- @log_function
async def delete_group_role(
self, destination: str, group_id: str, requester_user_id: str, role_id: str
) -> JsonDict:
@@ -1110,7 +1065,6 @@ async def delete_group_role(
ignore_backoff=True,
)
- @log_function
async def update_group_summary_user(
self,
destination: str,
@@ -1136,7 +1090,6 @@ async def update_group_summary_user(
ignore_backoff=True,
)
- @log_function
async def set_group_join_policy(
self, destination: str, group_id: str, requester_user_id: str, content: JsonDict
) -> JsonDict:
@@ -1151,7 +1104,6 @@ async def set_group_join_policy(
ignore_backoff=True,
)
- @log_function
async def delete_group_summary_user(
self,
destination: str,
diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py
index 77bfd88ad052..beadfa422ba3 100644
--- a/synapse/federation/transport/server/federation.py
+++ b/synapse/federation/transport/server/federation.py
@@ -36,6 +36,7 @@
from synapse.util.versionstring import get_version_string
logger = logging.getLogger(__name__)
+issue_8631_logger = logging.getLogger("synapse.8631_debug")
class BaseFederationServerServlet(BaseFederationServlet):
@@ -95,6 +96,20 @@ async def on_PUT(
len(transaction_data.get("edus", [])),
)
+ if issue_8631_logger.isEnabledFor(logging.DEBUG):
+ DEVICE_UPDATE_EDUS = {"m.device_list_update", "m.signing_key_update"}
+                # The EDUs in transaction_data are raw JSON dicts here, not
+                # Edu instances, hence the .get() lookups.
+                device_list_updates = [
+                    edu.get("content", {})
+                    for edu in transaction_data.get("edus", [])
+                    if edu.get("edu_type") in DEVICE_UPDATE_EDUS
+                ]
+ if device_list_updates:
+ issue_8631_logger.debug(
+ "received transaction [%s] including device list updates: %s",
+ transaction_id,
+ device_list_updates,
+ )
+
except Exception as e:
logger.exception(e)
return 400, {"error": "Invalid transaction"}
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index 85157a138b71..00ab5e79bf2e 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -55,21 +55,47 @@ async def get_whois(self, user: UserID) -> JsonDict:
async def get_user(self, user: UserID) -> Optional[JsonDict]:
"""Function to get user details"""
- ret = await self.store.get_user_by_id(user.to_string())
- if ret:
- profile = await self.store.get_profileinfo(user.localpart)
- threepids = await self.store.user_get_threepids(user.to_string())
- external_ids = [
- ({"auth_provider": auth_provider, "external_id": external_id})
- for auth_provider, external_id in await self.store.get_external_ids_by_user(
- user.to_string()
- )
- ]
- ret["displayname"] = profile.display_name
- ret["avatar_url"] = profile.avatar_url
- ret["threepids"] = threepids
- ret["external_ids"] = external_ids
- return ret
+ user_info_dict = await self.store.get_user_by_id(user.to_string())
+ if user_info_dict is None:
+ return None
+
+ # Restrict returned information to a known set of fields. This prevents additional
+ # fields added to get_user_by_id from modifying Synapse's external API surface.
+ user_info_to_return = {
+ "name",
+ "admin",
+ "deactivated",
+ "shadow_banned",
+ "creation_ts",
+ "appservice_id",
+ "consent_server_notice_sent",
+ "consent_version",
+ "user_type",
+ "is_guest",
+ }
+
+ # Restrict returned keys to a known set.
+ user_info_dict = {
+ key: value
+ for key, value in user_info_dict.items()
+ if key in user_info_to_return
+ }
+
+ # Add additional user metadata
+ profile = await self.store.get_profileinfo(user.localpart)
+ threepids = await self.store.user_get_threepids(user.to_string())
+ external_ids = [
+ ({"auth_provider": auth_provider, "external_id": external_id})
+ for auth_provider, external_id in await self.store.get_external_ids_by_user(
+ user.to_string()
+ )
+ ]
+ user_info_dict["displayname"] = profile.display_name
+ user_info_dict["avatar_url"] = profile.avatar_url
+ user_info_dict["threepids"] = threepids
+ user_info_dict["external_ids"] = external_ids
+
+ return user_info_dict
async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> Any:
"""Write all data we have on the user to the given writer.
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index a3add8a58679..bac5de052609 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -20,7 +20,6 @@
from synapse.api.errors import AuthError, SynapseError
from synapse.events import EventBase
from synapse.handlers.presence import format_user_presence_state
-from synapse.logging.utils import log_function
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, UserID
from synapse.visibility import filter_events_for_client
@@ -43,7 +42,6 @@ def __init__(self, hs: "HomeServer"):
self._server_notices_sender = hs.get_server_notices_sender()
self._event_serializer = hs.get_event_client_serializer()
- @log_function
async def get_stream(
self,
auth_user_id: str,
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 26b8e3f43c40..a37ae0ca094f 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -51,7 +51,6 @@
preserve_fn,
run_in_background,
)
-from synapse.logging.utils import log_function
from synapse.replication.http.federation import (
ReplicationCleanRoomRestServlet,
ReplicationStoreRoomOnOutlierMembershipRestServlet,
@@ -556,7 +555,6 @@ async def do_invite_join(
run_in_background(self._handle_queued_pdus, room_queue)
- @log_function
async def do_knock(
self,
target_hosts: List[str],
@@ -928,7 +926,6 @@ async def on_make_leave_request(
return event
- @log_function
async def on_make_knock_request(
self, origin: str, room_id: str, user_id: str
) -> EventBase:
@@ -1039,7 +1036,6 @@ async def get_state_ids_for_pdu(self, room_id: str, event_id: str) -> List[str]:
else:
return []
- @log_function
async def on_backfill_request(
self, origin: str, room_id: str, pdu_list: List[str], limit: int
) -> List[EventBase]:
@@ -1056,7 +1052,6 @@ async def on_backfill_request(
return events
- @log_function
async def get_persisted_pdu(
self, origin: str, event_id: str
) -> Optional[EventBase]:
@@ -1118,7 +1113,6 @@ async def on_get_missing_events(
return missing_events
- @log_function
async def exchange_third_party_invite(
self, sender_user_id: str, target_user_id: str, room_id: str, signed: JsonDict
) -> None:
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 11771f3c9c2b..3905f60b3a78 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -56,7 +56,6 @@
from synapse.events.snapshot import EventContext
from synapse.federation.federation_client import InvalidResponseError
from synapse.logging.context import nested_logging_context, run_in_background
-from synapse.logging.utils import log_function
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
from synapse.replication.http.federation import (
@@ -275,7 +274,6 @@ async def on_receive_pdu(self, origin: str, pdu: EventBase) -> None:
await self._process_received_pdu(origin, pdu, state=None)
- @log_function
async def on_send_membership_event(
self, origin: str, event: EventBase
) -> Tuple[EventBase, EventContext]:
@@ -472,7 +470,6 @@ async def process_remote_join(
return await self.persist_events_and_notify(room_id, [(event, context)])
- @log_function
async def backfill(
self, dest: str, room_id: str, limit: int, extremities: Collection[str]
) -> None:
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 472688f04506..973f262964c1 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -537,7 +537,7 @@ async def get_messages(
state_dict = await self.store.get_events(list(state_ids.values()))
state = state_dict.values()
- aggregations = await self.store.get_bundled_aggregations(events)
+ aggregations = await self.store.get_bundled_aggregations(events, user_id)
time_now = self.clock.time_msec()
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index c781fefb1bc7..067c43ae4714 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -55,7 +55,6 @@
from synapse.appservice import ApplicationService
from synapse.events.presence_router import PresenceRouter
from synapse.logging.context import run_in_background
-from synapse.logging.utils import log_function
from synapse.metrics import LaterGauge
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.presence import (
@@ -1542,7 +1541,6 @@ def __init__(self, hs: "HomeServer"):
self.clock = hs.get_clock()
self.store = hs.get_datastore()
- @log_function
async def get_new_events(
self,
user: UserID,
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 3d47163f25cf..f963078e596c 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -1182,12 +1182,18 @@ async def filter_evts(events: List[EventBase]) -> List[EventBase]:
results["event"] = filtered[0]
# Fetch the aggregations.
- aggregations = await self.store.get_bundled_aggregations([results["event"]])
+ aggregations = await self.store.get_bundled_aggregations(
+ [results["event"]], user.to_string()
+ )
aggregations.update(
- await self.store.get_bundled_aggregations(results["events_before"])
+ await self.store.get_bundled_aggregations(
+ results["events_before"], user.to_string()
+ )
)
aggregations.update(
- await self.store.get_bundled_aggregations(results["events_after"])
+ await self.store.get_bundled_aggregations(
+ results["events_after"], user.to_string()
+ )
)
results["aggregations"] = aggregations
diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py
index 7c60cb0bdd27..4844b69a0345 100644
--- a/synapse/handlers/room_summary.py
+++ b/synapse/handlers/room_summary.py
@@ -780,6 +780,7 @@ async def _summarize_remote_room_hierarchy(
try:
(
room_response,
+ children_state_events,
children,
inaccessible_children,
) = await self._federation_client.get_room_hierarchy(
@@ -804,7 +805,7 @@ async def _summarize_remote_room_hierarchy(
}
return (
- _RoomEntry(room_id, room_response, room_response.pop("children_state", ())),
+ _RoomEntry(room_id, room_response, children_state_events),
children_by_room_id,
set(inaccessible_children),
)
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index e1df9b310635..ffc6b748e84e 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -637,7 +637,9 @@ async def _load_filtered_recents(
# as clients will have all the necessary information.
bundled_aggregations = None
if limited or newly_joined_room:
- bundled_aggregations = await self.store.get_bundled_aggregations(recents)
+ bundled_aggregations = await self.store.get_bundled_aggregations(
+ recents, sync_config.user.to_string()
+ )
return TimelineBatch(
events=recents,
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index 5672d60de348..b240d2d21da2 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -247,7 +247,7 @@ class _DummyTagNames:
class BaseReporter: # type: ignore[no-redef]
pass
- @attr.s(slots=True, frozen=True)
+ @attr.s(slots=True, frozen=True, auto_attribs=True)
class _WrappedRustReporter(BaseReporter):
"""Wrap the reporter to ensure `report_span` never throws."""
diff --git a/synapse/logging/utils.py b/synapse/logging/utils.py
deleted file mode 100644
index 4a01b902c255..000000000000
--- a/synapse/logging/utils.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import logging
-from functools import wraps
-from inspect import getcallargs
-from typing import Callable, TypeVar, cast
-
-_TIME_FUNC_ID = 0
-
-
-def _log_debug_as_f(f, msg, msg_args):
- name = f.__module__
- logger = logging.getLogger(name)
-
- if logger.isEnabledFor(logging.DEBUG):
- lineno = f.__code__.co_firstlineno
- pathname = f.__code__.co_filename
-
- record = logger.makeRecord(
- name=name,
- level=logging.DEBUG,
- fn=pathname,
- lno=lineno,
- msg=msg,
- args=msg_args,
- exc_info=None,
- )
-
- logger.handle(record)
-
-
-F = TypeVar("F", bound=Callable)
-
-
-def log_function(f: F) -> F:
- """Function decorator that logs every call to that function."""
- func_name = f.__name__
-
- @wraps(f)
- def wrapped(*args, **kwargs):
- name = f.__module__
- logger = logging.getLogger(name)
- level = logging.DEBUG
-
- if logger.isEnabledFor(level):
- bound_args = getcallargs(f, *args, **kwargs)
-
- def format(value):
- r = str(value)
- if len(r) > 50:
- r = r[:50] + "..."
- return r
-
- func_args = ["%s=%s" % (k, format(v)) for k, v in bound_args.items()]
-
- msg_args = {"func_name": func_name, "args": ", ".join(func_args)}
-
- _log_debug_as_f(f, "Invoked '%(func_name)s' with args: %(args)s", msg_args)
-
- return f(*args, **kwargs)
-
- wrapped.__name__ = func_name
- return cast(F, wrapped)
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index ba7ca0f2d4ed..9e6c1b2f3b54 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -12,15 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import functools
import itertools
import logging
import os
import platform
import threading
-import time
from typing import (
- Any,
Callable,
Dict,
Generic,
@@ -33,7 +30,6 @@
Type,
TypeVar,
Union,
- cast,
)
import attr
@@ -44,11 +40,9 @@
GaugeMetricFamily,
)
-from twisted.internet import reactor
-from twisted.internet.base import ReactorBase
from twisted.python.threadpool import ThreadPool
-import synapse
+import synapse.metrics._reactor_metrics
from synapse.metrics._exposition import (
MetricsResource,
generate_latest,
@@ -368,21 +362,6 @@ def collect(self) -> Iterable[Metric]:
REGISTRY.register(CPUMetrics())
-#
-# Twisted reactor metrics
-#
-
-tick_time = Histogram(
- "python_twisted_reactor_tick_time",
- "Tick time of the Twisted reactor (sec)",
- buckets=[0.001, 0.002, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.5, 1, 2, 5],
-)
-pending_calls_metric = Histogram(
- "python_twisted_reactor_pending_calls",
- "Pending calls",
- buckets=[1, 2, 5, 10, 25, 50, 100, 250, 500, 1000],
-)
-
#
# Federation Metrics
#
@@ -434,8 +413,6 @@ def collect(self) -> Iterable[Metric]:
" ".join([platform.system(), platform.release()]),
).set(1)
-last_ticked = time.time()
-
# 3PID send info
threepid_send_requests = Histogram(
"synapse_threepid_send_requests_with_tries",
@@ -483,75 +460,6 @@ def register_threadpool(name: str, threadpool: ThreadPool) -> None:
)
-class ReactorLastSeenMetric:
- def collect(self) -> Iterable[Metric]:
- cm = GaugeMetricFamily(
- "python_twisted_reactor_last_seen",
- "Seconds since the Twisted reactor was last seen",
- )
- cm.add_metric([], time.time() - last_ticked)
- yield cm
-
-
-REGISTRY.register(ReactorLastSeenMetric())
-
-F = TypeVar("F", bound=Callable[..., Any])
-
-
-def runUntilCurrentTimer(reactor: ReactorBase, func: F) -> F:
- @functools.wraps(func)
- def f(*args: Any, **kwargs: Any) -> Any:
- now = reactor.seconds()
- num_pending = 0
-
- # _newTimedCalls is one long list of *all* pending calls. Below loop
- # is based off of impl of reactor.runUntilCurrent
- for delayed_call in reactor._newTimedCalls:
- if delayed_call.time > now:
- break
-
- if delayed_call.delayed_time > 0:
- continue
-
- num_pending += 1
-
- num_pending += len(reactor.threadCallQueue)
- start = time.time()
- ret = func(*args, **kwargs)
- end = time.time()
-
- # record the amount of wallclock time spent running pending calls.
- # This is a proxy for the actual amount of time between reactor polls,
- # since about 25% of time is actually spent running things triggered by
- # I/O events, but that is harder to capture without rewriting half the
- # reactor.
- tick_time.observe(end - start)
- pending_calls_metric.observe(num_pending)
-
- # Update the time we last ticked, for the metric to test whether
- # Synapse's reactor has frozen
- global last_ticked
- last_ticked = end
-
- return ret
-
- return cast(F, f)
-
-
-try:
- # Ensure the reactor has all the attributes we expect
- reactor.seconds # type: ignore
- reactor.runUntilCurrent # type: ignore
- reactor._newTimedCalls # type: ignore
- reactor.threadCallQueue # type: ignore
-
- # runUntilCurrent is called when we have pending calls. It is called once
- # per iteratation after fd polling.
- reactor.runUntilCurrent = runUntilCurrentTimer(reactor, reactor.runUntilCurrent) # type: ignore
-except AttributeError:
- pass
-
-
__all__ = [
"MetricsResource",
"generate_latest",
diff --git a/synapse/metrics/_reactor_metrics.py b/synapse/metrics/_reactor_metrics.py
new file mode 100644
index 000000000000..f38f7983131f
--- /dev/null
+++ b/synapse/metrics/_reactor_metrics.py
@@ -0,0 +1,83 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import select
+import time
+from typing import Any, Iterable, List, Tuple
+
+from prometheus_client import Histogram, Metric
+from prometheus_client.core import REGISTRY, GaugeMetricFamily
+
+from twisted.internet import reactor
+
+#
+# Twisted reactor metrics
+#
+
+tick_time = Histogram(
+ "python_twisted_reactor_tick_time",
+ "Tick time of the Twisted reactor (sec)",
+ buckets=[0.001, 0.002, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.5, 1, 2, 5],
+)
+
+
+class EpollWrapper:
+ """a wrapper for an epoll object which records the time between polls"""
+
+ def __init__(self, poller: "select.epoll"): # type: ignore[name-defined]
+ self.last_polled = time.time()
+ self._poller = poller
+
+ def poll(self, *args, **kwargs) -> List[Tuple[int, int]]: # type: ignore[no-untyped-def]
+ # record the time since poll() was last called. This gives a good proxy for
+ # how long it takes to run everything in the reactor - ie, how long anything
+ # waiting for the next tick will have to wait.
+ tick_time.observe(time.time() - self.last_polled)
+
+ ret = self._poller.poll(*args, **kwargs)
+
+ self.last_polled = time.time()
+ return ret
+
+ def __getattr__(self, item: str) -> Any:
+ return getattr(self._poller, item)
+
+
+class ReactorLastSeenMetric:
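+ """A collector reporting how many seconds ago the wrapped epoll was last polled."""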
+ def __init__(self, epoll_wrapper: EpollWrapper):
+ self._epoll_wrapper = epoll_wrapper
+
+ def collect(self) -> Iterable[Metric]:
+ cm = GaugeMetricFamily(
+ "python_twisted_reactor_last_seen",
+ "Seconds since the Twisted reactor was last seen",
+ )
+ cm.add_metric([], time.time() - self._epoll_wrapper.last_polled)
+ yield cm
+
+
+try:
+ # if the reactor has a `_poller` attribute, which is an `epoll` object
+ # (ie, it's an EPollReactor), we wrap the `epoll` with a thing that will
+ # measure the time between ticks
+ from select import epoll # type: ignore[attr-defined]
+
+ poller = reactor._poller # type: ignore[attr-defined]
+except (AttributeError, ImportError):
+ pass
+else:
+ if isinstance(poller, epoll):
+ poller = EpollWrapper(poller)
+ reactor._poller = poller # type: ignore[attr-defined]
+ REGISTRY.register(ReactorLastSeenMetric(poller))
diff --git a/synapse/notifier.py b/synapse/notifier.py
index 41fd94d7724b..632b2245ef55 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -40,7 +40,6 @@
from synapse.logging import issue9533_logger
from synapse.logging.context import PreserveLoggingContext
from synapse.logging.opentracing import log_kv, start_active_span
-from synapse.logging.utils import log_function
from synapse.metrics import LaterGauge
from synapse.streams.config import PaginationConfig
from synapse.types import (
@@ -686,7 +685,6 @@ async def _is_world_readable(self, room_id: str) -> bool:
else:
return False
- @log_function
def remove_expired_streams(self) -> None:
time_now_ms = self.clock.time_msec()
expired_streams = []
@@ -700,7 +698,6 @@ def remove_expired_streams(self) -> None:
for expired_stream in expired_streams:
expired_stream.remove(self)
- @log_function
def _register_with_keys(self, user_stream: _NotifierUserStream):
self.user_to_user_stream[user_stream.user_id] = user_stream
diff --git a/synapse/rest/admin/background_updates.py b/synapse/rest/admin/background_updates.py
index 6ec00ce0b9a8..e9bce22a347b 100644
--- a/synapse/rest/admin/background_updates.py
+++ b/synapse/rest/admin/background_updates.py
@@ -123,34 +123,25 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
job_name = body["job_name"]
if job_name == "populate_stats_process_rooms":
- jobs = [
- {
- "update_name": "populate_stats_process_rooms",
- "progress_json": "{}",
- },
- ]
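+ # Each job is an (update_name, progress_json, depends_on) tuple, matching
+ # the `keys` given to simple_insert_many below.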
+ jobs = [("populate_stats_process_rooms", "{}", "")]
elif job_name == "regenerate_directory":
jobs = [
- {
- "update_name": "populate_user_directory_createtables",
- "progress_json": "{}",
- "depends_on": "",
- },
- {
- "update_name": "populate_user_directory_process_rooms",
- "progress_json": "{}",
- "depends_on": "populate_user_directory_createtables",
- },
- {
- "update_name": "populate_user_directory_process_users",
- "progress_json": "{}",
- "depends_on": "populate_user_directory_process_rooms",
- },
- {
- "update_name": "populate_user_directory_cleanup",
- "progress_json": "{}",
- "depends_on": "populate_user_directory_process_users",
- },
+ ("populate_user_directory_createtables", "{}", ""),
+ (
+ "populate_user_directory_process_rooms",
+ "{}",
+ "populate_user_directory_createtables",
+ ),
+ (
+ "populate_user_directory_process_users",
+ "{}",
+ "populate_user_directory_process_rooms",
+ ),
+ (
+ "populate_user_directory_cleanup",
+ "{}",
+ "populate_user_directory_process_users",
+ ),
]
else:
raise SynapseError(HTTPStatus.BAD_REQUEST, "Invalid job_name")
@@ -158,6 +149,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
try:
await self._store.db_pool.simple_insert_many(
table="background_updates",
+ keys=("update_name", "progress_json", "depends_on"),
values=jobs,
desc=f"admin_api_run_{job_name}",
)
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 78e795c34764..c2617ee30c48 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -173,12 +173,11 @@ async def on_GET(
if not self.hs.is_mine(target_user):
raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only look up local users")
- ret = await self.admin_handler.get_user(target_user)
-
- if not ret:
+ user_info_dict = await self.admin_handler.get_user(target_user)
+ if not user_info_dict:
raise NotFoundError("User not found")
- return HTTPStatus.OK, ret
+ return HTTPStatus.OK, user_info_dict
async def on_PUT(
self, request: SynapseRequest, user_id: str
@@ -399,10 +398,10 @@ async def on_PUT(
target_user, requester, body["avatar_url"], True
)
- user = await self.admin_handler.get_user(target_user)
- assert user is not None
+ user_info_dict = await self.admin_handler.get_user(target_user)
+ assert user_info_dict is not None
- return 201, user
+ return HTTPStatus.CREATED, user_info_dict
class UserRegisterServlet(RestServlet):
diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py
index 37d949a71e74..8cf5ebaa07b7 100644
--- a/synapse/rest/client/relations.py
+++ b/synapse/rest/client/relations.py
@@ -118,7 +118,9 @@ async def on_GET(
)
# The relations returned for the requested event do include their
# bundled aggregations.
- aggregations = await self.store.get_bundled_aggregations(events)
+ aggregations = await self.store.get_bundled_aggregations(
+ events, requester.user.to_string()
+ )
serialized_events = self._event_serializer.serialize_events(
events, now, bundle_aggregations=aggregations
)
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
index da6014900a9c..31fd329a3862 100644
--- a/synapse/rest/client/room.py
+++ b/synapse/rest/client/room.py
@@ -663,7 +663,9 @@ async def on_GET(
if event:
# Ensure there are bundled aggregations available.
- aggregations = await self._store.get_bundled_aggregations([event])
+ aggregations = await self._store.get_bundled_aggregations(
+ [event], requester.user.to_string()
+ )
time_now = self.clock.time_msec()
event_dict = self._event_serializer.serialize_event(
diff --git a/synapse/rest/media/v1/oembed.py b/synapse/rest/media/v1/oembed.py
index cce1527ed9fb..2177b46c9eba 100644
--- a/synapse/rest/media/v1/oembed.py
+++ b/synapse/rest/media/v1/oembed.py
@@ -33,6 +33,8 @@
class OEmbedResult:
# The Open Graph result (converted from the oEmbed result).
open_graph_result: JsonDict
+ # The author_name of the oEmbed result.
+ author_name: Optional[str]
# Number of milliseconds to cache the content, according to the oEmbed response.
#
# This will be None if no cache-age is provided in the oEmbed response (or
@@ -154,11 +156,12 @@ def parse_oembed_response(self, url: str, raw_body: bytes) -> OEmbedResult:
"og:url": url,
}
- # Use either title or author's name as the title.
- title = oembed.get("title") or oembed.get("author_name")
+ title = oembed.get("title")
if title:
open_graph_response["og:title"] = title
+ author_name = oembed.get("author_name")
+
# Use the provider name as the site name.
provider_name = oembed.get("provider_name")
if provider_name:
@@ -193,9 +196,10 @@ def parse_oembed_response(self, url: str, raw_body: bytes) -> OEmbedResult:
# Trap any exception and let the code follow as usual.
logger.warning("Error parsing oEmbed metadata from %s: %r", url, e)
open_graph_response = {}
+ author_name = None
cache_age = None
- return OEmbedResult(open_graph_response, cache_age)
+ return OEmbedResult(open_graph_response, author_name, cache_age)
def _fetch_urls(tree: "etree.Element", tag_name: str) -> List[str]:
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index a3829d943b7f..e8881bc8709e 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -262,6 +262,7 @@ async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes:
# The number of milliseconds that the response should be considered valid.
expiration_ms = media_info.expires
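+ # The author name from any oEmbed response, used below as a fallback for
+ # og:title.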
+ author_name: Optional[str] = None
if _is_media(media_info.media_type):
file_id = media_info.filesystem_id
@@ -294,17 +295,25 @@ async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes:
# Check if this HTML document points to oEmbed information and
# defer to that.
oembed_url = self._oembed.autodiscover_from_html(tree)
- og = {}
+ og_from_oembed: JsonDict = {}
if oembed_url:
oembed_info = await self._download_url(oembed_url, user)
- og, expiration_ms = await self._handle_oembed_response(
+ (
+ og_from_oembed,
+ author_name,
+ expiration_ms,
+ ) = await self._handle_oembed_response(
url, oembed_info, expiration_ms
)
- # If there was no oEmbed URL (or oEmbed parsing failed), attempt
- # to generate the Open Graph information from the HTML.
- if not oembed_url or not og:
- og = parse_html_to_open_graph(tree, media_info.uri)
+ # Parse Open Graph information from the HTML in case the oEmbed
+ # response failed or is incomplete.
+ og_from_html = parse_html_to_open_graph(tree, media_info.uri)
+
+ # Compile the Open Graph response by using the scraped
+ # information from the HTML and overlaying any information
+ # from the oEmbed response.
+ og = {**og_from_html, **og_from_oembed}
await self._precache_image_url(user, media_info, og)
else:
@@ -312,7 +321,7 @@ async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes:
elif oembed_url:
# Handle the oEmbed information.
- og, expiration_ms = await self._handle_oembed_response(
+ og, author_name, expiration_ms = await self._handle_oembed_response(
url, media_info, expiration_ms
)
await self._precache_image_url(user, media_info, og)
@@ -321,6 +330,11 @@ async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes:
logger.warning("Failed to find any OG data in %s", url)
og = {}
+ # If we don't have a title but do have an author_name, use it as the
+ # title.
+ if not og.get("og:title") and author_name:
+ og["og:title"] = author_name
+
# filter out any stupidly long values
keys_to_remove = []
for k, v in og.items():
@@ -484,7 +498,7 @@ async def _precache_image_url(
async def _handle_oembed_response(
self, url: str, media_info: MediaInfo, expiration_ms: int
- ) -> Tuple[JsonDict, int]:
+ ) -> Tuple[JsonDict, Optional[str], int]:
"""
Parse the downloaded oEmbed info.
@@ -497,11 +511,12 @@ async def _handle_oembed_response(
Returns:
A tuple of:
The Open Graph dictionary, if the oEmbed info can be parsed.
+ The author name if it could be retrieved from oEmbed.
The (possibly updated) length of time, in milliseconds, the media is valid for.
"""
# If JSON was not returned, there's nothing to do.
if not _is_json(media_info.media_type):
- return {}, expiration_ms
+ return {}, None, expiration_ms
with open(media_info.filename, "rb") as file:
body = file.read()
@@ -513,7 +528,7 @@ async def _handle_oembed_response(
if open_graph_result and oembed_response.cache_age is not None:
expiration_ms = oembed_response.cache_age
- return open_graph_result, expiration_ms
+ return open_graph_result, oembed_response.author_name, expiration_ms
def _start_expire_url_cache_data(self) -> Deferred:
return run_as_background_process(
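Aside: the merge above overlays the oEmbed-derived Open Graph data on top of the HTML-scraped data, so oEmbed values win on conflicting keys while the HTML fills in any gaps. A self-contained sketch (values invented for illustration):

```python
# HTML-scraped Open Graph data forms the base; keys from the oEmbed
# response overwrite it, as in `og = {**og_from_html, **og_from_oembed}`.
og_from_html = {"og:title": "From HTML", "og:description": "Scraped text"}
og_from_oembed = {"og:title": "From oEmbed"}

og = {**og_from_html, **og_from_oembed}
assert og == {"og:title": "From oEmbed", "og:description": "Scraped text"}
```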
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 923e31587e4b..67e8bc6ec288 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -45,7 +45,6 @@
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.logging.context import ContextResourceUsage
-from synapse.logging.utils import log_function
from synapse.state import v1, v2
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.roommember import ProfileInfo
@@ -512,7 +511,6 @@ def __init__(self, hs: "HomeServer"):
self.clock.looping_call(self._report_metrics, 120 * 1000)
- @log_function
async def resolve_state_groups(
self,
room_id: str,
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index a27cc3605c73..57cc1d76e02f 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -934,56 +934,6 @@ def simple_insert_txn(
txn.execute(sql, vals)
async def simple_insert_many(
- self, table: str, values: List[Dict[str, Any]], desc: str
- ) -> None:
- """Executes an INSERT query on the named table.
-
- The input is given as a list of dicts, with one dict per row.
- Generally simple_insert_many_values should be preferred for new code.
-
- Args:
- table: string giving the table name
- values: dict of new column names and values for them
- desc: description of the transaction, for logging and metrics
- """
- await self.runInteraction(desc, self.simple_insert_many_txn, table, values)
-
- @staticmethod
- def simple_insert_many_txn(
- txn: LoggingTransaction, table: str, values: List[Dict[str, Any]]
- ) -> None:
- """Executes an INSERT query on the named table.
-
- The input is given as a list of dicts, with one dict per row.
- Generally simple_insert_many_values_txn should be preferred for new code.
-
- Args:
- txn: The transaction to use.
- table: string giving the table name
- values: dict of new column names and values for them
- """
- if not values:
- return
-
- # This is a *slight* abomination to get a list of tuples of key names
- # and a list of tuples of value names.
- #
- # i.e. [{"a": 1, "b": 2}, {"c": 3, "d": 4}]
- # => [("a", "b",), ("c", "d",)] and [(1, 2,), (3, 4,)]
- #
- # The sort is to ensure that we don't rely on dictionary iteration
- # order.
- keys, vals = zip(
- *(zip(*(sorted(i.items(), key=lambda kv: kv[0]))) for i in values if i)
- )
-
- for k in keys:
- if k != keys[0]:
- raise RuntimeError("All items must have the same keys")
-
- return DatabasePool.simple_insert_many_values_txn(txn, table, keys[0], vals)
-
- async def simple_insert_many_values(
self,
table: str,
keys: Collection[str],
@@ -1002,11 +952,11 @@ async def simple_insert_many_values(
desc: description of the transaction, for logging and metrics
"""
await self.runInteraction(
- desc, self.simple_insert_many_values_txn, table, keys, values
+ desc, self.simple_insert_many_txn, table, keys, values
)
@staticmethod
- def simple_insert_many_values_txn(
+ def simple_insert_many_txn(
txn: LoggingTransaction,
table: str,
keys: Collection[str],
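Aside: the deleted dict-based `simple_insert_many` derived a shared key tuple and per-row value tuples from its row dicts on every call. A runnable sketch of that conversion (table data invented for illustration), which call sites throughout this patch now do by hand via explicit `keys=`:

```python
rows = [
    {"room_alias": "#a:example.org", "server": "one.example.org"},
    {"room_alias": "#b:example.org", "server": "two.example.org"},
]

# Sorting the items removes any dependence on dict iteration order, as the
# removed helper did.
keys, vals = zip(*(zip(*sorted(r.items())) for r in rows))
if any(k != keys[0] for k in keys):
    raise RuntimeError("All items must have the same keys")

print(keys[0])     # ('room_alias', 'server')
print(list(vals))  # [('#a:example.org', 'one.example.org'), ('#b:example.org', 'two.example.org')]
```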
diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py
index 93db71d1b489..ef475e18c788 100644
--- a/synapse/storage/databases/main/account_data.py
+++ b/synapse/storage/databases/main/account_data.py
@@ -536,9 +536,9 @@ def _add_account_data_for_user(
self.db_pool.simple_insert_many_txn(
txn,
table="ignored_users",
+ keys=("ignorer_user_id", "ignored_user_id"),
values=[
- {"ignorer_user_id": user_id, "ignored_user_id": u}
- for u in currently_ignored_users - previously_ignored_users
+ (user_id, u) for u in currently_ignored_users - previously_ignored_users
],
)
diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
index 3682cb6a8139..4eca97189bef 100644
--- a/synapse/storage/databases/main/deviceinbox.py
+++ b/synapse/storage/databases/main/deviceinbox.py
@@ -432,14 +432,21 @@ def add_messages_txn(txn, now_ms, stream_id):
self.db_pool.simple_insert_many_txn(
txn,
table="device_federation_outbox",
+ keys=(
+ "destination",
+ "stream_id",
+ "queued_ts",
+ "messages_json",
+ "instance_name",
+ ),
values=[
- {
- "destination": destination,
- "stream_id": stream_id,
- "queued_ts": now_ms,
- "messages_json": json_encoder.encode(edu),
- "instance_name": self._instance_name,
- }
+ (
+ destination,
+ stream_id,
+ now_ms,
+ json_encoder.encode(edu),
+ self._instance_name,
+ )
for destination, edu in remote_messages_by_destination.items()
],
)
@@ -571,14 +578,9 @@ def _add_messages_to_local_device_inbox_txn(
self.db_pool.simple_insert_many_txn(
txn,
table="device_inbox",
+ keys=("user_id", "device_id", "stream_id", "message_json", "instance_name"),
values=[
- {
- "user_id": user_id,
- "device_id": device_id,
- "stream_id": stream_id,
- "message_json": message_json,
- "instance_name": self._instance_name,
- }
+ (user_id, device_id, stream_id, message_json, self._instance_name)
for user_id, messages_by_device in local_by_user_then_device.items()
for device_id, message_json in messages_by_device.items()
],
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index 52fbf50db64f..b2a5cd9a6508 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -53,6 +53,7 @@
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
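+# Dedicated logger for tracing outbound device-list updates while debugging
+# https://github.com/matrix-org/synapse/issues/8631.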
+issue_8631_logger = logging.getLogger("synapse.8631_debug")
DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES = (
"drop_device_list_streams_non_unique_indexes"
@@ -191,7 +192,7 @@ async def get_devices_by_auth_provider_session_id(
@trace
async def get_device_updates_by_remote(
self, destination: str, from_stream_id: int, limit: int
- ) -> Tuple[int, List[Tuple[str, dict]]]:
+ ) -> Tuple[int, List[Tuple[str, JsonDict]]]:
"""Get a stream of device updates to send to the given remote server.
Args:
@@ -200,9 +201,10 @@ async def get_device_updates_by_remote(
limit: Maximum number of device updates to return
Returns:
- A mapping from the current stream id (ie, the stream id of the last
- update included in the response), and the list of updates, where
- each update is a pair of EDU type and EDU contents.
+ - The current stream id (i.e. the stream id of the last update included
+ in the response); and
+ - The list of updates, where each update is a pair of EDU type and
+ EDU contents.
"""
now_stream_id = self.get_device_stream_token()
@@ -221,10 +223,19 @@ async def get_device_updates_by_remote(
limit,
)
+ # We need to ensure `updates` doesn't grow too big.
+ # Currently: `len(updates) <= limit`.
+
# Return an empty list if there are no updates
if not updates:
return now_stream_id, []
+ if issue_8631_logger.isEnabledFor(logging.DEBUG):
+ data = {(user, device): stream_id for user, device, stream_id, _ in updates}
+ issue_8631_logger.debug(
+ "device updates need to be sent to %s: %s", destination, data
+ )
+
# get the cross-signing keys of the users in the list, so that we can
# determine which of the device changes were cross-signing keys
users = {r[0] for r in updates}
@@ -270,19 +281,50 @@ async def get_device_updates_by_remote(
# The most recent request's opentracing_context is used as the
# context which created the Edu.
+ # This is the stream ID that we will return for the consumer to resume
+ # following this stream later.
+ last_processed_stream_id = from_stream_id
+
query_map = {}
cross_signing_keys_by_user = {}
for user_id, device_id, update_stream_id, update_context in updates:
- if (
+ # Calculate the remaining length budget.
+ # Note that, for now, each entry in `cross_signing_keys_by_user`
+ # gives rise to two device updates in the result, so those cost twice
+ # as much (and are the whole reason we need to separately calculate
+ # the budget; we know len(updates) <= limit otherwise!)
+ # N.B. len() on dicts is cheap since they store their size.
+ remaining_length_budget = limit - (
+ len(query_map) + 2 * len(cross_signing_keys_by_user)
+ )
+ assert remaining_length_budget >= 0
+
+ is_master_key_update = (
user_id in master_key_by_user
and device_id == master_key_by_user[user_id]["device_id"]
- ):
- result = cross_signing_keys_by_user.setdefault(user_id, {})
- result["master_key"] = master_key_by_user[user_id]["key_info"]
- elif (
+ )
+ is_self_signing_key_update = (
user_id in self_signing_key_by_user
and device_id == self_signing_key_by_user[user_id]["device_id"]
+ )
+
+ is_cross_signing_key_update = (
+ is_master_key_update or is_self_signing_key_update
+ )
+
+ if (
+ is_cross_signing_key_update
+ and user_id not in cross_signing_keys_by_user
):
+ # This will give rise to 2 device updates.
+ # If we don't have the budget, stop here!
+ if remaining_length_budget < 2:
+ break
+
+ if is_master_key_update:
+ result = cross_signing_keys_by_user.setdefault(user_id, {})
+ result["master_key"] = master_key_by_user[user_id]["key_info"]
+ elif is_self_signing_key_update:
result = cross_signing_keys_by_user.setdefault(user_id, {})
result["self_signing_key"] = self_signing_key_by_user[user_id][
"key_info"
@@ -290,24 +332,58 @@ async def get_device_updates_by_remote(
else:
key = (user_id, device_id)
+ if key not in query_map and remaining_length_budget < 1:
+ # We don't have space for a new entry
+ break
+
previous_update_stream_id, _ = query_map.get(key, (0, None))
if update_stream_id > previous_update_stream_id:
+ # FIXME If this overwrites an older update, this discards the
+ # previous OpenTracing context.
+ # It might make it harder to track down issues using OpenTracing.
+ # If there's a good reason why it doesn't matter, a comment here
+ # about that would not hurt.
query_map[key] = (update_stream_id, update_context)
+ # As this update has been added to the response, advance the stream
+ # position.
+ last_processed_stream_id = update_stream_id
+
+ # In the worst case scenario, each update is for a distinct user and is
+ # added either to the query_map or to cross_signing_keys_by_user,
+ # but not both:
+ # len(query_map) + len(cross_signing_keys_by_user) <= len(updates) here,
+ # so len(query_map) + len(cross_signing_keys_by_user) <= limit.
+
results = await self._get_device_update_edus_by_remote(
destination, from_stream_id, query_map
)
- # add the updated cross-signing keys to the results list
+ # len(results) <= len(query_map) here,
+ # so len(results) + len(cross_signing_keys_by_user) <= limit.
+
+ # Add the updated cross-signing keys to the results list
for user_id, result in cross_signing_keys_by_user.items():
result["user_id"] = user_id
results.append(("m.signing_key_update", result))
# also send the unstable version
# FIXME: remove this when enough servers have upgraded
+ # and remove the length budgeting above.
results.append(("org.matrix.signing_key_update", result))
- return now_stream_id, results
+ if issue_8631_logger.isEnabledFor(logging.DEBUG):
+ for (user_id, edu) in results:
+ issue_8631_logger.debug(
+ "device update to %s for %s from %s to %s: %s",
+ destination,
+ user_id,
+ from_stream_id,
+ last_processed_stream_id,
+ edu,
+ )
+
+ return last_processed_stream_id, results
def _get_device_updates_by_remote_txn(
self,
@@ -316,7 +392,7 @@ def _get_device_updates_by_remote_txn(
from_stream_id: int,
now_stream_id: int,
limit: int,
- ):
+ ) -> List[Tuple[str, str, int, Optional[str]]]:
"""Return device update information for a given remote destination
Args:
@@ -327,7 +403,11 @@ def _get_device_updates_by_remote_txn(
limit: Maximum number of device updates to return
Returns:
- List: List of device updates
+ A list of device update tuples, each consisting of:
+ - user_id
+ - device_id
+ - stream_id
+ - opentracing_context
"""
# get the list of device updates that need to be sent
sql = """
@@ -351,15 +431,21 @@ async def _get_device_update_edus_by_remote(
Args:
destination: The host the device updates are intended for
from_stream_id: The minimum stream_id to filter updates by, exclusive
- query_map (Dict[(str, str): (int, str|None)]): Dictionary mapping
- user_id/device_id to update stream_id and the relevant json-encoded
- opentracing context
+ query_map: Dictionary mapping (user_id, device_id) to
+ (update stream_id, the relevant json-encoded opentracing context)
Returns:
- List of objects representing an device update EDU
+ List of objects representing a device update EDU.
+
+ Postconditions:
+ The returned list has a length not exceeding that of the query_map:
+ len(result) <= len(query_map)
"""
devices = (
await self.get_e2e_device_keys_and_signatures(
+ # Because these are (user_id, device_id) tuples with all
+ # device_ids not being None, the returned list's length will not
+ # exceed that of query_map.
query_map.keys(),
include_all_devices=True,
include_deleted_devices=True,
@@ -1386,12 +1472,9 @@ def _update_remote_device_list_cache_txn(
self.db_pool.simple_insert_many_txn(
txn,
table="device_lists_remote_cache",
+ keys=("user_id", "device_id", "content"),
values=[
- {
- "user_id": user_id,
- "device_id": content["device_id"],
- "content": json_encoder.encode(content),
- }
+ (user_id, content["device_id"], json_encoder.encode(content))
for content in devices
],
)
@@ -1479,8 +1562,9 @@ def _add_device_change_to_stream_txn(
self.db_pool.simple_insert_many_txn(
txn,
table="device_lists_stream",
+ keys=("stream_id", "user_id", "device_id"),
values=[
- {"stream_id": stream_id, "user_id": user_id, "device_id": device_id}
+ (stream_id, user_id, device_id)
for stream_id, device_id in zip(stream_ids, device_ids)
],
)
@@ -1507,18 +1591,27 @@ def _add_device_outbound_poke_to_stream_txn(
self.db_pool.simple_insert_many_txn(
txn,
table="device_lists_outbound_pokes",
+ keys=(
+ "destination",
+ "stream_id",
+ "user_id",
+ "device_id",
+ "sent",
+ "ts",
+ "opentracing_context",
+ ),
values=[
- {
- "destination": destination,
- "stream_id": next(next_stream_id),
- "user_id": user_id,
- "device_id": device_id,
- "sent": False,
- "ts": now,
- "opentracing_context": json_encoder.encode(context)
+ (
+ destination,
+ next(next_stream_id),
+ user_id,
+ device_id,
+ False,
+ now,
+ json_encoder.encode(context)
if whitelisted_homeserver(destination)
else "{}",
- }
+ )
for destination in hosts
for device_id in device_ids
],
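Aside: a runnable sketch of the EDU budgeting introduced above (the helper name is illustrative). A cross-signing user costs two entries because each emits both the stable `m.signing_key_update` EDU and its unstable `org.matrix.signing_key_update` twin, while a plain device update costs one:

```python
def remaining_budget(limit: int, n_device_updates: int, n_cross_signing_users: int) -> int:
    # Mirrors `remaining_length_budget` in get_device_updates_by_remote.
    return limit - (n_device_updates + 2 * n_cross_signing_users)

# With a limit of 100, 90 queued device updates plus 4 cross-signing users
# leave room for two more plain updates, but not another cross-signing user.
assert remaining_budget(100, 90, 4) == 2
```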
diff --git a/synapse/storage/databases/main/directory.py b/synapse/storage/databases/main/directory.py
index f76c6121e8a9..5903fdaf007a 100644
--- a/synapse/storage/databases/main/directory.py
+++ b/synapse/storage/databases/main/directory.py
@@ -112,10 +112,8 @@ def alias_txn(txn: LoggingTransaction) -> None:
self.db_pool.simple_insert_many_txn(
txn,
table="room_alias_servers",
- values=[
- {"room_alias": room_alias.to_string(), "server": server}
- for server in servers
- ],
+ keys=("room_alias", "server"),
+ values=[(room_alias.to_string(), server) for server in servers],
)
self._invalidate_cache_and_stream(
diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py
index 0cb48b9dd750..b789a588a54b 100644
--- a/synapse/storage/databases/main/e2e_room_keys.py
+++ b/synapse/storage/databases/main/e2e_room_keys.py
@@ -110,16 +110,16 @@ async def add_e2e_room_keys(
values = []
for (room_id, session_id, room_key) in room_keys:
values.append(
- {
- "user_id": user_id,
- "version": version_int,
- "room_id": room_id,
- "session_id": session_id,
- "first_message_index": room_key["first_message_index"],
- "forwarded_count": room_key["forwarded_count"],
- "is_verified": room_key["is_verified"],
- "session_data": json_encoder.encode(room_key["session_data"]),
- }
+ (
+ user_id,
+ version_int,
+ room_id,
+ session_id,
+ room_key["first_message_index"],
+ room_key["forwarded_count"],
+ room_key["is_verified"],
+ json_encoder.encode(room_key["session_data"]),
+ )
)
log_kv(
{
@@ -131,7 +131,19 @@ async def add_e2e_room_keys(
)
await self.db_pool.simple_insert_many(
- table="e2e_room_keys", values=values, desc="add_e2e_room_keys"
+ table="e2e_room_keys",
+ keys=(
+ "user_id",
+ "version",
+ "room_id",
+ "session_id",
+ "first_message_index",
+ "forwarded_count",
+ "is_verified",
+ "session_data",
+ ),
+ values=values,
+ desc="add_e2e_room_keys",
)
@trace
diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py
index 86cab975639c..1f8447b5076f 100644
--- a/synapse/storage/databases/main/end_to_end_keys.py
+++ b/synapse/storage/databases/main/end_to_end_keys.py
@@ -387,15 +387,16 @@ def _add_e2e_one_time_keys(txn: LoggingTransaction) -> None:
self.db_pool.simple_insert_many_txn(
txn,
table="e2e_one_time_keys_json",
+ keys=(
+ "user_id",
+ "device_id",
+ "algorithm",
+ "key_id",
+ "ts_added_ms",
+ "key_json",
+ ),
values=[
- {
- "user_id": user_id,
- "device_id": device_id,
- "algorithm": algorithm,
- "key_id": key_id,
- "ts_added_ms": time_now,
- "key_json": json_bytes,
- }
+ (user_id, device_id, algorithm, key_id, time_now, json_bytes)
for algorithm, key_id, json_bytes in new_keys
],
)
@@ -1186,15 +1187,22 @@ async def store_e2e_cross_signing_signatures(
"""
await self.db_pool.simple_insert_many(
"e2e_cross_signing_signatures",
- [
- {
- "user_id": user_id,
- "key_id": item.signing_key_id,
- "target_user_id": item.target_user_id,
- "target_device_id": item.target_device_id,
- "signature": item.signature,
- }
+ keys=(
+ "user_id",
+ "key_id",
+ "target_user_id",
+ "target_device_id",
+ "signature",
+ ),
+ values=[
+ (
+ user_id,
+ item.signing_key_id,
+ item.target_user_id,
+ item.target_device_id,
+ item.signature,
+ )
for item in signatures
],
- "add_e2e_signing_key",
+ desc="add_e2e_signing_key",
)
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index a98e6b259378..b7c4c62222bd 100644
--- a/synapse/storage/databases/main/event_push_actions.py
+++ b/synapse/storage/databases/main/event_push_actions.py
@@ -875,14 +875,21 @@ def _rotate_notifs_before_txn(
self.db_pool.simple_insert_many_txn(
txn,
table="event_push_summary",
+ keys=(
+ "user_id",
+ "room_id",
+ "notif_count",
+ "unread_count",
+ "stream_ordering",
+ ),
values=[
- {
- "user_id": user_id,
- "room_id": room_id,
- "notif_count": summary.notif_count,
- "unread_count": summary.unread_count,
- "stream_ordering": summary.stream_ordering,
- }
+ (
+ user_id,
+ room_id,
+ summary.notif_count,
+ summary.unread_count,
+ summary.stream_ordering,
+ )
for ((user_id, room_id), summary) in summaries.items()
if summary.old_user_id is None
],
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index cce230559773..1ae1ebe10879 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -39,7 +39,6 @@
from synapse.crypto.event_signing import compute_event_reference_hash
from synapse.events import EventBase # noqa: F401
from synapse.events.snapshot import EventContext # noqa: F401
-from synapse.logging.utils import log_function
from synapse.storage._base import db_to_json, make_in_list_sql_clause
from synapse.storage.database import (
DatabasePool,
@@ -328,7 +327,6 @@ def _get_prevs_before_rejected_txn(txn, batch):
return existing_prevs
- @log_function
def _persist_events_txn(
self,
txn: LoggingTransaction,
@@ -442,12 +440,9 @@ def _persist_event_auth_chain_txn(
self.db_pool.simple_insert_many_txn(
txn,
table="event_auth",
+ keys=("event_id", "room_id", "auth_id"),
values=[
- {
- "event_id": event.event_id,
- "room_id": event.room_id,
- "auth_id": auth_id,
- }
+ (event.event_id, event.room_id, auth_id)
for event in events
for auth_id in event.auth_event_ids()
if event.is_state()
@@ -675,8 +670,9 @@ def _add_chain_cover_index(
db_pool.simple_insert_many_txn(
txn,
table="event_auth_chains",
+ keys=("event_id", "chain_id", "sequence_number"),
values=[
- {"event_id": event_id, "chain_id": c_id, "sequence_number": seq}
+ (event_id, c_id, seq)
for event_id, (c_id, seq) in new_chain_tuples.items()
],
)
@@ -782,13 +778,14 @@ def _add_chain_cover_index(
db_pool.simple_insert_many_txn(
txn,
table="event_auth_chain_links",
+ keys=(
+ "origin_chain_id",
+ "origin_sequence_number",
+ "target_chain_id",
+ "target_sequence_number",
+ ),
values=[
- {
- "origin_chain_id": source_id,
- "origin_sequence_number": source_seq,
- "target_chain_id": target_id,
- "target_sequence_number": target_seq,
- }
+ (source_id, source_seq, target_id, target_seq)
for (
source_id,
source_seq,
@@ -943,20 +940,28 @@ def _persist_transaction_ids_txn(
txn_id = getattr(event.internal_metadata, "txn_id", None)
if token_id and txn_id:
to_insert.append(
- {
- "event_id": event.event_id,
- "room_id": event.room_id,
- "user_id": event.sender,
- "token_id": token_id,
- "txn_id": txn_id,
- "inserted_ts": self._clock.time_msec(),
- }
+ (
+ event.event_id,
+ event.room_id,
+ event.sender,
+ token_id,
+ txn_id,
+ self._clock.time_msec(),
+ )
)
if to_insert:
self.db_pool.simple_insert_many_txn(
txn,
table="event_txn_id",
+ keys=(
+ "event_id",
+ "room_id",
+ "user_id",
+ "token_id",
+ "txn_id",
+ "inserted_ts",
+ ),
values=to_insert,
)
@@ -1161,8 +1166,9 @@ def _update_forward_extremities_txn(
self.db_pool.simple_insert_many_txn(
txn,
table="event_forward_extremities",
+ keys=("event_id", "room_id"),
values=[
- {"event_id": ev_id, "room_id": room_id}
+ (ev_id, room_id)
for room_id, new_extrem in new_forward_extremities.items()
for ev_id in new_extrem
],
@@ -1174,12 +1180,9 @@ def _update_forward_extremities_txn(
self.db_pool.simple_insert_many_txn(
txn,
table="stream_ordering_to_exterm",
+ keys=("room_id", "event_id", "stream_ordering"),
values=[
- {
- "room_id": room_id,
- "event_id": event_id,
- "stream_ordering": max_stream_order,
- }
+ (room_id, event_id, max_stream_order)
for room_id, new_extrem in new_forward_extremities.items()
for event_id in new_extrem
],
@@ -1251,20 +1254,22 @@ def _update_room_depths_txn(
for room_id, depth in depth_updates.items():
self._update_min_depth_for_room_txn(txn, room_id, depth)
- def _update_outliers_txn(self, txn, events_and_contexts):
+ def _update_outliers_txn(
+ self,
+ txn: LoggingTransaction,
+ events_and_contexts: List[Tuple[EventBase, EventContext]],
+ ) -> List[Tuple[EventBase, EventContext]]:
"""Update any outliers with new event info.
- This turns outliers into ex-outliers (unless the new event was
- rejected).
+ This turns outliers into ex-outliers (unless the new event was rejected), and
+ also removes any other events we have already seen from the list.
Args:
- txn (twisted.enterprise.adbapi.Connection): db connection
- events_and_contexts (list[(EventBase, EventContext)]): events
- we are persisting
+ txn: db connection
+ events_and_contexts: events we are persisting
Returns:
- list[(EventBase, EventContext)] new list, without events which
- are already in the events table.
+ new list, without events which are already in the events table.
"""
txn.execute(
"SELECT event_id, outlier FROM events WHERE event_id in (%s)"
@@ -1272,7 +1277,9 @@ def _update_outliers_txn(self, txn, events_and_contexts):
[event.event_id for event, _ in events_and_contexts],
)
- have_persisted = {event_id: outlier for event_id, outlier in txn}
+ have_persisted: Dict[str, bool] = {
+ event_id: outlier for event_id, outlier in txn
+ }
to_remove = set()
for event, context in events_and_contexts:
@@ -1282,15 +1289,22 @@ def _update_outliers_txn(self, txn, events_and_contexts):
to_remove.add(event)
if context.rejected:
- # If the event is rejected then we don't care if the event
- # was an outlier or not.
+ # If the incoming event is rejected then we don't care if the event
+ # was an outlier or not - what we have is at least as good.
continue
outlier_persisted = have_persisted[event.event_id]
if not event.internal_metadata.is_outlier() and outlier_persisted:
# We received a copy of an event that we had already stored as
- # an outlier in the database. We now have some state at that
+ # an outlier in the database. We now have some state at that event
# so we need to update the state_groups table with that state.
+ #
+ # Note that we do not update the stream_ordering of the event in this
+ # scenario. XXX: does this cause bugs? It will mean we won't send such
+ # events down /sync. In general they will be historical events, so that
+ # doesn't matter too much, but that is not always the case.
+
+ logger.info("Updating state for ex-outlier event %s", event.event_id)
# insert into event_to_state_groups.
try:
@@ -1342,7 +1356,7 @@ def event_dict(event):
d.pop("redacted_because", None)
return d
- self.db_pool.simple_insert_many_values_txn(
+ self.db_pool.simple_insert_many_txn(
txn,
table="event_json",
keys=("event_id", "room_id", "internal_metadata", "json", "format_version"),
@@ -1358,7 +1372,7 @@ def event_dict(event):
),
)
- self.db_pool.simple_insert_many_values_txn(
+ self.db_pool.simple_insert_many_txn(
txn,
table="events",
keys=(
@@ -1412,7 +1426,7 @@ def event_dict(event):
)
txn.execute(sql + clause, [False] + args)
- self.db_pool.simple_insert_many_values_txn(
+ self.db_pool.simple_insert_many_txn(
txn,
table="state_events",
keys=("event_id", "room_id", "type", "state_key"),
@@ -1622,14 +1636,9 @@ def insert_labels_for_event_txn(
return self.db_pool.simple_insert_many_txn(
txn=txn,
table="event_labels",
+ keys=("event_id", "label", "room_id", "topological_ordering"),
values=[
- {
- "event_id": event_id,
- "label": label,
- "room_id": room_id,
- "topological_ordering": topological_ordering,
- }
- for label in labels
+ (event_id, label, room_id, topological_ordering) for label in labels
],
)
@@ -1657,16 +1666,13 @@ def _store_event_reference_hashes_txn(self, txn, events):
vals = []
for event in events:
ref_alg, ref_hash_bytes = compute_event_reference_hash(event)
- vals.append(
- {
- "event_id": event.event_id,
- "algorithm": ref_alg,
- "hash": memoryview(ref_hash_bytes),
- }
- )
+ vals.append((event.event_id, ref_alg, memoryview(ref_hash_bytes)))
self.db_pool.simple_insert_many_txn(
- txn, table="event_reference_hashes", values=vals
+ txn,
+ table="event_reference_hashes",
+ keys=("event_id", "algorithm", "hash"),
+ values=vals,
)
def _store_room_members_txn(
@@ -1689,18 +1695,25 @@ def non_null_str_or_none(val: Any) -> Optional[str]:
self.db_pool.simple_insert_many_txn(
txn,
table="room_memberships",
+ keys=(
+ "event_id",
+ "user_id",
+ "sender",
+ "room_id",
+ "membership",
+ "display_name",
+ "avatar_url",
+ ),
values=[
- {
- "event_id": event.event_id,
- "user_id": event.state_key,
- "sender": event.user_id,
- "room_id": event.room_id,
- "membership": event.membership,
- "display_name": non_null_str_or_none(
- event.content.get("displayname")
- ),
- "avatar_url": non_null_str_or_none(event.content.get("avatar_url")),
- }
+ (
+ event.event_id,
+ event.state_key,
+ event.user_id,
+ event.room_id,
+ event.membership,
+ non_null_str_or_none(event.content.get("displayname")),
+ non_null_str_or_none(event.content.get("avatar_url")),
+ )
for event in events
],
)
@@ -1791,6 +1804,13 @@ def _handle_event_relations(
txn.call_after(
self.store.get_thread_summary.invalidate, (parent_id, event.room_id)
)
+ # It should be safe to only invalidate the cache if the user has not
+ # previously participated in the thread, but that's difficult (and
+ # potentially error-prone) so it is always invalidated.
+ txn.call_after(
+ self.store.get_thread_participated.invalidate,
+ (parent_id, event.room_id, event.sender),
+ )
def _handle_insertion_event(self, txn: LoggingTransaction, event: EventBase):
"""Handles keeping track of insertion events and edges/connections.
@@ -2163,13 +2183,9 @@ def _handle_mult_prev_events(self, txn, events):
self.db_pool.simple_insert_many_txn(
txn,
table="event_edges",
+ keys=("event_id", "prev_event_id", "room_id", "is_state"),
values=[
- {
- "event_id": ev.event_id,
- "prev_event_id": e_id,
- "room_id": ev.room_id,
- "is_state": False,
- }
+ (ev.event_id, e_id, ev.room_id, False)
for ev in events
for e_id in ev.prev_event_ids()
],
diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py
index 0a96664caf1b..d5f005966597 100644
--- a/synapse/storage/databases/main/events_bg_updates.py
+++ b/synapse/storage/databases/main/events_bg_updates.py
@@ -684,13 +684,14 @@ def _event_store_labels_txn(txn: LoggingTransaction) -> int:
self.db_pool.simple_insert_many_txn(
txn=txn,
table="event_labels",
+ keys=("event_id", "label", "room_id", "topological_ordering"),
values=[
- {
- "event_id": event_id,
- "label": label,
- "room_id": event_json["room_id"],
- "topological_ordering": event_json["depth"],
- }
+ (
+ event_id,
+ label,
+ event_json["room_id"],
+ event_json["depth"],
+ )
for label in event_json["content"].get(
EventContentFields.LABELS, []
)
@@ -803,29 +804,19 @@ def get_rejected_events(
if not has_state:
state_events.append(
- {
- "event_id": event.event_id,
- "room_id": event.room_id,
- "type": event.type,
- "state_key": event.state_key,
- }
+ (event.event_id, event.room_id, event.type, event.state_key)
)
if not has_event_auth:
# Old, dodgy, events may have duplicate auth events, which we
# need to deduplicate as we have a unique constraint.
for auth_id in set(event.auth_event_ids()):
- auth_events.append(
- {
- "room_id": event.room_id,
- "event_id": event.event_id,
- "auth_id": auth_id,
- }
- )
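+ # Tuple order is (event_id, room_id, auth_id), lining up with the
+ # `keys` used for the "event_auth" insert below.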
+ auth_events.append((event.event_id, event.room_id, auth_id))
if state_events:
await self.db_pool.simple_insert_many(
table="state_events",
+ keys=("event_id", "room_id", "type", "state_key"),
values=state_events,
desc="_rejected_events_metadata_state_events",
)
@@ -833,6 +824,7 @@ def get_rejected_events(
if auth_events:
await self.db_pool.simple_insert_many(
table="event_auth",
+ keys=("event_id", "room_id", "auth_id"),
values=auth_events,
desc="_rejected_events_metadata_event_auth",
)
diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py
index cbf9ec38f75d..4f05811a77eb 100644
--- a/synapse/storage/databases/main/presence.py
+++ b/synapse/storage/databases/main/presence.py
@@ -129,18 +129,29 @@ def _update_presence_txn(self, txn, stream_orderings, presence_states):
self.db_pool.simple_insert_many_txn(
txn,
table="presence_stream",
+ keys=(
+ "stream_id",
+ "user_id",
+ "state",
+ "last_active_ts",
+ "last_federation_update_ts",
+ "last_user_sync_ts",
+ "status_msg",
+ "currently_active",
+ "instance_name",
+ ),
values=[
- {
- "stream_id": stream_id,
- "user_id": state.user_id,
- "state": state.state,
- "last_active_ts": state.last_active_ts,
- "last_federation_update_ts": state.last_federation_update_ts,
- "last_user_sync_ts": state.last_user_sync_ts,
- "status_msg": state.status_msg,
- "currently_active": state.currently_active,
- "instance_name": self._instance_name,
- }
+ (
+ stream_id,
+ state.user_id,
+ state.state,
+ state.last_active_ts,
+ state.last_federation_update_ts,
+ state.last_user_sync_ts,
+ state.status_msg,
+ state.currently_active,
+ self._instance_name,
+ )
for stream_id, state in zip(stream_orderings, presence_states)
],
)
diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py
index 747b4f31df67..cf64cd63a46f 100644
--- a/synapse/storage/databases/main/pusher.py
+++ b/synapse/storage/databases/main/pusher.py
@@ -561,13 +561,9 @@ def delete_pushers_txn(txn, stream_ids):
self.db_pool.simple_insert_many_txn(
txn,
table="deleted_pushers",
+ keys=("stream_id", "app_id", "pushkey", "user_id"),
values=[
- {
- "stream_id": stream_id,
- "app_id": pusher.app_id,
- "pushkey": pusher.pushkey,
- "user_id": user_id,
- }
+ (stream_id, pusher.app_id, pusher.pushkey, user_id)
for stream_id, pusher in zip(stream_ids, pushers)
],
)
diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py
index c6c4bd18da3e..2cb5d06c1352 100644
--- a/synapse/storage/databases/main/relations.py
+++ b/synapse/storage/databases/main/relations.py
@@ -384,8 +384,7 @@ def _get_applicable_edit_txn(txn: LoggingTransaction) -> Optional[str]:
async def get_thread_summary(
self, event_id: str, room_id: str
) -> Tuple[int, Optional[EventBase]]:
- """Get the number of threaded replies, the senders of those replies, and
- the latest reply (if any) for the given event.
+ """Get the number of threaded replies and the latest reply (if any) for the given event.
Args:
event_id: Summarize the thread related to this event ID.
@@ -398,7 +397,7 @@ async def get_thread_summary(
def _get_thread_summary_txn(
txn: LoggingTransaction,
) -> Tuple[int, Optional[str]]:
- # Fetch the count of threaded events and the latest event ID.
+ # Fetch the latest event ID in the thread.
# TODO Should this only allow m.room.message events?
sql = """
SELECT event_id
@@ -419,6 +418,7 @@ def _get_thread_summary_txn(
latest_event_id = row[0]
+ # Fetch the number of threaded replies.
sql = """
SELECT COUNT(event_id)
FROM event_relations
@@ -443,6 +443,44 @@ def _get_thread_summary_txn(
return count, latest_event
+ @cached()
+ async def get_thread_participated(
+ self, event_id: str, room_id: str, user_id: str
+ ) -> bool:
+ """Get whether the requesting user participated in a thread.
+
+ This is separate from get_thread_summary since that can be cached across
+ all users while this value is specific to the requester.
+
+ Args:
+ event_id: The thread related to this event ID.
+ room_id: The room the event belongs to.
+ user_id: The user requesting the summary.
+
+ Returns:
+ True if the requesting user participated in the thread, otherwise false.
+ """
+
+ def _get_thread_participated_txn(txn: LoggingTransaction) -> bool:
+ # Fetch whether the requester has participated or not.
+ sql = """
+ SELECT 1
+ FROM event_relations
+ INNER JOIN events USING (event_id)
+ WHERE
+ relates_to_id = ?
+ AND room_id = ?
+ AND relation_type = ?
+ AND sender = ?
+ """
+
+ txn.execute(sql, (event_id, room_id, RelationTypes.THREAD, user_id))
+ return bool(txn.fetchone())
+
+ return await self.db_pool.runInteraction(
+ "get_thread_summary", _get_thread_summary_txn
+ )
+
async def events_have_relations(
self,
parent_ids: List[str],
@@ -546,7 +584,7 @@ def _get_if_user_has_annotated_event(txn: LoggingTransaction) -> bool:
)
async def _get_bundled_aggregation_for_event(
- self, event: EventBase
+ self, event: EventBase, user_id: str
) -> Optional[Dict[str, Any]]:
"""Generate bundled aggregations for an event.
@@ -554,6 +592,7 @@ async def _get_bundled_aggregation_for_event(
Args:
event: The event to calculate bundled aggregations for.
+ user_id: The user requesting the bundled aggregations.
Returns:
The bundled aggregations for an event, if bundled aggregations are
@@ -598,27 +637,32 @@ async def _get_bundled_aggregation_for_event(
# If this event is the start of a thread, include a summary of the replies.
if self._msc3440_enabled:
- (
- thread_count,
- latest_thread_event,
- ) = await self.get_thread_summary(event_id, room_id)
+ thread_count, latest_thread_event = await self.get_thread_summary(
+ event_id, room_id
+ )
+ participated = await self.get_thread_participated(
+ event_id, room_id, user_id
+ )
if latest_thread_event:
aggregations[RelationTypes.THREAD] = {
- # Don't bundle aggregations as this could recurse forever.
"latest_event": latest_thread_event,
"count": thread_count,
+ "current_user_participated": participated,
}
# Store the bundled aggregations in the event metadata for later use.
return aggregations
async def get_bundled_aggregations(
- self, events: Iterable[EventBase]
+ self,
+ events: Iterable[EventBase],
+ user_id: str,
) -> Dict[str, Dict[str, Any]]:
"""Generate bundled aggregations for events.
Args:
events: The iterable of events to calculate bundled aggregations for.
+ user_id: The user requesting the bundled aggregations.
Returns:
A map of event ID to the bundled aggregation for the event. Not all
@@ -631,7 +675,7 @@ async def get_bundled_aggregations(
# TODO Parallelize.
results = {}
for event in events:
- event_result = await self._get_bundled_aggregation_for_event(event)
+ event_result = await self._get_bundled_aggregation_for_event(event, user_id)
if event_result is not None:
results[event.event_id] = event_result
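The net effect of the relations.py changes is that callers of `get_bundled_aggregations` must now thread the requesting user through; a hedged sketch of a caller, using the names from the hunks above:

# The requesting user is now a required argument:
aggregations = await store.get_bundled_aggregations(events, user_id)
for event in events:
    bundle = aggregations.get(event.event_id)
    if bundle and RelationTypes.THREAD in bundle:
        # The thread entry now carries the new per-requester flag
        # alongside the shared "count" and "latest_event" summary.
        print(bundle[RelationTypes.THREAD]["current_user_participated"])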
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index c0e837854a32..95167116c953 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -551,24 +551,24 @@ async def get_rooms_paginate(
FROM room_stats_state state
INNER JOIN room_stats_current curr USING (room_id)
INNER JOIN rooms USING (room_id)
- %s
- ORDER BY %s %s
+ {where}
+ ORDER BY {order_by} {direction}, state.room_id {direction}
LIMIT ?
OFFSET ?
- """ % (
- where_statement,
- order_by_column,
- "ASC" if order_by_asc else "DESC",
+ """.format(
+ where=where_statement,
+ order_by=order_by_column,
+ direction="ASC" if order_by_asc else "DESC",
)
# Use a nested SELECT statement as SQL can't count(*) with an OFFSET
count_sql = """
SELECT count(*) FROM (
SELECT room_id FROM room_stats_state state
- %s
+ {where}
) AS get_room_ids
- """ % (
- where_statement,
+ """.format(
+ where=where_statement,
)
def _get_rooms_paginate_txn(
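Besides swapping `%`-interpolation for named `str.format` placeholders, the query gains `state.room_id` as a secondary sort key, so rows that tie on the primary column come back in a stable order on both SQLite and PostgreSQL (the admin-room ordering tests later in this diff depend on this). A small illustration of the tie-break, with made-up values:

# Emulating "ORDER BY version ASC, room_id ASC" over tied rows:
rows = [("6", "!b:test"), ("6", "!c:test"), ("6", "!a:test")]
rows.sort(key=lambda row: (row[0], row[1]))
# Deterministic result regardless of the database's internal row order:
assert rows == [("6", "!a:test"), ("6", "!b:test"), ("6", "!c:test")]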
diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
index 0f9b8575d3a5..f7c778bdf22b 100644
--- a/synapse/storage/databases/main/user_directory.py
+++ b/synapse/storage/databases/main/user_directory.py
@@ -105,8 +105,10 @@ def _make_staging_area(txn: LoggingTransaction) -> None:
GROUP BY room_id
"""
txn.execute(sql)
- rooms = [{"room_id": x[0], "events": x[1]} for x in txn.fetchall()]
- self.db_pool.simple_insert_many_txn(txn, TEMP_TABLE + "_rooms", rooms)
+ rooms = list(txn.fetchall())
+ self.db_pool.simple_insert_many_txn(
+ txn, TEMP_TABLE + "_rooms", keys=("room_id", "events"), values=rooms
+ )
del rooms
sql = (
@@ -117,9 +119,11 @@ def _make_staging_area(txn: LoggingTransaction) -> None:
txn.execute(sql)
txn.execute("SELECT name FROM users")
- users = [{"user_id": x[0]} for x in txn.fetchall()]
+ users = list(txn.fetchall())
- self.db_pool.simple_insert_many_txn(txn, TEMP_TABLE + "_users", users)
+ self.db_pool.simple_insert_many_txn(
+ txn, TEMP_TABLE + "_users", keys=("user_id",), values=users
+ )
new_pos = await self.get_max_stream_id_in_current_state_deltas()
await self.db_pool.runInteraction(
diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py
index eb1118d2cb20..5de70f31d294 100644
--- a/synapse/storage/databases/state/bg_updates.py
+++ b/synapse/storage/databases/state/bg_updates.py
@@ -327,14 +327,15 @@ def reindex_txn(txn: LoggingTransaction) -> Tuple[bool, int]:
self.db_pool.simple_insert_many_txn(
txn,
table="state_groups_state",
+ keys=(
+ "state_group",
+ "room_id",
+ "type",
+ "state_key",
+ "event_id",
+ ),
values=[
- {
- "state_group": state_group,
- "room_id": room_id,
- "type": key[0],
- "state_key": key[1],
- "event_id": state_id,
- }
+ (state_group, room_id, key[0], key[1], state_id)
for key, state_id in delta_state.items()
],
)
diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py
index c4c8c0021bca..7614d76ac646 100644
--- a/synapse/storage/databases/state/store.py
+++ b/synapse/storage/databases/state/store.py
@@ -460,14 +460,9 @@ def _store_state_group_txn(txn: LoggingTransaction) -> int:
self.db_pool.simple_insert_many_txn(
txn,
table="state_groups_state",
+ keys=("state_group", "room_id", "type", "state_key", "event_id"),
values=[
- {
- "state_group": state_group,
- "room_id": room_id,
- "type": key[0],
- "state_key": key[1],
- "event_id": state_id,
- }
+ (state_group, room_id, key[0], key[1], state_id)
for key, state_id in delta_ids.items()
],
)
@@ -475,14 +470,9 @@ def _store_state_group_txn(txn: LoggingTransaction) -> int:
self.db_pool.simple_insert_many_txn(
txn,
table="state_groups_state",
+ keys=("state_group", "room_id", "type", "state_key", "event_id"),
values=[
- {
- "state_group": state_group,
- "room_id": room_id,
- "type": key[0],
- "state_key": key[1],
- "event_id": state_id,
- }
+ (state_group, room_id, key[0], key[1], state_id)
for key, state_id in current_state_ids.items()
],
)
@@ -589,14 +579,9 @@ def _purge_unreferenced_state_groups(
self.db_pool.simple_insert_many_txn(
txn,
table="state_groups_state",
+ keys=("state_group", "room_id", "type", "state_key", "event_id"),
values=[
- {
- "state_group": sg,
- "room_id": room_id,
- "type": key[0],
- "state_key": key[1],
- "event_id": state_id,
- }
+ (sg, room_id, key[0], key[1], state_id)
for key, state_id in curr_state.items()
],
)
diff --git a/tests/handlers/test_room_summary.py b/tests/handlers/test_room_summary.py
index ce3ebcf2f2fa..51b22d299812 100644
--- a/tests/handlers/test_room_summary.py
+++ b/tests/handlers/test_room_summary.py
@@ -28,6 +28,7 @@
from synapse.api.errors import AuthError, NotFoundError, SynapseError
from synapse.api.room_versions import RoomVersions
from synapse.events import make_event_from_dict
+from synapse.federation.transport.client import TransportLayerClient
from synapse.handlers.room_summary import _child_events_comparison_key, _RoomEntry
from synapse.rest import admin
from synapse.rest.client import login, room
@@ -134,10 +135,18 @@ def prepare(self, reactor, clock, hs: HomeServer):
self._add_child(self.space, self.room, self.token)
def _add_child(
- self, space_id: str, room_id: str, token: str, order: Optional[str] = None
+ self,
+ space_id: str,
+ room_id: str,
+ token: str,
+ order: Optional[str] = None,
+ via: Optional[List[str]] = None,
) -> None:
"""Add a child room to a space."""
- content: JsonDict = {"via": [self.hs.hostname]}
+ if via is None:
+ via = [self.hs.hostname]
+
+ content: JsonDict = {"via": via}
if order is not None:
content["order"] = order
self.helper.send_state(
@@ -1036,6 +1045,85 @@ async def summarize_remote_room_hierarchy(_self, room, suggested_only):
)
self._assert_hierarchy(result, expected)
+ def test_fed_caching(self):
+ """
+ Federation `/hierarchy` responses should be cached.
+ """
+ fed_hostname = self.hs.hostname + "2"
+ fed_subspace = "#space:" + fed_hostname
+ fed_room = "#room:" + fed_hostname
+
+ # Add a room to the space which is on another server.
+ self._add_child(self.space, fed_subspace, self.token, via=[fed_hostname])
+
+ federation_requests = 0
+
+ async def get_room_hierarchy(
+ _self: TransportLayerClient,
+ destination: str,
+ room_id: str,
+ suggested_only: bool,
+ ) -> JsonDict:
+ nonlocal federation_requests
+ federation_requests += 1
+
+ return {
+ "room": {
+ "room_id": fed_subspace,
+ "world_readable": True,
+ "room_type": RoomTypes.SPACE,
+ "children_state": [
+ {
+ "type": EventTypes.SpaceChild,
+ "room_id": fed_subspace,
+ "state_key": fed_room,
+ "content": {"via": [fed_hostname]},
+ },
+ ],
+ },
+ "children": [
+ {
+ "room_id": fed_room,
+ "world_readable": True,
+ },
+ ],
+ "inaccessible_children": [],
+ }
+
+ expected = [
+ (self.space, [self.room, fed_subspace]),
+ (self.room, ()),
+ (fed_subspace, [fed_room]),
+ (fed_room, ()),
+ ]
+
+ with mock.patch(
+ "synapse.federation.transport.client.TransportLayerClient.get_room_hierarchy",
+ new=get_room_hierarchy,
+ ):
+ result = self.get_success(
+ self.handler.get_room_hierarchy(create_requester(self.user), self.space)
+ )
+ self.assertEqual(federation_requests, 1)
+ self._assert_hierarchy(result, expected)
+
+ # The previous federation response should be reused.
+ result = self.get_success(
+ self.handler.get_room_hierarchy(create_requester(self.user), self.space)
+ )
+ self.assertEqual(federation_requests, 1)
+ self._assert_hierarchy(result, expected)
+
+ # Expire the response cache
+ self.reactor.advance(5 * 60 + 1)
+
+ # A new federation request should be made.
+ result = self.get_success(
+ self.handler.get_room_hierarchy(create_requester(self.user), self.space)
+ )
+ self.assertEqual(federation_requests, 2)
+ self._assert_hierarchy(result, expected)
+
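The timings in this test assume the hierarchy handler memoizes federation responses for roughly five minutes; a hand-rolled sketch of that expiring-cache behaviour (an illustration only, not Synapse's actual cache class):

import time

_cache = {}
CACHE_TTL = 5 * 60  # seconds; matches the 5 * 60 + 1 advance above

async def cached_fetch(key, fetch):
    now = time.monotonic()
    hit = _cache.get(key)
    if hit is not None and now - hit[0] < CACHE_TTL:
        return hit[1]          # reuse the previous federation response
    value = await fetch()      # expired or missing: hit federation again
    _cache[key] = (now, value)
    return value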
class RoomSummaryTestCase(unittest.HomeserverTestCase):
servlets = [
diff --git a/tests/http/test_webclient.py b/tests/http/test_webclient.py
new file mode 100644
index 000000000000..ee5cf299f64c
--- /dev/null
+++ b/tests/http/test_webclient.py
@@ -0,0 +1,108 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from http import HTTPStatus
+from typing import Dict
+
+from twisted.web.resource import Resource
+
+from synapse.app.homeserver import SynapseHomeServer
+from synapse.config.server import HttpListenerConfig, HttpResourceConfig, ListenerConfig
+from synapse.http.site import SynapseSite
+
+from tests.server import make_request
+from tests.unittest import HomeserverTestCase, create_resource_tree, override_config
+
+
+class WebClientTests(HomeserverTestCase):
+ @override_config(
+ {
+ "web_client_location": "https://example.org",
+ }
+ )
+ def test_webclient_resolves_with_client_resource(self):
+ """
+ Tests that both client and webclient resources can be accessed simultaneously.
+
+ This is a regression test created in response to https://github.com/matrix-org/synapse/issues/11763.
+ """
+ for resource_name_order_list in [
+ ["webclient", "client"],
+ ["client", "webclient"],
+ ]:
+ # Create a dictionary from path regex -> resource
+ resource_dict: Dict[str, Resource] = {}
+
+ for resource_name in resource_name_order_list:
+ resource_dict.update(
+ SynapseHomeServer._configure_named_resource(self.hs, resource_name)
+ )
+
+ # Create a root resource which ties the above resources together into one
+ root_resource = Resource()
+ create_resource_tree(resource_dict, root_resource)
+
+ # Create a site configured with this resource to make HTTP requests against
+ listener_config = ListenerConfig(
+ port=8008,
+ bind_addresses=["127.0.0.1"],
+ type="http",
+ http_options=HttpListenerConfig(
+ resources=[HttpResourceConfig(names=resource_name_order_list)]
+ ),
+ )
+ test_site = SynapseSite(
+ logger_name="synapse.access.http.fake",
+ site_tag=self.hs.config.server.server_name,
+ config=listener_config,
+ resource=root_resource,
+ server_version_string="1",
+ max_request_body_size=1234,
+ reactor=self.reactor,
+ )
+
+ # Attempt to make requests to endpoints on both the webclient and client resources
+ # on test_site.
+ self._request_client_and_webclient_resources(test_site)
+
+ def _request_client_and_webclient_resources(self, test_site: SynapseSite) -> None:
+ """Make a request to an endpoint on both the webclient and client-server resources
+ of the given SynapseSite.
+
+ Args:
+ test_site: The SynapseSite object to make requests against.
+ """
+
+ # Ensure that the *webclient* resource is behaving as expected (we get redirected to
+ # the configured web_client_location)
+ channel = make_request(
+ self.reactor,
+ site=test_site,
+ method="GET",
+ path="/_matrix/client",
+ )
+ # Check that we are being redirected to the webclient location URI.
+ self.assertEqual(channel.code, HTTPStatus.FOUND)
+ self.assertEqual(
+ channel.headers.getRawHeaders("Location"), ["https://example.org"]
+ )
+
+ # Ensure that a request to the *client* resource works.
+ channel = make_request(
+ self.reactor,
+ site=test_site,
+ method="GET",
+ path="/_matrix/client/v3/login",
+ )
+ self.assertEqual(channel.code, HTTPStatus.OK)
+ self.assertIn("flows", channel.json_body)
diff --git a/tests/rest/admin/test_registration_tokens.py b/tests/rest/admin/test_registration_tokens.py
index 81f3ac7f0448..8513b1d2df53 100644
--- a/tests/rest/admin/test_registration_tokens.py
+++ b/tests/rest/admin/test_registration_tokens.py
@@ -223,20 +223,13 @@ def test_create_unable_to_generate_token(self) -> None:
# Create all possible single character tokens
tokens = []
for c in string.ascii_letters + string.digits + "._~-":
- tokens.append(
- {
- "token": c,
- "uses_allowed": None,
- "pending": 0,
- "completed": 0,
- "expiry_time": None,
- }
- )
+ tokens.append((c, None, 0, 0, None))
self.get_success(
self.store.db_pool.simple_insert_many(
"registration_tokens",
- tokens,
- "create_all_registration_tokens",
+ keys=("token", "uses_allowed", "pending", "completed", "expiry_time"),
+ values=tokens,
+ desc="create_all_registration_tokens",
)
)
diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py
index d2c8781cd4b9..3495a0366ad3 100644
--- a/tests/rest/admin/test_room.py
+++ b/tests/rest/admin/test_room.py
@@ -1089,6 +1089,8 @@ def test_list_rooms(self) -> None:
)
room_ids.append(room_id)
+ room_ids.sort()
+
# Request the list of rooms
url = "/_synapse/admin/v1/rooms"
channel = self.make_request(
@@ -1360,6 +1362,12 @@ def _order_test(
room_id_2 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
room_id_3 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+ # Also create lists of the room IDs in lexicographic order, for properties that are equal across rooms (which therefore fall back to sorting by room_id)
+ sorted_by_room_id_asc = [room_id_1, room_id_2, room_id_3]
+ sorted_by_room_id_asc.sort()
+ sorted_by_room_id_desc = sorted_by_room_id_asc.copy()
+ sorted_by_room_id_desc.reverse()
+
# Set room names in alphabetical order. room 1 -> A, 2 -> B, 3 -> C
self.helper.send_state(
room_id_1,
@@ -1405,41 +1413,42 @@ def _order_test(
_order_test("canonical_alias", [room_id_1, room_id_2, room_id_3])
_order_test("canonical_alias", [room_id_3, room_id_2, room_id_1], reverse=True)
+ # Note: joined_member counts are sorted in descending order when dir=f
_order_test("joined_members", [room_id_3, room_id_2, room_id_1])
_order_test("joined_members", [room_id_1, room_id_2, room_id_3], reverse=True)
+ # Note: joined_local_member counts are sorted in descending order when dir=f
_order_test("joined_local_members", [room_id_3, room_id_2, room_id_1])
_order_test(
"joined_local_members", [room_id_1, room_id_2, room_id_3], reverse=True
)
- _order_test("version", [room_id_1, room_id_2, room_id_3])
- _order_test("version", [room_id_1, room_id_2, room_id_3], reverse=True)
+ # Note: versions are sorted in descending order when dir=f
+ _order_test("version", sorted_by_room_id_asc, reverse=True)
+ _order_test("version", sorted_by_room_id_desc)
- _order_test("creator", [room_id_1, room_id_2, room_id_3])
- _order_test("creator", [room_id_1, room_id_2, room_id_3], reverse=True)
+ _order_test("creator", sorted_by_room_id_asc)
+ _order_test("creator", sorted_by_room_id_desc, reverse=True)
- _order_test("encryption", [room_id_1, room_id_2, room_id_3])
- _order_test("encryption", [room_id_1, room_id_2, room_id_3], reverse=True)
+ _order_test("encryption", sorted_by_room_id_asc)
+ _order_test("encryption", sorted_by_room_id_desc, reverse=True)
- _order_test("federatable", [room_id_1, room_id_2, room_id_3])
- _order_test("federatable", [room_id_1, room_id_2, room_id_3], reverse=True)
+ _order_test("federatable", sorted_by_room_id_asc)
+ _order_test("federatable", sorted_by_room_id_desc, reverse=True)
- _order_test("public", [room_id_1, room_id_2, room_id_3])
- # Different sort order of SQlite and PostreSQL
- # _order_test("public", [room_id_3, room_id_2, room_id_1], reverse=True)
+ _order_test("public", sorted_by_room_id_asc)
+ _order_test("public", sorted_by_room_id_desc, reverse=True)
- _order_test("join_rules", [room_id_1, room_id_2, room_id_3])
- _order_test("join_rules", [room_id_1, room_id_2, room_id_3], reverse=True)
+ _order_test("join_rules", sorted_by_room_id_asc)
+ _order_test("join_rules", sorted_by_room_id_desc, reverse=True)
- _order_test("guest_access", [room_id_1, room_id_2, room_id_3])
- _order_test("guest_access", [room_id_1, room_id_2, room_id_3], reverse=True)
+ _order_test("guest_access", sorted_by_room_id_asc)
+ _order_test("guest_access", sorted_by_room_id_desc, reverse=True)
- _order_test("history_visibility", [room_id_1, room_id_2, room_id_3])
- _order_test(
- "history_visibility", [room_id_1, room_id_2, room_id_3], reverse=True
- )
+ _order_test("history_visibility", sorted_by_room_id_asc)
+ _order_test("history_visibility", sorted_by_room_id_desc, reverse=True)
+ # Note: state_event counts are sorted in descending order when dir=f
_order_test("state_events", [room_id_3, room_id_2, room_id_1])
_order_test("state_events", [room_id_1, room_id_2, room_id_3], reverse=True)
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index e0b9fe8e91b3..9711405735db 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -1181,6 +1181,7 @@ def prepare(self, reactor, clock, hs):
self.other_user, device_id=None, valid_until_ms=None
)
)
+
self.url_prefix = "/_synapse/admin/v2/users/%s"
self.url_other_user = self.url_prefix % self.other_user
@@ -1188,7 +1189,7 @@ def test_requester_is_no_admin(self):
"""
If the user is not a server admin, an error is returned.
"""
- url = "/_synapse/admin/v2/users/@bob:test"
+ url = self.url_prefix % "@bob:test"
channel = self.make_request(
"GET",
@@ -1216,7 +1217,7 @@ def test_user_does_not_exist(self):
channel = self.make_request(
"GET",
- "/_synapse/admin/v2/users/@unknown_person:test",
+ self.url_prefix % "@unknown_person:test",
access_token=self.admin_user_tok,
)
@@ -1337,7 +1338,7 @@ def test_create_server_admin(self):
"""
Check that a new admin user is created successfully.
"""
- url = "/_synapse/admin/v2/users/@bob:test"
+ url = self.url_prefix % "@bob:test"
# Create user (server admin)
body = {
@@ -1386,7 +1387,7 @@ def test_create_user(self):
"""
Check that a new regular user is created successfully.
"""
- url = "/_synapse/admin/v2/users/@bob:test"
+ url = self.url_prefix % "@bob:test"
# Create user
body = {
@@ -1478,7 +1479,7 @@ def test_create_user_mau_limit_reached_active_admin(self):
)
# Register new user with admin API
- url = "/_synapse/admin/v2/users/@bob:test"
+ url = self.url_prefix % "@bob:test"
# Create user
channel = self.make_request(
@@ -1515,7 +1516,7 @@ def test_create_user_mau_limit_reached_passive_admin(self):
)
# Register new user with admin API
- url = "/_synapse/admin/v2/users/@bob:test"
+ url = self.url_prefix % "@bob:test"
# Create user
channel = self.make_request(
@@ -1545,7 +1546,7 @@ def test_create_user_email_notif_for_new_users(self):
Check that a new regular user is created successfully and
got an email pusher.
"""
- url = "/_synapse/admin/v2/users/@bob:test"
+ url = self.url_prefix % "@bob:test"
# Create user
body = {
@@ -1588,7 +1589,7 @@ def test_create_user_email_no_notif_for_new_users(self):
Check that a new regular user is created successfully and
got not an email pusher.
"""
- url = "/_synapse/admin/v2/users/@bob:test"
+ url = self.url_prefix % "@bob:test"
# Create user
body = {
@@ -2085,10 +2086,13 @@ def test_deactivate_user(self):
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertTrue(channel.json_body["deactivated"])
- self.assertIsNone(channel.json_body["password_hash"])
self.assertEqual(0, len(channel.json_body["threepids"]))
self.assertEqual("mxc://servername/mediaid", channel.json_body["avatar_url"])
self.assertEqual("User", channel.json_body["displayname"])
+
+ # This key was removed intentionally. Ensure it is not accidentally re-included.
+ self.assertNotIn("password_hash", channel.json_body)
+
# the user is deactivated, the threepid will be deleted
# Get user
@@ -2101,11 +2105,13 @@ def test_deactivate_user(self):
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertTrue(channel.json_body["deactivated"])
- self.assertIsNone(channel.json_body["password_hash"])
self.assertEqual(0, len(channel.json_body["threepids"]))
self.assertEqual("mxc://servername/mediaid", channel.json_body["avatar_url"])
self.assertEqual("User", channel.json_body["displayname"])
+ # This key was removed intentionally. Ensure it is not accidentally re-included.
+ self.assertNotIn("password_hash", channel.json_body)
+
@override_config({"user_directory": {"enabled": True, "search_all_users": True}})
def test_change_name_deactivate_user_user_directory(self):
"""
@@ -2177,9 +2183,11 @@ def test_reactivate_user(self):
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertFalse(channel.json_body["deactivated"])
- self.assertIsNotNone(channel.json_body["password_hash"])
self._is_erased("@user:test", False)
+ # This key was removed intentionally. Ensure it is not accidentally re-included.
+ self.assertNotIn("password_hash", channel.json_body)
+
@override_config({"password_config": {"localdb_enabled": False}})
def test_reactivate_user_localdb_disabled(self):
"""
@@ -2209,9 +2217,11 @@ def test_reactivate_user_localdb_disabled(self):
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertFalse(channel.json_body["deactivated"])
- self.assertIsNone(channel.json_body["password_hash"])
self._is_erased("@user:test", False)
+ # This key was removed intentionally. Ensure it is not accidentally re-included.
+ self.assertNotIn("password_hash", channel.json_body)
+
@override_config({"password_config": {"enabled": False}})
def test_reactivate_user_password_disabled(self):
"""
@@ -2241,9 +2251,11 @@ def test_reactivate_user_password_disabled(self):
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertFalse(channel.json_body["deactivated"])
- self.assertIsNone(channel.json_body["password_hash"])
self._is_erased("@user:test", False)
+ # This key was removed intentionally. Ensure it is not accidentally re-included.
+ self.assertNotIn("password_hash", channel.json_body)
+
def test_set_user_as_admin(self):
"""
Test setting the admin flag on a user.
@@ -2328,7 +2340,7 @@ def test_accidental_deactivation_prevention(self):
Ensure an account can't accidentally be deactivated by using a str value
for the deactivated body parameter
"""
- url = "/_synapse/admin/v2/users/@bob:test"
+ url = self.url_prefix % "@bob:test"
# Create user
channel = self.make_request(
@@ -2392,18 +2404,20 @@ def _deactivate_user(self, user_id: str) -> None:
# Deactivate the user.
channel = self.make_request(
"PUT",
- "/_synapse/admin/v2/users/%s" % urllib.parse.quote(user_id),
+ self.url_prefix % urllib.parse.quote(user_id),
access_token=self.admin_user_tok,
content={"deactivated": True},
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertTrue(channel.json_body["deactivated"])
- self.assertIsNone(channel.json_body["password_hash"])
self._is_erased(user_id, False)
d = self.store.mark_user_erased(user_id)
self.assertIsNone(self.get_success(d))
self._is_erased(user_id, True)
+ # This key was removed intentionally. Ensure it is not accidentally re-included.
+ self.assertNotIn("password_hash", channel.json_body)
+
def _check_fields(self, content: JsonDict):
"""Checks that the expected user attributes are present in content
@@ -2416,13 +2430,15 @@ def _check_fields(self, content: JsonDict):
self.assertIn("admin", content)
self.assertIn("deactivated", content)
self.assertIn("shadow_banned", content)
- self.assertIn("password_hash", content)
self.assertIn("creation_ts", content)
self.assertIn("appservice_id", content)
self.assertIn("consent_server_notice_sent", content)
self.assertIn("consent_version", content)
self.assertIn("external_ids", content)
+ # This key was removed intentionally. Ensure it is not accidentally re-included.
+ self.assertNotIn("password_hash", content)
+
class UserMembershipRestTestCase(unittest.HomeserverTestCase):
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py
index ee26751430f0..4b20ab0e3e57 100644
--- a/tests/rest/client/test_relations.py
+++ b/tests/rest/client/test_relations.py
@@ -515,6 +515,9 @@ def assert_bundle(actual):
2,
actual[RelationTypes.THREAD].get("count"),
)
+ self.assertTrue(
+ actual[RelationTypes.THREAD].get("current_user_participated")
+ )
# The latest thread event has some fields that don't matter.
self.assert_dict(
{
diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py
index 6790aa524291..b547bf8d9978 100644
--- a/tests/storage/test_devices.py
+++ b/tests/storage/test_devices.py
@@ -94,7 +94,7 @@ def test_count_devices_by_users(self):
def test_get_device_updates_by_remote(self):
device_ids = ["device_id1", "device_id2"]
- # Add two device updates with a single stream_id
+ # Add two device updates with sequential `stream_id`s
self.get_success(
self.store.add_device_change_to_streams("user_id", device_ids, ["somehost"])
)
@@ -107,6 +107,164 @@ def test_get_device_updates_by_remote(self):
# Check original device_ids are contained within these updates
self._check_devices_in_updates(device_ids, device_updates)
+ def test_get_device_updates_by_remote_can_limit_properly(self):
+ """
+ Tests that `get_device_updates_by_remote` returns an appropriate
+ stream_id to resume fetching from (without skipping any results).
+ """
+
+ # Add some device updates with sequential `stream_id`s
+ device_ids = [
+ "device_id1",
+ "device_id2",
+ "device_id3",
+ "device_id4",
+ "device_id5",
+ ]
+ self.get_success(
+ self.store.add_device_change_to_streams("user_id", device_ids, ["somehost"])
+ )
+
+ # Get device updates meant for this remote
+ next_stream_id, device_updates = self.get_success(
+ self.store.get_device_updates_by_remote("somehost", -1, limit=3)
+ )
+
+ # Check the first three original device_ids are contained within these updates
+ self._check_devices_in_updates(device_ids[:3], device_updates)
+
+ # Get the next batch of device updates
+ next_stream_id, device_updates = self.get_success(
+ self.store.get_device_updates_by_remote("somehost", next_stream_id, limit=3)
+ )
+
+ # Check the last two original device_ids are contained within these updates
+ self._check_devices_in_updates(device_ids[3:], device_updates)
+
+ # Add some more device updates to ensure it still resumes properly
+ device_ids = ["device_id6", "device_id7"]
+ self.get_success(
+ self.store.add_device_change_to_streams("user_id", device_ids, ["somehost"])
+ )
+
+ # Get the next batch of device updates
+ next_stream_id, device_updates = self.get_success(
+ self.store.get_device_updates_by_remote("somehost", next_stream_id, limit=3)
+ )
+
+ # Check the newly-added device_ids are contained within these updates
+ self._check_devices_in_updates(device_ids, device_updates)
+
+ # Check there are no more device updates left.
+ _, device_updates = self.get_success(
+ self.store.get_device_updates_by_remote("somehost", next_stream_id, limit=3)
+ )
+ self.assertEqual(device_updates, [])
+
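The contract under test, expressed as a hypothetical consumer loop (the sender helper is made up):

# Resume from the stream ID returned by the previous batch; the store
# guarantees it never skips pending updates, even when a batch boundary
# falls mid-stream.
next_id = -1
while True:
    next_id, updates = await store.get_device_updates_by_remote(
        "somehost", next_id, limit=3
    )
    if not updates:
        break
    await send_device_edus(updates)  # hypothetical transmit step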
+ def test_get_device_updates_by_remote_cross_signing_key_updates(
+ self,
+ ) -> None:
+ """
+ Tests that `get_device_updates_by_remote` limits the length of the return value
+ properly when cross-signing key updates are present.
+ Current behaviour is that the cross-signing key updates will always come in pairs,
+ even if that means leaving an earlier batch one EDU short of the limit.
+ """
+
+ assert self.hs.is_mine_id(
+ "@user_id:test"
+ ), "Test not valid: this MXID should be considered local"
+
+ self.get_success(
+ self.store.set_e2e_cross_signing_key(
+ "@user_id:test",
+ "master",
+ {
+ "keys": {
+ "ed25519:fakeMaster": "aaafakefakefake1AAAAAAAAAAAAAAAAAAAAAAAAAAA="
+ },
+ "signatures": {
+ "@user_id:test": {
+ "ed25519:fake2": "aaafakefakefake2AAAAAAAAAAAAAAAAAAAAAAAAAAA="
+ }
+ },
+ },
+ )
+ )
+ self.get_success(
+ self.store.set_e2e_cross_signing_key(
+ "@user_id:test",
+ "self_signing",
+ {
+ "keys": {
+ "ed25519:fakeSelfSigning": "aaafakefakefake3AAAAAAAAAAAAAAAAAAAAAAAAAAA="
+ },
+ "signatures": {
+ "@user_id:test": {
+ "ed25519:fake4": "aaafakefakefake4AAAAAAAAAAAAAAAAAAAAAAAAAAA="
+ }
+ },
+ },
+ )
+ )
+
+ # Add some device updates with sequential `stream_id`s
+ # Note that the public cross-signing keys occupy the same space as device IDs,
+ # so also notify that those have updated.
+ device_ids = [
+ "device_id1",
+ "device_id2",
+ "fakeMaster",
+ "fakeSelfSigning",
+ ]
+
+ self.get_success(
+ self.store.add_device_change_to_streams(
+ "@user_id:test", device_ids, ["somehost"]
+ )
+ )
+
+ # Get device updates meant for this remote
+ next_stream_id, device_updates = self.get_success(
+ self.store.get_device_updates_by_remote("somehost", -1, limit=3)
+ )
+
+ # Here we expect the device updates for `device_id1` and `device_id2`.
+ # That means we only receive 2 updates this time around.
+ # If we had a higher limit, we would expect to see the pair of
+ # (unstable-prefixed & unprefixed) signing key updates for the device
+ # represented by `fakeMaster` and `fakeSelfSigning`.
+ # Our implementation only sends these two variants together, so we get
+ # a short batch.
+ self.assertEqual(len(device_updates), 2, device_updates)
+
+ # Check the first two devices (device_id1, device_id2) came out.
+ self._check_devices_in_updates(device_ids[:2], device_updates)
+
+ # Get more device updates meant for this remote
+ next_stream_id, device_updates = self.get_success(
+ self.store.get_device_updates_by_remote("somehost", next_stream_id, limit=3)
+ )
+
+ # The next 2 updates should be a cross-signing key update
+ # (the master key update and the self-signing key update are combined into
+ # one 'signing key update', but the cross-signing key update is emitted
+ # twice, once with an unprefixed type and once again with an unstable-prefixed type)
+ # (This is a temporary arrangement for backwards compatibility!)
+ self.assertEqual(len(device_updates), 2, device_updates)
+ self.assertEqual(
+ device_updates[0][0], "m.signing_key_update", device_updates[0]
+ )
+ self.assertEqual(
+ device_updates[1][0], "org.matrix.signing_key_update", device_updates[1]
+ )
+
+ # Check there are no more device updates left.
+ _, device_updates = self.get_success(
+ self.store.get_device_updates_by_remote("somehost", next_stream_id, limit=3)
+ )
+ self.assertEqual(device_updates, [])
+
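For context, the pairing asserted above means one logical cross-signing update leaves the stream as two EDUs with identical content; a hedged sketch (the content fields are an assumption based on MSC1756-style signing key updates, not taken from this diff):

master_key = {"keys": {"ed25519:fakeMaster": "..."}}            # illustrative
self_signing_key = {"keys": {"ed25519:fakeSelfSigning": "..."}} # illustrative
signing_update = {
    "user_id": "@user_id:test",
    "master_key": master_key,
    "self_signing_key": self_signing_key,
}
edus = [
    ("m.signing_key_update", signing_update),           # stable type
    ("org.matrix.signing_key_update", signing_update),  # unstable-prefixed twin
]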
def _check_devices_in_updates(self, expected_device_ids, device_updates):
"""Check that an specific device ids exist in a list of device update EDUs"""
self.assertEqual(len(device_updates), len(expected_device_ids))
diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py
index ecfda7677e0a..632bbc9de73e 100644
--- a/tests/storage/test_event_federation.py
+++ b/tests/storage/test_event_federation.py
@@ -515,17 +515,23 @@ def test_prune_inbound_federation_queue(self):
self.get_success(
self.store.db_pool.simple_insert_many(
table="federation_inbound_events_staging",
+ keys=(
+ "origin",
+ "room_id",
+ "received_ts",
+ "event_id",
+ "event_json",
+ "internal_metadata",
+ ),
values=[
- {
- "origin": "some_origin",
- "room_id": room_id,
- "received_ts": 0,
- "event_id": f"$fake_event_id_{i + 1}",
- "event_json": json_encoder.encode(
- {"prev_events": [f"$fake_event_id_{i}"]}
- ),
- "internal_metadata": "{}",
- }
+ (
+ "some_origin",
+ room_id,
+ 0,
+ f"$fake_event_id_{i + 1}",
+ json_encoder.encode({"prev_events": [f"$fake_event_id_{i}"]}),
+ "{}",
+ )
for i in range(500)
],
desc="test_prune_inbound_federation_queue",