5 Commits

Author SHA1 Message Date
renovate[bot]
baa1a29f76 chore(deps): update dependency systemd_service_manager to v3.1.0-0 2026-03-01 04:10:05 +02:00
renovate[bot]
9d6980a175 chore(deps): update dependency systemd_docker_base to v1.5.0-0 2026-03-01 04:09:58 +02:00
parisni
90bcb1f4ee feat: prune empty dir when migrate to s3 storage
Otherwise, many empty directories are kept
2026-03-01 01:04:22 +02:00
Slavi Pantaleev
46321552b7 docs(changelog): document Synapse S3 prefix wiring behavior change 2026-03-01 00:49:05 +02:00
parisni
0620d6a822 fix: make matrix_synapse_ext_synapse_s3_storage_provider_config_prefix be used 2026-03-01 00:48:59 +02:00
5 changed files with 28 additions and 3 deletions

View File

@@ -1,3 +1,18 @@
# 2026-03-01
## (Potential BC Break) Synapse S3 media prefix is now applied consistently
The `matrix_synapse_ext_synapse_s3_storage_provider_config_prefix` variable is now wired consistently for both:
- the Synapse `s3_storage_provider` module configuration
- the `matrix-synapse-s3-storage-provider-migrate` migration script (`s3_media_upload --prefix`)
Previously, this variable could be set, but was not effectively applied by either of these paths.
**Affects**: users of [synapse-s3-storage-provider](docs/configuring-playbook-synapse-s3-storage-provider.md) who have configured a non-empty `matrix_synapse_ext_synapse_s3_storage_provider_config_prefix` value.
If your bucket data was uploaded without the prefix before this fix, enabling proper prefix usage can make existing objects appear missing until data is migrated/copied to the prefixed key namespace.
# 2026-02-26
## Internal refactor: merged the Synapse reverse-proxy companion role into `matrix-synapse`

View File

@@ -177,6 +177,8 @@ By default, we periodically ensure that all local files are uploaded to S3 and a
- … invoked via the `matrix-synapse-s3-storage-provider-migrate.service` service
- … triggered by the `matrix-synapse-s3-storage-provider-migrate.timer` timer, every day at 05:00
The same `migrate` script also prunes empty directories in the local media repository (`remote_content` and `remote_thumbnail`) after upload/delete operations.
So… you don't need to perform any maintenance yourself.
The schedule is defined using the systemd timer calendar event format. To edit the schedule, add the following configuration to your `vars.yml` file (adapting it to your needs):

View File

@@ -75,10 +75,10 @@
version: v0.19.1-0
name: prometheus_postgres_exporter
- src: git+https://github.com/devture/com.devture.ansible.role.systemd_docker_base.git
version: v1.4.1-0
version: v1.5.0-0
name: systemd_docker_base
- src: git+https://github.com/devture/com.devture.ansible.role.systemd_service_manager.git
version: v3.0.0-1
version: v3.1.0-0
name: systemd_service_manager
- src: git+https://github.com/devture/com.devture.ansible.role.timesync.git
version: v1.1.0-1

View File

@@ -15,7 +15,7 @@ container_id=$(\
{{ arg }} \
{% endfor %}
{{ matrix_synapse_container_image_final }} \
-c 's3_media_upload update-db $UPDATE_DB_DURATION && s3_media_upload --no-progress check-deleted $MEDIA_PATH && s3_media_upload --no-progress upload $MEDIA_PATH $BUCKET --delete --storage-class $STORAGE_CLASS --endpoint-url $ENDPOINT {% if matrix_synapse_ext_synapse_s3_storage_provider_config_sse_customer_enabled %}--sse-customer-algo $SSE_CUSTOMER_ALGO --sse-customer-key $SSE_CUSTOMER_KEY{% endif %}' \
-c 's3_media_upload update-db $UPDATE_DB_DURATION && s3_media_upload --no-progress check-deleted $MEDIA_PATH && s3_media_upload --no-progress upload $MEDIA_PATH $BUCKET --delete --storage-class $STORAGE_CLASS --endpoint-url $ENDPOINT {% if matrix_synapse_ext_synapse_s3_storage_provider_config_prefix %}--prefix $PREFIX {% endif %}{% if matrix_synapse_ext_synapse_s3_storage_provider_config_sse_customer_enabled %}--sse-customer-algo $SSE_CUSTOMER_ALGO --sse-customer-key $SSE_CUSTOMER_KEY{% endif %}' \
)
{# We need to connect to the Postgres network, which should be in this list. #}
@@ -24,3 +24,8 @@ container_id=$(\
{% endfor %}
{{ devture_systemd_docker_base_host_command_docker }} start --attach $container_id
for prune_dir in "{{ matrix_synapse_storage_path }}/{{ matrix_synapse_media_store_directory_name }}/remote_content" "{{ matrix_synapse_storage_path }}/{{ matrix_synapse_media_store_directory_name }}/remote_thumbnail"; do
[ -d "$prune_dir" ] || continue
find "$prune_dir" -depth -mindepth 1 -type d -empty -print -delete
done

View File

@@ -12,6 +12,9 @@ store_remote: {{ matrix_synapse_ext_synapse_s3_storage_provider_store_remote | t
store_synchronous: {{ matrix_synapse_ext_synapse_s3_storage_provider_store_synchronous | to_json }}
config:
bucket: {{ matrix_synapse_ext_synapse_s3_storage_provider_config_bucket | to_json }}
{% if matrix_synapse_ext_synapse_s3_storage_provider_config_prefix %}
prefix: {{ matrix_synapse_ext_synapse_s3_storage_provider_config_prefix | to_json }}
{% endif %}
region_name: {{ matrix_synapse_ext_synapse_s3_storage_provider_config_region_name | to_json }}
endpoint_url: {{ matrix_synapse_ext_synapse_s3_storage_provider_config_endpoint_url | to_json }}
{% if not matrix_synapse_ext_synapse_s3_storage_provider_config_ec2_instance_profile | bool %}