Compare commits
29 Commits
gh/v4.1.4- ... gh/stable
Author | SHA1 | Date |
---|---|---|
Ducky | 1c2f0f97e8 | |
Ducky | fa47e96fa0 | |
Ducky | 73938a75b2 | |
Ducky | f117479a75 | |
Ducky | cd2f2b2fe2 | |
Ducky | f14ac87522 | |
Ducky | 767aa3d66f | |
Ducky | 1642c4127d | |
Ducky | 9bef0d71ea | |
Ducky | ccaafd7157 | |
Ducky | f6625f49c2 | |
Ducky | fa9feece61 | |
Ducky | 7442c4fef4 | |
Ducky | 0124911b89 | |
Astra | 878d76adc5 | |
Claire | 370b8f0b81 | |
Renaud Chaput | b7bf343b26 | |
Emelia Smith | caf1c87ecd | |
Claire | eace7f9fcf | |
Emelia Smith | 2461ffbff9 | |
Claire | b5791487b1 | |
Ducky | 2465f9d180 | |
Claire | ebf4f034c2 | |
Claire | 889102013f | |
Claire | d94a2c8aca | |
Claire | efd066670d | |
Claire | 13ec425b72 | |
Michael Stanclift | 7a99f0744d | |
Claire | 69c8f26946 | |
CHANGELOG.md (+28)
@@ -3,6 +3,34 @@ Changelog
 
 All notable changes to this project will be documented in this file.
 
+## [4.1.6] - 2023-07-28
+
+### Fixed
+
+- Fix memory leak in streaming server ([ThisIsMissEm](https://github.com/mastodon/mastodon/pull/26228))
+- Fix wrong filters sometimes applying in streaming ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/26159), [ThisIsMissEm](https://github.com/mastodon/mastodon/pull/26213), [renchap](https://github.com/mastodon/mastodon/pull/26233))
+- Fix incorrect connect timeout in outgoing requests ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/26116))
+
+## [4.1.5] - 2023-07-21
+
+### Added
+
+- Add check preventing Sidekiq workers from running with Makara configured ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/25850))
+
+### Changed
+
+- Change request timeout handling to use a longer deadline ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/26055))
+
+### Fixed
+
+- Fix moderation interface for remote instances with a .zip TLD ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/25886))
+- Fix remote accounts being possibly persisted to database with incomplete protocol values ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/25886))
+- Fix trending publishers table not rendering correctly on narrow screens ([vmstan](https://github.com/mastodon/mastodon/pull/25945))
+
+### Security
+
+- Fix CSP headers being unintentionally wide ([ClearlyClaire](https://github.com/mastodon/mastodon/pull/26105))
+
 ## [4.1.4] - 2023-07-07
 
 ### Fixed

Dockerfile
@@ -0,0 +1,99 @@
+# syntax=docker/dockerfile:1.4
+# This needs to be bullseye-slim because the Ruby image is built on bullseye-slim
+ARG NODE_VERSION="16.18.1-bullseye-slim"
+
+FROM ghcr.io/moritzheiber/ruby-jemalloc:3.0.6-slim as ruby
+FROM node:${NODE_VERSION} as build
+
+COPY --from=ruby /opt/ruby /opt/ruby
+
+ENV DEBIAN_FRONTEND="noninteractive" \
+    PATH="${PATH}:/opt/ruby/bin"
+
+#SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+WORKDIR /opt/mastodon
+COPY Gemfile* package.json yarn.lock /opt/mastodon/
+
+# hadolint ignore=DL3008
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends build-essential \
+        ca-certificates \
+        git \
+        libicu-dev \
+        libidn11-dev \
+        libpq-dev \
+        libjemalloc-dev \
+        zlib1g-dev \
+        libgdbm-dev \
+        libgmp-dev \
+        libssl-dev \
+        libyaml-0-2 \
+        ca-certificates \
+        libreadline8 \
+        python3 \
+        shared-mime-info && \
+    bundle config set --local deployment 'true' && \
+    bundle config set --local without 'development test' && \
+    bundle config set silence_root_warning true && \
+    bundle install -j"$(nproc)" && \
+    yarn install --pure-lockfile --network-timeout 600000
+
+FROM node:${NODE_VERSION}
+
+ARG UID="991"
+ARG GID="991"
+
+COPY --from=ruby /opt/ruby /opt/ruby
+
+#SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+ENV DEBIAN_FRONTEND="noninteractive" \
+    PATH="${PATH}:/opt/ruby/bin:/opt/mastodon/bin"
+
+# Ignoring these here since we don't want to pin any versions and the Debian image removes apt-get content after use
+# hadolint ignore=DL3008,DL3009
+RUN apt-get update && \
+    echo "Etc/UTC" > /etc/localtime && \
+    groupadd -g "${GID}" mastodon && \
+    useradd -l -u "$UID" -g "${GID}" -m -d /opt/mastodon mastodon && \
+    apt-get -y --no-install-recommends install whois \
+        wget \
+        procps \
+        libssl1.1 \
+        libpq5 \
+        imagemagick \
+        ffmpeg \
+        libjemalloc2 \
+        libicu67 \
+        libidn11 \
+        libyaml-0-2 \
+        file \
+        ca-certificates \
+        tzdata \
+        libreadline8 \
+        tini && \
+    ln -s /opt/mastodon /mastodon
+
+# Note: no cleaning here since Debian does this automatically
+# See the file /etc/apt/apt.conf.d/docker-clean within the Docker image's filesystem
+
+COPY --chown=mastodon:mastodon . /opt/mastodon
+COPY --chown=mastodon:mastodon --from=build /opt/mastodon /opt/mastodon
+
+ENV RAILS_ENV="production" \
+    NODE_ENV="production" \
+    RAILS_SERVE_STATIC_FILES="true" \
+    BIND="0.0.0.0"
+
+# Set the run user
+USER mastodon
+WORKDIR /opt/mastodon
+
+# Precompile assets
+RUN OTP_SECRET=precompile_placeholder SECRET_KEY_BASE=precompile_placeholder rails assets:precompile && \
+    yarn cache clean
+
+# Set the work dir and the container entry point
+ENTRYPOINT ["/usr/bin/tini", "--"]
+EXPOSE 3000 4000

app/javascript/styles/elephant-contrast.scss
@@ -0,0 +1,6 @@
+@import 'contrast/variables';
+@import 'application';
+@import 'contrast/diff';
+@import 'elephant/layout-single-column.scss';
+@import 'elephant/layout-multiple-columns.scss';
+@import 'gh/elephant-mods.scss';

app/javascript/styles/elephant-light.scss
@@ -0,0 +1,6 @@
+@import 'mastodon-light/variables';
+@import 'application';
+@import 'mastodon-light/diff';
+@import 'elephant/layout-single-column.scss';
+@import 'elephant/layout-multiple-columns.scss';
+@import 'gh/elephant-mods.scss';

app/javascript/styles/elephant.scss
@@ -0,0 +1,4 @@
+@import 'application';
+@import 'elephant/layout-single-column.scss';
+@import 'elephant/layout-multiple-columns.scss';
+@import 'gh/elephant-mods.scss';

File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
lib/request.rb
@@ -4,14 +4,22 @@ require 'ipaddr'
 require 'socket'
 require 'resolv'
 
-# Monkey-patch the HTTP.rb timeout class to avoid using a timeout block
+# Use our own timeout class to avoid using HTTP.rb's timeout block
 # around the Socket#open method, since we use our own timeout blocks inside
 # that method
 #
 # Also changes how the read timeout behaves so that it is cumulative (closer
 # to HTTP::Timeout::Global, but still having distinct timeouts for other
 # operation types)
-class HTTP::Timeout::PerOperation
+class PerOperationWithDeadline < HTTP::Timeout::PerOperation
+  READ_DEADLINE = 30
+
+  def initialize(*args)
+    super
+
+    @read_deadline = options.fetch(:read_deadline, READ_DEADLINE)
+  end
+
   def connect(socket_class, host, port, nodelay = false)
     @socket = socket_class.open(host, port)
     @socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1) if nodelay
@@ -24,7 +32,7 @@ class HTTP::Timeout::PerOperation
 
   # Read data from the socket
   def readpartial(size, buffer = nil)
-    @deadline ||= Process.clock_gettime(Process::CLOCK_MONOTONIC) + @read_timeout
+    @deadline ||= Process.clock_gettime(Process::CLOCK_MONOTONIC) + @read_deadline
 
     timeout = false
     loop do
@@ -33,7 +41,8 @@ class HTTP::Timeout::PerOperation
       return :eof if result.nil?
 
       remaining_time = @deadline - Process.clock_gettime(Process::CLOCK_MONOTONIC)
-      raise HTTP::TimeoutError, "Read timed out after #{@read_timeout} seconds" if timeout || remaining_time <= 0
+      raise HTTP::TimeoutError, "Read timed out after #{@read_timeout} seconds" if timeout
+      raise HTTP::TimeoutError, "Read timed out after a total of #{@read_deadline} seconds" if remaining_time <= 0
       return result if result != :wait_readable
 
       # marking the socket for timeout. Why is this not being raised immediately?
@@ -46,7 +55,7 @@ class HTTP::Timeout::PerOperation
       # timeout. Else, the first timeout was a proper timeout.
      # This hack has to be done because io/wait#wait_readable doesn't provide a value for when
       # the socket is closed by the server, and HTTP::Parser doesn't provide the limit for the chunks.
-      timeout = true unless @socket.to_io.wait_readable(remaining_time)
+      timeout = true unless @socket.to_io.wait_readable([remaining_time, @read_timeout].min)
     end
   end
 end
@@ -57,7 +66,7 @@ class Request
   # We enforce a 5s timeout on DNS resolving, 5s timeout on socket opening
   # and 5s timeout on the TLS handshake, meaning the worst case should take
   # about 15s in total
-  TIMEOUT = { connect: 5, read: 10, write: 10 }.freeze
+  TIMEOUT = { connect_timeout: 5, read_timeout: 10, write_timeout: 10, read_deadline: 30 }.freeze
 
   include RoutingHelper
 
@@ -69,6 +78,7 @@ class Request
     @http_client = options.delete(:http_client)
     @allow_local = options.delete(:allow_local)
     @options = options.merge(socket_class: use_proxy? || @allow_local ? ProxySocket : Socket)
+    @options = @options.merge(timeout_class: PerOperationWithDeadline, timeout_options: TIMEOUT)
     @options = @options.merge(proxy_url) if use_proxy?
     @headers = {}
 
@@ -129,7 +139,7 @@ class Request
   end
 
   def http_client
-    HTTP.use(:auto_inflate).timeout(TIMEOUT.dup).follow(max_hops: 3)
+    HTTP.use(:auto_inflate).follow(max_hops: 3)
   end
 end
 
@@ -275,11 +285,11 @@ class Request
     end
 
     until socks.empty?
-      _, available_socks, = IO.select(nil, socks, nil, Request::TIMEOUT[:connect])
+      _, available_socks, = IO.select(nil, socks, nil, Request::TIMEOUT[:connect_timeout])
 
       if available_socks.nil?
         socks.each(&:close)
-        raise HTTP::TimeoutError, "Connect timed out after #{Request::TIMEOUT[:connect]} seconds"
+        raise HTTP::TimeoutError, "Connect timed out after #{Request::TIMEOUT[:connect_timeout]} seconds"
       end
 
       available_socks.each do |sock|

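The comments above distinguish two limits: a per-operation read timeout (how long a single read may stall) and a cumulative read deadline (how long the whole body may take even if each individual read succeeds). A minimal standalone sketch of that loop, assuming a plain connected `Socket`; this is illustrative only, not the patched class itself:

```ruby
require 'socket'
require 'io/wait'

# Illustrative sketch: each individual wait is capped by read_timeout,
# while the reads as a whole are capped by read_deadline.
def read_with_deadline(socket, read_timeout: 10, read_deadline: 30)
  deadline = Process.clock_gettime(Process::CLOCK_MONOTONIC) + read_deadline
  buffer = +''

  loop do
    remaining = deadline - Process.clock_gettime(Process::CLOCK_MONOTONIC)
    raise "read timed out after a total of #{read_deadline} seconds" if remaining <= 0

    # Never sleep past either the per-read timeout or the overall deadline
    ready = socket.wait_readable([remaining, read_timeout].min)
    raise "read timed out after #{read_timeout} seconds" unless ready

    chunk = socket.read_nonblock(4096, exception: false)
    return buffer if chunk.nil?                    # EOF
    buffer << chunk unless chunk == :wait_readable # retry on spurious wakeup
  end
end
```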
app/services/activitypub/process_account_service.rb
@@ -76,6 +76,9 @@ class ActivityPub::ProcessAccountService < BaseService
       @account.suspended_at = domain_block.created_at if auto_suspend?
       @account.suspension_origin = :local if auto_suspend?
       @account.silenced_at = domain_block.created_at if auto_silence?
+
+      set_immediate_protocol_attributes!
+
       @account.save
     end
 

app/views/admin/trends/links/preview_card_providers/index.html.haml
@@ -29,7 +29,7 @@
     - Trends::PreviewCardProviderFilter::KEYS.each do |key|
       = hidden_field_tag key, params[key] if params[key].present?
 
-  .batch-table.optional
+  .batch-table
     .batch-table__toolbar
       %label.batch-table__toolbar__select.batch-checkbox-all
         = check_box_tag :batch_checkbox_all, nil, false

config/initializers/content_security_policy.rb
@@ -3,7 +3,7 @@
 # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy
 
 def host_to_url(str)
-  "http#{Rails.configuration.x.use_https ? 's' : ''}://#{str}".split('/').first if str.present?
+  "http#{Rails.configuration.x.use_https ? 's' : ''}://#{str.split('/').first}" if str.present?
 end
 
 base_host = Rails.configuration.x.web_domain

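The moved `split` is the entire security fix: splitting after interpolation chops the string at the scheme's own slashes, so each extra host collapsed to a bare `https:` source, which CSP treats as "any HTTPS origin". A quick plain-Ruby illustration (hypothetical host value, `use_https` assumed true):

```ruby
str = 'media.example.com/some/path'

# Before: interpolate, then split -- "https://..." splits on the
# scheme's own slashes, and .first is just the scheme
"https://#{str}".split('/').first  # => "https:"

# After: strip any path from the host first, then interpolate
"https://#{str.split('/').first}"  # => "https://media.example.com"
```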
config/initializers/sidekiq.rb
@@ -3,6 +3,11 @@
 require_relative '../../lib/mastodon/sidekiq_middleware'
 
 Sidekiq.configure_server do |config|
+  if Rails.configuration.database_configuration.dig('production', 'adapter') == 'postgresql_makara'
+    STDERR.puts 'ERROR: Database replication is not currently supported in Sidekiq workers. Check your configuration.'
+    exit 1
+  end
+
   config.redis = REDIS_SIDEKIQ_PARAMS
 
   config.server_middleware do |chain|

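`Rails.configuration.database_configuration` is the parsed `config/database.yml`, so the new guard is a plain hash lookup on the production block. A standalone sketch of the same check, with a hypothetical database.yml snippet:

```ruby
require 'yaml'

# Hypothetical database.yml with the Makara replication adapter configured
db_config = YAML.safe_load(<<~YML)
  production:
    adapter: postgresql_makara
YML

if db_config.dig('production', 'adapter') == 'postgresql_makara'
  warn 'ERROR: Database replication is not currently supported in Sidekiq workers. Check your configuration.'
  exit 1
end
```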
config/locales/en.yml
@@ -1597,6 +1597,9 @@ en:
   themes:
     contrast: Mastodon (High contrast)
     default: Mastodon (Dark)
+    elephant: Elephant (Dark)
+    elephant-contrast: Elephant (Contrast)
+    elephant-light: Elephant (Light)
     mastodon-light: Mastodon (Light)
   time:
     formats:

config/routes.rb
@@ -292,7 +292,7 @@ Rails.application.routes.draw do
       end
     end
 
-    resources :instances, only: [:index, :show, :destroy], constraints: { id: /[^\/]+/ } do
+    resources :instances, only: [:index, :show, :destroy], constraints: { id: /[^\/]+/ }, format: 'html' do
       member do
         post :clear_delivery_errors
         post :restart_delivery

config/themes.yml
@@ -1,3 +1,6 @@
 default: styles/application.scss
-contrast: styles/contrast.scss
+elephant: styles/elephant.scss
+elephant-light: styles/elephant-light.scss
+elephant-contrast: styles/elephant-contrast.scss
 mastodon-light: styles/mastodon-light.scss
+contrast: styles/contrast.scss

docker-compose.yml
@@ -56,7 +56,7 @@ services:
 
   web:
     build: .
-    image: ghcr.io/mastodon/mastodon
+    image: ghcr.io/mastodon/mastodon:v4.1.6
     restart: always
     env_file: .env.production
     command: bash -c "rm -f /mastodon/tmp/pids/server.pid; bundle exec rails s -p 3000"
@@ -77,7 +77,7 @@ services:
 
   streaming:
     build: .
-    image: ghcr.io/mastodon/mastodon
+    image: ghcr.io/mastodon/mastodon:v4.1.6
     restart: always
     env_file: .env.production
     command: node ./streaming
@@ -95,7 +95,7 @@ services:
 
   sidekiq:
     build: .
-    image: ghcr.io/mastodon/mastodon
+    image: ghcr.io/mastodon/mastodon:v4.1.6
     restart: always
     env_file: .env.production
     command: bundle exec sidekiq

lib/mastodon/version.rb
@@ -13,7 +13,7 @@ module Mastodon
     end
 
     def patch
-      4
+      6
     end
 
     def flags
@@ -21,7 +21,7 @@ module Mastodon
     end
 
     def suffix
-      '-gh23200'
+      '-gh23240'
     end
 
     def to_a

spec/requests/content_security_policy_spec.rb
@@ -0,0 +1,27 @@
+# frozen_string_literal: true
+
+require 'rails_helper'
+
+describe 'Content-Security-Policy' do
+  it 'sets the expected CSP headers' do
+    allow(SecureRandom).to receive(:base64).with(16).and_return('ZbA+JmE7+bK8F5qvADZHuQ==')
+
+    get '/'
+    expect(response.headers['Content-Security-Policy'].split(';').map(&:strip)).to contain_exactly(
+      "base-uri 'none'",
+      "default-src 'none'",
+      "frame-ancestors 'none'",
+      "font-src 'self' https://cb6e6126.ngrok.io",
+      "img-src 'self' https: data: blob: https://cb6e6126.ngrok.io",
+      "style-src 'self' https://cb6e6126.ngrok.io 'nonce-ZbA+JmE7+bK8F5qvADZHuQ=='",
+      "media-src 'self' https: data: https://cb6e6126.ngrok.io",
+      "frame-src 'self' https:",
+      "manifest-src 'self' https://cb6e6126.ngrok.io",
+      "form-action 'self'",
+      "child-src 'self' blob: https://cb6e6126.ngrok.io",
+      "worker-src 'self' blob: https://cb6e6126.ngrok.io",
+      "connect-src 'self' data: blob: https://cb6e6126.ngrok.io https://cb6e6126.ngrok.io ws://localhost:4000",
+      "script-src 'self' https://cb6e6126.ngrok.io 'wasm-unsafe-eval'"
+    )
+  end
+end

streaming/index.js
@@ -226,9 +226,15 @@ const startWorker = async (workerId) => {
     callbacks.forEach(callback => callback(json));
   };
 
+  /**
+   * @callback SubscriptionListener
+   * @param {ReturnType<parseJSON>} json of the message
+   * @returns void
+   */
+
   /**
    * @param {string} channel
-   * @param {function(string): void} callback
+   * @param {SubscriptionListener} callback
    */
   const subscribe = (channel, callback) => {
     log.silly(`Adding listener for ${channel}`);
@@ -245,7 +251,7 @@ const startWorker = async (workerId) => {
 
   /**
    * @param {string} channel
-   * @param {function(Object<string, any>): void} callback
+   * @param {SubscriptionListener} callback
    */
   const unsubscribe = (channel, callback) => {
     log.silly(`Removing listener for ${channel}`);
@@ -623,51 +629,66 @@ const startWorker = async (workerId) => {
    * @param {string[]} ids
    * @param {any} req
    * @param {function(string, string): void} output
-   * @param {function(string[], function(string): void): void} attachCloseHandler
+   * @param {undefined | function(string[], SubscriptionListener): void} attachCloseHandler
    * @param {boolean=} needsFiltering
-   * @returns {function(object): void}
+   * @returns {SubscriptionListener}
    */
   const streamFrom = (ids, req, output, attachCloseHandler, needsFiltering = false) => {
     const accountId = req.accountId || req.remoteAddress;
 
     log.verbose(req.requestId, `Starting stream from ${ids.join(', ')} for ${accountId}`);
 
-    // Currently message is of type string, soon it'll be Record<string, any>
+    const transmit = (event, payload) => {
+      // TODO: Replace "string"-based delete payloads with object payloads:
+      const encodedPayload = typeof payload === 'object' ? JSON.stringify(payload) : payload;
+
+      log.silly(req.requestId, `Transmitting for ${accountId}: ${event} ${encodedPayload}`);
+      output(event, encodedPayload);
+    };
+
+    // The listener used to process each message off the redis subscription,
+    // message here is an object with an `event` and `payload` property. Some
+    // events also include a queued_at value, but this is being removed shortly.
+    /** @type {SubscriptionListener} */
     const listener = message => {
-      const { event, payload, queued_at } = message;
-
-      const transmit = () => {
-        const now = new Date().getTime();
-        const delta = now - queued_at;
-        const encodedPayload = typeof payload === 'object' ? JSON.stringify(payload) : payload;
-
-        log.silly(req.requestId, `Transmitting for ${accountId}: ${event} ${encodedPayload} Delay: ${delta}ms`);
-        output(event, encodedPayload);
-      };
-
-      // Only messages that may require filtering are statuses, since notifications
-      // are already personalized and deletes do not matter
-      if (!needsFiltering || event !== 'update') {
-        transmit();
+      const { event, payload } = message;
+
+      // Streaming only needs to apply filtering to some channels and only to
+      // some events. This is because majority of the filtering happens on the
+      // Ruby on Rails side when producing the event for streaming.
+      //
+      // The only events that require filtering from the streaming server are
+      // `update` and `status.update`, all other events are transmitted to the
+      // client as soon as they're received (pass-through).
+      //
+      // The channels that need filtering are determined in the function
+      // `channelNameToIds` defined below:
+      if (!needsFiltering || (event !== 'update' && event !== 'status.update')) {
+        transmit(event, payload);
         return;
       }
 
-      const unpackedPayload = payload;
-      const targetAccountIds = [unpackedPayload.account.id].concat(unpackedPayload.mentions.map(item => item.id));
-      const accountDomain = unpackedPayload.account.acct.split('@')[1];
+      // The rest of the logic from here on in this function is to handle
+      // filtering of statuses:
 
-      if (Array.isArray(req.chosenLanguages) && unpackedPayload.language !== null && req.chosenLanguages.indexOf(unpackedPayload.language) === -1) {
-        log.silly(req.requestId, `Message ${unpackedPayload.id} filtered by language (${unpackedPayload.language})`);
+      // Filter based on language:
+      if (Array.isArray(req.chosenLanguages) && payload.language !== null && req.chosenLanguages.indexOf(payload.language) === -1) {
+        log.silly(req.requestId, `Message ${payload.id} filtered by language (${payload.language})`);
         return;
       }
 
       // When the account is not logged in, it is not necessary to confirm the block or mute
       if (!req.accountId) {
-        transmit();
+        transmit(event, payload);
        return;
       }
 
-      pgPool.connect((err, client, done) => {
+      // Filter based on domain blocks, blocks, mutes, or custom filters:
+      const targetAccountIds = [payload.account.id].concat(payload.mentions.map(item => item.id));
+      const accountDomain = payload.account.acct.split('@')[1];
+
+      // TODO: Move this logic out of the message handling loop
+      pgPool.connect((err, client, releasePgConnection) => {
        if (err) {
          log.error(err);
          return;
@@ -682,40 +703,57 @@ const startWorker = async (workerId) => {
           SELECT 1
           FROM mutes
           WHERE account_id = $1
-            AND target_account_id IN (${placeholders(targetAccountIds, 2)})`, [req.accountId, unpackedPayload.account.id].concat(targetAccountIds)),
+            AND target_account_id IN (${placeholders(targetAccountIds, 2)})`, [req.accountId, payload.account.id].concat(targetAccountIds)),
        ];
 
        if (accountDomain) {
          queries.push(client.query('SELECT 1 FROM account_domain_blocks WHERE account_id = $1 AND domain = $2', [req.accountId, accountDomain]));
        }
 
-        if (!unpackedPayload.filtered && !req.cachedFilters) {
+        if (!payload.filtered && !req.cachedFilters) {
          queries.push(client.query('SELECT filter.id AS id, filter.phrase AS title, filter.context AS context, filter.expires_at AS expires_at, filter.action AS filter_action, keyword.keyword AS keyword, keyword.whole_word AS whole_word FROM custom_filter_keywords keyword JOIN custom_filters filter ON keyword.custom_filter_id = filter.id WHERE filter.account_id = $1 AND (filter.expires_at IS NULL OR filter.expires_at > NOW())', [req.accountId]));
        }
 
        Promise.all(queries).then(values => {
-          done();
+          releasePgConnection();
+
+          // Handling blocks & mutes and domain blocks: If one of those applies,
+          // then we don't transmit the payload of the event to the client
          if (values[0].rows.length > 0 || (accountDomain && values[1].rows.length > 0)) {
            return;
          }
 
-          if (!unpackedPayload.filtered && !req.cachedFilters) {
+          // If the payload already contains the `filtered` property, it means
+          // that filtering has been applied on the ruby on rails side, as
+          // such, we don't need to construct or apply the filters in streaming:
+          if (Object.prototype.hasOwnProperty.call(payload, "filtered")) {
+            transmit(event, payload);
+            return;
+          }
+
+          // Handling for constructing the custom filters and caching them on the request
+          // TODO: Move this logic out of the message handling lifecycle
+          if (!req.cachedFilters) {
            const filterRows = values[accountDomain ? 2 : 1].rows;
 
-            req.cachedFilters = filterRows.reduce((cache, row) => {
-              if (cache[row.id]) {
-                cache[row.id].keywords.push([row.keyword, row.whole_word]);
+            req.cachedFilters = filterRows.reduce((cache, filter) => {
+              if (cache[filter.id]) {
+                cache[filter.id].keywords.push([filter.keyword, filter.whole_word]);
              } else {
-                cache[row.id] = {
-                  keywords: [[row.keyword, row.whole_word]],
-                  expires_at: row.expires_at,
-                  repr: {
-                    id: row.id,
-                    title: row.title,
-                    context: row.context,
-                    expires_at: row.expires_at,
-                    filter_action: ['warn', 'hide'][row.filter_action],
+                cache[filter.id] = {
+                  keywords: [[filter.keyword, filter.whole_word]],
+                  expires_at: filter.expires_at,
+                  filter: {
+                    id: filter.id,
+                    title: filter.title,
+                    context: filter.context,
+                    expires_at: filter.expires_at,
+                    // filter.filter_action is the value from the
+                    // custom_filters.action database column, it is an integer
+                    // representing a value in an enum defined by Ruby on Rails:
+                    //
+                    // enum { warn: 0, hide: 1 }
+                    filter_action: ['warn', 'hide'][filter.filter_action],
                  },
                };
              }
@@ -723,6 +761,10 @@ const startWorker = async (workerId) => {
              return cache;
            }, {});
 
+            // Construct the regular expressions for the custom filters: This
+            // needs to be done in a separate loop as the database returns one
+            // filterRow per keyword, so we need all the keywords before
+            // constructing the regular expression
            Object.keys(req.cachedFilters).forEach((key) => {
              req.cachedFilters[key].regexp = new RegExp(req.cachedFilters[key].keywords.map(([keyword, whole_word]) => {
                let expr = keyword.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
@@ -742,31 +784,58 @@ const startWorker = async (workerId) => {
            });
          }
 
-          // Check filters
-          if (req.cachedFilters && !unpackedPayload.filtered) {
-            const status = unpackedPayload;
-            const searchContent = ([status.spoiler_text || '', status.content].concat((status.poll && status.poll.options) ? status.poll.options.map(option => option.title) : [])).concat(status.media_attachments.map(att => att.description)).join('\n\n').replace(/<br\s*\/?>/g, '\n').replace(/<\/p><p>/g, '\n\n');
-            const searchIndex = JSDOM.fragment(searchContent).textContent;
+          // Apply cachedFilters against the payload, constructing a
+          // `filter_results` array of FilterResult entities
+          if (req.cachedFilters) {
+            const status = payload;
+            // TODO: Calculate searchableContent in Ruby on Rails:
+            const searchableContent = ([status.spoiler_text || '', status.content].concat((status.poll && status.poll.options) ? status.poll.options.map(option => option.title) : [])).concat(status.media_attachments.map(att => att.description)).join('\n\n').replace(/<br\s*\/?>/g, '\n').replace(/<\/p><p>/g, '\n\n');
+            const searchableTextContent = JSDOM.fragment(searchableContent).textContent;
 
            const now = new Date();
-            payload.filtered = [];
-            Object.values(req.cachedFilters).forEach((cachedFilter) => {
-              if ((cachedFilter.expires_at === null || cachedFilter.expires_at > now)) {
-                const keyword_matches = searchIndex.match(cachedFilter.regexp);
-                if (keyword_matches) {
-                  payload.filtered.push({
-                    filter: cachedFilter.repr,
-                    keyword_matches,
-                  });
-                }
-              }
-            });
-          }
-
-          transmit();
+            const filter_results = Object.values(req.cachedFilters).reduce((results, cachedFilter) => {
+              // Check the filter hasn't expired before applying:
+              if (cachedFilter.expires_at !== null && cachedFilter.expires_at < now) {
+                return results;
+              }
+
+              // Just in-case JSDOM fails to find textContent in searchableContent
+              if (!searchableTextContent) {
+                return results;
+              }
+
+              const keyword_matches = searchableTextContent.match(cachedFilter.regexp);
+              if (keyword_matches) {
+                // results is an Array of FilterResult; status_matches is always
+                // null as we are only applying the keyword-based custom
+                // filters, not the status-based custom filters.
+                // https://docs.joinmastodon.org/entities/FilterResult/
+                results.push({
+                  filter: cachedFilter.filter,
+                  keyword_matches,
+                  status_matches: null
+                });
+              }
+
+              return results;
+            }, []);
+
+            // Send the payload + the FilterResults as the `filtered` property
+            // to the streaming connection. To reach this code, the `event` must
+            // have been either `update` or `status.update`, meaning the
+            // `payload` is a Status entity, which has a `filtered` property:
+            //
+            // filtered: https://docs.joinmastodon.org/entities/Status/#filtered
+            transmit(event, {
+              ...payload,
+              filtered: filter_results
+            });
+          } else {
+            transmit(event, payload);
+          }
        }).catch(err => {
+          releasePgConnection();
          log.error(err);
-          done();
        });
      });
    };

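The filter cache above maps each filter id to all of its keyword rows, then compiles one union regexp per filter in a second pass, because the query returns one row per keyword. A compact Ruby rendering of that grouping-then-compiling idea, with hypothetical rows (the real code additionally tailors word-boundary handling per keyword):

```ruby
# Hypothetical keyword rows as the database returns them: one row per
# keyword, with the owning filter's attributes repeated on each row
rows = [
  { id: 1, title: 'spoilers', keyword: 'finale',  whole_word: true  },
  { id: 1, title: 'spoilers', keyword: 'ending',  whole_word: true  },
  { id: 2, title: 'crypto',   keyword: 'airdrop', whole_word: false },
]

# Pass 1: group keyword rows under their filter id
cache = rows.group_by { |row| row[:id] }

# Pass 2: compile one union regexp per filter, escaping metacharacters
# and adding word boundaries only for whole-word keywords
regexps = cache.transform_values do |keyword_rows|
  union = keyword_rows.map do |row|
    expr = Regexp.escape(row[:keyword])
    row[:whole_word] ? "\\b#{expr}\\b" : expr
  end.join('|')
  Regexp.new(union, Regexp::IGNORECASE)
end

regexps[1].match?('The FINALE leaked!') # => true
```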
@@ -775,7 +844,7 @@ const startWorker = async (workerId) => {
       subscribe(`${redisPrefix}${id}`, listener);
     });
 
-    if (attachCloseHandler) {
+    if (typeof attachCloseHandler === 'function') {
       attachCloseHandler(ids.map(id => `${redisPrefix}${id}`), listener);
     }
 
@@ -812,12 +881,13 @@ const startWorker = async (workerId) => {
   /**
    * @param {any} req
    * @param {function(): void} [closeHandler]
-   * @return {function(string[]): void}
+   * @returns {function(string[], SubscriptionListener): void}
    */
-  const streamHttpEnd = (req, closeHandler = undefined) => (ids) => {
+  const streamHttpEnd = (req, closeHandler = undefined) => (ids, listener) => {
     req.on('close', () => {
       ids.forEach(id => {
-        unsubscribe(id);
+        unsubscribe(id, listener);
       });
 
       if (closeHandler) {
@@ -1077,7 +1147,7 @@ const startWorker = async (workerId) => {
    * @typedef WebSocketSession
    * @property {any} socket
    * @property {any} request
-   * @property {Object.<string, { listener: function(string): void, stopHeartbeat: function(): void }>} subscriptions
+   * @property {Object.<string, { listener: SubscriptionListener, stopHeartbeat: function(): void }>} subscriptions
    */
 
   /**
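Passing `listener` through to `unsubscribe` is the changelog's streaming memory-leak fix: without naming the exact callback, subscriptions for closed connections stayed registered. A minimal pub/sub sketch of the idea (Ruby, illustrative only):

```ruby
# Illustrative registry: callbacks are stored per channel, so removing a
# subscription must name the exact callback to delete -- otherwise closed
# connections leave their callbacks behind (a leak).
subscriptions = Hash.new { |hash, key| hash[key] = [] }

on_message = ->(json) { puts json }
subscriptions['timeline:1'] << on_message

# On connection close: remove that specific listener, then drop the
# channel entry once no listeners remain
subscriptions['timeline:1'].delete(on_message)
subscriptions.delete('timeline:1') if subscriptions['timeline:1'].empty?
```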