Browse Source

finished nextcloud

feature/startup-from-labels
root 5 years ago
parent
commit
e2d5acb7b1
  1. 74
      bin/pdm-build
  2. 22
      bin/pdm-create
  3. 54
      bin/pdm-launch
  4. 8
      bin/pdm-login
  5. 10
      bin/pdm-shell
  6. 1216
      lib/shflags
  7. 5
      src/debian/Containerfile
  8. 7
      src/debian/build.sh
  9. 33
      src/nextcloud/Containerfile
  10. 46
      src/nextcloud/bugfix/redis.service
  11. 3
      src/nextcloud/resources/crontab
  12. 15
      src/nextcloud/resources/my.config.php
  13. 314
      src/nextcloud/resources/redis.conf

74
bin/pdm-build

@ -1,31 +1,55 @@
#!/bin/bash
# potential flags: custom tag, squash, delete/redo, custom dir, debug (don't delete tmp), custom build image
# todo: configure autostart service
# todo: handle volumes
# Variables
# Functions/variables
quit() {
podman rm -f tmp-$epoch 2>&1 > /dev/null
if [[ $1 == 0 || $FLAGS_debug == $FLAGS_FALSE ]]; then
podman rm -i -f tmp-$epoch 2>&1 > /dev/null
podman rmi tmp:$epoch 2>&1 > /dev/null
fi
exit $1
}
libdir=/tank/local/podman/lib
today=$(date "+%Y-%m-%d-T%H%M")
epoch=$(date "+%s.%3N")
tag=latest
# Handle errors/arguments
if [[ $# -eq 0 ]]; then
echo "Usage: $0 directory [image_name]"
exit 0
fi
# Handle flags
source $libdir/shflags
DEFINE_boolean 'squash' false 'squash newly built layers into a single new layer' 's'
DEFINE_boolean 'debug' false "Don't delete temporary image/container on build fail" 'd'
DEFINE_string 'tag' 'latest' 'Tag (other than date) to assign to the image' 't'
FLAGS_HELP="Usage: $0 [-s] [-d] [-t tag] [directory] [name]
Builds an image from the Containerfile and (optionally) Systemdfile in a
directory passed as the first argument, and names the image after the second
argument. If no first argument is given, the current working directory is
used. If no second argument is given, the image is named after the directory.
"
FLAGS "$@" || exit $?
eval set -- "${FLAGS_ARGV}"
if [[ ! -d $1 ]]; then
echo "Error: directory \"$1\" not found."
# Handle errors/arguments/cases
if [[ $# -gt 2 ]]; then
echo "Error: too many arguments"
echo ""
flags_help
exit 1
fi
cd $1
if [[ -n $1 ]]; then
directory=$1
else
directory=$(pwd)
fi
if [[ ! -d $directory ]]; then
echo "Error: directory \"$directory\" not found"
echo ""
flags_help
exit 1
else
cd $directory
fi
if [[ -n $2 ]]; then
name=$2
@ -33,25 +57,31 @@ else
name=$(basename $(pwd))
fi
if [[ $FLAGS_squash == $FLAGS_TRUE ]]; then
buildopts="--squash"
else
buildopts=""
fi
# Main
# build image
echo "Building container ..."
podman build -f Containerfile -t tmp:$epoch || quit 2
podman build -f Containerfile -t tmp:$epoch $buildopts || quit $?
# start container
echo "Creating container ..."
podman create --name tmp-$epoch tmp:$epoch || quit 2
podman start tmp-$epoch || quit 2
podman create --name tmp-$epoch tmp:$epoch || quit $?
podman start tmp-$epoch || quit $?
# Systemdfile is for commands that need systemd to execute
echo "Running build steps that require systemd ..."
podman exec tmp-$epoch bash -c "if [ -f /root/Systemdfile ]; then /root/Systemdfile; fi" || quit 2
podman exec tmp-$epoch bash -c "if [ -f /root/Systemdfile ]; then /root/Systemdfile; fi" || quit $?
# commit finalized container state to image
echo "Committing container to image ..."
podman commit tmp-$epoch $name:$today || quit 2
podman commit tmp-$epoch $name:$today || quit $?
# tag with latest tag
podman tag $name:$today $name:$tag
echo "Finished!"
podman tag $name:$today $name:$FLAGS_tag
echo "Done!"
quit 0

22
bin/pdm-create

@ -1,22 +0,0 @@
#!/bin/bash
# Create and start a container from an image, then report its IP address.
# Usage: pdm-create image [name]
# If no name is given, the container is named after the image.
# potential flags: use other deploy config
if [[ -z $1 ]]; then
echo "Usage: $0 image [name]"
exit 1
fi
image=$1
if [[ -n $2 ]]; then
name=$2
else
name=$image
fi
podman create --name $name $image
podman start $name
# get container IP
printf "Container IP is: "
# BUG FIX: previously inspected "$cont", an undefined variable (always
# empty); the container we just created is "$name".
podman inspect -f '{{ .NetworkSettings.IPAddress }}' $name

54
bin/pdm-launch

@ -0,0 +1,54 @@
#!/bin/bash
# Create and start a container from an image, optionally applying deploy
# options (podman create flags) stored in an image metadata label.
# potential flags: deploy config other than default, overwrite
# Variables
libdir=/tank/local/podman/lib
# Handle flags
source $libdir/shflags
DEFINE_boolean 'overwrite' false 'Overwrite container if one with same name already exists.' 'o'
DEFINE_boolean 'config' false "Automatically configure container with deploy options stored in image metadata." 'c'
DEFINE_string 'deployopts' 'deployopts' 'Image metadata label from which to get the deploy options.' 'd'
FLAGS_HELP="Usage: $0 [-o] [-c] [-d label] image [name]
Creates and starts a container from the specified image. If a second
argument is given, the container name is set to that string. Otherwise, the
container is given the same name as the image.
"
FLAGS "$@" || exit $?
eval set -- "${FLAGS_ARGV}"
if [[ -n $1 ]]; then
image=$1
else
echo "Error: need image name"
echo ""
flags_help
exit 1
fi
if [[ -n $2 ]]; then
name=$2
else
name=$image
fi
if [[ $FLAGS_config == $FLAGS_TRUE ]]; then
echo "Getting deploy options from image metadata label \"$FLAGS_deployopts\" ..."
deployopts=$(podman image inspect -f "{{ .Config.Labels.${FLAGS_deployopts} }}" $image)
if [[ $deployopts == "<no value>" ]]; then
echo "Error: image metadata label \"$FLAGS_deployopts\" is empty or nonexistent."
exit 2
fi
else
deployopts=""
fi
# BUG FIX: compare against $FLAGS_TRUE. shflags booleans are the strings
# "0"/"1", so a bare [[ $FLAGS_overwrite ]] is always true and removed any
# existing container even when -o was not given.
if [[ $FLAGS_overwrite == $FLAGS_TRUE ]]; then
podman rm -i -f $name
fi
# $deployopts is intentionally unquoted: it holds whitespace-separated
# podman options that must word-split.
# Check exit status, matching the "|| quit $?" convention in pdm-build.
podman create --name $name $deployopts $image || exit $?
podman start $name || exit $?
echo "Done!"

8
bin/pdm-login

@ -1,8 +0,0 @@
#!/bin/bash
if [[ -z $1 ]]; then
echo "Usage: $0 container"
exit 1
fi
podman exec -it $1 su -l root

10
bin/pdm-shell

@ -0,0 +1,10 @@
#!/bin/bash
# Attach an interactive bash shell to the named running container.
# With no argument, -h, or --help, print usage and exit non-zero.
case "$1" in
""|"-h"|"--help")
echo "Usage: $0 container
Start a shell on the given container, and connect to it."
exit 1
;;
esac
podman exec -it $1 bash

1216
lib/shflags

File diff suppressed because it is too large

5
src/debian/Containerfile

@ -1,10 +1,9 @@
ARG FROM_IMAGE="debian:stable"
FROM ${FROM_IMAGE}
FROM debian:stable
CMD [ "/sbin/init" ]
ENTRYPOINT [ "/sbin/init" ]
# We can't use timedatectl because systemd isn't available
# during the build process, so we have to set it manually
# during the build process, so we have to set the timezone manually
ENV TZ=US/Central
RUN rm /etc/localtime && \
ln -s /usr/share/zoneinfo/$TZ /etc/localtime && \

7
src/debian/build.sh

@ -1,7 +0,0 @@
#!/bin/bash
# Variables
today=$(date "+%Y-%m-%d-T%H%M")
proj=debian
podman build -f Containerfile -t $proj:$today -t $proj:latest --squash

33
src/nextcloud/Containerfile

@ -1,14 +1,15 @@
###
### Meta Information
###
ARG FROM_IMAGE="localhost/debian"
FROM ${FROM_IMAGE}
FROM localhost/debian
# deploy options
# -p (port) and -v (volume) both go host:container
LABEL deploy.default="-p 10080:80 \
LABEL deployopts="-p 4380:80 \
-v /tank/files/user/mar:/vol/files/mar/files \
-v /tank/files/db/nextcloud:/vol/db"
# make directories that we will be mounting into
RUN mkdir -p /vol/files/mar/files /vol/db
# php and postgres versions. will depend on version of debian we are running
ARG phpv=7.3
@ -25,7 +26,7 @@ ENV DBNAME=nextcloud
# install packages we want
RUN apt update -y && apt install -y systemd sudo wget apache2 php-fpm \
php-gd php-zip php-pgsql php-curl php-mbstring php-intl php-imagick \
php-xml php-json redis-server php-redis postgresql postgresql-doc \
php-xml php-json redis php-redis postgresql postgresql-doc \
unzip php-ldap
# this is a bug workaround b/c testing is currently between versions of php. should be removed ideally
@ -34,10 +35,7 @@ RUN update-alternatives --set php /usr/bin/php7.3
# change www-data's UID to the file owner UID
RUN usermod --uid 5000 www-data && \
groupmod --gid 5000 www-data && \
chown -R www-data:www-data /var/www
# make directories that we will be mounting into
RUN mkdir -p /vol/files/mar/files /vol/database && chown -R www-data:www-data /vol
chown -R www-data:www-data /var/www /vol
# copy our custom scripts
COPY resources/bin/ /usr/local/bin/
@ -70,7 +68,7 @@ COPY resources/php/www.conf /etc/php/${phpv}/fpm/pool.d/
###
# copy redis config
COPY --chown=redis:redis resources/redis.conf /etc/redis/
COPY --chown=redis:redis resources/redis.conf /etc/redis/redis.conf
# add www-data to redis group so it can use the socket
RUN usermod -a -G redis www-data
@ -89,22 +87,31 @@ COPY --chown=postgres:postgres resources/pg_hba.conf /etc/postgresql/${psqlv}/ma
# download nextcloud
WORKDIR /var/www/html
RUN wget https://download.nextcloud.com/server/releases/latest.zip && \
echo "Unzipping ..." && \
unzip -q latest.zip && \
chown -R www-data:www-data nextcloud && \
rm latest.zip
# copy nextcloud configuration file
# copy nextcloud config
COPY --chown=www-data:www-data resources/my.config.php nextcloud/config/
###
### Crontab
###
WORKDIR /root
COPY resources/crontab .
RUN crontab -u www-data crontab && rm crontab
COPY resources/crontab /root/
# the sed command is needed to workaround a bug in cron
RUN sed -i '/session required pam_loginuid.so/c\#session required pam_loginuid.so' /etc/pam.d/cron && \
crontab -u www-data /root/crontab
###
### Systemdfile
###
COPY Systemdfile /root/
RUN chmod +x /root/Systemdfile
###
### Bugfix
###
# push the fixed systemd file for redis
COPY bugfix/redis.service /etc/systemd/system/redis.service

46
src/nextcloud/bugfix/redis.service

@ -0,0 +1,46 @@
[Unit]
Description=Advanced key-value store
After=network.target
Documentation=http://redis.io/documentation, man:redis-server(1)
[Service]
Type=forking
ExecStart=/usr/bin/redis-server /etc/redis/redis.conf
ExecStop=/bin/kill -s TERM $MAINPID
PIDFile=/run/redis/redis-server.pid
TimeoutStopSec=0
Restart=always
User=redis
Group=redis
RuntimeDirectory=redis
RuntimeDirectoryMode=2755
UMask=007
#PrivateTmp=yes
LimitNOFILE=65535
#PrivateDevices=yes
#ProtectHome=yes
#ReadOnlyDirectories=/
#ReadWritePaths=-/var/lib/redis
#ReadWritePaths=-/var/log/redis
#ReadWritePaths=-/var/run/redis
NoNewPrivileges=true
CapabilityBoundingSet=CAP_SETGID CAP_SETUID CAP_SYS_RESOURCE
MemoryDenyWriteExecute=true
#ProtectKernelModules=true
#ProtectKernelTunables=true
#ProtectControlGroups=true
RestrictRealtime=true
RestrictNamespaces=true
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
# redis-server can write to its own config file when in cluster mode so we
# permit writing there by default. If you are not using this feature, it is
# recommended that you replace the following lines with "ProtectSystem=full".
#ProtectSystem=true
#ReadWritePaths=-/etc/redis
[Install]
WantedBy=multi-user.target
Alias=redis.service

3
src/nextcloud/resources/crontab

@ -1,7 +1,8 @@
PATH=/usr/local/bin:/bin:/usr/bin
# m h dom mon dow command
# Run Nextcloud cron tasks every 5 minutes
*/5 * * * * php -f /var/www/html/nextcloud/cron.php
# Dump database every hour
10 * * * * /usr/local/bin/maint 01 01
10 * * * * maint 01 01
# Scan for new files every 15 minutes
*/15 * * * * nc-occ files:scan --all

15
src/nextcloud/resources/my.config.php

@ -1,22 +1,13 @@
<?php
$CONFIG = array(
/** Setup **/
'datadirectory' => '/srv/nextcloud/files/',
/** Basic **/
'htaccess.RewriteBase' => '/',
/** Database **/
'dbtype' => 'pgsql',
'dbname' => 'nextcloud',
'dbuser' => 'ncadmin',
'dbpassword' => '',
'dbhost' => '/var/run/postgresql',
'dbtableprefix' => 'oc_',
/** Network **/
'trusted_domains' =>
array (
0 => 'nextcloud.lxd',
0 => 'medusa.casa.alemor.org',
),
'overwriteprotocol' => 'http',
'overwritehost' => 'medusa.casa.alemor.org',
@ -24,7 +15,6 @@ $CONFIG = array(
'overwrite.cli.url' => 'http://medusa.casa.alemor.org/nextcloud/',
/** Memory Caching **/
/**
'memcache.local' => '\\OC\\Memcache\\Redis',
'memcache.distributed' => '\\OC\\Memcache\\Redis',
'memcache.locking' => '\\OC\\Memcache\\Redis',
@ -35,5 +25,4 @@ $CONFIG = array(
'port' => 0,
'timeout' => 0.0,
),
**/
);

314
src/nextcloud/resources/redis.conf

@ -59,7 +59,7 @@
# internet, binding to all the interfaces is dangerous and will expose the
# instance to everybody on the internet. So by default we uncomment the
# following bind directive, that will force Redis to listen only into
# the IPv4 lookback interface address (this means Redis will be able to
# the IPv4 loopback interface address (this means Redis will be able to
# accept connections only from clients running into the same computer it
# is running).
#
@ -264,57 +264,64 @@ dir /var/lib/redis
################################# REPLICATION #################################
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# Master-Replica replication. Use replicaof to make a Redis instance a copy of
# another Redis server. A few things to understand ASAP about Redis replication.
#
# +------------------+ +---------------+
# | Master | ---> | Replica |
# | (receive writes) | | (exact copy) |
# +------------------+ +---------------+
#
# 1) Redis replication is asynchronous, but you can configure a master to
# stop accepting writes if it appears to be not connected with at least
# a given number of slaves.
# 2) Redis slaves are able to perform a partial resynchronization with the
# a given number of replicas.
# 2) Redis replicas are able to perform a partial resynchronization with the
# master if the replication link is lost for a relatively small amount of
# time. You may want to configure the replication backlog size (see the next
# sections of this file) with a sensible value depending on your needs.
# 3) Replication is automatic and does not need user intervention. After a
# network partition slaves automatically try to reconnect to masters
# network partition replicas automatically try to reconnect to masters
# and resynchronize with them.
#
# slaveof <masterip> <masterport>
# replicaof <masterip> <masterport>
# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# directive below) it is possible to tell the replica to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
# refuse the replica request.
#
# masterauth <master-password>
# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
# When a replica loses its connection with the master, or when the replication
# is still in progress, the replica can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# 2) if replica-serve-stale-data is set to 'no' the replica will reply with
# an error "SYNC with master in progress" to all the kind of commands
# but to INFO and SLAVEOF.
# but to INFO, replicaOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG,
# SUBSCRIBE, UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB,
# COMMAND, POST, HOST: and LATENCY.
#
slave-serve-stale-data yes
replica-serve-stale-data yes
# You can configure a slave instance to accept writes or not. Writing against
# a slave instance may be useful to store some ephemeral data (because data
# written on a slave will be easily deleted after resync with the master) but
# You can configure a replica instance to accept writes or not. Writing against
# a replica instance may be useful to store some ephemeral data (because data
# written on a replica will be easily deleted after resync with the master) but
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
# Since Redis 2.6 by default slaves are read-only.
# Since Redis 2.6 by default replicas are read-only.
#
# Note: read only slaves are not designed to be exposed to untrusted clients
# Note: read only replicas are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
# Still a read only slave exports by default all the administrative commands
# Still a read only replica exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
# security of read only slaves using 'rename-command' to shadow all the
# security of read only replicas using 'rename-command' to shadow all the
# administrative / dangerous commands.
slave-read-only yes
replica-read-only yes
# Replication SYNC strategy: disk or socket.
#
@ -322,25 +329,25 @@ slave-read-only yes
# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
# -------------------------------------------------------
#
# New slaves and reconnecting slaves that are not able to continue the replication
# New replicas and reconnecting replicas that are not able to continue the replication
# process just receiving differences, need to do what is called a "full
# synchronization". An RDB file is transmitted from the master to the slaves.
# synchronization". An RDB file is transmitted from the master to the replicas.
# The transmission can happen in two different ways:
#
# 1) Disk-backed: The Redis master creates a new process that writes the RDB
# file on disk. Later the file is transferred by the parent
# process to the slaves incrementally.
# process to the replicas incrementally.
# 2) Diskless: The Redis master creates a new process that directly writes the
# RDB file to slave sockets, without touching the disk at all.
# RDB file to replica sockets, without touching the disk at all.
#
# With disk-backed replication, while the RDB file is generated, more slaves
# With disk-backed replication, while the RDB file is generated, more replicas
# can be queued and served with the RDB file as soon as the current child producing
# the RDB file finishes its work. With diskless replication instead once
# the transfer starts, new slaves arriving will be queued and a new transfer
# the transfer starts, new replicas arriving will be queued and a new transfer
# will start when the current one terminates.
#
# When diskless replication is used, the master waits a configurable amount of
# time (in seconds) before starting the transfer in the hope that multiple slaves
# time (in seconds) before starting the transfer in the hope that multiple replicas
# will arrive and the transfer can be parallelized.
#
# With slow disks and fast (large bandwidth) networks, diskless replication
@ -349,140 +356,140 @@ repl-diskless-sync no
# When diskless replication is enabled, it is possible to configure the delay
# the server waits in order to spawn the child that transfers the RDB via socket
# to the slaves.
# to the replicas.
#
# This is important since once the transfer starts, it is not possible to serve
# new slaves arriving, that will be queued for the next RDB transfer, so the server
# waits a delay in order to let more slaves arrive.
# new replicas arriving, that will be queued for the next RDB transfer, so the server
# waits a delay in order to let more replicas arrive.
#
# The delay is specified in seconds, and by default is 5 seconds. To disable
# it entirely just set it to 0 seconds and the transfer will start ASAP.
repl-diskless-sync-delay 5
# Slaves send PINGs to server in a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# Replicas send PINGs to server in a predefined interval. It's possible to change
# this interval with the repl_ping_replica_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10
# repl-ping-replica-period 10
# The following option sets the replication timeout for:
#
# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
# 2) Master timeout from the point of view of slaves (data, pings).
# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
# 1) Bulk transfer I/O during SYNC, from the point of view of replica.
# 2) Master timeout from the point of view of replicas (data, pings).
# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings).
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
# every time there is low traffic between the master and the slave.
# specified for repl-ping-replica-period otherwise a timeout will be detected
# every time there is low traffic between the master and the replica.
#
# repl-timeout 60
# Disable TCP_NODELAY on the slave socket after SYNC?
# Disable TCP_NODELAY on the replica socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
# less bandwidth to send data to slaves. But this can add a delay for
# the data to appear on the slave side, up to 40 milliseconds with
# less bandwidth to send data to replicas. But this can add a delay for
# the data to appear on the replica side, up to 40 milliseconds with
# Linux kernels using a default configuration.
#
# If you select "no" the delay for data to appear on the slave side will
# If you select "no" the delay for data to appear on the replica side will
# be reduced but more bandwidth will be used for replication.
#
# By default we optimize for low latency, but in very high traffic conditions
# or when the master and slaves are many hops away, turning this to "yes" may
# or when the master and replicas are many hops away, turning this to "yes" may
# be a good idea.
repl-disable-tcp-nodelay no
# Set the replication backlog size. The backlog is a buffer that accumulates
# slave data when slaves are disconnected for some time, so that when a slave
# replica data when replicas are disconnected for some time, so that when a replica
# wants to reconnect again, often a full resync is not needed, but a partial
# resync is enough, just passing the portion of data the slave missed while
# resync is enough, just passing the portion of data the replica missed while
# disconnected.
#
# The bigger the replication backlog, the longer the time the slave can be
# The bigger the replication backlog, the longer the time the replica can be
# disconnected and later be able to perform a partial resynchronization.
#
# The backlog is only allocated once there is at least a slave connected.
# The backlog is only allocated once there is at least a replica connected.
#
# repl-backlog-size 1mb
# After a master has no longer connected slaves for some time, the backlog
# After a master has no longer connected replicas for some time, the backlog
# will be freed. The following option configures the amount of seconds that
# need to elapse, starting from the time the last slave disconnected, for
# need to elapse, starting from the time the last replica disconnected, for
# the backlog buffer to be freed.
#
# Note that slaves never free the backlog for timeout, since they may be
# Note that replicas never free the backlog for timeout, since they may be
# promoted to masters later, and should be able to correctly "partially
# resynchronize" with the slaves: hence they should always accumulate backlog.
# resynchronize" with the replicas: hence they should always accumulate backlog.
#
# A value of 0 means to never release the backlog.
#
# repl-backlog-ttl 3600
# The slave priority is an integer number published by Redis in the INFO output.
# It is used by Redis Sentinel in order to select a slave to promote into a
# The replica priority is an integer number published by Redis in the INFO output.
# It is used by Redis Sentinel in order to select a replica to promote into a
# master if the master is no longer working correctly.
#
# A slave with a low priority number is considered better for promotion, so
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
# A replica with a low priority number is considered better for promotion, so
# for instance if there are three replicas with priority 10, 100, 25 Sentinel will
# pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the slave as not able to perform the
# role of master, so a slave with priority of 0 will never be selected by
# However a special priority of 0 marks the replica as not able to perform the
# role of master, so a replica with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
slave-priority 100
replica-priority 100
# It is possible for a master to stop accepting writes if there are less than
# N slaves connected, having a lag less or equal than M seconds.
# N replicas connected, having a lag less or equal than M seconds.
#
# The N slaves need to be in "online" state.
# The N replicas need to be in "online" state.
#
# The lag in seconds, that must be <= the specified value, is calculated from
# the last ping received from the slave, that is usually sent every second.
# the last ping received from the replica, that is usually sent every second.
#
# This option does not GUARANTEE that N replicas will accept the write, but
# will limit the window of exposure for lost writes in case not enough slaves
# will limit the window of exposure for lost writes in case not enough replicas
# are available, to the specified number of seconds.
#
# For example to require at least 3 slaves with a lag <= 10 seconds use:
# For example to require at least 3 replicas with a lag <= 10 seconds use:
#
# min-slaves-to-write 3
# min-slaves-max-lag 10
# min-replicas-to-write 3
# min-replicas-max-lag 10
#
# Setting one or the other to 0 disables the feature.
#
# By default min-slaves-to-write is set to 0 (feature disabled) and
# min-slaves-max-lag is set to 10.
# By default min-replicas-to-write is set to 0 (feature disabled) and
# min-replicas-max-lag is set to 10.
# A Redis master is able to list the address and port of the attached
# slaves in different ways. For example the "INFO replication" section
# replicas in different ways. For example the "INFO replication" section
# offers this information, which is used, among other tools, by
# Redis Sentinel in order to discover slave instances.
# Redis Sentinel in order to discover replica instances.
# Another place where this info is available is in the output of the
# "ROLE" command of a master.
#
# The listed IP and address normally reported by a slave is obtained
# The listed IP and address normally reported by a replica is obtained
# in the following way:
#
# IP: The address is auto detected by checking the peer address
# of the socket used by the slave to connect with the master.
# of the socket used by the replica to connect with the master.
#
# Port: The port is communicated by the slave during the replication
# handshake, and is normally the port that the slave is using to
# list for connections.
# Port: The port is communicated by the replica during the replication
# handshake, and is normally the port that the replica is using to
# listen for connections.
#
# However when port forwarding or Network Address Translation (NAT) is
# used, the slave may be actually reachable via different IP and port
# pairs. The following two options can be used by a slave in order to
# used, the replica may be actually reachable via different IP and port
# pairs. The following two options can be used by a replica in order to
# report to its master a specific set of IP and port, so that both INFO
# and ROLE will report those values.
#
# There is no need to use both the options if you need to override just
# the port or the IP address.
#
# slave-announce-ip 5.5.5.5
# slave-announce-port 1234
# replica-announce-ip 5.5.5.5
# replica-announce-port 1234
################################## SECURITY ###################################
@ -516,7 +523,7 @@ slave-priority 100
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to slaves may cause problems.
# AOF file or transmitted to replicas may cause problems.
################################### CLIENTS ####################################
@ -545,15 +552,15 @@ slave-priority 100
# This option is usually useful when using Redis as an LRU or LFU cache, or to
# set a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have slaves attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the slaves are subtracted
# WARNING: If you have replicas attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the replicas are subtracted
# from the used memory count, so that network problems / resyncs will
# not trigger a loop where keys are evicted, and in turn the output
# buffer of slaves is full with DELs of keys evicted triggering the deletion
# buffer of replicas is full with DELs of keys evicted triggering the deletion
# of more keys, and so forth until the database is completely emptied.
#
# In short... if you have slaves attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for slave
# In short... if you have replicas attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for replica
# output buffers (but this is not needed if the policy is 'noeviction').
#
# maxmemory <bytes>
@ -600,6 +607,26 @@ slave-priority 100
#
# maxmemory-samples 5
# Starting from Redis 5, by default a replica will ignore its maxmemory setting
# (unless it is promoted to master after a failover or manually). It means
# that the eviction of keys will be just handled by the master, sending the
# DEL commands to the replica as keys evict in the master side.
#
# This behavior ensures that masters and replicas stay consistent, and is usually
# what you want, however if your replica is writable, or you want the replica to have
# a different memory setting, and you are sure all the writes performed to the
# replica are idempotent, then you may change this default (but be sure to understand
# what you are doing).
#
# Note that since the replica by default does not evict, it may end using more
# memory than the one set via maxmemory (there are certain buffers that may
# be larger on the replica, or data structures may sometimes take more memory and so
# forth). So make sure you monitor your replicas and make sure they have enough
# memory to never hit a real out-of-memory condition before the master hits
# the configured maxmemory setting.
#
# replica-ignore-maxmemory yes
############################# LAZY FREEING ####################################
# Redis has two primitives to delete keys. One is called DEL and is a blocking
@ -635,9 +662,9 @@ slave-priority 100
# or SORT with STORE option may delete existing keys. The SET command
# itself removes any old content of the specified key in order to replace
# it with the specified string.
# 4) During replication, when a slave performs a full resynchronization with
# 4) During replication, when a replica performs a full resynchronization with
# its master, the content of the whole database is removed in order to
# load the RDB file just transfered.
# load the RDB file just transferred.
#
# In all the above cases the default is to delete objects in a blocking way,
# like if DEL was called. However you can configure each case specifically
@ -647,7 +674,7 @@ slave-priority 100
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
slave-lazy-flush no
replica-lazy-flush no
############################## APPEND ONLY MODE ###############################
@ -776,10 +803,7 @@ aof-load-truncated yes
# When loading Redis recognizes that the AOF file starts with the "REDIS"
# string and loads the prefixed RDB file, and continues loading the AOF
# tail.
#
# This is currently turned off by default in order to avoid the surprise
# of a format change, but will at some point be used as the default.
aof-use-rdb-preamble no
aof-use-rdb-preamble yes
################################ LUA SCRIPTING ###############################
@ -827,42 +851,42 @@ lua-time-limit 5000
#
# cluster-node-timeout 15000
# A slave of a failing master will avoid to start a failover if its data
# A replica of a failing master will avoid to start a failover if its data
# looks too old.
#
# There is no simple way for a slave to actually have an exact measure of
# There is no simple way for a replica to actually have an exact measure of
# its "data age", so the following two checks are performed:
#
# 1) If there are multiple slaves able to failover, they exchange messages
# in order to try to give an advantage to the slave with the best
# 1) If there are multiple replicas able to failover, they exchange messages
# in order to try to give an advantage to the replica with the best
# replication offset (more data from the master processed).
# Slaves will try to get their rank by offset, and apply to the start
# Replicas will try to get their rank by offset, and apply to the start
# of the failover a delay proportional to their rank.
#
# 2) Every single slave computes the time of the last interaction with
# 2) Every single replica computes the time of the last interaction with
# its master. This can be the last ping or command received (if the master
# is still in the "connected" state), or the time that elapsed since the
# disconnection with the master (if the replication link is currently down).
# If the last interaction is too old, the slave will not try to failover
# If the last interaction is too old, the replica will not try to failover
# at all.
#
# The point "2" can be tuned by user. Specifically a slave will not perform
# The point "2" can be tuned by user. Specifically a replica will not perform
# the failover if, since the last interaction with the master, the time
# elapsed is greater than:
#
# (node-timeout * slave-validity-factor) + repl-ping-slave-period
# (node-timeout * replica-validity-factor) + repl-ping-replica-period
#
# So for example if node-timeout is 30 seconds, and the slave-validity-factor
# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the
# slave will not try to failover if it was not able to talk with the master
# So for example if node-timeout is 30 seconds, and the replica-validity-factor
# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the
# replica will not try to failover if it was not able to talk with the master
# for longer than 310 seconds.
#
# A large slave-validity-factor may allow slaves with too old data to failover
# A large replica-validity-factor may allow replicas with too old data to failover
# a master, while a too small value may prevent the cluster from being able to
# elect a slave at all.
# elect a replica at all.
#
# For maximum availability, it is possible to set the slave-validity-factor
# to a value of 0, which means, that slaves will always try to failover the
# For maximum availability, it is possible to set the replica-validity-factor
# to a value of 0, which means, that replicas will always try to failover the
# master regardless of the last time they interacted with the master.
# (However they'll always try to apply a delay proportional to their
# offset rank).
@ -870,22 +894,22 @@ lua-time-limit 5000
# Zero is the only value able to guarantee that when all the partitions heal
# the cluster will always be able to continue.
#
# cluster-slave-validity-factor 10
# cluster-replica-validity-factor 10
# Cluster slaves are able to migrate to orphaned masters, that are masters
# that are left without working slaves. This improves the cluster ability
# Cluster replicas are able to migrate to orphaned masters, that are masters
# that are left without working replicas. This improves the cluster ability
# to resist to failures as otherwise an orphaned master can't be failed over
# in case of failure if it has no working slaves.
# in case of failure if it has no working replicas.
#
# Slaves migrate to orphaned masters only if there are still at least a
# given number of other working slaves for their old master. This number
# is the "migration barrier". A migration barrier of 1 means that a slave
# will migrate only if there is at least 1 other working slave for its master
# and so forth. It usually reflects the number of slaves you want for every
# Replicas migrate to orphaned masters only if there are still at least a
# given number of other working replicas for their old master. This number
# is the "migration barrier". A migration barrier of 1 means that a replica
# will migrate only if there is at least 1 other working replica for its master
# and so forth. It usually reflects the number of replicas you want for every
# master in your cluster.
#
# Default is 1 (slaves migrate only if their masters remain with at least
# one slave). To disable migration just set it to a very large value.
# Default is 1 (replicas migrate only if their masters remain with at least
# one replica). To disable migration just set it to a very large value.
# A value of 0 can be set but is useful only for debugging and dangerous
# in production.
#
@ -904,7 +928,7 @@ lua-time-limit 5000
#
# cluster-require-full-coverage yes
# This option, when set to yes, prevents slaves from trying to failover its
# This option, when set to yes, prevents replicas from trying to failover its
# master during master failures. However the master can still perform a
# manual failover, if forced to do so.
#
@ -912,7 +936,7 @@ lua-time-limit 5000
# data center operations, where we want one side to never be promoted if not
# in the case of a total DC failure.
#
# cluster-slave-no-failover no
# cluster-replica-no-failover no
# In order to setup your cluster make sure to read the documentation
# available at http://redis.io web site.
@ -1107,6 +1131,17 @@ zset-max-ziplist-value 64
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
hll-sparse-max-bytes 3000
# Streams macro node max size / items. The stream data structure is a radix
# tree of big nodes that encode multiple items inside. Using this configuration
# it is possible to configure how big a single node can be in bytes, and the
# maximum number of items it may contain before switching to a new node when
# appending new stream entries. If any of the following settings are set to
# zero, the limit is ignored, so for instance it is possible to set just a
# max entries limit by setting max-bytes to 0 and max-entries to the desired
# value.
stream-node-max-bytes 4096
stream-node-max-entries 100
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
@ -1135,7 +1170,7 @@ activerehashing yes
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients including MONITOR clients
# slave -> slave clients
# replica -> replica clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
@ -1156,12 +1191,12 @@ activerehashing yes
# asynchronous clients may create a scenario where data is requested faster
# than it can be read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
# Instead there is a default limit for pubsub and replica clients, since
# subscribers and replicas receive data in a push fashion.
#
# Both the hard or the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
# Client query buffers accumulate new commands. They are limited to a fixed
@ -1195,12 +1230,34 @@ client-output-buffer-limit pubsub 32mb 8mb 60
# 100 only in environments where very low latency is required.
hz 10
# Normally it is useful to have an HZ value which is proportional to the
# number of clients connected. This is useful in order, for instance, to
# avoid too many clients being processed for each background task invocation
# in order to avoid latency spikes.
#
# Since the default HZ value by default is conservatively set to 10, Redis
# offers, and enables by default, the ability to use an adaptive HZ value
# which will temporarily rise when there are many connected clients.
#
# When dynamic HZ is enabled, the actual configured HZ will be used
# as a baseline, but multiples of the configured HZ value will be actually
# used as needed once more clients are connected. In this way an idle
# instance will use very little CPU time while a busy instance will be
# more responsive.
dynamic-hz yes
# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes
# When redis saves RDB file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
rdb-save-incremental-fsync yes
# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
# idea to start with the default settings and only change them after investigating
# how to improve the performances and how the keys LFU change over time, which
@ -1310,8 +1367,11 @@ aof-rewrite-incremental-fsync yes
# active-defrag-threshold-upper 100
# Minimal effort for defrag in CPU percentage
# active-defrag-cycle-min 25
# active-defrag-cycle-min 5
# Maximal effort for defrag in CPU percentage
# active-defrag-cycle-max 75
# Maximum number of set/hash/zset/list fields that will be processed from
# the main dictionary scan
# active-defrag-max-scan-fields 1000

Loading…
Cancel
Save