Merge branch 'compose'

This commit is contained in:
Disassembler 2020-04-11 16:47:26 +02:00
commit 8771ac9828
Signed by: Disassembler
GPG Key ID: 524BD33A0EE29499
557 changed files with 17126 additions and 6578 deletions

3
.gitmodules vendored
View File

@ -1,3 +1,6 @@
[submodule "app-vmmgr"]
	path = apk/vmmgr
	url = ssh://git@git.spotter.cz:2222/Spotter-Cluster/vmmgr.git
[submodule "spoc"]
path = apk/spoc
url = ssh://git@git.spotter.cz:2222/Spotter-Cluster/spoc.git

View File

@ -1,28 +0,0 @@
# Contributor: Disassembler <disassembler@dasm.cz>
# Maintainer: Disassembler <disassembler@dasm.cz>
pkgname=acme-sh
pkgver=2.8.1
pkgrel=0
pkgdesc="A pure Unix shell ACME protocol client"
url="https://github.com/Neilpang/acme.sh"
arch="noarch"
license="GPL"
_commit=09bce5e6d6be6b97b3c843b815087874e3e44a21
source="${pkgname}-${pkgver}.tar.gz::https://github.com/Neilpang/acme.sh/archive/${_commit}.tar.gz"
builddir="${srcdir}/acme.sh-${_commit}"
options="!check"
# Nothing to compile: acme.sh is a pure shell script installed verbatim
# by package(), so the build step is a successful no-op.
build() {
	:
}
# Install acme.sh into the package root ($pkgdir):
#  - the client itself into /usr/bin, with its default config dir
#    rewritten from $HOME/.acme.sh to the system-wide /etc/acme.sh.d
#  - the daily renewal hook into /etc/periodic/daily
# All expansions are quoted so paths with spaces cannot word-split.
package() {
	mkdir -p "$pkgdir"/usr/bin "$pkgdir"/etc/acme.sh.d "$pkgdir"/etc/periodic/daily
	# The sed pattern matches the literal '$HOME/.$PROJECT_NAME' text used
	# upstream as the default config location.
	sed 's|$HOME/.$PROJECT_NAME|/etc/acme.sh.d|' "$builddir"/acme.sh > "$pkgdir"/usr/bin/acme.sh
	chmod +x "$pkgdir"/usr/bin/acme.sh
	cp "$startdir"/source/acme-sh "$pkgdir"/etc/periodic/daily/
}
sha512sums="b2d3d1c3f0ba1d57f40373aa430f53b8398e3798cadb4ab6bc376c9c70edda08cda4380bc8fe9fd272b9e02bd9f763de0a399aeeb5ea8d1a2e8ff5b1d8cef86a acme-sh-2.8.1.tar.gz"

View File

@ -1,5 +0,0 @@
#!/bin/sh
# Daily periodic job: run acme.sh's certificate-renewal cron pass.
# Sleep randomly up to 1hr to avoid peak on ACME server
/bin/sleep $(/usr/bin/shuf -i 60-3600 -n 1)
/usr/bin/acme.sh --cron >/dev/null

View File

@ -1,76 +0,0 @@
# Contributor: Trevor R.H. Clarke <trevor@notcows.com>
# Maintainer: Trevor R.H. Clarke <trevor@notcows.com>
pkgname=gdal
pkgver=2.4.0
pkgrel=1
pkgdesc="A translator library for raster and vector geospatial data formats"
url="http://gdal.org"
arch="x86 x86_64"
license="MIT"
depends=""
depends_dev="gdal"
makedepends="
curl-dev
geos-dev
giflib-dev
jpeg-dev
libjpeg-turbo-dev
libpng-dev
linux-headers
postgresql-dev
python2-dev
sqlite-dev
swig
tiff-dev
zlib-dev
"
subpackages="
$pkgname-dev
py-$pkgname:py
"
source="http://download.osgeo.org/$pkgname/$pkgver/$pkgname-$pkgver.tar.xz"
builddir="$srcdir/$pkgname-$pkgver"
# Build the GDAL C/C++ library, then the SWIG-generated Python 2
# bindings (packaged separately by py()).
build() {
cd "$builddir"
./configure --prefix=/usr \
--with-curl=/usr/bin/curl-config
make
cd swig/python
python2 setup.py build
}
# DESTDIR install into $pkgdir; upstream ships headers with the
# executable bit set, so strip it.
package() {
cd "$builddir"
make DESTDIR="$pkgdir" install
chmod -x "$pkgdir"/usr/include/*.h
}
# py-gdal subpackage: install the SWIG Python 2 bindings and the helper
# scripts (gdal_merge.py etc.) into the subpackage root.
py() {
pkgdesc="$pkgname (python bindings)"
cd "$builddir"/swig/python
python2 setup.py install --prefix=/usr --root="$subpkgdir"
chmod a+x scripts/*
install -d "$subpkgdir"/usr/bin
install -m755 scripts/*.py "$subpkgdir"/usr/bin/
}
# Smoke-test the freshly built binaries: version string plus the two
# format drivers this package is expected to provide.
check() {
# TODO: https://trac.osgeo.org/gdal/wiki/TestingNotes
cd "$builddir"
apps/gdal-config --version | grep "$pkgver"
# confirms MBTiles support
apps/gdal_translate --formats | grep "MBTiles -raster,vector- (rw+v): MBTiles"
# confirms PostgreSQL/PostGIS support
apps/ogr2ogr --formats | grep "PostgreSQL -vector- (rw+): PostgreSQL/PostGIS"
}
sha512sums="d4eb6535043b1495f691ab96aa8087d9254aa01efbc57a4051f8b9f4f6b2537719d7bf03ff82c3f6cfd0499a973c491fa9da9f5854dbd9863a0ec9796d3642bb gdal-2.4.0.tar.xz"

View File

@ -1,55 +0,0 @@
# Contributor: Eric Kidd <git@randomhacks.net>
# Maintainer:
pkgname=geos
pkgver=3.7.1
pkgrel=0
pkgdesc="GEOS is a library providing OpenGIS and JTS spatial operations in C++."
url="https://trac.osgeo.org/geos/"
# test fails on other archs
arch="x86 x86_64"
license="LGPL-2.1"
makedepends="swig python2-dev"
subpackages="py-$pkgname:py $pkgname-dev"
source="http://download.osgeo.org/geos/geos-$pkgver.tar.bz2"
builddir="$srcdir/$pkgname-$pkgver"
# Configure and build GEOS with Python bindings enabled (split out by
# the py() subpackage function below).
build() {
cd "$builddir"
./configure \
--build=$CBUILD \
--host=$CHOST \
--prefix=/usr \
--sysconfdir=/etc \
--mandir=/usr/share/man \
--localstatedir=/var \
--enable-python
# --enable-ruby produces a gem which crashes, and which seems to
# mostly ignored in favor of the rgeo and ffi-geos modules, anyway.
make
}
# Run the upstream test suite.
check() {
cd "$builddir"
make check
}
# DESTDIR install plus an explicit copy of the license text.
package() {
cd "$builddir"
make DESTDIR="$pkgdir" install
install -Dm644 COPYING "$pkgdir"/usr/share/licenses/$pkgname/COPYING
}
# py-geos subpackage: move the Python bindings out of the main package.
py() {
pkgdesc="$pkgname Python bindings"
cd "$builddir"
install -d "$subpkgdir"/usr/lib
mv "$pkgdir"/usr/lib/python* "$subpkgdir"/usr/lib/
}
sha512sums="01e8087bcd3cb8f873adb7b56910e1575ccb3336badfdd3f13bc6792095b7010e5ab109ea0d0cd3d1459e2e526e83bcf64d6ee3f7eb47be75639becdaacd2a87 geos-3.7.1.tar.bz2"

178
apk/lxc/APKBUILD Normal file
View File

@ -0,0 +1,178 @@
# Contributor: Łukasz Jendrysik <scadu@yandex.com>
# Contributor: Jakub Jirutka <jakub@jirutka.cz>
# Maintainer: Natanael Copa <ncopa@alpinelinux.org>
pkgname=lxc
pkgver=3.2.1
_pkgver=${pkgver/_rc/.rc}
pkgrel=2
pkgdesc="Userspace interface for the Linux kernel containment features"
url="https://linuxcontainers.org/lxc/"
arch="all"
license="GPL-2.0-only"
makedepends="
libcap-dev
libcap-static
libseccomp-dev
linux-pam-dev
linux-headers
bsd-compat-headers
docbook2x
automake
autoconf
libtool
"
options="suid"
subpackages="
$pkgname-dev
$pkgname-doc
$pkgname-openrc
$pkgname-lvm::noarch
$pkgname-libs
$pkgname-bridge::noarch
$pkgname-bash-completion:bashcomp:noarch
$pkgname-pam
$pkgname-download:_download:noarch
$pkgname-templates-oci:templates_oci:noarch
$pkgname-templates::noarch
"
source="https://linuxcontainers.org/downloads/lxc/lxc-$_pkgver.tar.gz
cgroups-initialize-cpuset-properly.patch
network-restore-ability-to-move-nl80211-devices.patch
execute-attach-user-group.patch
attach-returncode.patch
lxc.initd
lxc.confd
"
# secfixes:
# 3.1.0-r1:
# - CVE-2019-5736
# 2.1.1-r9:
# - CVE-2018-6556
#
_tmpldir="usr/share/lxc/templates"
# Configure and build LXC: AppArmor disabled, PAM module (pam_cgfs) and
# documentation enabled.
build() {
./configure \
--build=$CBUILD \
--host=$CHOST \
--prefix=/usr \
--sysconfdir=/etc \
--localstatedir=/var \
--disable-apparmor \
--enable-pam \
--with-distro=alpine \
--disable-werror \
--enable-doc
make
}
# Run the upstream test suite.
check() {
make check
}
# Install LXC plus the OpenRC service files shipped alongside this
# APKBUILD (lxc.initd / lxc.confd).
package() {
make DESTDIR="$pkgdir" install
install -Dm755 "$srcdir"/lxc.initd "$pkgdir"/etc/init.d/lxc
install -Dm644 "$srcdir"/lxc.confd "$pkgdir"/etc/conf.d/lxc
install -d "$pkgdir"/var/lib/lxc
# Remove useless config for SysVinit.
rm -r "$pkgdir"/etc/default
}
# -lvm subpackage: metadata-only (empty dir), pulls in LVM tooling for
# LVM-backed containers via depends/install_if.
lvm() {
pkgdesc="LVM support for LXC"
depends="$pkgname=$pkgver-r$pkgrel lvm2 util-linux"
install_if="$pkgname=$pkgver-r$pkgrel lvm2"
mkdir "$subpkgdir"
}
# Python3 bindings splitter. NOTE(review): not referenced in the
# $subpackages list above, so this function appears unused — confirm
# before removing.
_py3() {
pkgdesc="Python3 module for LXC"
depends="python3"
mkdir -p "$subpkgdir"/usr/lib
mv "$pkgdir"/usr/lib/python3.* "$subpkgdir"/usr/lib
}
# -download subpackage: only the lxc-download template plus the tools
# it shells out to (gpg, tar, wget).
_download() {
pkgdesc="LXC container image downloader template"
depends="$pkgname gnupg1 tar wget"
mkdir -p "$subpkgdir"/$_tmpldir
mv "$pkgdir"/$_tmpldir/lxc-download "$subpkgdir"/$_tmpldir/
}
# -templates subpackage: everything still left in the template dir;
# relies on -download and -templates-oci being listed earlier in
# $subpackages so their templates are already moved out.
templates() {
pkgdesc="Templates for LXC (except alpine and download)"
depends="tar"
mkdir -p "$subpkgdir"/$_tmpldir
mv "$pkgdir"/$_tmpldir/* "$subpkgdir"/$_tmpldir/
}
# -templates-oci subpackage: the OCI image template (a bash script that
# needs jq at runtime).
templates_oci() {
pkgdesc="OCI Template for LXC"
depends="bash jq"
mkdir -p "$subpkgdir"/usr/share/lxc/templates
mv "$pkgdir"/usr/share/lxc/templates/lxc-oci \
"$subpkgdir"/usr/share/lxc/templates/
}
# -pam subpackage: just the pam_cgfs.so module.
pam() {
pkgdesc="PAM module for LXC"
mkdir -p "$subpkgdir"/lib/security
mv "$pkgdir"/lib/security/pam_cgfs.so "$subpkgdir"/lib/security/
}
# -dev subpackage: run the default split, then move the lxc-config
# tools back into the main package (abuild's default_dev would
# otherwise sweep any *-config binary into -dev).
dev() {
default_dev
# fix abuild smartness
mv "$subpkgdir"/usr/bin/lxc-config "$pkgdir"/usr/bin/
mv "$subpkgdir"/usr/bin/lxc-update-config "$pkgdir"/usr/bin/
}
# -bridge subpackage: a dnsmasq-backed lxcbr0 bridge with a default
# 10.0.3.0/24 DHCP range. Generates the conf.d file for a dnsmasq
# service symlink plus a stub /etc/lxc/dnsmasq.conf for static leases.
bridge() {
depends="dnsmasq"
pkgdesc="Bridge interface for LXC with dhcp"
mkdir -p "$subpkgdir"/etc/conf.d \
"$subpkgdir"/etc/init.d \
"$subpkgdir"/etc/lxc
ln -s dnsmasq "$subpkgdir"/etc/init.d/dnsmasq.lxcbr0
cat >>"$subpkgdir"/etc/conf.d/dnsmasq.lxcbr0 <<- EOF
rc_before="lxc"
BRIDGE_ADDR="10.0.3.1"
BRIDGE_NETMASK="255.255.255.0"
BRIDGE_NETWORK="10.0.3.0/24"
BRIDGE_DHCP_RANGE="10.0.3.2,10.0.3.254"
BRIDGE_DHCP_MAX="253"
BRIDGE_MAC="00:16:3e:00:00:00"
DNSMASQ_CONFFILE="/etc/lxc/dnsmasq.conf"
EOF
cat >>"$subpkgdir"/etc/lxc/dnsmasq.conf <<- EOF
#dhcp-host=somehost,10.0.3.3
#dhcp-host=otherhost,10.0.3.4
EOF
}
# -bash-completion subpackage: relocate completions from the legacy
# /etc/bash_completion.d to the modern /usr/share location.
bashcomp() {
depends=""
pkgdesc="Bash completions for $pkgname"
install_if="$pkgname=$pkgver-r$pkgrel bash-completion"
mkdir -p "$subpkgdir"/usr/share/bash-completion/completions
mv "$pkgdir"/etc/bash_completion.d/$pkgname "$subpkgdir"/usr/share/bash-completion/completions
rmdir "$pkgdir"/etc/bash_completion.d
}
sha512sums="4b3046fc6c4aa497fb26bd45839e60de503184af86d3966e796d14e619203536b9a9ed67bdcd8a108cf1a548f8d095fb46dff53094a08abd8d268c866db685c0 lxc-3.2.1.tar.gz
2bebe6cc24987354b6e7dc9003c3a4df450ca10263e6dc0e9313977fdfc2eb57c0d68560da4d1071c8de2f8e3e394ed3ca17af445bea524daa5f8ae8955b3ba6 cgroups-initialize-cpuset-properly.patch
d302b7296918680901d034dc12ae0687dbbc65766800a9f7256e661f638d3dcad66bcc737aec2c6de8c27d3b9c08833e00420c2064f356d6d73efda9ae9bd707 network-restore-ability-to-move-nl80211-devices.patch
a26cd718760e73309a686242b03c5de5ceff17ab9c348438cd19d2d875696e5c788f0e04d66dd01e08449754c14ce2b7cc1dfc1bac2a64429ccf4462f8aa93a5 execute-attach-user-group.patch
565b7c1774b19f66cc3435557325b75c85203bae4e53db1677580f4b93c4fb2db7f9bd9dd02b956b18bc2730b8645984e790f27162510d642ce7647df21febc0 attach-returncode.patch
b74ffe7c3e8f193265a90ffeb6e5743b1212bc1416b898e5a7e59ddd7f06fc77dc34e2dcbb3614038ac6222a95e2b9beb9f03ab734c991837203ab626b1b091f lxc.initd
91de43db5369a9e10102933514d674e9c875218a1ff2910dd882e5b9c308f9e430deacb13d1d7e0b2ed1ef682d0bb035aa6f8a6738f54fa2ca3a05acce04e467 lxc.confd"

View File

@ -0,0 +1,15 @@
--- a/src/lxc/tools/lxc_attach.c
+++ b/src/lxc/tools/lxc_attach.c
@@ -385,10 +385,9 @@
ret = lxc_wait_for_pid_status(pid);
if (ret < 0)
goto out;
-
- if (WIFEXITED(ret))
- wexit = WEXITSTATUS(ret);
}
+ if (WIFEXITED(ret))
+ wexit = WEXITSTATUS(ret);
out:
lxc_container_put(c);

View File

@ -0,0 +1,33 @@
From b31d62b847a3ee013613795094cce4acc12345ef Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner@ubuntu.com>
Date: Sun, 28 Jul 2019 23:13:26 +0200
Subject: [PATCH] cgroups: initialize cpuset properly
Closes #3108.
Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
---
src/lxc/cgroups/cgfsng.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/src/lxc/cgroups/cgfsng.c b/src/lxc/cgroups/cgfsng.c
index 7b8fe6736f..c29c0958e9 100644
--- a/src/lxc/cgroups/cgfsng.c
+++ b/src/lxc/cgroups/cgfsng.c
@@ -496,12 +496,12 @@ static bool cg_legacy_filter_and_set_cpus(char *path, bool am_initialized)
}
if (!flipped_bit) {
- DEBUG("No isolated or offline cpus present in cpuset");
- return true;
+ cpulist = lxc_cpumask_to_cpulist(possmask, maxposs);
+ TRACE("No isolated or offline cpus present in cpuset");
+ } else {
+ cpulist = move_ptr(posscpus);
+ TRACE("Removed isolated or offline cpus from cpuset");
}
- DEBUG("Removed isolated or offline cpus from cpuset");
-
- cpulist = lxc_cpumask_to_cpulist(possmask, maxposs);
if (!cpulist) {
ERROR("Failed to create cpu list");
return false;

View File

@ -0,0 +1,53 @@
--- a/src/lxc/tools/lxc_attach.c
+++ b/src/lxc/tools/lxc_attach.c
@@ -153,6 +153,8 @@
.checker = NULL,
.log_priority = "ERROR",
.log_file = "none",
+ .uid = LXC_INVALID_UID,
+ .gid = LXC_INVALID_GID,
};
static int my_parser(struct lxc_arguments *args, int c, char *arg)
@@ -366,10 +368,10 @@
goto out;
}
- if (my_args.uid)
+ if (my_args.uid != LXC_INVALID_UID)
attach_options.uid = my_args.uid;
- if (my_args.gid)
+ if (my_args.gid != LXC_INVALID_GID)
attach_options.gid = my_args.gid;
if (command.program) {
--- a/src/lxc/tools/lxc_execute.c
+++ b/src/lxc/tools/lxc_execute.c
@@ -84,6 +84,8 @@
.log_priority = "ERROR",
.log_file = "none",
.daemonize = 0,
+ .uid = LXC_INVALID_UID,
+ .gid = LXC_INVALID_GID,
};
static int my_parser(struct lxc_arguments *args, int c, char *arg)
@@ -211,7 +213,7 @@
if (!bret)
goto out;
- if (my_args.uid) {
+ if (my_args.uid != LXC_INVALID_UID) {
char buf[256];
ret = snprintf(buf, 256, "%d", my_args.uid);
@@ -223,7 +225,7 @@
goto out;
}
- if (my_args.gid) {
+ if (my_args.gid != LXC_INVALID_GID) {
char buf[256];
ret = snprintf(buf, 256, "%d", my_args.gid);

10
apk/lxc/lxc.confd Normal file
View File

@ -0,0 +1,10 @@
# Configuration for /etc/init.d/lxc[.*]
# Enable cgroup for systemd-based containers.
#systemd_container=no
# autostart groups (comma separated)
#lxc_group="onboot"
# Directory for containers' logs (used for symlinked runscripts lxc.*).
#logdir="/var/log/lxc"

157
apk/lxc/lxc.initd Normal file
View File

@ -0,0 +1,157 @@
#!/sbin/openrc-run
# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Header: /var/cvsroot/gentoo-x86/app-emulation/lxc/files/lxc.initd.2,v 1.5 2012/07/21 05:07:15 flameeyes Exp $
# OpenRC service script for LXC. Symlinked as lxc.<name> it manages a
# single container; invoked as the plain "lxc" service it autostarts
# the configured container group instead (see checkconfig/start).
extra_started_commands="reboot"
description="Linux Containers (LXC)"
description_reboot="Reboot containers"
# Container name = suffix of the service symlink (lxc.<name>); for the
# bare "lxc" service this equals SVCNAME and is cleared in checkconfig.
CONTAINER=${SVCNAME#*.}
# Defaults, overridable from /etc/conf.d/lxc[.<name>].
: ${lxc_group:=$LXC_GROUP}
: ${systemd_container:=no}
: ${logdir:=/var/log/lxc}
command="/usr/bin/lxc-start"
pidfile="/var/run/lxc/$CONTAINER.pid"
# OpenRC dependencies: containers need filesystems, sysfs and cgroups
# mounted; start after networking/firewall when those are present.
depend() {
need localmount sysfs cgroups
after firewall net
}
# Locate the container's LXC configuration file, probing the standard
# locations in order of preference. Prints the first existing path and
# returns 0; otherwise reports the problem via eerror and returns 1.
lxc_get_configfile() {
	local candidate
	for candidate in /var/lib/lxc/${CONTAINER}/config \
		/etc/lxc/${CONTAINER}.conf \
		/etc/lxc/${CONTAINER}/config
	do
		if [ -f "$candidate" ]; then
			echo "$candidate"
			return 0
		fi
	done
	eerror "Unable to find a suitable configuration file."
	eerror "If you set up the container in a non-standard"
	eerror "location, please set the CONFIGFILE variable."
	return 1
}
# Print the value of a single "key = value" setting from $CONFIGFILE.
# The trailing cut keeps the part after the first ':' — presumably to
# strip an lxc backing-store prefix such as "dir:" from path values;
# values without a colon pass through unchanged.
lxc_get_var() {
	local key="$1"
	awk -v key="$key" '
		BEGIN { FS = "[ \t]*=[ \t]*" }
		$1 == key { print $2; exit }
	' ${CONFIGFILE} | cut -d: -f2
}
# Validate the service/container pairing. For the bare "lxc" service
# (CONTAINER == SVCNAME) clear CONTAINER so callers switch to autostart
# mode. Otherwise resolve CONFIGFILE and require the container's
# lxc.uts.name to match the service name.
checkconfig() {
if [ ${CONTAINER} = ${SVCNAME} ]; then
CONTAINER=
return 0
fi
CONFIGFILE=${CONFIGFILE:-$(lxc_get_configfile)}
# no need to output anything, the function takes care of that.
[ -z "${CONFIGFILE}" ] && return 1
utsname=$(lxc_get_var lxc.uts.name)
if [ "${CONTAINER}" != "${utsname}" ]; then
eerror "You should use the same name for the service and the"
eerror "lxc.uts.name : Right now the lxc.uts.name is set to : ${utsname}"
return 1
fi
}
# Mount or unmount the named "systemd" cgroup hierarchy, needed by
# systemd-based containers (enabled via $systemd_container in conf.d).
# $1 is "mount" or "unmount"; anything else is silently ignored.
systemd_ctr() {
local cmd="$1"
# Required for lxc-console and services inside systemd containers.
local cgroup=/sys/fs/cgroup/systemd
local mnt_opts='rw,nosuid,nodev,noexec,relatime,none,name=systemd'
case "$cmd" in
mount)
checkpath -d $cgroup
# NOTE(review): "mount | grep" substring-matches other mount points
# too — presumably good enough here; confirm before tightening.
if ! mount | grep $cgroup >/dev/null; then
mount -t cgroup -o $mnt_opts cgroup $cgroup
fi
;;
unmount)
if mount | grep $cgroup >/dev/null; then
umount $cgroup
fi
;;
esac
}
# Run lxc-autostart against the configured $lxc_group. $1 is the verb
# for the OpenRC status line ("Starting", "Stopping", ...); remaining
# arguments are passed straight through to lxc-autostart.
_autostart() {
ebegin "$1 LXC containers"
shift
lxc-autostart --group "$lxc_group" "$@"
eend $?
}
# Start one container (or autostart the group when CONTAINER is empty).
# Refuses to start a container whose rootfs sits on tmpfs unless
# ALLOW_TMPFS is set, then launches lxc-start daemonized and waits for
# the RUNNING state.
start() {
checkconfig || return 1
if yesno "$systemd_container"; then
systemd_ctr mount
fi
if [ -z "$CONTAINER" ]; then
_autostart "Starting"
return
fi
# Start each boot with a fresh container log.
rm -f "$logdir"/${CONTAINER}.log
rootpath=$(lxc_get_var lxc.rootfs.path)
# verify that container is not on tmpfs
dev=$(df -P "${rootpath}" | awk '{d=$1}; END {print d}')
type=$(awk -v dev="$dev" '$1 == dev {m=$3}; END {print m}' /proc/mounts)
if [ "$type" = tmpfs ] && ! yesno "$ALLOW_TMPFS"; then
eerror "${rootpath} is on tmpfs and ALLOW_TMPFS is not set"
return 1
fi
checkpath -d -m 750 -o root:wheel $logdir
checkpath -d ${pidfile%/*}
ebegin "Starting container ${CONTAINER}"
start-stop-daemon --start $command \
--pidfile $pidfile \
-- \
--daemon \
--pidfile $pidfile \
--name ${CONTAINER} \
--rcfile ${CONFIGFILE} \
--logpriority WARN \
--logfile $logdir/${CONTAINER}.log \
|| eend $? || return $?
# Wait up to 5 seconds for the container to reach RUNNING.
lxc-wait -n ${CONTAINER} -t 5 -s RUNNING
eend $?
}
# Stop one container (or shut down the whole autostart group). A single
# container is stopped by signalling lxc-start with POWEROFF_SIGNAL
# (default SIGUSR2), retrying for TIMEOUT (default 30s).
stop() {
checkconfig || return 1
systemd_ctr unmount
if [ -z "$CONTAINER" ]; then
_autostart "Stopping" --shutdown --timeout ${LXC_TIMEOUT:-30}
return
fi
ebegin "Stopping container ${CONTAINER}"
start-stop-daemon --stop --pidfile ${pidfile} \
--retry ${POWEROFF_SIGNAL:-SIGUSR2}/${TIMEOUT:-30} \
--progress
eend $?
}
# Extra OpenRC command: ask the container (or the autostart group) to
# reboot by sending RESTART_SIG (default SIGTERM) to lxc-start, without
# taking the service itself down.
reboot() {
checkconfig || return 1
if [ -z "$CONTAINER" ]; then
_autostart "Rebooting" --reboot
return
fi
ebegin "Sending reboot signal to container $CONTAINER"
start-stop-daemon --signal ${RESTART_SIG:-SIGTERM} \
--pidfile ${pidfile}
eend $?
}

View File

@ -0,0 +1,91 @@
From 3dd7829433f63b2ec1323a1f237efa7d67ea6e2b Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner@ubuntu.com>
Date: Fri, 26 Jul 2019 08:20:02 +0200
Subject: [PATCH] network: restore ability to move nl80211 devices
Closes #3105.
Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
---
src/lxc/network.c | 31 +++++++++++++++++--------------
1 file changed, 17 insertions(+), 14 deletions(-)
diff --git a/src/lxc/network.c b/src/lxc/network.c
index 9755116ba1..7684f95918 100644
--- a/src/lxc/network.c
+++ b/src/lxc/network.c
@@ -1248,22 +1248,21 @@ static int lxc_netdev_rename_by_name_in_netns(pid_t pid, const char *old,
static int lxc_netdev_move_wlan(char *physname, const char *ifname, pid_t pid,
const char *newname)
{
- char *cmd;
+ __do_free char *cmd = NULL;
pid_t fpid;
- int err = -1;
/* Move phyN into the container. TODO - do this using netlink.
* However, IIUC this involves a bit more complicated work to talk to
* the 80211 module, so for now just call out to iw.
*/
cmd = on_path("iw", NULL);
- if (!cmd)
- goto out1;
- free(cmd);
+ if (!cmd) {
+ return -1;
+ }
fpid = fork();
if (fpid < 0)
- goto out1;
+ return -1;
if (fpid == 0) {
char pidstr[30];
@@ -1274,21 +1273,18 @@ static int lxc_netdev_move_wlan(char *physname, const char *ifname, pid_t pid,
}
if (wait_for_pid(fpid))
- goto out1;
+ return -1;
- err = 0;
if (newname)
- err = lxc_netdev_rename_by_name_in_netns(pid, ifname, newname);
+ return lxc_netdev_rename_by_name_in_netns(pid, ifname, newname);
-out1:
- free(physname);
- return err;
+ return 0;
}
int lxc_netdev_move_by_name(const char *ifname, pid_t pid, const char* newname)
{
+ __do_free char *physname = NULL;
int index;
- char *physname;
if (!ifname)
return -EINVAL;
@@ -3279,13 +3275,20 @@ int lxc_network_move_created_netdev_priv(struct lxc_handler *handler)
return 0;
lxc_list_for_each(iterator, network) {
+ __do_free char *physname = NULL;
int ret;
struct lxc_netdev *netdev = iterator->elem;
if (!netdev->ifindex)
continue;
- ret = lxc_netdev_move_by_index(netdev->ifindex, pid, NULL);
+ if (netdev->type == LXC_NET_PHYS)
+ physname = is_wlan(netdev->link);
+
+ if (physname)
+ ret = lxc_netdev_move_wlan(physname, netdev->link, pid, NULL);
+ else
+ ret = lxc_netdev_move_by_index(netdev->ifindex, pid, NULL);
if (ret) {
errno = -ret;
SYSERROR("Failed to move network device \"%s\" with ifindex %d to network namespace %d",

View File

@ -1,38 +0,0 @@
# Contributor: Bjoern Schilberg <bjoern@intevation.de>
# Maintainer: Bjoern Schilberg <bjoern@intevation.de>
pkgname=postgis
pkgver=2.5.1
pkgrel=1
pkgdesc="PostGIS is a spatial database extender for PostgreSQL object-relational database."
url="https://postgis.net/"
# geos test fails on other archs
arch="x86 x86_64" # fails on x86*
license="GPL-2.0-or-later"
depends="postgresql perl"
makedepends="postgresql-dev geos-dev gdal-dev libxml2-dev proj4-dev perl-dev
json-c-dev pcre-dev"
subpackages="$pkgname-dev $pkgname-doc"
source="http://download.osgeo.org/postgis/source/$pkgname-$pkgver.tar.gz"
options="!check" # tests depends on a running PostgreSQL server
# Configure and build PostGIS against the system PostgreSQL.
build() {
cd "$builddir"
./configure \
--build=$CBUILD \
--host=$CHOST \
--prefix=/usr \
--disable-gtktest \
--disable-nls \
--disable-rpath \
--without-protobuf
# NOTE(review): forced -j1 — presumably the build is not parallel-safe;
# confirm before removing.
make -j1
}
# Standard DESTDIR install into $pkgdir.
package() {
cd "$builddir"
make DESTDIR="$pkgdir" install
}
sha512sums="c6c9c8c5befd945614e92d1062df1d753ca8b7fd69b70226065c2dac77a59783b14ece4da994187079b683ee090ba5a79389ba679f22fce8c20a5afc2c8dfca0 postgis-2.5.1.tar.gz"

View File

@ -1,33 +0,0 @@
# Maintainer: Natanael Copa <ncopa@alpinelinux.org>
pkgname=proj4
pkgver=5.2.0
pkgrel=0
pkgdesc="PROJ.4 - Cartographic Projections Library"
url="https://trac.osgeo.org/proj/"
arch="all"
license="MIT"
options=""
depends=""
makedepends=""
subpackages="$pkgname-doc $pkgname-dev"
source="http://download.osgeo.org/proj/proj-$pkgver.tar.gz
"
builddir="$srcdir"/proj-$pkgver
# Configure and build PROJ.4 without Java (JNI) support. The
# "|| return 1" guards are legacy abuild style, kept as-is.
build () {
cd "$builddir"
./configure \
--build=$CBUILD \
--host=$CHOST \
--prefix=/usr \
--without-jni \
|| return 1
make || return 1
}
# Install into the package root ($pkgdir). Quotes added around all
# expansions and the cd made fail-fast.
# NOTE(review): usr/bin is pre-created before the install — presumably
# required by the upstream install target; confirm before removing.
package() {
	cd "$builddir" || return 1
	mkdir -p "$pkgdir"/usr/bin
	make DESTDIR="$pkgdir" install
}
sha512sums="f773117d22309d4ee8dbedc2a7b6ba27e8cd032e1bd0af3c98f270bf7b7ab3353be0b04d91202a1f137fc45164c8e8a52712bb06281948008160d08f9f9074ba proj-5.2.0.tar.gz"

View File

@ -1,8 +1,8 @@
# Contributor: Nathan Johnson <nathan@nathanjohnson.info> # Contributor: Nathan Johnson <nathan@nathanjohnson.info>
# Maintainer: Nathan Johnson <nathan@nathanjohnson.info> # Maintainer: Nathan Johnson <nathan@nathanjohnson.info>
pkgname=rabbitmq-server pkgname=rabbitmq-server
pkgver=3.7.11 pkgver=3.7.18
pkgrel=0 pkgrel=1
pkgdesc="RabbitMQ is an open source multi-protocol messaging broker." pkgdesc="RabbitMQ is an open source multi-protocol messaging broker."
url="https://www.rabbitmq.com/" url="https://www.rabbitmq.com/"
arch="noarch !s390x" arch="noarch !s390x"
@ -11,10 +11,9 @@ depends="erlang erlang-tools erlang-runtime-tools erlang-stdlib
logrotate erlang-ssl erlang-crypto erlang-parsetools logrotate erlang-ssl erlang-crypto erlang-parsetools
erlang-mnesia erlang-sasl erlang-inets erlang-syntax-tools erlang-mnesia erlang-sasl erlang-inets erlang-syntax-tools
erlang-eldap erlang-xmerl erlang-os-mon erlang-asn1 erlang-public-key" erlang-eldap erlang-xmerl erlang-os-mon erlang-asn1 erlang-public-key"
depends_dev="" makedepends="$depends_dev erlang-dev python3 py3-simplejson xmlto libxslt
makedepends="$depends_dev erlang-dev python2 py2-simplejson xmlto libxslt
rsync zip gawk grep erlang-compiler erlang-erl-docgen rsync zip gawk grep erlang-compiler erlang-erl-docgen
erlang-edoc socat elixir" erlang-edoc socat erlang-eunit elixir"
install="$pkgname.pre-install $pkgname.post-deinstall" install="$pkgname.pre-install $pkgname.post-deinstall"
pkgusers="rabbitmq" pkgusers="rabbitmq"
pkggroups="rabbitmq" pkggroups="rabbitmq"
@ -22,34 +21,37 @@ subpackages="$pkgname-doc"
source=" source="
rabbitmq-server.initd rabbitmq-server.initd
rabbitmq-server.logrotate rabbitmq-server.logrotate
https://github.com/rabbitmq/${pkgname}/releases/download/v${pkgver}/${pkgname}-${pkgver}.tar.xz https://github.com/rabbitmq/rabbitmq-server/releases/download/v$pkgver/rabbitmq-server-$pkgver.tar.xz
py3.patch
" "
options="!check" # test suite broken
# secfixes:
# 3.7.17:
# - CVE-2015-9251
# - CVE-2017-16012
# - CVE-2019-11358
builddir="$srcdir/${pkgname}-${pkgver}"
build() { build() {
cd "$builddir" make dist manpages PYTHON=python3
make dist manpages
} }
package() { package() {
cd "$builddir"
make install install-bin install-man DESTDIR="$pkgdir" PREFIX=/usr \ make install install-bin install-man DESTDIR="$pkgdir" PREFIX=/usr \
RMQ_ROOTDIR="/usr/lib/rabbitmq" MANDIR=/usr/share/man RMQ_ROOTDIR="/usr/lib/rabbitmq" MANDIR=/usr/share/man PYTHON=python3
mkdir -p "$pkgdir"/var/lib/rabbitmq/mnesia mkdir -p "$pkgdir"/var/lib/rabbitmq/mnesia
mkdir -p "$pkgdir"/var/log/rabbitmq mkdir -p "$pkgdir"/var/log/rabbitmq
#Copy all necessary lib files etc. #Copy all necessary lib files etc.
install -p -m755 -D "$builddir"/scripts/rabbitmq-server.ocf \ install -p -m755 -D "$builddir"/scripts/rabbitmq-server.ocf \
"$pkgdir"/usr/lib/ocf/resource.d/rabbitmq/rabbitmq-server || return 1 "$pkgdir"/usr/lib/ocf/resource.d/rabbitmq/rabbitmq-server
install -p -m755 -D "$builddir"/scripts/rabbitmq-server-ha.ocf \ install -p -m755 -D "$builddir"/scripts/rabbitmq-server-ha.ocf \
"$pkgdir"/usr/lib/ocf/resource.d/rabbitmq/rabbitmq-server-ha \ "$pkgdir"/usr/lib/ocf/resource.d/rabbitmq/rabbitmq-server-ha
|| return 1
install -p -m644 -D "$srcdir/"$pkgname.logrotate \ install -p -m644 -D "$srcdir/"$pkgname.logrotate \
"$pkgdir"/etc/logrotate.d/rabbitmq-server || return 1 "$pkgdir"/etc/logrotate.d/rabbitmq-server
install -m755 -D "$srcdir"/$pkgname.initd \ install -m755 -D "$srcdir"/$pkgname.initd \
"$pkgdir"/etc/init.d/$pkgname || return 1 "$pkgdir"/etc/init.d/$pkgname
mkdir -p "$pkgdir"/usr/sbin mkdir -p "$pkgdir"/usr/sbin
# This is lifted / adapted from the official upstream spec file. # This is lifted / adapted from the official upstream spec file.
@ -79,4 +81,5 @@ package() {
sha512sums="a8bb02a7cae1f8720e5c7aaabfe6a2c0e731cffbe0d8f99bdcb6597daa654dc49e6d41943974601435700cf469eaa8286dc91a3255a6b9023754c3861fbb5cd9 rabbitmq-server.initd sha512sums="a8bb02a7cae1f8720e5c7aaabfe6a2c0e731cffbe0d8f99bdcb6597daa654dc49e6d41943974601435700cf469eaa8286dc91a3255a6b9023754c3861fbb5cd9 rabbitmq-server.initd
b8655cb048ab3b32001d4e6920bb5366696f3a5da75c053605e9b270e771c548e36858dca8338813d34376534515bba00af5e6dd7b4b1754a0e64a8fb756e3f3 rabbitmq-server.logrotate b8655cb048ab3b32001d4e6920bb5366696f3a5da75c053605e9b270e771c548e36858dca8338813d34376534515bba00af5e6dd7b4b1754a0e64a8fb756e3f3 rabbitmq-server.logrotate
a54034ebc919be0c6f58832ea5d47f8e3964e30ca9185c59bf882c3dc17d1df5b6e1ab0460f75e8cf0cc325504cc3a674f7cb44a5d7613e16a5ad8b721a286a4 rabbitmq-server-3.7.11.tar.xz" 7ac10172b2a1d282a0fbcfc13e4612b0aaee31c7248616cc16451c9390aabd96d866619336a29c9bb3b4142d2141b5d442a07a49c6bb0a4ea0cdb287dc813c0f rabbitmq-server-3.7.18.tar.xz
7862c8566631aeb8c7756e5c8ea11705546ffcdca6ec9058516f91c2650a21b1bb373879e8eb8a78dc5af808eb1fdf6c8167997ea7feace2de61dfa1fb1e5c8b py3.patch"

View File

@ -0,0 +1,112 @@
diff --git a/deps/amqp10_common/codegen.py b/deps/amqp10_common/codegen.py
index dc4480a..d573bcf 100755
--- a/deps/amqp10_common/codegen.py
+++ b/deps/amqp10_common/codegen.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
from __future__ import print_function
diff --git a/deps/rabbit_common/codegen.py b/deps/rabbit_common/codegen.py
index 8b81362..70bd7fa 100755
--- a/deps/rabbit_common/codegen.py
+++ b/deps/rabbit_common/codegen.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
## The contents of this file are subject to the Mozilla Public License
## Version 1.1 (the "License"); you may not use this file except in
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/manage.py b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/manage.py
index 1ae2e80..3e61442 100755
--- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/manage.py
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/manage.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
import os
import sys
diff --git a/deps/rabbitmq_consistent_hash_exchange/README.md b/deps/rabbitmq_consistent_hash_exchange/README.md
index ce1623f..6ff906b 100644
--- a/deps/rabbitmq_consistent_hash_exchange/README.md
+++ b/deps/rabbitmq_consistent_hash_exchange/README.md
@@ -150,7 +150,7 @@ Executable versions of some of the code examples can be found under [./examples]
This version of the example uses [Pika](https://pika.readthedocs.io/en/stable/), the most widely used Python client for RabbitMQ:
``` python
-#!/usr/bin/env python
+#!/usr/bin/env python3
import pika
import time
@@ -342,7 +342,7 @@ routed to the same **arbitrarily chosen** queue.
#### Code Example in Python
``` python
-#!/usr/bin/env python
+#!/usr/bin/env python3
import pika
import time
@@ -544,7 +544,7 @@ routed to the same **arbitrarily chosen** queue.
#### Code Example in Python
``` python
-#!/usr/bin/env python
+#!/usr/bin/env python3
import pika
import time
diff --git a/deps/rabbitmq_consistent_hash_exchange/examples/python/example1.py b/deps/rabbitmq_consistent_hash_exchange/examples/python/example1.py
index 6cf67d6..30e43ea 100644
--- a/deps/rabbitmq_consistent_hash_exchange/examples/python/example1.py
+++ b/deps/rabbitmq_consistent_hash_exchange/examples/python/example1.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
import pika
import time
diff --git a/deps/rabbitmq_consistent_hash_exchange/examples/python/example2.py b/deps/rabbitmq_consistent_hash_exchange/examples/python/example2.py
index 8c1ac15..0099b28 100644
--- a/deps/rabbitmq_consistent_hash_exchange/examples/python/example2.py
+++ b/deps/rabbitmq_consistent_hash_exchange/examples/python/example2.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
import pika
import time
diff --git a/deps/rabbitmq_consistent_hash_exchange/examples/python/example3.py b/deps/rabbitmq_consistent_hash_exchange/examples/python/example3.py
index 0b74501..c11a4ce 100644
--- a/deps/rabbitmq_consistent_hash_exchange/examples/python/example3.py
+++ b/deps/rabbitmq_consistent_hash_exchange/examples/python/example3.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
import pika
import time
diff --git a/deps/rabbitmq_management/bin/rabbitmqadmin b/deps/rabbitmq_management/bin/rabbitmqadmin
index 55173cb..04c0c12 100755
--- a/deps/rabbitmq_management/bin/rabbitmqadmin
+++ b/deps/rabbitmq_management/bin/rabbitmqadmin
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
diff --git a/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/manage.py b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/manage.py
index 469f277..ea21f63 100755
--- a/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/manage.py
+++ b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/manage.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
import os
import sys

View File

@ -1,6 +1,6 @@
 #!/bin/sh
 addgroup -S rabbitmq 2>/dev/null
-adduser -S -D -H -s /sbin/nologin -h /usr/lib/rabbitmq -G rabbitmq \
+adduser -S -D -H -s /sbin/nologin -h /var/lib/rabbitmq -G rabbitmq \
 -g 'RabbitMQ Server' rabbitmq 2>/dev/null
 exit 0

1
apk/spoc Submodule

@ -0,0 +1 @@
Subproject commit 0614c15e3ed8b8482febaf6113a7f53ef006df18

@ -1 +1 @@
-Subproject commit d9334fd12be8feb11106564d1a3b2e7526c89f43
+Subproject commit 71604f26b6cd26e3d7cef720401bdc24bb13bd16

View File

@ -1,65 +0,0 @@
# Contributor: Stuart Cardall <developer@it-offshore.co.uk>
# Maintainer: Stuart Cardall <developer@it-offshore.co.uk>
pkgname=wireguard-tools
pkgver=0.0.20190702
pkgrel=0
pkgdesc="Next generation secure network tunnel: userspace tools"
arch="all"
url="https://www.wireguard.com"
# SPDX identifier headers tells us 'GPL-2.0' but considering it
# is a kernel project i think it is safe to assume it is GPL-2.0-only just
# like the kernel.
license="GPL-2.0-only"
makedepends="libmnl-dev"
depends="$pkgname-wg $pkgname-wg-quick"
subpackages="
$pkgname-doc
$pkgname-bash-completion:bashcomp:noarch
$pkgname-wg:_split
$pkgname-wg-quick:_split:noarch
"
options="!check"
source="https://git.zx2c4.com/WireGuard/snapshot/WireGuard-$pkgver.tar.xz
alpine-compat.patch
"
builddir="$srcdir"/WireGuard-$pkgver
# Build only the userspace tools (wg, wg-quick); the kernel module is
# packaged separately.
build() {
make -C src/tools
}
# Install the tools, bash completions and wg-quick (no systemd units),
# plus upstream example configs as documentation.
package() {
mkdir -p "$pkgdir/usr/share/doc/$pkgname"
make -C src/tools \
DESTDIR="$pkgdir" \
WITH_BASHCOMPLETION=yes \
WITH_WGQUICK=yes \
WITH_SYSTEMDUNITS=no \
install
find "$builddir"/contrib/examples -name '.gitignore' -delete
cp -rf "$builddir"/contrib/examples "$pkgdir/usr/share/doc/$pkgname/"
}
# Shared splitter for the -wg and -wg-quick subpackages: derive the
# tool name from the subpackage name, set per-tool depends, and move
# that single binary into the subpackage.
_split() {
local cmd=${subpkgname/$pkgname-}
pkgdesc="$pkgdesc ($cmd)"
case $cmd in
wg-quick) depends="$pkgname-wg iproute2 bash openresolv" ;;
*) depends= ;;
esac
mkdir -p "$subpkgdir"/usr/bin
mv "$pkgdir"/usr/bin/$cmd "$subpkgdir"/usr/bin/
}
# -bash-completion subpackage: move the whole usr/share tree (which at
# this point holds only completions and docs already split elsewhere).
bashcomp() {
depends="bash"
pkgdesc="WireGuard bash completions"
mkdir -p "$subpkgdir"/usr
mv "$pkgdir"/usr/share "$subpkgdir"/usr
}
sha512sums="8b92b51506cd3f8e9939378b86f23678e08e8501432decd0abf6a9d4e3dfe4742b6f1cb75e06407f5816778b3dd90849a5da83252ab882392ec1905dfb997501 WireGuard-0.0.20190702.tar.xz
4577574333f023217ae6e0945807e1ccd2dec7caa87e329b1d5b44569f6b5969663ad74f8154b85d3dc7063dd762649e3fa87c7667e238ffb77c0e5df9245a5e alpine-compat.patch"

View File

@ -1,12 +0,0 @@
diff --git a/src/tools/wg-quick/linux.bash b/src/tools/wg-quick/linux.bash
--- a/src/tools/wg-quick/linux.bash
+++ b/src/tools/wg-quick/linux.bash
@@ -201,7 +201,7 @@
cmd ip $proto rule add table main suppress_prefixlength 0
while read -r key _ value; do
[[ $value -eq 1 ]] && sysctl -q "$key=2"
- done < <(sysctl -a -r '^net\.ipv4.conf\.[^ .=]+\.rp_filter$')
+ done < <(sysctl -a 2>/dev/null | sed -n -r 's#^(net\.ipv4.conf\.[^ .=]+\.rp_filter).*$#\1#p')
return 0
}

View File

@ -1,91 +0,0 @@
# Contributor: Stuart Cardall <developer@it-offshore.co.uk>
# Maintainer: Stuart Cardall <developer@it-offshore.co.uk>

# wireguard version
_ver=0.0.20190702
_rel=0
# kernel version
_kver=4.19.52
_krel=0
_kpkgver="$_kver-r$_krel"

# for custom kernels set $FLAVOR
_extra_flavors=
if [ -z "$FLAVOR" ]; then
	_flavor=vanilla
	case $CARCH in
		x86|x86_64) _extra_flavors="virt";;
	esac
else
	_flavor=$FLAVOR
fi

_kpkg=linux-$_flavor
pkgname=wireguard-$_flavor
# Package version tracks the kernel version; pkgrel combines module and
# kernel release counters so either bump triggers a rebuild.
pkgver=$_kver
pkgrel=$(( $_rel + $_krel))
pkgdesc="Next generation secure network tunnel: kernel modules for $_flavor"
arch="all"
url="https://www.wireguard.com"
license="GPL-2.0"
depends="linux-$_flavor=$_kpkgver"
makedepends="
	libmnl-dev
	linux-$_flavor-dev=$_kpkgver
	linux-firmware-none
	"
install_if="wireguard-tools-wg=$_ver-r$_rel linux-$_flavor=$_kpkgver"
options="!check"
source="https://git.zx2c4.com/WireGuard/snapshot/WireGuard-$_ver.tar.xz"
builddir="$srcdir"/WireGuard-$_ver

# Each extra flavor gets its own kernel headers dependency and a subpackage
# built by _extra().
for f in $_extra_flavors; do
	makedepends="$makedepends linux-$f-dev=$_kpkgver"
	subpackages="$subpackages wireguard-$f:_extra"
done
prepare() {
	default_prepare
	# For the default flavor, cross-check _kver/_krel against the kernel
	# APKBUILD (sourced in a subshell so its variables don't leak here)
	# so the module package always matches the packaged kernel.
	if [ -z "$FLAVOR" ]; then
		( . "$startdir"/../../main/linux-$_flavor/APKBUILD
		[ "$_kver" != "$pkgver" ] && die "please update _kver to $pkgver"
		[ "$_krel" != "$pkgrel" ] && die "please update _krel to $pkgrel"
		return 0
		)
	fi
	# One source-tree copy per flavor so each builds against its own kernel.
	local flavor=
	for flavor in $_flavor $_extra_flavors; do
		cp -r "$builddir" "$srcdir"/$flavor
	done
}
build() {
	# abuild's default LDFLAGS would break kernel module linking.
	unset LDFLAGS
	local flavor= kabi=
	# Build the module once per flavor against that flavor's kernel headers.
	for flavor in $_flavor $_extra_flavors; do
		kabi="$_kver-$_krel-$flavor"
		make -C "$srcdir/$flavor"/src \
			KERNELDIR=/lib/modules/$kabi/build module
	done
}
package() {
	# Install only the primary flavor's module; extra flavors are packaged
	# by the _extra() subpackage function.
	local kabi="$_kver-$_krel-$_flavor"
	install -Dm644 "$srcdir"/$_flavor/src/wireguard.ko \
		"$pkgdir/lib/modules/$kabi/extra/wireguard.ko"
}
_extra() {
	# Flavor is the last dash-separated component of the subpackage name.
	local flavor=${subpkgname##*-}
	depends="linux-$flavor=$_kpkgver"
	install_if="wireguard-tools-wg=$_ver-r$_rel linux-$flavor=$_kpkgver"
	pkgdesc="Next generation secure network tunnel: kernel modules for $flavor"
	local kabi="$_kver-$_krel-$flavor"
	# BUGFIX: install the module built for *this* flavor instead of the
	# previously hard-coded "virt" source directory, which would break as
	# soon as any other extra flavor was added.
	install -Dm644 "$srcdir"/"$flavor"/src/wireguard.ko \
		"$subpkgdir/lib/modules/$kabi/extra/wireguard.ko"
}
sha512sums="8b92b51506cd3f8e9939378b86f23678e08e8501432decd0abf6a9d4e3dfe4742b6f1cb75e06407f5816778b3dd90849a5da83252ab882392ec1905dfb997501 WireGuard-0.0.20190702.tar.xz"

View File

@ -7,140 +7,106 @@ ROOT=$(dirname $(dirname $(realpath "${0}")))
cd ${ROOT}/doc cd ${ROOT}/doc
make html make html
# Build basic.tar # Build basic tar
cd ${ROOT}/vm cd ${ROOT}/vm
tar cpf /srv/build/vm.tar * tar czpf /srv/build/vm.tar.gz *
# Build native apps # Build native apps
cd ${ROOT}/apk/acme-sh cd ${ROOT}/apk/lxc
abuild -F apk add -U autoconf automake bsd-compat-headers docbook2x libcap-dev libcap-static libseccomp-dev libtool linux-headers linux-pam-dev
cd ${ROOT}/apk/geos
apk add -U swig python2-dev
abuild -F
cd ${ROOT}/apk/gdal
apk add -U curl-dev geos-dev@vm giflib-dev jpeg-dev libjpeg-turbo-dev libpng-dev linux-headers postgresql-dev python2-dev sqlite-dev swig tiff-dev zlib-dev
abuild -F
cd ${ROOT}/apk/proj4
abuild -F abuild -F
cd ${ROOT}/apk/rabbitmq-server cd ${ROOT}/apk/rabbitmq-server
apk add -U elixir erlang-compiler erlang-dev erlang-edoc erlang-eldap erlang-erl-docgen erlang-mnesia erlang-os-mon erlang-runtime-tools erlang-tools erlang-xmerl gawk grep libxslt logrotate py2-simplejson python2 rsync socat xmlto zip apk add -U elixir erlang-compiler erlang-dev erlang-edoc erlang-eldap erlang-erl-docgen erlang-mnesia erlang-os-mon erlang-runtime-tools erlang-tools erlang-eunit erlang-xmerl gawk grep libxslt py3-simplejson python3 rsync socat xmlto zip
abuild -F abuild -F
cd ${ROOT}/apk/postgis cd ${ROOT}/apk/spoc
apk add -U gdal-dev@vm geos-dev@vm json-c-dev libxml2-dev pcre-dev perl perl-dev postgresql postgresql-dev proj4-dev@vm
abuild -F abuild -F
cd ${ROOT}/apk/vmmgr cd ${ROOT}/apk/vmmgr
abuild -F abuild -F
cd ${ROOT}/apk/wireguard # Build runtimes
apk add -U libmnl-dev linux-virt-dev linux-firmware-none
FLAVOR=virt abuild -F
cd ${ROOT}/apk/wireguard-tools
apk add -U libmnl-dev
abuild -F
# Build apd pack runtimes
cd ${ROOT}/lxc-shared cd ${ROOT}/lxc-shared
lxc-build alpine3.8 spoc-image build -p alpine3.8/image
lxc-build alpine3.8-php5.6 spoc-image build -p alpine3.8-java8/image
lxc-build alpine3.8-nodejs8 spoc-image build -p alpine3.8-php5.6/image
lxc-build alpine3.8-ruby2.4 spoc-image build -p alpine3.8-ruby2.4/image
lxc-build alpine3.9 spoc-image build -p alpine3.10/image
lxc-build alpine3.9-java8 spoc-image build -p alpine3.10-nodejs10/image
lxc-build alpine3.9-php7.2 spoc-image build -p alpine3.10-python2.7/image
lxc-build alpine3.9-python2.7 spoc-image build -p alpine3.10-python3.7/image
lxc-build alpine3.9-python3.6 spoc-image build -p alpine3.11/image
lxc-build alpine3.9-nodejs10 spoc-image build -p alpine3.11-java8/image
lxc-build alpine3.9-ruby2.4 spoc-image build -p alpine3.11-php7.3/image
lxc-build alpine3.9-tomcat7 spoc-image build -p alpine3.11-python2.7/image
lxc-build alpine3.9-tomcat8.5 spoc-image build -p alpine3.11-python3.8/image
spoc-image build -p alpine3.11-ruby2.4/image
spoc-image build -p alpine3.11-ruby2.6/image
spoc-image build -p alpine3.11-tomcat7/image
spoc-image build -p alpine3.11-tomcat8.5/image
# Build services # Build services
cd ${ROOT}/lxc-services cd ${ROOT}/lxc-services
lxc-build activemq spoc-image build -p activemq/image
lxc-build mariadb spoc-image build -p mariadb/image
lxc-build postgres spoc-image build -p postgres/image
lxc-build rabbitmq spoc-image build -p postgis/image
lxc-build redis spoc-image build -p rabbitmq/image
lxc-build solr spoc-image build -p redis/image
spoc-image build -p solr6/image
# Build applications # Build applications
cd ${ROOT}/lxc-apps cd ${ROOT}/lxc-apps
lxc-build ckan-datapusher
lxc-build ckan
lxc-build crisiscleanup
lxc-build cts
lxc-build ecogis
lxc-build frontlinesms
lxc-build gnuhealth
lxc-build kanboard
lxc-build mifosx
lxc-build motech
lxc-build odoo
lxc-build opendatakit
lxc-build opendatakit-build
lxc-build openmapkit
lxc-build pandora
lxc-build sahana-shared
lxc-build sahana
lxc-build sahana-demo
lxc-build sambro
lxc-build seeddms
lxc-build sigmah
lxc-build ushahidi
# Pack runtimes spoc-image build -p ckan/ckan.image
cd ${ROOT}/lxc-shared spoc-image build -p ckan/ckan-datapusher.image
lxc-pack alpine3.8 spoc-app publish ckan/app
lxc-pack alpine3.8-php5.6
lxc-pack alpine3.8-nodejs8
lxc-pack alpine3.8-ruby2.4
lxc-pack alpine3.9
lxc-pack alpine3.9-java8
lxc-pack alpine3.9-php7.2
lxc-pack alpine3.9-python2.7
lxc-pack alpine3.9-python3.6
lxc-pack alpine3.9-nodejs10
lxc-pack alpine3.9-ruby2.4
lxc-pack alpine3.9-tomcat7
lxc-pack alpine3.9-tomcat8.5
# Pack services spoc-image build -p crisiscleanup/image
cd ${ROOT}/lxc-services spoc-app publish crisiscleanup/app
lxc-pack activemq
lxc-pack mariadb
lxc-pack postgres
lxc-pack rabbitmq
lxc-pack redis
lxc-pack solr
# Pack applications spoc-image build -p cts/image
cd ${ROOT}/lxc-apps spoc-app publish cts/app
lxc-pack ckan-datapusher
lxc-pack ckan spoc-image build -p decidim/image
lxc-pack crisiscleanup spoc-app publish decidim/app
lxc-pack cts
lxc-pack ecogis spoc-image build -p frontlinesms/image
lxc-pack frontlinesms spoc-app publish frontlinesms/app
lxc-pack gnuhealth
lxc-pack kanboard spoc-image build -p gnuhealth/image
lxc-pack mifosx spoc-app publish gnuhealth/app
lxc-pack motech
lxc-pack odoo spoc-image build -p kanboard/image
lxc-pack opendatakit spoc-app publish kanboard/app
lxc-pack opendatakit-build
lxc-pack openmapkit spoc-image build -p mifosx/image
lxc-pack pandora spoc-app publish mifosx/app
lxc-pack sahana-shared
lxc-pack sahana spoc-image build -p motech/image
lxc-pack sahana-demo spoc-app publish motech/app
lxc-pack sambro
lxc-pack seeddms spoc-image build -p odoo/image
lxc-pack sigmah spoc-app publish odoo/app
lxc-pack ushahidi
spoc-image build -p opendatakit/opendatakit.image
spoc-image build -p opendatakit/opendatakit-build.image
spoc-app publish opendatakit/app
spoc-image build -p openmapkit/image
spoc-app publish openmapkit/app
spoc-image build -p pandora/image
spoc-app publish pandora/app
spoc-image build -p sahana/image
spoc-app publish sahana/app
spoc-app publish sahana-demo/app
spoc-app publish sambro/app
spoc-image build -p seeddms/image
spoc-app publish seeddms/app
spoc-image build -p ushahidi/image
spoc-app publish ushahidi/app

29
build/clean-all.sh Executable file
View File

@ -0,0 +1,29 @@
#!/bin/sh
# Reset the build VM: remove all build artifacts, nginx vhosts, running
# containers and SPOC state. Destructive by design; exits on first error.
set -ev

# Clean documentation
rm -rf /srv/build/doc/*
# Clean basic tar
rm -f /srv/build/vm.tar.gz
# Clean native apps
rm -rf /srv/build/alpine/*
# Clean built LXC packages
rm -rf /srv/build/spoc

# Remove nginx configs except repo.conf/default.conf. Deleting directly via
# find avoids word-splitting filenames through an unquoted $(find ...) loop.
find /etc/nginx/conf.d -name '*.conf' -a ! -name repo.conf -a ! -name default.conf -delete
service nginx reload

# Stop running containers; read line by line so names are never word-split.
spoc-container list | while IFS= read -r APP; do
	spoc-container stop "$APP"
done

# Remove data
rm -rf /var/lib/spoc
rm -rf /var/log/spoc

View File

@ -9,18 +9,21 @@ export MAKEFLAGS=-j$JOBS
# remove line below to disable colors # remove line below to disable colors
USE_COLORS=1 USE_COLORS=1
# uncomment line below to enable ccache support.
#USE_CCACHE=1
SRCDEST=/var/cache/distfiles SRCDEST=/var/cache/distfiles
# uncomment line below to store built packages in other location # uncomment line below to store built packages in other location
# The package will be stored as $REPODEST/$repo/$pkgname-$pkgver-r$pkgrel.apk # The package will be stored as $REPODEST/$repo/$pkgname-$pkgver-r$pkgrel.apk
# where $repo is the name of the parent directory of $startdir. # where $repo is the name of the parent directory of $startdir.
REPODEST=/srv/build/alpine/v3.9 REPODEST=/srv/build/alpine/v3.11
# PACKAGER and MAINTAINER are used by newapkbuild when creating new aports for # PACKAGER and MAINTAINER are used by newapkbuild when creating new aports for
# the APKBUILD's "Contributor:" and "Maintainer:" comments, respectively. # the APKBUILD's "Contributor:" and "Maintainer:" comments, respectively.
#PACKAGER="Your Name <your@email.address>" #PACKAGER="Your Name <your@email.address>"
#MAINTAINER="$PACKAGER" #MAINTAINER="$PACKAGER"
PACKAGER_PRIVKEY="/srv/build/repokey.rsa" PACKAGER_PRIVKEY="/root/repo.spotter.cz.rsa"
# what to clean up after a successful build # what to clean up after a successful build
CLEANUP="srcdir bldroot pkgdir deps" CLEANUP="srcdir bldroot pkgdir deps"

View File

@ -1,6 +1,6 @@
server { server {
listen [::]:80; listen [::]:80;
server_name repo.spotter.cz; server_name repo.build.vm;
location / { location / {
root /srv/build; root /srv/build;

View File

@ -5,7 +5,7 @@ cd $(realpath $(dirname "${0}"))
# Install basic build tools # Install basic build tools
apk update apk update
apk add git file htop less openssh-client tar xz apk add git file htop less openssh-client tree
# Install Alpine SDK # Install Alpine SDK
apk add alpine-sdk apk add alpine-sdk
# Install Sphinx support # Install Sphinx support
@ -13,7 +13,7 @@ apk add py3-sphinx
pip3 install recommonmark sphinx-markdown-tables pip3 install recommonmark sphinx-markdown-tables
# Copy root profile files and settings # Copy root profile files and settings
mkdir -p /root/.config/htop /root/.ssh mkdir -p /root/.config/htop
cp root/.profile /root/.profile cp root/.profile /root/.profile
cp root/.config/htop/htoprc /root/.config/htop/htoprc cp root/.config/htop/htoprc /root/.config/htop/htoprc
@ -21,19 +21,17 @@ cp root/.config/htop/htoprc /root/.config/htop/htoprc
adduser root abuild adduser root abuild
cp etc/abuild.conf /etc/abuild.conf cp etc/abuild.conf /etc/abuild.conf
# Prepare LXC build toolchain
cp usr/bin/fix-apk /usr/bin/fix-apk
cp usr/bin/lxc-build /usr/bin/lxc-build
cp usr/bin/lxc-pack /usr/bin/lxc-pack
# Prepare local APK repository # Prepare local APK repository
cp etc/nginx/conf.d/apkrepo.conf /etc/nginx/conf.d/apkrepo.conf cp etc/nginx/conf.d/repo.conf /etc/nginx/conf.d/repo.conf
echo "172.17.0.1 repo.spotter.cz" >>/etc/hosts echo "172.17.0.1 repo.build.vm" >>/etc/hosts
service nginx reload service nginx reload
# Supply abuild key # Change SPOC repository
# echo '/srv/build/repokey.rsa' | abuild-keygen sed -i 's/https:\/\/repo\.spotter\.cz/http:\/\/repo.build.vm/' /etc/spoc/spoc.conf
# Supply LXC build key # Supply abuild key
# openssl ecparam -genkey -name secp384r1 -out /srv/build/packages.key # echo '/root/repo.spotter.cz.rsa' | abuild-keygen
# openssl ec -in /srv/build/packages.key -pubout -out /srv/build/packages.pub
# Supply SPOC key
# openssl ecparam -genkey -name secp384r1 -out /etc/spoc/publish.key
# openssl ec -in /etc/spoc/publish.key -pubout -out /tmp/repository.pub

View File

@ -1,43 +0,0 @@
#!/usr/bin/python3
import os
import sys
def fix_installed(layers):
    """Merge the APK 'installed' databases of all lower layers into the last layer.

    Each package record in lib/apk/db/installed is a run of lines starting at a
    'C:' line. Records are collected from every layer except the last, with
    duplicates dropped (first-seen order preserved), and the merged database is
    written into layers[-1].
    """
    installed = []
    for layer in layers[:-1]:
        try:
            with open(os.path.join(layer, 'lib/apk/db/installed'), 'r') as f:
                buffer = []
                for line in f:
                    # 'C:' starts a new record; flush the previous one first.
                    if line.startswith('C:'):
                        buffer = ''.join(buffer)
                        if buffer not in installed:
                            installed.append(buffer)
                        buffer = []
                    buffer.append(line)
                # Flush the trailing record.
                buffer = ''.join(buffer)
                if buffer not in installed:
                    installed.append(buffer)
        except OSError:
            # A layer without an APK database contributes nothing. The original
            # bare 'except:' also hid KeyboardInterrupt/SystemExit; only missing
            # or unreadable files are expected here.
            continue
    os.makedirs(os.path.join(layers[-1], 'lib/apk/db'), 0o755, True)
    with open(os.path.join(layers[-1], 'lib/apk/db/installed'), 'w') as f:
        f.writelines(installed)
def fix_world(layers):
    """Merge the APK 'world' files of all lower layers into the last layer.

    Builds the union of requested packages (one per line, duplicates removed,
    first-seen order preserved) from every layer except the last and writes
    the result to etc/apk/world inside layers[-1].
    """
    world = []
    for layer in layers[:-1]:
        try:
            with open(os.path.join(layer, 'etc/apk/world'), 'r') as f:
                for line in f:
                    if line not in world:
                        world.append(line)
        except OSError:
            # A layer without a world file contributes nothing. Narrowed from
            # a bare 'except:' which also swallowed KeyboardInterrupt.
            continue
    os.makedirs(os.path.join(layers[-1], 'etc/apk'), 0o755, True)
    with open(os.path.join(layers[-1], 'etc/apk/world'), 'w') as f:
        f.writelines(world)
# CLI: all arguments are layer root paths; the last one receives the merged
# APK database and world file.
fix_installed(sys.argv[1:])
fix_world(sys.argv[1:])

View File

@ -1,210 +0,0 @@
#!/usr/bin/python3
import os
import shutil
import subprocess
import sys
LXC_ROOT = '/var/lib/lxc'
CONFIG_TEMPLATE = '''# Image name
lxc.uts.name = {name}
# Network
lxc.net.0.type = veth
lxc.net.0.link = lxcbr0
lxc.net.0.flags = up
# Volumes
lxc.rootfs.path = {rootfs}
# Mounts
lxc.mount.entry = shm dev/shm tmpfs rw,nodev,noexec,nosuid,relatime,mode=1777,create=dir 0 0
lxc.mount.entry = /etc/hosts etc/hosts none bind,create=file 0 0
lxc.mount.entry = /etc/resolv.conf etc/resolv.conf none bind,create=file 0 0
{mounts}
# Init
lxc.init.cmd = {cmd}
lxc.init.uid = {uid}
lxc.init.gid = {gid}
lxc.init.cwd = {cwd}
# Environment
lxc.environment = PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
{env}
# Halt
lxc.signal.halt = {halt}
# Log
lxc.console.size = 1MB
lxc.console.logfile = /var/log/lxc/{name}.log
# Other
lxc.arch = x86_64
lxc.cap.drop = sys_admin
lxc.hook.pre-start = /usr/bin/vmmgr prepare-container
lxc.hook.start-host = /usr/bin/vmmgr register-container
lxc.hook.post-stop = /usr/bin/vmmgr unregister-container
lxc.include = /usr/share/lxc/config/common.conf
'''
class LXCImage:
    """Builds an LXC container from an 'lxcfile' build recipe.

    The recipe is parsed line by line in build(); directives (IMAGE, LAYER,
    RUN, FIXLAYER, COPY, MOUNT, ENV, USER, CMD, WORKDIR, HALT) mutate the
    instance state and the LXC config under LXC_ROOT is regenerated after
    each state change via rebuild_config().
    """
    def __init__(self, build_path):
        # Container settings with safe defaults; populated while parsing.
        self.name = None
        self.layers = []
        self.mounts = []
        self.env = []
        self.uid = 0
        self.gid = 0
        self.cmd = '/bin/true'
        self.cwd = '/'
        self.halt = 'SIGINT'
        # Accept either a specific lxcfile or a directory containing one;
        # relative paths in the recipe resolve against build_dir.
        if os.path.isfile(build_path):
            self.lxcfile = os.path.realpath(build_path)
            self.build_dir = os.path.dirname(self.lxcfile)
        else:
            self.build_dir = os.path.realpath(build_path)
            self.lxcfile = os.path.join(self.build_dir, 'lxcfile')

    def build(self):
        """Parse the lxcfile and execute its directives in order."""
        with open(self.lxcfile, 'r') as f:
            lxcfile = [l.strip() for l in f.readlines()]
        script = []
        script_eof = None
        for line in lxcfile:
            if script_eof:
                # Inside a RUN heredoc: accumulate until the EOF marker.
                if line == script_eof:
                    script_eof = None
                    self.run_script(script)
                else:
                    script.append(line)
            elif line.startswith('RUN'):
                # 'RUN <marker>' opens a heredoc-style script block.
                script = []
                script_eof = line.split()[1]
            elif line.startswith('IMAGE'):
                self.set_name(line.split()[1])
            elif line.startswith('LAYER'):
                self.add_layer(line.split()[1])
            elif line.startswith('FIXLAYER'):
                self.fix_layer(line.split()[1])
            elif line.startswith('COPY'):
                # Destination defaults to the layer root when omitted.
                srcdst = line.split()
                self.copy_files(srcdst[1], srcdst[2] if len(srcdst) == 3 else '')
            elif line.startswith('MOUNT'):
                mount = line.split()
                self.add_mount(mount[1], mount[2], mount[3])
            elif line.startswith('ENV'):
                env = line.split()
                self.add_env(env[1], env[2])
            elif line.startswith('USER'):
                uidgid = line.split()
                self.set_user(uidgid[1], uidgid[2])
            elif line.startswith('CMD'):
                self.set_cmd(' '.join(line.split()[1:]))
            elif line.startswith('WORKDIR'):
                self.set_cwd(line.split()[1])
            elif line.startswith('HALT'):
                self.set_halt(line.split()[1])
        # Add the final layer which will be treated as ephemeral
        self.add_layer('{}/delta0'.format(self.name))

    def rebuild_config(self):
        """Rewrite the LXC config file from the current instance state."""
        if not self.name:
            return
        if len(self.layers) == 1:
            rootfs = self.layers[0]
        else:
            # Multiple lower overlayfs layers are ordered from right to left (lower2:lower1:rootfs:upper)
            rootfs = 'overlay:{}:{}'.format(':'.join(self.layers[:-1][::-1]), self.layers[-1])
        mounts = '\n'.join(self.mounts)
        env = '\n'.join(self.env)
        with open(os.path.join(LXC_ROOT, self.name, 'config'), 'w') as f:
            f.write(CONFIG_TEMPLATE.format(name=self.name,
                rootfs=rootfs, mounts=mounts, env=env,
                uid=self.uid, gid=self.gid,
                cmd=self.cmd, cwd=self.cwd, halt=self.halt))

    def run_script(self, script):
        """Write a RUN block to /run.sh in the top layer and execute it in the container."""
        sh = os.path.join(self.layers[-1], 'run.sh')
        with open(sh, 'w') as f:
            f.write('#!/bin/sh\nset -ev\n\n{}\n'.format('\n'.join(script)))
        os.chmod(sh, 0o700)
        subprocess.run(['lxc-execute', '-n', self.name, '--', '/bin/sh', '-lc', '/run.sh'], check=True)
        os.unlink(sh)

    def set_name(self, name):
        # IMAGE directive: also creates the container directory under LXC_ROOT.
        self.name = name
        os.makedirs(os.path.join(LXC_ROOT, self.name), 0o755, True)

    def add_layer(self, layer):
        # LAYER directive: layer paths are relative to LXC_ROOT.
        layer = os.path.join(LXC_ROOT, layer)
        self.layers.append(layer)
        os.makedirs(layer, 0o755, True)
        self.rebuild_config()

    def fix_layer(self, cmd):
        # FIXLAYER directive: run an external fixup script with all layer paths.
        subprocess.run([cmd]+self.layers, check=True)

    def copy_files(self, src, dst):
        """COPY directive: copy a local path or unpack a remote archive into the top layer."""
        dst = os.path.join(self.layers[-1], dst)
        if src.startswith('http://') or src.startswith('https://'):
            self.unpack_http_archive(src, dst)
        else:
            src = os.path.join(self.build_dir, src)
            copy_tree(src, dst)

    def unpack_http_archive(self, src, dst):
        """Stream a remote tarball (gz/bz2/xz by extension) into dst via wget | tar."""
        xf = 'xzf'
        if src.endswith('.bz2'):
            xf = 'xjf'
        elif src.endswith('.xz'):
            xf = 'xJf'
        with subprocess.Popen(['wget', src, '-O', '-'], stdout=subprocess.PIPE) as wget:
            with subprocess.Popen(['tar', xf, '-', '-C', dst], stdin=wget.stdout) as tar:
                wget.stdout.close()
                tar.wait()

    def add_mount(self, type, src, dst):
        # MOUNT directive: 'type' is DIR or FILE (lowercased for lxc 'create=').
        self.mounts.append('lxc.mount.entry = {} {} none bind,create={} 0 0'.format(src, dst, type.lower()))
        self.rebuild_config()

    def add_env(self, key, value):
        # ENV directive.
        self.env.append('lxc.environment = {}={}'.format(key, value))
        self.rebuild_config()

    def set_user(self, uid, gid):
        # USER directive.
        self.uid = uid
        self.gid = gid
        self.rebuild_config()

    def set_cmd(self, cmd):
        # CMD directive.
        self.cmd = cmd
        self.rebuild_config()

    def set_cwd(self, cwd):
        # WORKDIR directive.
        self.cwd = cwd
        self.rebuild_config()

    def set_halt(self, halt):
        # HALT directive: signal used to stop the container.
        self.halt = halt
        self.rebuild_config()
def copy_tree(src, dst):
    """Recursively copy src to dst, merging into directories that already exist.

    Regular files are copied with metadata via shutil.copy2; after a
    directory's contents are copied, its metadata is replicated with
    shutil.copystat.
    """
    if not os.path.isdir(src):
        shutil.copy2(src, dst)
        return
    os.makedirs(dst, exist_ok=True)
    for entry in os.listdir(src):
        copy_tree(os.path.join(src, entry), os.path.join(dst, entry))
    shutil.copystat(src, dst)
if __name__ == '__main__':
    # CLI entry point: exactly one argument (lxcfile path or its directory).
    if len(sys.argv) != 2 or sys.argv[1] in ('-h', '--help'):
        print('Usage: lxc-build <buildpath>\n where the buildpath can be either specific lxcfile or a directory containing one')
    else:
        i = LXCImage(sys.argv[1])
        i.build()

View File

@ -1,92 +0,0 @@
#!/usr/bin/python3
import hashlib
import json
import os
import subprocess
import sys
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.serialization import load_pem_private_key
PKG_ROOT = '/srv/build/lxc'
PRIVATE_KEY = '/srv/build/packages.key'
LXC_ROOT = '/var/lib/lxc'
def pack(path):
    """Package an LXC container into a signed, xz-compressed tarball.

    'path' is either a 'meta' JSON file or a directory containing one; the
    directory name becomes the package name. The container rootfs referenced
    by meta['lxcpath'] plus any install/upgrade/uninstall scripts are archived,
    compressed, registered in the repository 'packages' index, and the index
    is signed with the ECDSA key at PRIVATE_KEY.
    """
    # Determine correct metadata file and package name
    path = os.path.realpath(path)
    if os.path.isdir(path):
        meta_dir = path
        meta_file = os.path.join(meta_dir, 'meta')
    else:
        meta_dir = os.path.dirname(path)
        meta_file = path
    pkg_name = os.path.basename(meta_dir)
    # Load metadata
    with open(meta_file) as f:
        meta = json.load(f)
    # Prepare package file names
    os.makedirs(PKG_ROOT, 0o755, True)
    tar_path = os.path.join(PKG_ROOT, '{}_{}-{}.tar'.format(pkg_name, meta['version'], meta['release']))
    xz_path = '{}.xz'.format(tar_path)
    # Remove old package
    if os.path.exists(tar_path):
        os.unlink(tar_path)
    if os.path.exists(xz_path):
        os.unlink(xz_path)
    # Create archive (run from / so archive members carry full relative paths)
    print('Archiving', meta['lxcpath'])
    subprocess.run(['tar', '--xattrs', '-cpf', tar_path, os.path.join(LXC_ROOT, meta['lxcpath'])], cwd='/')
    # Add install/upgrade/uninstall scripts, relocated under srv/<pkg_name>/
    scripts = ('install', 'install.sh', 'upgrade', 'upgrade.sh', 'uninstall', 'uninstall.sh')
    scripts = [s for s in scripts if os.path.exists(os.path.join(meta_dir, s))]
    subprocess.run(['tar', '--transform', 's|^|srv/{}/|'.format(pkg_name), '-rpf', tar_path] + scripts, cwd=meta_dir)
    # Compress the tarball with xz (LZMA2)
    print('Compressing', tar_path, '({:.2f} MB)'.format(os.path.getsize(tar_path)/1048576))
    subprocess.run(['xz', '-9', tar_path])
    print('Compressed ', xz_path, '({:.2f} MB)'.format(os.path.getsize(xz_path)/1048576))
    # Register package in the repository-wide 'packages' JSON index
    print('Registering package')
    packages = {}
    packages_file = os.path.join(PKG_ROOT, 'packages')
    if os.path.exists(packages_file):
        with open(packages_file, 'r') as f:
            packages = json.load(f)
    packages[pkg_name] = meta
    packages[pkg_name]['size'] = os.path.getsize(xz_path)
    packages[pkg_name]['sha512'] = hash_file(xz_path)
    with open(packages_file, 'w') as f:
        json.dump(packages, f, sort_keys=True, indent=4)
    # Sign packages file with ECDSA/SHA-512 so clients can verify the index
    print('Signing packages')
    with open(PRIVATE_KEY, 'rb') as f:
        priv_key = load_pem_private_key(f.read(), None, default_backend())
    with open(os.path.join(PKG_ROOT, 'packages'), 'rb') as f:
        data = f.read()
    with open(os.path.join(PKG_ROOT, 'packages.sig'), 'wb') as f:
        f.write(priv_key.sign(data, ec.ECDSA(hashes.SHA512())))
def hash_file(file_path):
    """Return the SHA-512 hex digest of a file, read in 64 KiB chunks."""
    digest = hashlib.sha512()
    with open(file_path, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
if __name__ == '__main__':
    # CLI entry point: exactly one argument (meta file path or its directory).
    if len(sys.argv) != 2 or sys.argv[1] in ('-h', '--help'):
        print('Usage: lxc-pack <buildpath>\n where the buildpath can be either specific meta file or a directory containing one')
    else:
        pack(sys.argv[1])

View File

@ -6,54 +6,52 @@
|-------------------------|---------------------| |-------------------------|---------------------|
| Alpine 3.8 | alpine3.8 | | Alpine 3.8 | alpine3.8 |
| Alpine 3.8 - PHP 5.6 | alpine3.8-php5.6 | | Alpine 3.8 - PHP 5.6 | alpine3.8-php5.6 |
| Alpine 3.8 - NodeJS 8 | alpine3.8-nodejs8 |
| Alpine 3.9 - Ruby 2.4 | alpine3.8-ruby2.4 | | Alpine 3.9 - Ruby 2.4 | alpine3.8-ruby2.4 |
| Alpine 3.9 | alpine3.9 | | Alpine 3.9 | alpine3.9 |
| Alpine 3.9 - Java 8 | alpine3.9-java8 | | Alpine 3.9 - Java 8 | alpine3.9-java8 |
| Alpine 3.9 - PHP 7.2 | alpine3.9-php7.2 | | Alpine 3.9 - PHP 7.2 | alpine3.9-php7.2 |
| Alpine 3.9 - Python 2.7 | alpine3.9-python2.7 | | Alpine 3.9 - Python 2.7 | alpine3.9-python2.7 |
| Alpine 3.9 - Python 3.6 | alpine3.9-python3.6 | | Alpine 3.9 - Python 3.6 | alpine3.9-python3.6 |
| Alpine 3.9 - NodeJS 10 | alpine3.9-nodejs10 |
| Alpine 3.9 - Ruby 2.4 | alpine3.9-ruby2.4 | | Alpine 3.9 - Ruby 2.4 | alpine3.9-ruby2.4 |
| Alpine 3.9 - Ruby 2.6 | alpine3.9-ruby2.6 |
| Alpine 3.9 - Tomcat 7 | alpine3.9-tomcat7 | | Alpine 3.9 - Tomcat 7 | alpine3.9-tomcat7 |
| Alpine 3.9 - Tomcat 8.5 | alpine3.9-tomcat8.5 | | Alpine 3.9 - Tomcat 8.5 | alpine3.9-tomcat8.5 |
| Sahana - Shared | sahana-shared |
## List of service containers ## List of service containers
| Service | Container | UID/GID | Internal Port | | Service | Container | UID/GID | Internal Port |
|-----------------|-----------------|---------|------------------| |-----------------|-----------------|---------|------------------|
| ActiveMQ | activemq | 61616 | 61616 (ActiveMQ) | | ActiveMQ | activemq | 61616 | 61616 (ActiveMQ) |
| CKAN Datapusher | ckan-datapusher | 8004 | 8080 (HTTP) |
| MariaDB | mariadb | 3306 | 3306 (MySQL) | | MariaDB | mariadb | 3306 | 3306 (MySQL) |
| Postgres | postgres | 5432 | 5432 (Postgres) | | Postgres | postgres | 5432 | 5432 (Postgres) |
| PostGIS | postgis | 5432 | 5432 (Postgres) |
| RabbitMQ | rabbitmq | 5672 | 5672 (AMQP) | | RabbitMQ | rabbitmq | 5672 | 5672 (AMQP) |
| Redis | redis | 6379 | 6379 (Redis) | | Redis | redis | 6379 | 6379 (Redis) |
| Solr | solr | 8983 | 8983 (HTTP) | | Solr 6 | solr6 | 8983 | 8983 (HTTP) |
## List of application containers ## List of application containers
All application containers listen on internal port 8080 (HTTP) All application containers have the application user UID/GID 8080 and listen on internal port 8080 (HTTP)
| Application | Container | UID/GID | Host | | Application | Container | Host |
|----------------|-------------------|---------|-------------| |----------------|-------------------|-------------|
| CKAN | ckan | 8003 | ckan | | CKAN | ckan | ckan |
| Crisis Cleanup | crisiscleanup | 8005 | cc | | Crisis Cleanup | crisiscleanup | cc |
| CTS | cts | 8006 | cts | | CTS | cts | cts |
| EcoGIS | ecogis | 8020 | ecogis | | EcoGIS | ecogis | ecogis |
| FrontlineSMS | frontlinesms | 8018 | sms | | FrontlineSMS | frontlinesms | sms |
| GNU Health | gnuhealth | 8008 | gh | | GNU Health | gnuhealth | gh |
| KanBoard | kanboard | 8009 | kb | | KanBoard | kanboard | kb |
| Mifos X | mifosx | 8012 | mifosx | | Mifos X | mifosx | mifosx |
| Motech | motech | 8013 | motech | | Motech | motech | motech |
| ODK Aggregate | opendatakit | 8015 | odk | | ODK Aggregate | opendatakit | odk |
| ODK Build | opendatakit-build | 8017 | odkbuild | | ODK Build | opendatakit-build | odkbuild |
| Odoo | odoo | 8019 | odoo | | Odoo | odoo | odoo |
| OpenMapKit | openmapkit | 8007 | omk | | OpenMapKit | openmapkit | omk |
| Pan.do/ra | pandora | 8002 | pandora | | Pan.do/ra | pandora | pandora |
| Sahana | sahana | 8001 | sahana | | Sahana | sahana | sahana |
| Sahana - Demo | sahana-demo | 8001 | sahana-demo | | Sahana - Demo | sahana-demo | sahana-demo |
| SAMBRO | sambro | 8001 | sambro | | SAMBRO | sambro | sambro |
| SeedDMS | seeddms | 8010 | dms | | SeedDMS | seeddms | dms |
| Sigmah | sigmah | 8011 | sigmah | | Sigmah | sigmah | sigmah |
| Ushahidi | ushahidi | 8014 | ush | | Ushahidi | ushahidi | ush |

View File

@ -13,7 +13,7 @@ The usage of Abuild, APK package manager and syntax of `APKBUILD` files is best
## Abuild in a nutshell ## Abuild in a nutshell
Building with abuild requires `alpine-sdk` package installed, `/etc/abuild.conf` configured and an RSA private key created in `/srv/build/repokey.rsa` and subsequently registered by `abuild-keygen` command. All these are taken care of in `install-toolchain.sh` script as part of [Build environment installation](vm-creation). Building with abuild requires `alpine-sdk` package installed, `/etc/abuild.conf` configured and an RSA private key created in `/srv/repo.spotter.cz.rsa` and subsequently registered by `abuild-keygen` command. All these are taken care of in `install-toolchain.sh` script as part of [Build environment installation](vm-creation).
Abuild toolchain is intended to be used in automated builds, therefore it requires some dependencies normally not found in other packaging systems. Abuild expects that `APKBUILD` files are a part of git repository and tries to read current commit hash. Then it tries to automatically download, build (compile), strip binaries, find out dependencies, and generally perform a lot of tasks normally useful when you are compiling from sources. Finally it packages the result to one or more subpackages, according to the build recipe. For purposes of LXC packaging, this is mostly useless, which is the reason why we have a [custom package manager](pkgmgr). It is however perfectly suitable for packages installed directly on the basic VM. Abuild toolchain is intended to be used in automated builds, therefore it requires some dependencies normally not found in other packaging systems. Abuild expects that `APKBUILD` files are a part of git repository and tries to read current commit hash. Then it tries to automatically download, build (compile), strip binaries, find out dependencies, and generally perform a lot of tasks normally useful when you are compiling from sources. Finally it packages the result to one or more subpackages, according to the build recipe. For purposes of LXC packaging, this is mostly useless, which is the reason why we have a [custom package manager](pkgmgr). It is however perfectly suitable for packages installed directly on the basic VM.

View File

@ -7,7 +7,7 @@ VM building and packaging
vm-creation vm-creation
abuild abuild
lxc-overview lxc-overview
lxc-build lxcbuild
lxc-pack lxc-pack
pkgmgr pkgmgr
vmmgr-hooks vmmgr-hooks

View File

@ -2,18 +2,18 @@
## Overview ## Overview
`lxc-build` utility creates a LXC container based on its build recipe and build context path given in command line parameter. If a filename is given, the build recipe is loaded from the file and the directory in which the file resides is taken as build context, ie. all relative paths are resolved from it. In case a directory path is passed as parameter, the directory is then used as build context and a file called `lxcfile` from the given directory is used as build recipe. `lxcbuild` utility creates a LXC container based on its build recipe and build context path given in command line parameter. If a filename is given, the build recipe is loaded from the file and the directory in which the file resides is taken as build context, ie. all relative paths are resolved from it. In case a directory path is passed as parameter, the directory is then used as build context and a file called `lxcfile` from the given directory is used as build recipe.
### Usage ### Usage
```bash ```bash
lxc-build <buildpath> lxcbuild <buildpath>
where the buildpath can be either specific lxcfile or a directory containing one where the buildpath can be either specific lxcfile or a directory containing one
``` ```
## Directives used in lxcfile ## Directives used in lxcfile
The *lxcfile* syntax is designed to resemble *Dockerfile* syntax in order to ease the potential transition. Since LXC operates on much lower level of abstraction than Docker, some principles are applied more explicitly and verbosely. Major difference between Docker and *lxc-build* is that every directive in *Dockerfile* creates a new filesystem layer whereas layers in *lxc-build* are managed manually. The *lxcfile* syntax is designed to resemble *Dockerfile* syntax in order to ease the potential transition. Since LXC operates on much lower level of abstraction than Docker, some principles are applied more explicitly and verbosely. Major difference between Docker and *lxcbuild* is that every directive in *Dockerfile* creates a new filesystem layer whereas layers in *lxcbuild* are managed manually.
### IMAGE ### IMAGE
@ -29,13 +29,6 @@ The *lxcfile* syntax is designed to resemble *Dockerfile* syntax in order to eas
- **Docker equivalent:** `FROM` - **Docker equivalent:** `FROM`
- **Populates LXC field:** `lxc.rootfs.path` - **Populates LXC field:** `lxc.rootfs.path`
### FIXLAYER
- **Usage:** `FIXLAYER <scriptname>`
- **Description:** Runs `<scriptname>` on LXC host and passes all layer paths as parameter to this script. This helps you to resolve the conflicts in cases where you mix multiple OverlayFS layers with overlapping files, ie. package manager cache. The idea is that all layers are read separately by the `<scriptname>` script and the fixed result is written back to the uppermost layer.
- **Docker equivalent:** None
- **Populates LXC field:** None
### RUN ### RUN
- **Usage:** - **Usage:**
@ -82,13 +75,6 @@ The *lxcfile* syntax is designed to resemble *Dockerfile* syntax in order to eas
- **Docker equivalent:** `COPY` or `ADD` - **Docker equivalent:** `COPY` or `ADD`
- **Populates LXC field:** None - **Populates LXC field:** None
### MOUNT
- **Usage:** `MOUNT DIR|FILE <source> <destination>`
- **Description:** Creates a directory or file mount for the container. The `<source>` is usually given as absolute path existing on the LXC host, the `<destination>` is a path relative to the container root directory. If the file doesn't exist in any of the container layers, it is automatically created on container startup.
- **Docker equivalent:** `VOLUME`
- **Populates LXC field:** `lxc.mount.entry`
### USER ### USER
- **Usage:** `USER <uid> <gid>` - **Usage:** `USER <uid> <gid>`
@ -126,7 +112,7 @@ The *lxcfile* syntax is designed to resemble *Dockerfile* syntax in order to eas
## LXC config ## LXC config
Although *lxcfile* populates some LXC config fields, there are lot of defaults with remain unchanged. The template file to which *lxc-build* fills in the values looks as follows: Although *lxcfile* populates some LXC config fields, there are lot of defaults with remain unchanged. The template file to which *lxcbuild* fills in the values looks as follows:
```bash ```bash
# Image name # Image name
@ -192,9 +178,6 @@ RUN EOF
apk --no-cache add redis apk --no-cache add redis
EOF EOF
MOUNT FILE /srv/redis/conf/redis.conf etc/redis.conf
MOUNT DIR /srv/redis/data var/lib/redis
USER 6379 6379 USER 6379 6379
CMD redis-server /etc/redis.conf CMD redis-server /etc/redis.conf
``` ```

View File

@ -28,7 +28,7 @@ Due to the Docker's approach, storage overlay layers cannot be easily managed by
Finally, Docker maintainers explicitly refuse to implement a possibility to isolate the docker daemon to private Docker repositories (registries) in the community edition of Docker. It is possible to have some custom and even private repositories, but it is not possible to deactivate the default public *Dockerhub*. Finally, Docker maintainers explicitly refuse to implement a possibility to isolate the docker daemon to private Docker repositories (registries) in the community edition of Docker. It is possible to have some custom and even private repositories, but it is not possible to deactivate the default public *Dockerhub*.
The downsides of using LXC is that its usage requires a bit more knowledge about how the linux containers actually work, and that most 3rd party applications are distributed using `Dockerfile`, which requires rewriting into LXC, however this is simplified by the [`lxc-build`](lxc-build) tool, which aims to automatize LXC container building using *Dockerfile*-like syntax. The downsides of using LXC is that its usage requires a bit more knowledge about how the linux containers actually work, and that most 3rd party applications are distributed using `Dockerfile`, which requires rewriting into LXC, however this is simplified by the [`lxcbuild`](lxcbuild) tool, which aims to automatize LXC container building using *Dockerfile*-like syntax.
## Container interfaces ## Container interfaces

View File

@ -4,7 +4,7 @@
The `lxc-pack` utility creates a `.tar.xz` archives based on package metadata and manages the `packages.json` repository metadata file. If a filename is passed as command line parameter to `lxc-pack`, the metadata are loaded from the file. In case a directory path is given, the metadata are loaded from a file called `pkg` from the directory. All metadata files are in JSON format. The `lxc-pack` utility creates a `.tar.xz` archives based on package metadata and manages the `packages.json` repository metadata file. If a filename is passed as command line parameter to `lxc-pack`, the metadata are loaded from the file. In case a directory path is given, the metadata are loaded from a file called `pkg` from the directory. All metadata files are in JSON format.
The product of *lxc-build* command described in LXC building documentation can be used in its entirety, ie. both filesystem layer and configuration, or only as dependency, in which case the container configuration is omitted and only the filesystem layer is used. Apart from that, the package can contain installation, upgrade and uninstallation script and data, all of which are optional. Accepted names are The product of *lxcbuild* command described in LXC building documentation can be used in its entirety, ie. both filesystem layer and configuration, or only as dependency, in which case the container configuration is omitted and only the filesystem layer is used. Apart from that, the package can contain installation, upgrade and uninstallation script and data, all of which are optional. Accepted names are
- `install.sh` file and `install` directory for post-install scripts. - `install.sh` file and `install` directory for post-install scripts.
- `upgrade.sh` file and `upgrade` directory for post-upgrade scripts. - `upgrade.sh` file and `upgrade` directory for post-upgrade scripts.

View File

@ -20,7 +20,7 @@ setup-interfaces
ifup eth0 ifup eth0
# Download and launch the setup script # Download and launch the setup script
wget repo.spotter.cz/vm.sh wget https://repo.spotter.cz/vm.sh
sh vm.sh sh vm.sh
``` ```
@ -46,7 +46,7 @@ Assign the newly generated key to your GitLab account
```bash ```bash
# Clone the repository # Clone the repository
git clone --recurse-submodules ssh://git@git.spotter.cz:2222/Spotter-Cluster/Spotter-Cluster.git git clone --recursive ssh://git@git.spotter.cz:2222/Spotter-Cluster/Spotter-Cluster.git
# Install the build toolchain # Install the build toolchain
Spotter-Cluster/build/install-toolchain.sh Spotter-Cluster/build/install-toolchain.sh
@ -58,7 +58,7 @@ There are 3 distinct packaging systems.
1. Just a plain tar for basic OS setup used by `vm.sh` installation script. 1. Just a plain tar for basic OS setup used by `vm.sh` installation script.
2. [Abuild](abuild) for the native Alpine linux packages (APK) used for ACME client and VMMgr packaging. 2. [Abuild](abuild) for the native Alpine linux packages (APK) used for ACME client and VMMgr packaging.
3. [`lxc-build`](lxc-build) / [`lxc-pack`](lxc-pack) for LXC container building and packaging. 3. [`lxcbuild`](lxcbuild) / [`lxc-pack`](lxc-pack) for LXC container building and packaging.
Before any building and packaging can be started, build toolchain including signing keys needs to be set up. This is done via `install-toolchain.sh` script. Before any building and packaging can be started, build toolchain including signing keys needs to be set up. This is done via `install-toolchain.sh` script.

View File

@ -26,7 +26,7 @@ Where the `application` is the internal application name, same as previously use
## LXC hooks ## LXC hooks
LXC hooks set various environment variables prior to calling the defined executables. For overview of native LXC hooks, see section *Container hooks* in the official [lxc.container.conf(5) documentation](https://linuxcontainers.org/lxc/manpages/man5/lxc.container.conf.5.html). All hooks mentioned in this chapter are hardcoded in the container configuration via a template used by[`lxc-build`](lxc-build). LXC hooks set various environment variables prior to calling the defined executables. For overview of native LXC hooks, see section *Container hooks* in the official [lxc.container.conf(5) documentation](https://linuxcontainers.org/lxc/manpages/man5/lxc.container.conf.5.html). All hooks mentioned in this chapter are hardcoded in the container configuration via a template used by[`lxcbuild`](lxcbuild).
### prepare-container ### prepare-container

View File

@ -0,0 +1,27 @@
{
"version": "2.10.1-200403",
"meta": {
"title": "EcoGIS",
"desc-cs": "EcoGIS",
"desc-en": "EcoGIS",
"license": "GPL"
},
"containers": {
"ecogis": {
"image": "ecogis_2.10.1-200403",
"depends": [
"ecogis-postgres"
],
"mounts": {
"ecogis/ecogis_data": "srv/ecogis/data/files",
"ecogis/ecogis_conf/config.php": "srv/ecogis/etc/config.php:file"
}
},
"ecogis-postgres": {
"image": "postgres_11.3.0-190620",
"mounts": {
"ecogis/postgres_data": "var/lib/postgresql"
}
}
}
}

View File

@ -1,7 +1,5 @@
IMAGE ecogis IMAGE ecogis_2.10.1-200403
LAYER shared/alpine3.8 FROM alpine3.8-php5.6_5.6.40-200403
LAYER shared/alpine3.8-php5.6
LAYER ecogis/ecogis
RUN EOF RUN EOF
# Install runtime dependencies # Install runtime dependencies
@ -23,8 +21,8 @@ RUN EOF
pear install Auth Log pear install Auth Log
# Create OS user # Create OS user
addgroup -S -g 8020 ecogis addgroup -S -g 8080 ecogis
adduser -S -u 8020 -h /srv/ecogis -s /bin/false -g ecogis -G ecogis ecogis adduser -S -u 8080 -h /srv/ecogis -s /bin/false -g ecogis -G ecogis ecogis
# Cleanup # Cleanup
apk --no-cache del .deps apk --no-cache del .deps
@ -32,9 +30,6 @@ RUN EOF
rm -rf /usr/bin/composer /tmp/.composer rm -rf /usr/bin/composer /tmp/.composer
EOF EOF
COPY lxc COPY image.d
# MOUNT DIR /srv/ecogis/data srv/ecogis/data/files CMD /bin/s6-svscan /etc/services.d
MOUNT FILE /srv/ecogis/conf/config.php srv/ecogis/etc/config.php
CMD s6-svscan /etc/services.d

View File

@ -15,6 +15,8 @@ http {
server_tokens off; server_tokens off;
client_max_body_size 100m; client_max_body_size 100m;
sendfile on; sendfile on;
tcp_nodelay on;
send_timeout 300;
server { server {
listen 8080; listen 8080;

View File

@ -0,0 +1,31 @@
#!/bin/sh
set -ev
# Volumes
POSTGRES_DATA="${VOLUMES_DIR}/ecogis/postgres_data"
ECOGIS_CONF="${VOLUMES_DIR}/ecogis/ecogis_conf"
ECOGIS_DATA="${VOLUMES_DIR}/ecogis/ecogis_data"
# Create Postgres instance
install -o 105432 -g 105432 -m 700 -d ${POSTGRES_DATA}
spoc-container exec cts-postgres -- initdb -D /var/lib/postgresql
# Configure Postgres
install -o 105432 -g 105432 -m 600 postgres_data/postgresql.conf ${POSTGRES_DATA}/postgresql.conf
install -o 105432 -g 105432 -m 600 postgres_data/pg_hba.conf ${POSTGRES_DATA}/pg_hba.conf
# Populate database
export ECOGIS_PWD=$(head -c 18 /dev/urandom | base64 | tr -d '+/=')
spoc-container start ecogis-postgres
envsubst <createdb.sql | spoc-container exec ecogis-postgres -- psql
# Configure EcoGIS
install -o 108080 -g 108080 -m 750 ${ECOGIS_CONF}
install -o 108080 -g 108080 -m 750 ${ECOGIS_DATA}
envsubst <ecogis_conf/config.php | install -o 108080 -g 108080 -m 640 /dev/stdin ${ECOGIS_CONF}/config.php
# Stop services required for setup
spoc-container stop ecogis-postgres
# Register application
vmmgr register-app ecogis ecogis

View File

@ -9,7 +9,7 @@
/* ------------------------------ DB Settings ------------------------------ */ /* ------------------------------ DB Settings ------------------------------ */
$dsn = array('dbtype' => 'pgsql', $dsn = array('dbtype' => 'pgsql',
'dbhost' => 'postgres', // host 'dbhost' => 'ecogis-postgres', // host
'dbuser' => 'ecogis', // login 'dbuser' => 'ecogis', // login
'dbpass' => '${ECOGIS_PWD}', // Password 'dbpass' => '${ECOGIS_PWD}', // Password
'dbname' => 'ecogis', // database 'dbname' => 'ecogis', // database

View File

@ -73,27 +73,7 @@ unix_socket_directories = '/run/postgresql,/tmp' # comma-separated list of direc
#bonjour_name = '' # defaults to the computer name #bonjour_name = '' # defaults to the computer name
# (change requires restart) # (change requires restart)
# - Security and Authentication - # - TCP settings -
#authentication_timeout = 1min # 1s-600s
#ssl = off
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_dh_params_file = ''
#ssl_cert_file = 'server.crt'
#ssl_key_file = 'server.key'
#ssl_ca_file = ''
#ssl_crl_file = ''
#password_encryption = md5 # md5 or scram-sha-256
#db_user_namespace = off
#row_security = on
# GSSAPI using Kerberos
#krb_server_keyfile = ''
#krb_caseins_users = off
# - TCP Keepalives -
# see "man 7 tcp" for details # see "man 7 tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; #tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
@ -102,6 +82,34 @@ unix_socket_directories = '/run/postgresql,/tmp' # comma-separated list of direc
# 0 selects the system default # 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT; #tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default # 0 selects the system default
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
# 0 selects the system default
# - Authentication -
#authentication_timeout = 1min # 1s-600s
#password_encryption = md5 # md5 or scram-sha-256
#db_user_namespace = off
# GSSAPI using Kerberos
#krb_server_keyfile = ''
#krb_caseins_users = off
# - SSL -
#ssl = off
#ssl_ca_file = ''
#ssl_cert_file = 'server.crt'
#ssl_crl_file = ''
#ssl_key_file = 'server.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------
@ -110,7 +118,7 @@ unix_socket_directories = '/run/postgresql,/tmp' # comma-separated list of direc
# - Memory - # - Memory -
shared_buffers = 192MB # min 128kB shared_buffers = 128MB # min 128kB
# (change requires restart) # (change requires restart)
#huge_pages = try # on, off, or try #huge_pages = try # on, off, or try
# (change requires restart) # (change requires restart)
@ -121,16 +129,20 @@ shared_buffers = 192MB # min 128kB
# you actively intend to use prepared transactions. # you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB #work_mem = 4MB # min 64kB
#maintenance_work_mem = 64MB # min 1MB #maintenance_work_mem = 64MB # min 1MB
#replacement_sort_tuples = 150000 # limits use of replacement selection sort
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem #autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#max_stack_depth = 2MB # min 100kB #max_stack_depth = 2MB # min 100kB
#shared_memory_type = mmap # the default is the first option
# supported by the operating system:
# mmap
# sysv
# windows
# (change requires restart)
dynamic_shared_memory_type = posix # the default is the first option dynamic_shared_memory_type = posix # the default is the first option
# supported by the operating system: # supported by the operating system:
# posix # posix
# sysv # sysv
# windows # windows
# mmap # mmap
# use none to disable dynamic shared memory
# (change requires restart) # (change requires restart)
# - Disk - # - Disk -
@ -138,15 +150,14 @@ dynamic_shared_memory_type = posix # the default is the first option
#temp_file_limit = -1 # limits per-process temp file space #temp_file_limit = -1 # limits per-process temp file space
# in kB, or -1 for no limit # in kB, or -1 for no limit
# - Kernel Resource Usage - # - Kernel Resources -
#max_files_per_process = 1000 # min 25 #max_files_per_process = 1000 # min 25
# (change requires restart) # (change requires restart)
#shared_preload_libraries = '' # (change requires restart)
# - Cost-Based Vacuum Delay - # - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds #vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1 # 0-10000 credits #vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 10 # 0-10000 credits #vacuum_cost_page_miss = 10 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits #vacuum_cost_page_dirty = 20 # 0-10000 credits
@ -155,7 +166,7 @@ dynamic_shared_memory_type = posix # the default is the first option
# - Background Writer - # - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds #bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round #bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round #bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables #bgwriter_flush_after = 512kB # measured in pages, 0 disables
@ -163,21 +174,23 @@ dynamic_shared_memory_type = posix # the default is the first option
#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching #effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart) #max_worker_processes = 8 # (change requires restart)
#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers #max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
#parallel_leader_participation = on
#max_parallel_workers = 8 # maximum number of max_worker_processes that #max_parallel_workers = 8 # maximum number of max_worker_processes that
# can be used in parallel queries # can be used in parallel operations
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate #old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart) # (change requires restart)
#backend_flush_after = 0 # measured in pages, 0 disables #backend_flush_after = 0 # measured in pages, 0 disables
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------
# WRITE AHEAD LOG # WRITE-AHEAD LOG
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------
# - Settings - # - Settings -
#wal_level = replica # minimal, replica, or logical wal_level = minimal # minimal, replica, or logical
# (change requires restart) # (change requires restart)
#fsync = on # flush data to disk for crash safety #fsync = on # flush data to disk for crash safety
# (turning this off can cause # (turning this off can cause
@ -195,6 +208,8 @@ dynamic_shared_memory_type = posix # the default is the first option
#wal_compression = off # enable compression of full-page writes #wal_compression = off # enable compression of full-page writes
#wal_log_hints = off # also do full page writes of non-critical updates #wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart) # (change requires restart)
#wal_init_zero = on # zero-fill new WAL files
#wal_recycle = on # recycle WAL files
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers #wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart) # (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds #wal_writer_delay = 200ms # 1-10000 milliseconds
@ -223,21 +238,57 @@ dynamic_shared_memory_type = posix # the default is the first option
#archive_timeout = 0 # force a logfile segment switch after this #archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables # number of seconds; 0 disables
# - Archive Recovery -
# These are only used in recovery mode.
#restore_command = '' # command to use to restore an archived logfile segment
# placeholders: %p = path of file to restore
# %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
# (change requires restart)
#archive_cleanup_command = '' # command to execute at every restartpoint
#recovery_end_command = '' # command to execute at completion of recovery
# - Recovery Target -
# Set these only when performing a targeted recovery.
#recovery_target = '' # 'immediate' to end recovery as soon as a
# consistent state is reached
# (change requires restart)
#recovery_target_name = '' # the named restore point to which recovery will proceed
# (change requires restart)
#recovery_target_time = '' # the time stamp up to which recovery will proceed
# (change requires restart)
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
# (change requires restart)
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
# (change requires restart)
#recovery_target_inclusive = on # Specifies whether to stop:
# just after the specified recovery target (on)
# just before the recovery target (off)
# (change requires restart)
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
# (change requires restart)
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
# (change requires restart)
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------
# REPLICATION # REPLICATION
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------
# - Sending Server(s) - # - Sending Servers -
# Set these on the master and on any standby that will send replication data. # Set these on the master and on any standby that will send replication data.
#max_wal_senders = 10 # max number of walsender processes max_wal_senders = 0 # max number of walsender processes
# (change requires restart) # (change requires restart)
#wal_keep_segments = 0 # in logfile segments, 16MB each; 0 disables #wal_keep_segments = 0 # in logfile segments; 0 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables #wal_sender_timeout = 60s # in milliseconds; 0 disables
#max_replication_slots = 10 # max number of replication slots max_replication_slots = 0 # max number of replication slots
# (change requires restart) # (change requires restart)
#track_commit_timestamp = off # collect timestamp of transaction commit #track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart) # (change requires restart)
@ -256,6 +307,11 @@ dynamic_shared_memory_type = posix # the default is the first option
# These settings are ignored on a master server. # These settings are ignored on a master server.
#primary_conninfo = '' # connection string to sending server
# (change requires restart)
#primary_slot_name = '' # replication slot on sending server
# (change requires restart)
#promote_trigger_file = '' # file name whose presence ends recovery
#hot_standby = on # "off" disallows queries during recovery #hot_standby = on # "off" disallows queries during recovery
# (change requires restart) # (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries #max_standby_archive_delay = 30s # max delay before canceling queries
@ -273,14 +329,15 @@ dynamic_shared_memory_type = posix # the default is the first option
# in milliseconds; 0 disables # in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to #wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt # retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
# - Subscribers - # - Subscribers -
# These settings are ignored on a publisher. # These settings are ignored on a publisher.
#max_logical_replication_workers = 4 # taken from max_worker_processes max_logical_replication_workers = 0 # taken from max_worker_processes
# (change requires restart) # (change requires restart)
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers max_sync_workers_per_subscription = 0 # taken from max_logical_replication_workers
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------
@ -297,9 +354,14 @@ dynamic_shared_memory_type = posix # the default is the first option
#enable_material = on #enable_material = on
#enable_mergejoin = on #enable_mergejoin = on
#enable_nestloop = on #enable_nestloop = on
#enable_parallel_append = on
#enable_seqscan = on #enable_seqscan = on
#enable_sort = on #enable_sort = on
#enable_tidscan = on #enable_tidscan = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_parallel_hash = on
#enable_partition_pruning = on
# - Planner Cost Constants - # - Planner Cost Constants -
@ -310,6 +372,16 @@ dynamic_shared_memory_type = posix # the default is the first option
#cpu_operator_cost = 0.0025 # same scale as above #cpu_operator_cost = 0.0025 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above #parallel_tuple_cost = 0.1 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above #parallel_setup_cost = 1000.0 # same scale as above
#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000 # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables
#min_parallel_table_scan_size = 8MB #min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB #min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB #effective_cache_size = 4GB
@ -333,10 +405,13 @@ dynamic_shared_memory_type = posix # the default is the first option
#join_collapse_limit = 8 # 1 disables collapsing of explicit #join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses # JOIN clauses
#force_parallel_mode = off #force_parallel_mode = off
#jit = on # allow JIT compilation
#plan_cache_mode = auto # auto, force_generic_plan or
# force_custom_plan
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------
# ERROR REPORTING AND LOGGING # REPORTING AND LOGGING
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------
# - Where to Log - # - Where to Log -
@ -385,17 +460,6 @@ dynamic_shared_memory_type = posix # the default is the first option
# - When to Log - # - When to Log -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#log_min_messages = warning # values in order of decreasing detail: #log_min_messages = warning # values in order of decreasing detail:
# debug5 # debug5
# debug4 # debug4
@ -429,6 +493,9 @@ dynamic_shared_memory_type = posix # the default is the first option
# statements running at least this number # statements running at least this number
# of milliseconds # of milliseconds
#log_transaction_sample_rate = 0.0 # Fraction of transactions whose statements
# are logged regardless of their duration. 1.0 logs all
# statements from all transactions, 0.0 never logs.
# - What to Log - # - What to Log -
@ -464,15 +531,16 @@ log_line_prefix = '%m [%p] %q%u@%d ' # special values:
# %% = '%' # %% = '%'
# e.g. '<%u%%%d> ' # e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout #log_lock_waits = off # log lock waits >= deadlock_timeout
#log_statement = 'all' # none, ddl, mod, all #log_statement = 'none' # none, ddl, mod, all
#log_replication_commands = off #log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger #log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes; # than the specified size in kilobytes;
# -1 disables, 0 logs all temp files # -1 disables, 0 logs all temp files
log_timezone = 'Europe/Prague' log_timezone = 'Europe/Prague'
#------------------------------------------------------------------------------
# - Process Title - # PROCESS TITLE
#------------------------------------------------------------------------------
#cluster_name = '' # added to process titles if nonempty #cluster_name = '' # added to process titles if nonempty
# (change requires restart) # (change requires restart)
@ -480,10 +548,10 @@ log_timezone = 'Europe/Prague'
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------
# RUNTIME STATISTICS # STATISTICS
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------
# - Query/Index Statistics Collector - # - Query and Index Statistics Collector -
#track_activities = on #track_activities = on
#track_counts = on #track_counts = on
@ -493,7 +561,7 @@ log_timezone = 'Europe/Prague'
#stats_temp_directory = 'pg_stat_tmp' #stats_temp_directory = 'pg_stat_tmp'
# - Statistics Monitoring - # - Monitoring -
#log_parser_stats = off #log_parser_stats = off
#log_planner_stats = off #log_planner_stats = off
@ -502,7 +570,7 @@ log_timezone = 'Europe/Prague'
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------
# AUTOVACUUM PARAMETERS # AUTOVACUUM
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on' #autovacuum = on # Enable autovacuum subprocess? 'on'
@ -525,7 +593,7 @@ log_timezone = 'Europe/Prague'
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age #autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum # before forced vacuum
# (change requires restart) # (change requires restart)
#autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for #autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
# autovacuum, in milliseconds; # autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay # -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for #autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
@ -539,10 +607,22 @@ log_timezone = 'Europe/Prague'
# - Statement Behavior - # - Statement Behavior -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#search_path = '"$user", public' # schema names #search_path = '"$user", public' # schema names
#row_security = on
#default_tablespace = '' # a tablespace name, '' uses the default #default_tablespace = '' # a tablespace name, '' uses the default
#temp_tablespaces = '' # a list of tablespace names, '' uses #temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace # only default tablespace
#default_table_access_method = 'heap'
#check_function_bodies = on #check_function_bodies = on
#default_transaction_isolation = 'read committed' #default_transaction_isolation = 'read committed'
#default_transaction_read_only = off #default_transaction_read_only = off
@ -555,6 +635,9 @@ log_timezone = 'Europe/Prague'
#vacuum_freeze_table_age = 150000000 #vacuum_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000 #vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_freeze_table_age = 150000000 #vacuum_multixact_freeze_table_age = 150000000
#vacuum_cleanup_index_scale_factor = 0.1 # fraction of total number of tuples
# before index cleanup, 0 always performs
# index cleanup
#bytea_output = 'hex' # hex, escape #bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64' #xmlbinary = 'base64'
#xmloption = 'content' #xmloption = 'content'
@ -573,7 +656,8 @@ timezone = 'Europe/Prague'
# India # India
# You can create your own file in # You can create your own file in
# share/timezonesets/. # share/timezonesets/.
#extra_float_digits = 0 # min -15, max 3 #extra_float_digits = 1 # min -15, max 3; any value >0 actually
# selects precise output mode
#client_encoding = sql_ascii # actually, defaults to database #client_encoding = sql_ascii # actually, defaults to database
# encoding # encoding
@ -587,11 +671,16 @@ lc_time = 'C' # locale for time formatting
# default configuration for text search # default configuration for text search
default_text_search_config = 'pg_catalog.english' default_text_search_config = 'pg_catalog.english'
# - Shared Library Preloading -
#shared_preload_libraries = '' # (change requires restart)
#local_preload_libraries = ''
#session_preload_libraries = ''
#jit_provider = 'llvmjit' # JIT library to use
# - Other Defaults - # - Other Defaults -
#dynamic_library_path = '$libdir' #dynamic_library_path = '$libdir'
#local_preload_libraries = ''
#session_preload_libraries = ''
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------
@ -610,14 +699,13 @@ default_text_search_config = 'pg_catalog.english'
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------
# VERSION/PLATFORM COMPATIBILITY # VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------
# - Previous PostgreSQL Versions - # - Previous PostgreSQL Versions -
#array_nulls = on #array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding #backslash_quote = safe_encoding # on, off, or safe_encoding
#default_with_oids = off
#escape_string_warning = on #escape_string_warning = on
#lo_compat_privileges = off #lo_compat_privileges = off
#operator_precedence_warning = off #operator_precedence_warning = off
@ -636,6 +724,9 @@ default_text_search_config = 'pg_catalog.english'
#exit_on_error = off # terminate session on any error? #exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash? #restart_after_crash = on # reinitialize after backend crash?
#data_sync_retry = off # retry or panic on failure to fsync
# data?
# (change requires restart)
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------
@ -643,12 +734,13 @@ default_text_search_config = 'pg_catalog.english'
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the # These options allow settings to be loaded from files other than the
# default postgresql.conf. # default postgresql.conf. Note that these are directives, not variable
# assignments, so they can usefully be given more than once.
#include_dir = 'conf.d' # include files ending in '.conf' from #include_dir = '...' # include files ending in '.conf' from
# directory 'conf.d' # a directory, e.g., 'conf.d'
#include_if_exists = 'exists.conf' # include file only if it exists #include_if_exists = '...' # include file only if it exists
#include = 'special.conf' # include file #include = '...' # include file
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------

View File

@ -0,0 +1,8 @@
#!/bin/sh
# Uninstall the EcoGIS application: delete its persistent volume data and
# remove its registration from the VM manager.
set -ev

# Remove persistent data.
# "${VOLUMES_DIR:?}" aborts the script if VOLUMES_DIR is unset or empty;
# without the guard (script has set -ev but not -u), an empty VOLUMES_DIR
# would expand this to 'rm -rf /ecogis'.
rm -rf "${VOLUMES_DIR:?}/ecogis"
# Unregister application
vmmgr unregister-app ecogis

View File

@ -0,0 +1,28 @@
{
"version": "2.0.2-200403",
"meta": {
"title": "Sigmah",
"desc-cs": "Finanční řízení sbírek",
"desc-en": "Donation management",
"license": "GPL"
},
"containers": {
"sigmah": {
"image": "sigmah_2.0.2-200403",
"depends": [
"sigmah-postgres"
],
"mounts": {
"sigmah/sigmah_conf/persistence.xml": "srv/tomcat/webapps/sigmah/WEB-INF/classes/META-INF/persistence.xml:file",
"sigmah/sigmah_conf/sigmah.properties": "srv/tomcat/webapps/sigmah/WEB-INF/classes/sigmah.properties:file",
"sigmah/sigmah_data": "srv/sigmah"
}
},
"sigmah-postgres": {
"image": "postgres_12.2.0-200403",
"mounts": {
"sigmah/postgres_data": "var/lib/postgresql"
}
}
}
}

View File

@ -1,8 +1,5 @@
IMAGE sigmah IMAGE sigmah_2.0.2-200403
LAYER shared/alpine3.9 FROM alpine3.11-tomcat8.5_8.5.53-200403
LAYER shared/alpine3.9-java8
LAYER shared/alpine3.9-tomcat8.5
LAYER sigmah/sigmah
RUN EOF RUN EOF
# Download Sigmah # Download Sigmah
@ -12,15 +9,13 @@ RUN EOF
# Update Postgres JDBC driver # Update Postgres JDBC driver
rm /srv/tomcat/webapps/sigmah/WEB-INF/lib/postgresql-9.1-901-1.jdbc4.jar rm /srv/tomcat/webapps/sigmah/WEB-INF/lib/postgresql-9.1-901-1.jdbc4.jar
wget https://jdbc.postgresql.org/download/postgresql-42.2.5.jar -O /srv/tomcat/webapps/sigmah/WEB-INF/lib/postgresql-42.2.5.jar wget https://jdbc.postgresql.org/download/postgresql-42.2.11.jar -O /srv/tomcat/webapps/sigmah/WEB-INF/lib/postgresql-42.2.11.jar
# Remove logging config # Remove logging config
rm /srv/tomcat/webapps/sigmah/WEB-INF/classes/logback.xml rm /srv/tomcat/webapps/sigmah/WEB-INF/classes/logback.xml
# Create OS user # Change webapps ownership
addgroup -S -g 8011 sigmah chown -R tomcat:tomcat /srv/tomcat/webapps
adduser -S -u 8011 -h /srv/tomcat -s /bin/false -g sigmah -G sigmah sigmah
chown -R sigmah:sigmah /srv/tomcat/conf /srv/tomcat/logs /srv/tomcat/temp /srv/tomcat/webapps /srv/tomcat/work
# Download database files # Download database files
wget https://github.com/sigmah-dev/sigmah/releases/download/v2.0.2/sigmah-MinimumDataKit-2.0.postgresql.sql -O /srv/sigmah-MinimumDataKit.sql wget https://github.com/sigmah-dev/sigmah/releases/download/v2.0.2/sigmah-MinimumDataKit-2.0.postgresql.sql -O /srv/sigmah-MinimumDataKit.sql
@ -30,12 +25,8 @@ RUN EOF
rm /tmp/sigmah.war rm /tmp/sigmah.war
EOF EOF
COPY lxc COPY image.d
MOUNT DIR /srv/sigmah/data srv/sigmah/data USER tomcat
MOUNT FILE /srv/sigmah/conf/persistence.xml srv/tomcat/webapps/sigmah/WEB-INF/classes/META-INF/persistence.xml
MOUNT FILE /srv/sigmah/conf/sigmah.properties srv/tomcat/webapps/sigmah/WEB-INF/classes/sigmah.properties
USER 8011 8011
WORKDIR /srv/tomcat WORKDIR /srv/tomcat
CMD catalina.sh run CMD /usr/bin/catalina.sh run

View File

@ -0,0 +1,52 @@
#!/bin/sh
# Install/setup script for the Sigmah application:
# initializes and configures a PostgreSQL instance, creates the sigmah
# database and role, installs Sigmah configuration files, seeds the
# database with the minimum data kit plus a demo organization and admin
# user, and finally registers the application with the VM manager.
#
# Expects VOLUMES_DIR and LAYERS_DIR to be set by the caller.
set -ev

# Volumes (quoted on use below — paths may contain unusual characters)
POSTGRES_DATA="${VOLUMES_DIR:?}/sigmah/postgres_data"
SIGMAH_DATA="${VOLUMES_DIR}/sigmah/sigmah_data"
SIGMAH_CONF="${VOLUMES_DIR}/sigmah/sigmah_conf"
SIGMAH_LAYER="${LAYERS_DIR:?}/sigmah_2.0.2-200403"

# Create Postgres instance (UID/GID 105432 = postgres inside the container)
install -o 105432 -g 105432 -m 700 -d "${POSTGRES_DATA}"
spoc-container exec sigmah-postgres -- initdb -D /var/lib/postgresql
# Configure Postgres
install -o 105432 -g 105432 -m 600 postgres_data/postgresql.conf "${POSTGRES_DATA}/postgresql.conf"
install -o 105432 -g 105432 -m 600 postgres_data/pg_hba.conf "${POSTGRES_DATA}/pg_hba.conf"

# Create database with a random password; exported so envsubst can
# substitute it into createdb.sql
export SIGMAH_PWD=$(head -c 18 /dev/urandom | base64 | tr -d '+/=')
spoc-container start sigmah-postgres
envsubst <createdb.sql | spoc-container exec sigmah-postgres -- psql

# Configure Sigmah (UID/GID 108080 = tomcat inside the container)
install -o 108080 -g 108080 -m 750 -d "${SIGMAH_CONF}"
install -o 108080 -g 108080 -m 750 -d "${SIGMAH_DATA}"
install -o 108080 -g 108080 -m 750 -d "${SIGMAH_DATA}/files"
install -o 108080 -g 108080 -m 750 -d "${SIGMAH_DATA}/archives"
envsubst <sigmah_conf/persistence.xml | install -o 108080 -g 108080 -m 640 /dev/stdin "${SIGMAH_CONF}/persistence.xml"
install -o 108080 -g 108080 -m 640 sigmah_conf/sigmah.properties "${SIGMAH_CONF}/sigmah.properties"
cp -p "${SIGMAH_LAYER}/srv/tomcat/webapps/sigmah/sigmah/images/header/org-default-logo.png" "${SIGMAH_DATA}/files/logo.png"

# Populate database with initial data and a demo organization/admin user.
# The bcrypt hash uses the '2a' prefix to match what Sigmah expects.
export SIGMAH_ADMIN_USER="Admin"
export SIGMAH_ADMIN_EMAIL="admin@example.com"
export SIGMAH_ADMIN_PWD=$(head -c 12 /dev/urandom | base64 | tr -d '+/=')
export SIGMAH_ADMIN_HASH=$(python3 -c "import bcrypt; print(bcrypt.hashpw('${SIGMAH_ADMIN_PWD}'.encode(), bcrypt.gensalt(prefix=b'2a')).decode())")
# Feed the SQL dump directly via stdin redirection (no 'cat |' needed)
spoc-container exec sigmah-postgres -- sh -c "PGPASSWORD=${SIGMAH_PWD} psql -U sigmah sigmah" <"${SIGMAH_LAYER}/srv/sigmah-MinimumDataKit.sql"
# Replace the launch script's §placeholders§ and its hard-coded default
# bcrypt hash with our generated admin credentials before executing it.
sed -e "s|§OrganizationName§|Demo organization|g" \
    -e "s|§OrganizationLogoFilename§|logo.png|g" \
    -e "s|§HeadquartersCountryCode§|CZ|g" \
    -e "s|§UserEmail§|${SIGMAH_ADMIN_EMAIL}|g" \
    -e "s|§UserName§|${SIGMAH_ADMIN_USER}|g" \
    -e "s|§UserFirstName§|${SIGMAH_ADMIN_USER}|g" \
    -e "s|§UserLocale§|en|g" \
    -e "s|\$2a\$10\$pMcTA1p9fefR8U9NoOPei.H0eq/TbbdSF27M0tn9iDWBrA4JHeCDC|${SIGMAH_ADMIN_HASH}|" \
    "${SIGMAH_LAYER}/srv/sigmah-newOrganizationLaunchScript.sql" | spoc-container exec sigmah-postgres -- sh -c "PGPASSWORD=${SIGMAH_PWD} psql -U sigmah sigmah"

# Stop services required for setup
spoc-container stop sigmah-postgres

# Register application
vmmgr register-app sigmah sigmah "${SIGMAH_ADMIN_EMAIL}" "${SIGMAH_ADMIN_PWD}"

View File

@ -0,0 +1,3 @@
local all postgres peer
local all all md5
host all all 0.0.0.0/0 md5

View File

@ -0,0 +1,750 @@
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: kB = kilobytes Time units: ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
#data_directory = 'ConfigDir' # use data in another directory
# (change requires restart)
#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file
# (change requires restart)
#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
#external_pid_file = '' # write an extra PID file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
listen_addresses = '*' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
#port = 5432 # (change requires restart)
max_connections = 100 # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
unix_socket_directories = '/run/postgresql,/tmp' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - TCP settings -
# see "man 7 tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
# 0 selects the system default
# - Authentication -
#authentication_timeout = 1min # 1s-600s
#password_encryption = md5 # md5 or scram-sha-256
#db_user_namespace = off
# GSSAPI using Kerberos
#krb_server_keyfile = ''
#krb_caseins_users = off
# - SSL -
#ssl = off
#ssl_ca_file = ''
#ssl_cert_file = 'server.crt'
#ssl_crl_file = ''
#ssl_key_file = 'server.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = 128MB # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
#maintenance_work_mem = 64MB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#max_stack_depth = 2MB # min 100kB
#shared_memory_type = mmap # the default is the first option
# supported by the operating system:
# mmap
# sysv
# windows
# (change requires restart)
dynamic_shared_memory_type = posix # the default is the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# (change requires restart)
# - Disk -
#temp_file_limit = -1 # limits per-process temp file space
# in kB, or -1 for no limit
# - Kernel Resources -
#max_files_per_process = 1000 # min 25
# (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 10 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
# - Asynchronous Behavior -
#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart)
#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
#parallel_leader_participation = on
#max_parallel_workers = 8 # maximum number of max_worker_processes that
# can be used in parallel operations
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#backend_flush_after = 0 # measured in pages, 0 disables
#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
wal_level = minimal # minimal, replica, or logical
# (change requires restart)
#fsync = on # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
#synchronous_commit = on # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux)
# fsync
# fsync_writethrough
# open_sync
#full_page_writes = on # recover from partial page writes
#wal_compression = off # enable compression of full-page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_init_zero = on # zero-fill new WAL files
#wal_recycle = on # recycle WAL files
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
#checkpoint_timeout = 5min # range 30s-1d
#max_wal_size = 1GB
#min_wal_size = 80MB
#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
# - Archiving -
#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables
# - Archive Recovery -
# These are only used in recovery mode.
#restore_command = '' # command to use to restore an archived logfile segment
# placeholders: %p = path of file to restore
# %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
# (change requires restart)
#archive_cleanup_command = '' # command to execute at every restartpoint
#recovery_end_command = '' # command to execute at completion of recovery
# - Recovery Target -
# Set these only when performing a targeted recovery.
#recovery_target = '' # 'immediate' to end recovery as soon as a
# consistent state is reached
# (change requires restart)
#recovery_target_name = '' # the named restore point to which recovery will proceed
# (change requires restart)
#recovery_target_time = '' # the time stamp up to which recovery will proceed
# (change requires restart)
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
# (change requires restart)
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
# (change requires restart)
#recovery_target_inclusive = on # Specifies whether to stop:
# just after the specified recovery target (on)
# just before the recovery target (off)
# (change requires restart)
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
# (change requires restart)
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
# (change requires restart)
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Servers -
# Set these on the master and on any standby that will send replication data.
max_wal_senders = 0 # max number of walsender processes
# (change requires restart)
#wal_keep_segments = 0 # in logfile segments; 0 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
max_replication_slots = 0 # max number of replication slots
# (change requires restart)
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
# - Master Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
# - Standby Servers -
# These settings are ignored on a master server.
#primary_conninfo = '' # connection string to sending server
# (change requires restart)
#primary_slot_name = '' # replication slot on sending server
# (change requires restart)
#promote_trigger_file = '' # file name whose presence ends recovery
#hot_standby = on # "off" disallows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from master
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
# - Subscribers -
# These settings are ignored on a publisher.
max_logical_replication_workers = 0 # taken from max_worker_processes
# (change requires restart)
max_sync_workers_per_subscription = 0 # taken from max_logical_replication_workers
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_bitmapscan = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_parallel_hash = on
#enable_partition_pruning = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000 # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#force_parallel_mode = off
#jit = on # allow JIT compilation
#plan_cache_mode = auto # auto, force_generic_plan or
# force_custom_plan
#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, syslog, and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.
# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)
# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on
# This is only relevant when logging to eventlog (win32):
# (change requires restart)
#event_source = 'PostgreSQL'
# - When to Log -
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
#log_transaction_sample_rate = 0.0 # Fraction of transactions whose statements
# are logged regardless of their duration. 1.0 logs all
# statements from all transactions, 0.0 never logs.
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
log_line_prefix = '%m [%p] %q%u@%d ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %p = process ID
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_statement = 'none' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Europe/Prague'
#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------
#cluster_name = '' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on
#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------
# - Query and Index Statistics Collector -
#track_activities = on
#track_counts = on
#track_io_timing = off
#track_functions = none # none, pl, all
#track_activity_query_size = 1024 # (change requires restart)
#stats_temp_directory = 'pg_stat_tmp'
# - Monitoring -
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#log_statement_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#search_path = '"$user", public' # schema names
#row_security = on
#default_tablespace = '' # a tablespace name, '' uses the default
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#default_table_access_method = 'heap'
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_min_age = 50000000
#vacuum_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_cleanup_index_scale_factor = 0.1 # fraction of total number of tuples
# before index cleanup, 0 always performs
# index cleanup
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_fuzzy_search_limit = 0
#gin_pending_list_limit = 4MB
# - Locale and Formatting -
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Europe/Prague'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 1 # min -15, max 3; any value >0 actually
# selects precise output mode
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'C' # locale for system error message
# strings
lc_monetary = 'C' # locale for monetary formatting
lc_numeric = 'C' # locale for number formatting
lc_time = 'C' # locale for time formatting
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Shared Library Preloading -
#shared_preload_libraries = '' # (change requires restart)
#local_preload_libraries = ''
#session_preload_libraries = ''
#jit_provider = 'llvmjit' # JIT library to use
# - Other Defaults -
#dynamic_library_path = '$libdir'
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_relation = -2 # negative values mean
# (max_pred_locks_per_transaction
# / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2 # min 0
#------------------------------------------------------------------------------
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#escape_string_warning = on
#lo_compat_privileges = off
#operator_precedence_warning = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#data_sync_retry = off # retry or panic on failure to fsync
# data?
# (change requires restart)
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf. Note that these are directives, not variable
# assignments, so they can usefully be given more than once.
#include_dir = '...' # include files ending in '.conf' from
# a directory, e.g., 'conf.d'
#include_if_exists = '...' # include file only if it exists
#include = '...' # include file
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here

View File

@ -13,7 +13,7 @@
<property name="hibernate.connection.driver_class" value="org.postgresql.Driver" /> <property name="hibernate.connection.driver_class" value="org.postgresql.Driver" />
<property name="hibernate.connection.username" value="sigmah" /> <property name="hibernate.connection.username" value="sigmah" />
<property name="hibernate.connection.password" value="${SIGMAH_PWD}" /> <property name="hibernate.connection.password" value="${SIGMAH_PWD}" />
<property name="hibernate.connection.url" value="jdbc:postgresql://postgres:5432/sigmah" /> <property name="hibernate.connection.url" value="jdbc:postgresql://sigmah-postgres:5432/sigmah" />
<property name="hibernate.show_sql" value="false" /> <property name="hibernate.show_sql" value="false" />
<property name="hibernate.format_sql" value="false" /> <property name="hibernate.format_sql" value="false" />

View File

@ -10,10 +10,10 @@
# -- # --
# Root directory name where files are stored. # Root directory name where files are stored.
files.repository.name=/srv/sigmah/data/files files.repository.name=/srv/sigmah/files
# Root directory name where backup archives are stored. # Root directory name where backup archives are stored.
archives.repository.name=/srv/sigmah/data/archives/ archives.repository.name=/srv/sigmah/archives
#Maximum size of the uploaded files (bytes) #Maximum size of the uploaded files (bytes)
files.upload.maxSize=20971520 files.upload.maxSize=20971520

View File

@ -0,0 +1,9 @@
#!/bin/sh
# Update sigmah.properties with instance-specific values taken from the
# environment (EMAIL, GMAPS_API_KEY). Invoked whenever the VM configuration
# changes.

# Volumes
# ${VOLUMES_DIR:?} aborts with an error if VOLUMES_DIR is unset/empty, so we
# never rewrite files under the filesystem root by accident.
SIGMAH_CONF="${VOLUMES_DIR:?}/sigmah/sigmah_conf"

# Replacements - rewrite only the value part of each key, keeping the key
# itself (captured group \1) intact.
sed -i "s|\(^mail\.from\.address=\).*|\1${EMAIL}|" "${SIGMAH_CONF}/sigmah.properties"
sed -i "s|\(^mail\.support\.to=\).*|\1${EMAIL}|" "${SIGMAH_CONF}/sigmah.properties"
sed -i "s|\(^maps\.key=\).*|\1${GMAPS_API_KEY}|" "${SIGMAH_CONF}/sigmah.properties"

View File

@ -0,0 +1,8 @@
#!/bin/sh
# Uninstall the Sigmah application: purge its persistent volumes and remove
# its registration from vmmgr.
set -ev

# Remove persistent data
# ${VOLUMES_DIR:?} aborts if the variable is unset/empty, preventing an
# accidental "rm -rf /sigmah".
rm -rf "${VOLUMES_DIR:?}/sigmah"

# Unregister application
vmmgr unregister-app sigmah

View File

@ -1,17 +0,0 @@
#!/bin/sh
# Install script for the CKAN DataPusher LXC service (legacy pre-SPOC layout).
# -e: abort on first failing command; -v: echo each line for install logging.
set -ev
# Run relative to the "install" directory next to this script, so the cp
# commands below can use relative source paths.
cd $(realpath $(dirname "${0}"))/install
# Configure CKAN DataPusher
# Create the host-side conf/data directories that are bind-mounted into the
# container, then seed them with the shipped WSGI entry point and settings.
mkdir -p /srv/ckan-datapusher/conf /srv/ckan-datapusher/data
cp srv/ckan-datapusher/conf/datapusher.wsgi /srv/ckan-datapusher/conf/datapusher.wsgi
cp srv/ckan-datapusher/conf/datapusher_settings.py /srv/ckan-datapusher/conf/datapusher_settings.py
# 8004:8004 is the ckandp UID/GID used inside the container image.
chown -R 8004:8004 /srv/ckan-datapusher/data
# Install service
cp etc/init.d/ckan-datapusher /etc/init.d/ckan-datapusher
# Refresh the OpenRC dependency cache so the new service is picked up.
rc-update -u
# Install config update script
cp srv/ckan-datapusher/update-conf.sh /srv/ckan-datapusher/update-conf.sh

View File

@ -1,11 +0,0 @@
#!/sbin/openrc-run
# OpenRC service wrapper: the "service" is simply the lifecycle of the
# ckan-datapusher LXC container.
description="CKAN DataPusher container"
start() {
lxc-start ckan-datapusher
}
stop() {
lxc-stop ckan-datapusher
}

View File

@ -1,3 +0,0 @@
#!/bin/sh
# Rewrite the FROM_EMAIL value in the DataPusher settings with the
# instance-wide EMAIL environment variable; \1 keeps the "FROM_EMAIL = "
# prefix and the replacement is re-quoted with single quotes.
sed -i "s|\(^FROM_EMAIL = \).*|\1'${EMAIL}'|" /srv/ckan-datapusher/conf/datapusher_settings.py

View File

@ -1,4 +0,0 @@
#!/bin/sh
# Append the local services CA certificate to the CA bundles consulted by
# the "requests" and "certifi" Python packages, so the DataPusher trusts
# the locally issued TLS certificates of sibling services.
/bin/cat /etc/ssl/services.pem >>/usr/lib/python2.7/site-packages/requests/cacert.pem
/bin/cat /etc/ssl/services.pem >>/usr/lib/python2.7/site-packages/certifi/cacert.pem

View File

@ -1,9 +0,0 @@
{
"desc-cs": "Služba datového skladu pro extrakci dat",
"desc-en": "Data store data extraction service",
"lxcpath": "ckan-datapusher",
"version": "0.0.1",
"release": "0",
"license": "GPL",
"depends": ["alpine3.9-python2.7"]
}

View File

@ -1,6 +0,0 @@
#!/bin/sh
# Uninstall the CKAN DataPusher service (legacy pre-SPOC layout).
# -e: abort on first failing command; -v: echo each line for logging.
set -ev
# Remove service
rm -f /etc/init.d/ckan-datapusher
# Refresh the OpenRC dependency cache so the removed service is forgotten.
rc-update -u

50
lxc-apps/ckan/app Normal file
View File

@ -0,0 +1,50 @@
{
"version": "2.8.3-200403",
"meta": {
"title": "CKAN",
"desc-cs": "Datový sklad",
"desc-en": "Data store",
"license": "GPL"
},
"containers": {
"ckan": {
"image": "ckan_2.8.3-200403",
"depends": [
"ckan-datapusher",
"ckan-redis",
"ckan-solr",
"ckan-postgres"
],
"mounts": {
"ckan/ckan_conf": "etc/ckan",
"ckan/ckan_data": "srv/ckan/storage"
}
},
"ckan-datapusher": {
"image": "ckan-datapusher_0.0.16-200403",
"mounts": {
"ckan/datapusher_conf": "etc/ckan-datapusher",
"ckan/datapusher_data": "srv/ckan-datapusher/data"
}
},
"ckan-redis": {
"image": "redis_5.0.7-200403",
"mounts": {
"ckan/redis_conf/redis.conf": "etc/redis.conf:file",
"ckan/redis_data": "var/lib/redis"
}
},
"ckan-solr": {
"image": "solr6_6.5.1-200403",
"mounts": {
"ckan/solr_data": "var/lib/solr"
}
},
"ckan-postgres": {
"image": "postgis_3.0.0-200403",
"mounts": {
"ckan/postgres_data": "var/lib/postgresql"
}
}
}
}

View File

@ -1,11 +1,10 @@
IMAGE ckan-datapusher IMAGE ckan-datapusher_0.0.16-200403
LAYER shared/alpine3.9 FROM alpine3.10-python2.7_2.7.16-200403
LAYER shared/alpine3.9-python2.7 # Alpine 3.11 discontinued uwsgi-python2 module
LAYER ckan-datapusher/ckan-datapusher
RUN EOF RUN EOF
# Install runtime dependencies # Install runtime dependencies
apk --no-cache add libffi libressl uwsgi-python apk --no-cache add libffi uwsgi-python
# Install build dependencies # Install build dependencies
apk --no-cache add --virtual .deps build-base git libffi-dev libressl-dev libxml2-dev libxslt-dev py2-pip python2-dev apk --no-cache add --virtual .deps build-base git libffi-dev libressl-dev libxml2-dev libxslt-dev py2-pip python2-dev
@ -14,15 +13,19 @@ RUN EOF
mkdir -p /srv/ckan-datapusher mkdir -p /srv/ckan-datapusher
cd /srv/ckan-datapusher cd /srv/ckan-datapusher
pip install -U setuptools pip install -U setuptools
pip install -e 'git+https://github.com/ckan/datapusher.git#egg=datapusher' pip install -e 'git+https://github.com/ckan/datapusher.git@e662e3c33e069ac174cdb4fb1d61121f0ba4bb3a#egg=datapusher'
# Hackfix the X509_STORE_CTX wrapper # Hackfix the X509_STORE_CTX wrapper
sed -i 's/\[security\]//' /srv/ckan-datapusher/src/datapusher/requirements.txt sed -i 's/\[security\]//' /srv/ckan-datapusher/src/datapusher/requirements.txt
pip install -r /srv/ckan-datapusher/src/datapusher/requirements.txt pip install -r /srv/ckan-datapusher/src/datapusher/requirements.txt
# Hackfix werkzeug==1.0.0 proxy_fix import
# https://github.com/ckan/ckan-service-provider/pull/49
sed -i 's/werkzeug\.contrib\.fixers/werkzeug.middleware.proxy_fix/' /usr/lib/python2.7/site-packages/ckanserviceprovider/web.py
# Create OS user # Create OS user
addgroup -S -g 8004 ckandp addgroup -S -g 8080 ckandp
adduser -S -u 8004 -h /srv/ckan-datapusher -s /bin/false -g ckandp -G ckandp ckandp adduser -S -u 8080 -h /srv/ckan-datapusher -s /bin/false -g ckandp -G ckandp ckandp
chown -R ckandp:ckandp /srv/ckan-datapusher chown -R ckandp:ckandp /srv/ckan-datapusher
# Cleanup # Cleanup
@ -31,10 +34,6 @@ RUN EOF
rm -rf /root/.cache rm -rf /root/.cache
EOF EOF
COPY lxc COPY ckan-datapusher.image.d
MOUNT FILE /etc/ssl/services.pem etc/ssl/services.pem CMD /bin/execlineb -P /run
MOUNT DIR /srv/ckan-datapusher/conf etc/ckan-datapusher
MOUNT DIR /srv/ckan-datapusher/data srv/ckan-datapusher/data
CMD execlineb -P /run

View File

@ -0,0 +1,13 @@
#!/usr/bin/python
# Fetch the TLS certificate of the host configured in add-ca-cert.env and
# append it to the CA bundles used by the "requests" and "certifi" packages,
# so the DataPusher trusts the locally issued service certificate.
import ssl

with open('/etc/ckan-datapusher/add-ca-cert.env') as f:
    # Each line is KEY=VALUE; split on the first '=' only so that values may
    # themselves contain '=' characters.
    env = dict(line.split('=', 1) for line in f.read().splitlines())

# env['PORT'] stays a string; getaddrinfo accepts numeric service strings.
cert = ssl.get_server_certificate((env['HOST'], env['PORT']))

with open('/usr/lib/python2.7/site-packages/requests/cacert.pem', 'a') as f:
    f.write(cert)
with open('/usr/lib/python2.7/site-packages/certifi/cacert.pem', 'a') as f:
    f.write(cert)

View File

@ -1,5 +1,5 @@
#!/bin/execlineb -P #!/bin/execlineb -P
foreground { add-ca-cert } foreground { /bin/add-ca-cert }
s6-setuidgid ckandp s6-setuidgid ckandp
uwsgi --plugin python --http-socket 0.0.0.0:8080 --wsgi-file /etc/ckan-datapusher/datapusher.wsgi --enable-threads uwsgi --plugin python --http-socket 0.0.0.0:8080 --wsgi-file /etc/ckan-datapusher/datapusher.wsgi --enable-threads

View File

@ -1,24 +1,19 @@
IMAGE ckan IMAGE ckan_2.8.3-200403
LAYER shared/alpine3.9 FROM alpine3.11-python2.7_2.7.16-200403
LAYER shared/alpine3.9-python2.7
LAYER ckan/ckan
RUN EOF RUN EOF
# Install runtime dependencies # Install runtime dependencies
apk --no-cache add geos@vm libjpeg-turbo libmagic libpq mailcap py2-pip zlib apk --no-cache add geos libjpeg-turbo libmagic libpq mailcap py2-pip zlib
# Install build dependencies # Install build dependencies
apk --no-cache add --virtual .deps build-base git libjpeg-turbo-dev libxml2-dev libxslt-dev postgresql-dev python2-dev zlib-dev apk --no-cache add --virtual .deps build-base git libjpeg-turbo-dev libxml2-dev libxslt-dev postgresql-dev python2-dev zlib-dev
# Hackfix for python find_library('c') call
ln -s /lib/ld-musl-x86_64.so.1 /lib/libc.so.1
# Install CKAN # Install CKAN
mkdir -p /srv/ckan mkdir -p /srv/ckan
cd /srv/ckan cd /srv/ckan
pip install -U setuptools pip install -U setuptools
pip install flask-debugtoolbar pip install flask-debugtoolbar
pip install -e 'git+https://github.com/ckan/ckan.git#egg=ckan' pip install -e 'git+https://github.com/ckan/ckan.git@8e1cc60b2fa11da6843051678b7ee2cc08c2a7a9#egg=ckan'
pip install -r /srv/ckan/src/ckan/requirements.txt pip install -r /srv/ckan/src/ckan/requirements.txt
# Install CKAN extensions # Install CKAN extensions
@ -33,9 +28,13 @@ RUN EOF
pip install -r /srv/ckan/src/ckanext-geoview/pip-requirements.txt pip install -r /srv/ckan/src/ckanext-geoview/pip-requirements.txt
pip install -r /srv/ckan/src/ckanext-dgvat-xls/requirements.txt pip install -r /srv/ckan/src/ckanext-dgvat-xls/requirements.txt
# Hackfix support for PostgreSQL 12
# https://github.com/sqlalchemy/sqlalchemy/issues/4463
sed -i 's/cons\.consrc/pg_get_constraintdef(cons.oid)/' /usr/lib/python2.7/site-packages/sqlalchemy/dialects/postgresql/base.py
# Create OS user # Create OS user
addgroup -S -g 8003 ckan addgroup -S -g 8080 ckan
adduser -S -u 8003 -h /srv/ckan -s /bin/false -g ckan -G ckan ckan adduser -S -u 8080 -h /srv/ckan -s /bin/false -g ckan -G ckan ckan
chown -R ckan:ckan /srv/ckan chown -R ckan:ckan /srv/ckan
# Cleanup # Cleanup
@ -44,8 +43,6 @@ RUN EOF
rm -rf /root/.cache rm -rf /root/.cache
EOF EOF
MOUNT DIR /srv/ckan/conf etc/ckan COPY ckan.image.d
MOUNT DIR /srv/ckan/data srv/ckan/storage
USER 8003 8003 CMD /bin/s6-svscan /etc/services.d
CMD paster serve /etc/ckan/ckan.ini

View File

@ -0,0 +1,2 @@
0 * * * * paster --plugin=ckan tracking update -c /etc/ckan/ckan.ini >/dev/null
0 * * * * paster --plugin=ckan search-index rebuild -r -c /etc/ckan/ckan.ini >/dev/null

View File

@ -0,0 +1,4 @@
#!/bin/execlineb -P
# Container shutdown hook: wait (up to 3000 ms each) for the ckan and cron
# s6 services to go down before the supervision tree exits.
foreground { s6-svwait -d -t 3000 ckan }
foreground { s6-svwait -d -t 3000 cron }

View File

@ -0,0 +1,5 @@
#!/bin/execlineb -P
# s6 run script for the CKAN web process.
# Redirect stderr to stdout so all output is captured by the supervisor.
fdmove -c 2 1
# Drop privileges to the unprivileged ckan user before starting the server.
s6-setuidgid ckan
paster serve /etc/ckan/ckan.ini

View File

@ -0,0 +1,4 @@
#!/bin/execlineb -P
# s6 run script for the in-container cron daemon.
# Redirect stderr to stdout so all output is captured by the supervisor.
fdmove -c 2 1
# -f: stay in the foreground (required under s6); -d 8: log level debug.
crond -f -d 8

View File

@ -1,66 +1,82 @@
#!/bin/sh #!/bin/sh
set -ev set -ev
cd $(realpath $(dirname "${0}"))/install # Volumes
POSTGRES_DATA="${VOLUMES_DIR}/ckan/postgres_data"
REDIS_CONF="${VOLUMES_DIR}/ckan/redis_conf"
REDIS_DATA="${VOLUMES_DIR}/ckan/redis_data"
SOLR_DATA="${VOLUMES_DIR}/ckan/solr_data"
SOLR_LAYER="${LAYERS_DIR}/solr6_6.5.1-200403"
DATAPUSHER_CONF="${VOLUMES_DIR}/ckan/datapusher_conf"
DATAPUSHER_DATA="${VOLUMES_DIR}/ckan/datapusher_data"
CKAN_CONF="${VOLUMES_DIR}/ckan/ckan_conf"
CKAN_DATA="${VOLUMES_DIR}/ckan/ckan_data"
# Check prerequisites # Create Postgres instance
[ ! -e /run/openrc/started/postgres ] && service postgres start && STOP_POSTGRES=1 install -o 105432 -g 105432 -m 700 -d ${POSTGRES_DATA}
[ ! -e /run/openrc/started/redis ] && service redis start && STOP_REDIS=1 spoc-container exec ckan-postgres -- initdb -D /var/lib/postgresql
[ ! -e /run/openrc/started/solr ] && service solr start && STOP_SOLR=1
# Configure Postgres
install -o 105432 -g 105432 -m 600 postgres_data/postgresql.conf ${POSTGRES_DATA}/postgresql.conf
install -o 105432 -g 105432 -m 600 postgres_data/pg_hba.conf ${POSTGRES_DATA}/pg_hba.conf
# Create database # Create database
export CKAN_PWD=$(head -c 18 /dev/urandom | base64 | tr -d '+/=') export CKAN_PWD=$(head -c 18 /dev/urandom | base64 | tr -d '+/=')
export CKAN_DS_PWD=$(head -c 18 /dev/urandom | base64 | tr -d '+/=') export CKAN_DS_PWD=$(head -c 18 /dev/urandom | base64 | tr -d '+/=')
envsubst <createdb.sql | lxc-attach -u 5432 -g 5432 postgres -- psql spoc-container start ckan-postgres
envsubst <createdb.sql | spoc-container exec ckan-postgres -- psql
# Configure Redis
install -o 100000 -g 106379 -m 750 -d ${REDIS_CONF}
install -o 106379 -g 106379 -m 750 -d ${REDIS_DATA}
install -o 100000 -g 106379 -m 640 redis_conf/redis.conf ${REDIS_CONF}/redis.conf
spoc-container start ckan-redis
# Configure Solr
install -o 108983 -g 108983 -m 750 -d ${SOLR_DATA}
cp -p ${SOLR_LAYER}/opt/solr/server/solr/solr.xml ${SOLR_DATA}/solr.xml
spoc-container start ckan-solr
# Configure CKAN Solr core # Configure CKAN Solr core
lxc-attach -u 8983 -g 8983 solr -- solr create -p 8983 -c ckan spoc-container exec ckan-solr -- solr create -p 8983 -c ckan
cp srv/solr/data/ckan/conf/schema.xml /srv/solr/data/ckan/conf/schema.xml spoc-container stop ckan-solr
cp srv/solr/data/ckan/conf/solrconfig.xml /srv/solr/data/ckan/conf/solrconfig.xml install -o 108983 -g 108983 -m 640 solr_data/ckan/conf/schema.xml ${SOLR_DATA}/ckan/conf/schema.xml
chown 8983:8983 /srv/solr/data/ckan/conf/schema.xml install -o 108983 -g 108983 -m 640 solr_data/ckan/conf/solrconfig.xml ${SOLR_DATA}/ckan/conf/solrconfig.xml
service solr restart spoc-container start ckan-solr
# Configure CKAN DataPusher
install -o 100000 -g 108080 -m 750 -d ${DATAPUSHER_CONF}
install -o 108080 -g 108080 -m 750 -d ${DATAPUSHER_DATA}
install -o 100000 -g 108080 -m 640 datapusher_conf/add-ca-cert.env ${DATAPUSHER_CONF}/add-ca-cert.env
install -o 100000 -g 108080 -m 640 datapusher_conf/datapusher.wsgi ${DATAPUSHER_CONF}/datapusher.wsgi
install -o 100000 -g 108080 -m 640 datapusher_conf/datapusher_settings.py ${DATAPUSHER_CONF}/datapusher_settings.py
# Configure CKAN # Configure CKAN
mkdir -p /srv/ckan/conf /srv/ckan/data install -o 100000 -g 108080 -m 750 -d ${CKAN_CONF}
install -o 108080 -g 108080 -m 750 -d ${CKAN_DATA}
export CKAN_SECRET=$(head -c 18 /dev/urandom | base64 | tr -d '+/=') export CKAN_SECRET=$(head -c 18 /dev/urandom | base64 | tr -d '+/=')
export CKAN_UUID=$(cat /proc/sys/kernel/random/uuid) export CKAN_UUID=$(cat /proc/sys/kernel/random/uuid)
envsubst <srv/ckan/conf/ckan.ini >/srv/ckan/conf/ckan.ini envsubst <ckan_conf/ckan.ini | install -o 100000 -g 108080 -m 640 /dev/stdin ${CKAN_CONF}/ckan.ini
cp srv/ckan/conf/who.ini /srv/ckan/conf/who.ini install -o 100000 -g 108080 -m 640 ckan_conf/who.ini ${CKAN_CONF}/who.ini
chown -R 8003:8003 /srv/ckan/data
# Set "production values" (increases performance) only if the DEBUG environment variable is not set
if [ ${DEBUG:-0} -eq 0 ]; then
sed -i 's/debug = true/debug = false/' /srv/ckan/conf/ckan.ini
fi
# Populate database # Populate database
lxc-execute ckan -- paster --plugin=ckan db init -c /etc/ckan/ckan.ini spoc-container exec ckan -- paster --plugin=ckan db init -c /etc/ckan/ckan.ini
lxc-execute ckan -- paster --plugin=ckanext-spatial spatial initdb -c /etc/ckan/ckan.ini spoc-container exec ckan -- paster --plugin=ckanext-spatial spatial initdb -c /etc/ckan/ckan.ini
lxc-execute ckan -- paster --plugin=ckan datastore set-permissions -c /etc/ckan/ckan.ini | lxc-attach -u 5432 -g 5432 postgres -- psql spoc-container exec ckan -- paster --plugin=ckan datastore set-permissions -c /etc/ckan/ckan.ini | spoc-container exec ckan-postgres -- psql
# Create admin account # Create admin account
export CKAN_ADMIN_USER="admin" export CKAN_ADMIN_USER="admin"
export CKAN_ADMIN_UUID=$(cat /proc/sys/kernel/random/uuid) export CKAN_ADMIN_UUID=$(cat /proc/sys/kernel/random/uuid)
export CKAN_ADMIN_APIKEY=$(cat /proc/sys/kernel/random/uuid) export CKAN_ADMIN_APIKEY=$(cat /proc/sys/kernel/random/uuid)
export CKAN_ADMIN_PWD=$(head -c 12 /dev/urandom | base64 | tr -d '+/=') export CKAN_ADMIN_PWD=$(head -c 12 /dev/urandom | base64 | tr -d '+/=')
export CKAN_ADMIN_HASH=$(lxc-execute ckan -- python -c "from passlib.hash import pbkdf2_sha512;print pbkdf2_sha512.encrypt('${CKAN_ADMIN_PWD}')") export CKAN_ADMIN_HASH=$(spoc-container exec ckan -- python -c "from passlib.hash import pbkdf2_sha512;print pbkdf2_sha512.encrypt('${CKAN_ADMIN_PWD}')")
export CKAN_ADMIN_EMAIL="admin@example.com" export CKAN_ADMIN_EMAIL="admin@example.com"
envsubst <adminpwd.sql | lxc-attach -u 5432 -g 5432 postgres -- psql ckan envsubst <adminpwd.sql | spoc-container exec ckan-postgres -- psql ckan
# Install cron job
cp etc/periodic/hourly/ckan /etc/periodic/hourly/ckan
# Install service
cp etc/init.d/ckan /etc/init.d/ckan
rc-update -u
# Install config update script
cp srv/ckan/update-conf.sh /srv/ckan/update-conf.sh
# Stop services required for setup # Stop services required for setup
[ ! -z ${STOP_POSTGRES} ] && service postgres stop spoc-container stop ckan-solr
[ ! -z ${STOP_REDIS} ] && service redis stop spoc-container stop ckan-postgres
[ ! -z ${STOP_SOLR} ] && service solr stop spoc-container stop ckan-redis
# Register application # Register application
vmmgr register-app ckan ckan "${CKAN_ADMIN_USER}" "${CKAN_ADMIN_PWD}" vmmgr register-app ckan ckan "${CKAN_ADMIN_USER}" "${CKAN_ADMIN_PWD}"

View File

@ -13,8 +13,9 @@
[DEFAULT] [DEFAULT]
# WARNING: *THIS SETTING MUST BE SET TO FALSE ON A PRODUCTION ENVIRONMENT* # WARNING: *THIS SETTING MUST BE SET TO FALSE ON A PUBLIC ENVIRONMENT*
debug = true # With debug mode enabled, a visitor to your site could execute malicious commands.
debug = false
[server:main] [server:main]
use = egg:Paste#http use = egg:Paste#http
@ -45,15 +46,16 @@ who.log_file = %(cache_dir)s/who_log.ini
# who.timeout = 86400 # who.timeout = 86400
## Database Settings ## Database Settings
sqlalchemy.url = postgresql://ckan:${CKAN_PWD}@postgres/ckan sqlalchemy.url = postgresql://ckan:${CKAN_PWD}@ckan-postgres/ckan
ckan.datastore.write_url = postgresql://ckan:${CKAN_PWD}@postgres/ckan_datastore ckan.datastore.write_url = postgresql://ckan:${CKAN_PWD}@ckan-postgres/ckan_datastore
ckan.datastore.read_url = postgresql://ckan_datastore:${CKAN_DS_PWD}@postgres/ckan_datastore ckan.datastore.read_url = postgresql://ckan_datastore:${CKAN_DS_PWD}@ckan-postgres/ckan_datastore
# PostgreSQL' full-text search parameters # PostgreSQL' full-text search parameters
ckan.datastore.default_fts_lang = english ckan.datastore.default_fts_lang = english
ckan.datastore.default_fts_index_method = gist ckan.datastore.default_fts_index_method = gist
## Site Settings ## Site Settings
ckan.site_url = https://ckan.spotter.vm ckan.site_url = https://ckan.spotter.vm
@ -71,18 +73,20 @@ ckan.auth.user_delete_organizations = false
ckan.auth.create_user_via_api = false ckan.auth.create_user_via_api = false
ckan.auth.create_user_via_web = true ckan.auth.create_user_via_web = true
ckan.auth.roles_that_cascade_to_sub_groups = admin ckan.auth.roles_that_cascade_to_sub_groups = admin
ckan.auth.public_user_details = true
ckan.auth.public_activity_stream_detail = true
## Search Settings ## Search Settings
ckan.site_id = default ckan.site_id = default
solr_url = http://solr:8983/solr/ckan solr_url = http://ckan-solr:8983/solr/ckan
## Redis Settings ## Redis Settings
# URL to your Redis instance, including the database to be used. # URL to your Redis instance, including the database to be used.
ckan.redis.url = redis://redis:6379/0 ckan.redis.url = redis://ckan-redis:6379/0
## CORS Settings ## CORS Settings
@ -130,10 +134,6 @@ ckan.datasetthumbnail.auto_generate = true
## Front-End Settings ## Front-End Settings
# Uncomment following configuration to enable using of Bootstrap 2
#ckan.base_public_folder = public-bs2
#ckan.base_templates_folder = templates-bs2
ckan.site_title = CKAN ckan.site_title = CKAN
ckan.site_logo = /base/images/ckan-logo.png ckan.site_logo = /base/images/ckan-logo.png
ckan.site_description = ckan.site_description =
@ -146,7 +146,6 @@ ckan.display_timezone = server
# package_hide_extras = for_search_index_only # package_hide_extras = for_search_index_only
#package_edit_return_url = http://another.frontend/dataset/<NAME> #package_edit_return_url = http://another.frontend/dataset/<NAME>
#package_new_return_url = http://another.frontend/dataset/<NAME> #package_new_return_url = http://another.frontend/dataset/<NAME>
#ckan.recaptcha.version = 1
#ckan.recaptcha.publickey = #ckan.recaptcha.publickey =
#ckan.recaptcha.privatekey = #ckan.recaptcha.privatekey =
#licenses_group_url = http://licenses.opendefinition.org/licenses/groups/ckan.json #licenses_group_url = http://licenses.opendefinition.org/licenses/groups/ckan.json
@ -172,6 +171,11 @@ ckan.storage_path = /srv/ckan/storage
ckan.max_resource_size = 100 ckan.max_resource_size = 100
ckan.max_image_size = 10 ckan.max_image_size = 10
## Webassets Settings
#ckan.webassets.use_x_sendfile = false
#ckan.webassets.path = /var/lib/ckan/webassets
## Datapusher settings ## Datapusher settings
# Make sure you have set up the DataStore # Make sure you have set up the DataStore
@ -204,7 +208,10 @@ smtp.starttls = False
#smtp.user = username@example.com #smtp.user = username@example.com
#smtp.password = your_password #smtp.password = your_password
smtp.mail_from = admin@example.com smtp.mail_from = admin@example.com
#smtp.reply_to =
## Background Job Settings
ckan.jobs.timeout = 180
## Logging configuration ## Logging configuration
[loggers] [loggers]

View File

@ -0,0 +1,2 @@
HOST=ckan.spotter.vm
PORT=443

View File

@ -1,23 +0,0 @@
#!/sbin/openrc-run
description="CKAN container"
depend() {
need ckan-datapusher postgres redis solr
}
start() {
lxc-start ckan
}
start_post() {
vmmgr register-proxy ckan
}
stop_pre() {
vmmgr unregister-proxy ckan
}
stop() {
lxc-stop ckan
}

View File

@ -1,6 +0,0 @@
#!/bin/sh
if [ -e /run/openrc/started/ckan ]; then
lxc-attach -u 8003 -g 8003 ckan -- paster --plugin=ckan tracking update -c /etc/ckan/ckan.ini >/dev/null
lxc-attach -u 8003 -g 8003 ckan -- paster --plugin=ckan search-index rebuild -r -c /etc/ckan/ckan.ini >/dev/null
fi

View File

@ -0,0 +1,3 @@
local all postgres peer
local all all md5
host all all 0.0.0.0/0 md5

View File

@ -0,0 +1,750 @@
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: kB = kilobytes Time units: ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
#data_directory = 'ConfigDir' # use data in another directory
# (change requires restart)
#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file
# (change requires restart)
#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
#external_pid_file = '' # write an extra PID file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
listen_addresses = '*' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
#port = 5432 # (change requires restart)
max_connections = 100 # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
unix_socket_directories = '/run/postgresql,/tmp' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - TCP settings -
# see "man 7 tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
# 0 selects the system default
# - Authentication -
#authentication_timeout = 1min # 1s-600s
#password_encryption = md5 # md5 or scram-sha-256
#db_user_namespace = off
# GSSAPI using Kerberos
#krb_server_keyfile = ''
#krb_caseins_users = off
# - SSL -
#ssl = off
#ssl_ca_file = ''
#ssl_cert_file = 'server.crt'
#ssl_crl_file = ''
#ssl_key_file = 'server.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = 128MB # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
#maintenance_work_mem = 64MB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#max_stack_depth = 2MB # min 100kB
#shared_memory_type = mmap # the default is the first option
# supported by the operating system:
# mmap
# sysv
# windows
# (change requires restart)
dynamic_shared_memory_type = posix # the default is the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# (change requires restart)
# - Disk -
#temp_file_limit = -1 # limits per-process temp file space
# in kB, or -1 for no limit
# - Kernel Resources -
#max_files_per_process = 1000 # min 25
# (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 10 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
# - Asynchronous Behavior -
#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart)
#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
#parallel_leader_participation = on
#max_parallel_workers = 8 # maximum number of max_worker_processes that
# can be used in parallel operations
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#backend_flush_after = 0 # measured in pages, 0 disables
#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
wal_level = minimal # minimal, replica, or logical
# (change requires restart)
#fsync = on # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
#synchronous_commit = on # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux)
# fsync
# fsync_writethrough
# open_sync
#full_page_writes = on # recover from partial page writes
#wal_compression = off # enable compression of full-page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_init_zero = on # zero-fill new WAL files
#wal_recycle = on # recycle WAL files
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
#checkpoint_timeout = 5min # range 30s-1d
#max_wal_size = 1GB
#min_wal_size = 80MB
#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
# - Archiving -
#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables
# - Archive Recovery -
# These are only used in recovery mode.
#restore_command = '' # command to use to restore an archived logfile segment
# placeholders: %p = path of file to restore
# %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
# (change requires restart)
#archive_cleanup_command = '' # command to execute at every restartpoint
#recovery_end_command = '' # command to execute at completion of recovery
# - Recovery Target -
# Set these only when performing a targeted recovery.
#recovery_target = '' # 'immediate' to end recovery as soon as a
# consistent state is reached
# (change requires restart)
#recovery_target_name = '' # the named restore point to which recovery will proceed
# (change requires restart)
#recovery_target_time = '' # the time stamp up to which recovery will proceed
# (change requires restart)
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
# (change requires restart)
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
# (change requires restart)
#recovery_target_inclusive = on # Specifies whether to stop:
# just after the specified recovery target (on)
# just before the recovery target (off)
# (change requires restart)
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
# (change requires restart)
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
# (change requires restart)
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Servers -
# Set these on the master and on any standby that will send replication data.
max_wal_senders = 0 # max number of walsender processes
# (change requires restart)
#wal_keep_segments = 0 # in logfile segments; 0 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
max_replication_slots = 0 # max number of replication slots
# (change requires restart)
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
# - Master Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
# - Standby Servers -
# These settings are ignored on a master server.
#primary_conninfo = '' # connection string to sending server
# (change requires restart)
#primary_slot_name = '' # replication slot on sending server
# (change requires restart)
#promote_trigger_file = '' # file name whose presence ends recovery
#hot_standby = on # "off" disallows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from master
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
# - Subscribers -
# These settings are ignored on a publisher.
max_logical_replication_workers = 0 # taken from max_worker_processes
# (change requires restart)
max_sync_workers_per_subscription = 0 # taken from max_logical_replication_workers
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_bitmapscan = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_parallel_hash = on
#enable_partition_pruning = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000 # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#force_parallel_mode = off
#jit = on # allow JIT compilation
#plan_cache_mode = auto # auto, force_generic_plan or
# force_custom_plan
#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, syslog, and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.
# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)
# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on
# This is only relevant when logging to eventlog (win32):
# (change requires restart)
#event_source = 'PostgreSQL'
# - When to Log -
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
#log_transaction_sample_rate = 0.0 # Fraction of transactions whose statements
# are logged regardless of their duration. 1.0 logs all
# statements from all transactions, 0.0 never logs.
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
log_line_prefix = '%m [%p] %q%u@%d ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %p = process ID
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_statement = 'none' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Europe/Prague'
#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------
#cluster_name = '' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on
#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------
# - Query and Index Statistics Collector -
#track_activities = on
#track_counts = on
#track_io_timing = off
#track_functions = none # none, pl, all
#track_activity_query_size = 1024 # (change requires restart)
#stats_temp_directory = 'pg_stat_tmp'
# - Monitoring -
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#log_statement_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#search_path = '"$user", public' # schema names
#row_security = on
#default_tablespace = '' # a tablespace name, '' uses the default
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#default_table_access_method = 'heap'
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_min_age = 50000000
#vacuum_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_cleanup_index_scale_factor = 0.1 # fraction of total number of tuples
# before index cleanup, 0 always performs
# index cleanup
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_fuzzy_search_limit = 0
#gin_pending_list_limit = 4MB
# - Locale and Formatting -
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Europe/Prague'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 1 # min -15, max 3; any value >0 actually
# selects precise output mode
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'C' # locale for system error message
# strings
lc_monetary = 'C' # locale for monetary formatting
lc_numeric = 'C' # locale for number formatting
lc_time = 'C' # locale for time formatting
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Shared Library Preloading -
#shared_preload_libraries = '' # (change requires restart)
#local_preload_libraries = ''
#session_preload_libraries = ''
#jit_provider = 'llvmjit' # JIT library to use
# - Other Defaults -
#dynamic_library_path = '$libdir'
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_relation = -2 # negative values mean
# (max_pred_locks_per_transaction
# / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2 # min 0
#------------------------------------------------------------------------------
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#escape_string_warning = on
#lo_compat_privileges = off
#operator_precedence_warning = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#data_sync_retry = off # retry or panic on failure to fsync
# data?
# (change requires restart)
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf. Note that these are directives, not variable
# assignments, so they can usefully be given more than once.
#include_dir = '...' # include files ending in '.conf' from
# a directory, e.g., 'conf.d'
#include_if_exists = '...' # include file only if it exists
#include = '...' # include file
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here

View File

@ -191,7 +191,7 @@ databases 16
# #
# However it is possible to force the pre-4.0 behavior and always show a # However it is possible to force the pre-4.0 behavior and always show a
# ASCII art logo in startup logs by setting the following option to yes. # ASCII art logo in startup logs by setting the following option to yes.
always-show-logo yes always-show-logo no
################################ SNAPSHOTTING ################################ ################################ SNAPSHOTTING ################################
# #

View File

@ -0,0 +1,53 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!--
This is an example of a simple "solr.xml" file for configuring one or
more Solr Cores, as well as allowing Cores to be added, removed, and
reloaded via HTTP requests.
More information about options available in this configuration file,
and Solr Core administration can be found online:
http://wiki.apache.org/solr/CoreAdmin
-->
<solr>
<solrcloud>
<str name="host">${host:}</str>
<int name="hostPort">${jetty.port:8983}</int>
<str name="hostContext">${hostContext:solr}</str>
<bool name="genericCoreNodeNames">${genericCoreNodeNames:true}</bool>
<int name="zkClientTimeout">${zkClientTimeout:30000}</int>
<int name="distribUpdateSoTimeout">${distribUpdateSoTimeout:600000}</int>
<int name="distribUpdateConnTimeout">${distribUpdateConnTimeout:60000}</int>
<str name="zkCredentialsProvider">${zkCredentialsProvider:org.apache.solr.common.cloud.DefaultZkCredentialsProvider}</str>
<str name="zkACLProvider">${zkACLProvider:org.apache.solr.common.cloud.DefaultZkACLProvider}</str>
</solrcloud>
<shardHandlerFactory name="shardHandlerFactory"
class="HttpShardHandlerFactory">
<int name="socketTimeout">${socketTimeout:600000}</int>
<int name="connTimeout">${connTimeout:60000}</int>
</shardHandlerFactory>
</solr>

View File

@ -1,8 +0,0 @@
#!/bin/sh

# Post-install configuration for CKAN (legacy LXC layout).
# Reads DOMAIN, PORT, EMAIL and GMAPS_API_KEY from the environment and
# rewrites the matching keys in the CKAN ini file in place.

CKAN_INI='/srv/ckan/conf/ckan.ini'

# External hostname; append the port only when it is not the HTTPS default.
HOST="${DOMAIN}"
if [ "${PORT}" != "443" ]; then
    HOST="${DOMAIN}:${PORT}"
fi

# Rewrite the site URL, sender address and Google Maps API key.
sed -i "s|\(^ckan\.site_url = \).*|\1https://ckan.${HOST}|" "${CKAN_INI}"
sed -i "s|\(^smtp\.mail_from = \).*|\1${EMAIL}|" "${CKAN_INI}"
sed -i "s|\(^ckanext\.geoview\.gapi_key = \).*|\1${GMAPS_API_KEY}|" "${CKAN_INI}"

View File

@ -0,0 +1,20 @@
#!/bin/sh

# Post-install configuration for CKAN and its DataPusher (compose layout).
# Expects environment: VOLUMES_DIR, HOST, PORT, EMAIL, GMAPS_API_KEY.
# Rewrites config keys in the mounted volumes in place.

# Volumes
DATAPUSHER_CONF="${VOLUMES_DIR}/ckan/datapusher_conf"
CKAN_CONF="${VOLUMES_DIR}/ckan/ckan_conf"

# Variables
# External host:port; the port is appended only when not the HTTPS default.
HTTP_HOST="${HOST}"
[ "${PORT}" != "443" ] && HTTP_HOST="${HTTP_HOST}:${PORT}"

# Replacements
# Paths are quoted so a VOLUMES_DIR containing whitespace cannot word-split.
sed -i "s|\(^ckan\.site_url = \).*|\1https://${HTTP_HOST}|" "${CKAN_CONF}/ckan.ini"
sed -i "s|\(^smtp\.mail_from = \).*|\1${EMAIL}|" "${CKAN_CONF}/ckan.ini"
sed -i "s|\(^ckanext\.geoview\.gapi_key = \).*|\1${GMAPS_API_KEY}|" "${CKAN_CONF}/ckan.ini"

# Hand the external endpoint to the DataPusher container so it can trust
# the internal CA certificate for this host.
cat <<EOF >"${DATAPUSHER_CONF}/add-ca-cert.env"
HOST=${HOST}
PORT=${PORT}
EOF

sed -i "s|\(^FROM_EMAIL = \).*|\1'${EMAIL}'|" "${DATAPUSHER_CONF}/datapusher_settings.py"

View File

@ -1,10 +0,0 @@
{
"title": "CKAN",
"desc-cs": "Datový sklad",
"desc-en": "Data store",
"lxcpath": "ckan",
"version": "0.0.1",
"release": "0",
"license": "GPL",
"depends": ["alpine3.9-python2.7", "ckan-datapusher", "postgres", "redis", "solr"]
}

View File

@ -1,27 +1,8 @@
#!/bin/sh #!/bin/sh
set -ev set -ev
# Remove cronjob # Remove persistent data
rm -f /etc/periodic/hourly/ckan rm -rf "${VOLUMES_DIR}/ckan"
# Remove service
rm -f /etc/init.d/ckan
rc-update -u
# Drop database and user
[ ! -e /run/openrc/started/postgres ] && service postgres start && STOP_POSTGRES=1
echo 'DROP DATABASE IF EXISTS ckan; DROP DATABASE IF EXISTS ckan_datastore; DROP ROLE IF EXISTS ckan; DROP ROLE IF EXISTS ckan_datastore;' | lxc-attach -u 5432 -g 5432 postgres -- psql
[ ! -z ${STOP_POSTGRES} ] && service postgres stop
# Remove redis data
[ ! -e /run/openrc/started/redis ] && service redis start && STOP_REDIS=1
lxc-attach redis -- redis-cli -n 0 flushdb
[ ! -z ${STOP_REDIS} ] && service redis stop
# Remove solr core
[ -e /run/openrc/started/solr ] && service solr stop && START_SOLR=1
rm -rf /srv/solr/data/ckan
[ ! -z ${START_SOLR} ] && service solr start
# Unregister application # Unregister application
vmmgr unregister-app ckan vmmgr unregister-app ckan

View File

@ -0,0 +1,26 @@
{
"version": "2.2.0-200403",
"meta": {
"title": "Crisis Cleanup",
"desc-cs": "Mapování následků katastrof",
"desc-en": "Disaster relief mapping",
"license": "GPL"
},
"containers": {
"crisiscleanup": {
"image": "crisiscleanup_2.2.0-200403",
"depends": [
"crisiscleanup-postgres"
],
"mounts": {
"crisiscleanup/cc_conf": "srv/crisiscleanup/config"
}
},
"crisiscleanup-postgres": {
"image": "postgres_12.2.0-200403",
"mounts": {
"crisiscleanup/postgres_data": "var/lib/postgresql"
}
}
}
}

Some files were not shown because too many files have changed in this diff Show More