Add documentation

This commit is contained in:
Disassembler 2019-03-19 11:32:31 +01:00
parent 938c47f950
commit 8cc7553acd
Signed by: Disassembler
GPG Key ID: 524BD33A0EE29499
16 changed files with 1269 additions and 2 deletions


@ -3,9 +3,14 @@ set -ev
cd $(realpath $(dirname "${0}"))
# Install Alpine SDK and useful tools
# Install basic build tools
apk update
apk add alpine-sdk git file htop less openssh-client openssh-server openssh-sftp-server tar xz
apk add git file htop less openssh-client openssh-server openssh-sftp-server tar xz
# Install Alpine SDK
apk add alpine-sdk
# Install Sphinx support
apk add py3-sphinx
pip3 install recommonmark sphinx-markdown-tables
# Copy root profile files and settings
mkdir -p /root/.config/htop /root/.ssh

19
_doc/Makefile Normal file

@ -0,0 +1,19 @@
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build-3
SOURCEDIR = .
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
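With the Sphinx packages installed by `install-toolchain.sh`, the HTML documentation can then be built from the `_doc` directory, for example:
```bash
cd _doc
# any unknown target is routed to sphinx-build-3 by the catch-all rule above
make html
```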

177
_doc/conf.py Normal file

@ -0,0 +1,177 @@
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'SpotterVM'
copyright = '2019, Spotter'
author = 'Spotter'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['recommonmark', 'sphinx_markdown_tables']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown'
}
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
html_sidebars = { '**': ['localtoc.html', 'relations.html', 'searchbox.html'] }
html_show_sourcelink = False
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'SpotterVMdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'SpotterVM.tex', 'SpotterVM Documentation',
'Disassembler', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'spottervm', 'SpotterVM Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'SpotterVM', 'SpotterVM Documentation',
author, 'SpotterVM', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

9
_doc/existing/index.rst Normal file

@ -0,0 +1,9 @@
Information about existing containers
=====================================
.. toctree::
:maxdepth: 2
list
tech-overview
map-services

59
_doc/existing/list.md Normal file

@ -0,0 +1,59 @@
# List of existing containers
## List of basic and runtime layers
| Layer | Container |
|-------------------------|---------------------|
| Alpine 3.8 | alpine3.8 |
| Alpine 3.8 - PHP 5.6 | alpine3.8-php5.6 |
| Alpine 3.8 - NodeJS 8 | alpine3.8-nodejs8 |
| Alpine 3.8 - Ruby 2.4   | alpine3.8-ruby2.4   |
| Alpine 3.9 | alpine3.9 |
| Alpine 3.9 - Java 8 | alpine3.9-java8 |
| Alpine 3.9 - PHP 7.2 | alpine3.9-php7.2 |
| Alpine 3.9 - Python 2.7 | alpine3.9-python2.7 |
| Alpine 3.9 - Python 3.6 | alpine3.9-python3.6 |
| Alpine 3.9 - NodeJS 10 | alpine3.9-nodejs10 |
| Alpine 3.9 - Ruby 2.4 | alpine3.9-ruby2.4 |
| Alpine 3.9 - Tomcat 7 | alpine3.9-tomcat7 |
| Alpine 3.9 - Tomcat 8.5 | alpine3.9-tomcat8.5 |
| Sahana - Shared | sahana-shared |
## List of service containers
| Service | Container | UID/GID | Internal Port |
|-----------------|-----------------|---------|------------------|
| ActiveMQ | activemq | 61616 | 61616 (ActiveMQ) |
| CKAN Datapusher | ckan-datapusher | 8004 | 8080 (HTTP) |
| MariaDB | mariadb | 3306 | 3306 (MySQL) |
| Postgres | postgres | 5432 | 5432 (Postgres) |
| RabbitMQ | rabbitmq | 5672 | 5672 (AMQP) |
| Redis | redis | 6379 | 6379 (Redis) |
| Solr | solr | 8983 | 8983 (HTTP) |
## List of application containers
All application containers listen on internal port 8080 (HTTP).
| Application | Container | UID/GID | Host |
|----------------|-------------------|---------|-------------|
| CKAN | ckan | 8003 | ckan |
| Crisis Cleanup | crisiscleanup | 8005 | cc |
| CTS | cts | 8006 | cts |
| EcoGIS | ecogis | 8020 | ecogis |
| FrontlineSMS | frontlinesms | 8018 | sms |
| GNU Health | gnuhealth | 8008 | gh |
| KanBoard | kanboard | 8009 | kb |
| Mifos X | mifosx | 8012 | mifosx |
| Motech | motech | 8013 | motech |
| ODK Aggregate | opendatakit | 8015 | odk |
| ODK Build | opendatakit-build | 8017 | odkbuild |
| Odoo | odoo | 8019 | odoo |
| OpenMapKit | openmapkit | 8007 | omk |
| Pan.do/ra | pandora | 8002 | pandora |
| Sahana | sahana | 8001 | sahana |
| Sahana - Demo | sahana-demo | 8001 | sahana-demo |
| SAMBRO | sambro | 8001 | sambro |
| SeedDMS | seeddms | 8010 | dms |
| Sigmah | sigmah | 8011 | sigmah |
| Ushahidi | ushahidi | 8014 | ush |


@ -0,0 +1,27 @@
# Map services used in applications
| Application | Data sources | Map viewer | Configurable | Notes |
|---------------|--------------|-------------|--------------|-------|
| CKAN | OSM ([Stamen](http://maps.stamen.com)) | Leaflet | No | [1] |
| CrisisCleanup | Google Maps | Google Maps | No | |
| CTS | OSM, [ArcGIS](http://server.arcgisonline.com/arcgis/rest/services) | Leaflet | No | |
| EcoGIS | ? | FreeGIS + OpenLayers 2 | Probably | [2] |
| Odoo | Google Maps | Google Maps | No | [3] |
| OpenMapKit | OSM | N/A | Yes | [4] |
| Pan.do/ra | Google Maps | Google Maps + OxMap | No | |
| Sahana Eden | OSM ([HOT](https://www.hotosm.org/)) | OpenLayers 2 | Yes, very | [5] |
| Ushahidi | OSM ([Mapbox](https://www.mapbox.com/about/maps/), [HOT](https://www.hotosm.org/)) | Leaflet | No | |
| --- | | | | |
| WebODM | OSM | Leaflet | | |
| Crismapp | OSM | Leaflet | | |
| ThinkHazard! | OSM | Mapbox | | |
| Openforis | Google Earth | | No | |
| Tendenci | Google Maps | Google Maps | No | |
| GeoNode | OSM | ? OpenLayers 3 | | |
1. Used by CKAN extensions *reclineview*, *spatial* and *geoview*.
2. Untested, as the EcoGIS source code is not fully open. The data sources look configurable, but the full documentation is only in Italian.
3. Used by Odoo *Google Maps* module to display company/partner address on map.
4. The map is used by the OMK and ODK Android clients. OMK Server only offers the API. See [area of interest deployment](http://posm.io/docs/omk/walkthrough/) on the POSM wiki.
5. Sahana Eden supports a multitude of connectors and protocols to process map and feature data: ArcGIS REST, Bing Maps, GeoJSON, GeoRSS, Google Maps, GPX, KML, MGRS, OSM, OWM, Shapefile, TMS, WFS, WMS and XYZ.


@ -0,0 +1,316 @@
# Overview of technological requirements
## Basic system
Components which are installed directly as part of the basic virtual machine.
### Alpine linux
- **General description:** Operating system
- **Tech. description:** Lightweight linux distribution based on the musl C library
- **Depends on:** -
- **Used by:** Everything
- **Related skills:** EXTLINUX / ISOLINUX, git, linux administration (cron, filesystems, iptables, networking, user/group mgmt etc.), LUKS, LVM, OpenRC init system, POSIX standards, s6 init system, shell scripting
### Acme.sh
- **General description:** Certificate renewal tool
- **Tech. description:** Shell-based Automated Certificate Management Environment client
- **Depends on:** -
- **Used by:** nginx, VMMgr
- **Related skills:** shell scripting, SSL/TLS
### Nginx
- **General description:** Web server
- **Tech. description:** Lightweight HTTP server
- **Depends on:** -
- **Used by:** All application containers, VMMgr
- **Related skills:** HTTP (proxying, rewriting)
### LXC
- **General description:** Container virtualization host
- **Tech. description:** Operating system-level container virtualization host
- **Depends on:** -
- **Used by:** All containers
- **Related skills:** container virtualization fundamentals, linux kernel (cgroups, overlayfs, seccomp), shell scripting
### Postfix
- **General description:** Mail server
- **Tech. description:** Outbound mail transfer agent
- **Depends on:** -
- **Used by:** All application containers
- **Related skills:** SMTP
### VMMgr
- **General description:** Virtual machine and application manager web interface
- **Tech. description:** In-house Werkzeug-based virtual machine and application manager WSGI application
- **Depends on:** Nginx, LXC, Python 3
- **Used by:** User
- **Related skills:** JSON, python 3 frameworks and modules (cryptography, jinja2, requests, subprocess, werkzeug), shell scripting, WSGI application development
## Runtimes
Components which are supplied as LXC overlay layers but don't run as standalone containers.
### Java
- **General description:** Java runtime environment
- **Tech. description:** Java OpenJDK 8 runtime environment
- **Depends on:** -
- **Used by:** ActiveMQ, FrontlineSMS, Tomcat, CrisisCleanup, MifosX, Motech, OpenDataKit, OpenMapKit, Sigmah, Solr
- **Related skills:** -
### Node.js
- **General description:** JavaScript runtime environment
- **Tech. description:** Server-side Node.js 8 JavaScript runtime environment
- **Depends on:** -
- **Used by:** CrisisCleanup, GNU Health, Odoo, OpenDataKit Build, OpenMapKit
- **Related skills:** HTTP (proxying), JavaScript (language overview)
### PHP
- **General description:** PHP 7 runtime environment
- **Tech. description:** PHP 7 hypertext preprocessor scripting runtime
- **Depends on:** -
- **Used by:** KanBoard, SeedDMS, Ushahidi
- **Related skills:** HTTP (proxying), PHP 7 (language overview), PHP-FPM
### Python 2
- **General description:** Python 2 runtime environment
- **Tech. description:** Python 2 runtime environment and standard libraries
- **Depends on:** -
- **Used by:** CKAN, CKAN DataPusher, CTS, OpenMapKit, Sahana Eden
- **Related skills:** linux compilation toolchain (header files, gcc, make etc.), pip, python (language overview)
### Python 3
- **General description:** Python 3 runtime environment
- **Tech. description:** Python 3 runtime environment and standard libraries
- **Depends on:** -
- **Used by:** GNU Health, Odoo, Pan.do/ra, SeedDMS, VMMgr (doesn't use container)
- **Related skills:** linux compilation toolchain (header files, gcc, make etc.), pip, python (language overview)
### Ruby
- **General description:** Ruby runtime environment
- **Tech. description:** Ruby 2.4 runtime environment and gem package installer
- **Depends on:** -
- **Used by:** CrisisCleanup, OpenDataKit Build
- **Related skills:** linux compilation toolchain (header files, gcc, make etc.)
### Tomcat
- **General description:** Lightweight Java application server
- **Tech. description:** Java servlet and JSP container
- **Depends on:** Java
- **Used by:** MifosX, Motech, OpenDataKit, Sigmah
- **Related skills:** JVM tuning, shell scripting
## Components
Components which are supplied as LXC containers, required by other applications but not exposed directly to the end user.
### ActiveMQ
- **General description:** Message broker middleware
- **Tech. description:** Java-based message broker and messaging server
- **Depends on:** Java
- **Used by:** Motech
- **Related skills:** JVM tuning, XML
### MariaDB
- **General description:** MySQL database server
- **Tech. description:** MySQL-compatible relational database management system
- **Depends on:** -
- **Used by:** MifosX, Ushahidi
- **Related skills:** SQL (language overview)
### Postgres
- **General description:** PostgreSQL database server
- **Tech. description:** PostgreSQL relational database management system
- **Depends on:** -
- **Used by:** CKAN, CrisisCleanup, CTS, GNU Health, KanBoard, Motech, Odoo, OpenDataKit, OpenDataKit Build, OpenMapKit, Pan.do/ra, Sahana Eden, SeedDMS, Sigmah
- **Related skills:** SQL (language overview)
### RabbitMQ
- **General description:** Message broker middleware
- **Tech. description:** Erlang-based message broker and messaging server
- **Depends on:** -
- **Used by:** Pan.do/ra
- **Related skills:** -
### Redis
- **General description:** Key-value NoSQL database server
- **Tech. description:** In-memory NoSQL key-value data structure object store
- **Depends on:** -
- **Used by:** CKAN
- **Related skills:** -
### Solr
- **General description:** Full-text search server
- **Tech. description:** Apache Lucene-based full-text search and indexing platform
- **Depends on:** Java
- **Used by:** CKAN
- **Related skills:** shell scripting, JVM tuning
## Applications
Components which are supplied as LXC containers and exposed directly to the end user.
### CKAN
- **General description:** Data management and data store system
- **Tech. description:** Flask-based data store application
- **Depends on:** CKAN DataPusher, Postgres, Python 2, Redis, Solr
- **Used by:** User
- **Related skills:** PostGIS, python frameworks and modules (flask, paster, setuptools), Solr core configuration
### CKAN DataPusher
- **General description:** Data file parser for CKAN
- **Tech. description:** Python-based data file parser service for CKAN
- **Depends on:** Python 2
- **Used by:** CKAN
- **Related skills:** MIME types identification, WSGI application development
### CrisisCleanup
- **General description:** Disaster response and relief coordination
- **Tech. description:** Ruby-on-Rails-based application with Node.js-generated frontend assets
- **Depends on:** LibXML, Node.js, Postgres, Ruby
- **Used by:** User
- **Related skills:** Node.js build systems (npm, yarn), Ruby build systems and frameworks (Bundle, Devise, Gems, Rails, Rake)
### CTS
- **General description:** Commodity tracking system
- **Tech. description:** Django-based forms application
- **Depends on:** Postgres, Python 2
- **Used by:** User
- **Related skills:** python frameworks (Django), WSGI application development
### FrontlineSMS
- **General description:** Bulk text messaging
- **Tech. description:** Spring-based application for modem device access
- **Depends on:** Java
- **Used by:** User
- **Related skills:** Java build systems and frameworks (grails, Spring), Jetty, linux modem/tty handling, Xorg (X server)
### GNU Health
- **General description:** Hospital information system (ERP)
- **Tech. description:** Tryton-based form application RPC backend with Node.js-based frontend
- **Depends on:** Node.js, Postgres, Python 3
- **Used by:** User
- **Related skills:** Node.js build systems (grunt, npm), python modules (setuptools, virtualenv)
### KanBoard
- **General description:** Kanban project management
- **Tech. description:** Symfony-based forms application
- **Depends on:** PHP, Postgres
- **Used by:** User
- **Related skills:** PHP build systems and frameworks (Composer, Symfony)
### MifosX
- **General description:** Microfinance platform
- **Tech. description:** Spring-based forms application RPC backend with Node.js-packed frontend
- **Depends on:** MariaDB, Tomcat
- **Used by:** User
- **Related skills:** Java frameworks (Spring), Node.js and JavaScript development (Angular, Bower, Grunt)
### Motech
- **General description:** Mobile health (mHealth) communication platform
- **Tech. description:** Apache Felix-based forms application
- **Depends on:** ActiveMQ, Postgres, Tomcat
- **Used by:** User
- **Related skills:** Java frameworks (Apache Felix, Spring)
### Odoo
- **General description:** Business management suite (ERP/CRM)
- **Tech. description:** Werkzeug-based forms application RPC backend with Node.js-based frontend
- **Depends on:** Node.js, Postgres, Python 3
- **Used by:** User
- **Related skills:** WSGI application development
### OpenDataKit
- **General description:** Mobile data collection platform
- **Tech. description:** Spring and OpenRosa-based data store application
- **Depends on:** Postgres, Tomcat
- **Used by:** User
- **Related skills:** Java build systems and frameworks (Gradle, Spring)
### OpenDataKit Build
- **General description:** Form designer for OpenDataKit
- **Tech. description:** Ruby-based forms application with Node.js-based data converter
- **Depends on:** Node.js, Ruby
- **Used by:** User
- **Related skills:** Node.js build systems (npm), Ruby build systems and frameworks (Bundler, Rake)
### OpenMapKit
- **General description:** OpenStreetMap field data collection
- **Tech. description:** Node.js-based forms application with python2 dependencies
- **Depends on:** Node.js, Postgres, Python 2
- **Used by:** User
- **Related skills:** JavaScript development, Node.js build systems (Yarn)
### Pan.do/ra
- **General description:** Media archive and video database
- **Tech. description:** Python-based media store with Javascript-based frontend
- **Depends on:** Postgres, Python 3, RabbitMQ
- **Used by:** User
- **Related skills:** ffmpeg, imagemagick, JavaScript development (oxjs)
### Sahana Eden
- **General description:** Disaster management platform
- **Tech. description:** Web2py-based forms application
- **Depends on:** Postgres, Python 2
- **Used by:** User
- **Related skills:** PostGIS, python frameworks and modules (requests, selenium, Web2py)
### SeedDMS
- **General description:** Document management system
- **Tech. description:** PHP-based data store application with Lucene-based fulltext index
- **Depends on:** PHP, Postgres
- **Used by:** User
- **Related skills:** Apache Lucene, ghostscript, LibreOffice (unoconv), imagemagick, MIME types identification
### Sigmah
- **General description:** Project management for humanitarian aid organizations
- **Tech. description:** Apache Commons-based forms application
- **Depends on:** Postgres, Tomcat
- **Used by:** User
- **Related skills:** Java development (hibernate, log4j)
### Ushahidi
- **General description:** Crowdsourced reporting and crisis mapping
- **Tech. description:** Laravel-based RPC backend with Angular-based frontend
- **Depends on:** MariaDB, PHP
- **Used by:** User
- **Related skills:** JavaScript build tools and frameworks (Angular, Babel, Webpack), PHP build tools and frameworks (Composer, Laravel, Phinx)

9
_doc/index.rst Normal file

@ -0,0 +1,9 @@
Welcome to SpotterVM documentation!
===================================
.. toctree::
:caption: Technical documentation
:maxdepth: 3
toolchain/index
existing/index

49
_doc/toolchain/abuild.md Normal file

@ -0,0 +1,49 @@
# Alpine build and packaging
The Alpine build system is used for all custom non-LXC packages, such as the Acme.sh Let's Encrypt ACME client and the VMMgr virtual machine manager.
## Alpine wiki references
The usage of Abuild, the APK package manager and the syntax of `APKBUILD` files are best described on the Alpine wiki.
- [Abuild and Helpers](https://wiki.alpinelinux.org/wiki/Abuild_and_Helpers)
- [Creating an Alpine package](https://wiki.alpinelinux.org/wiki/Creating_an_Alpine_package)
- [APKBUILD Reference](https://wiki.alpinelinux.org/wiki/APKBUILD_Reference)
- [APKBUILD Examples](https://wiki.alpinelinux.org/wiki/APKBUILD_examples)
## Abuild in a nutshell
Building with abuild requires the `alpine-sdk` package to be installed, `/etc/abuild.conf` to be configured, and an RSA private key to be created in `/srv/build/repokey.rsa` and subsequently registered by the `abuild-keygen` command. All of this is taken care of by the `install-toolchain.sh` script as part of the [Build environment installation](vm-creation).
The Abuild toolchain is intended for automated builds and therefore has some requirements normally not found in other packaging systems. Abuild expects the `APKBUILD` files to be part of a git repository and tries to read the current commit hash. It then tries to automatically download sources, build (compile) them, strip binaries, trace dependencies, and generally perform a lot of tasks useful when compiling from sources. Finally it packages the result into one or more subpackages according to the build recipe. For the purposes of LXC packaging this is mostly useless, which is the reason why we have a [custom package manager](pkgmgr). It is, however, perfectly suitable for packages installed directly on the basic VM.
## APKBUILD and build example
The following is an `APKBUILD` example with some commonly used options to skip or bypass the default features of Abuild in case you simply need to package a bunch of existing files without any compilation or other build tasks.
```
# Contributor: Disassembler <disassembler@dasm.cz>
# Maintainer: Disassembler <disassembler@dasm.cz>
pkgname=somepackage
pkgver=0.0.1
pkgrel=0
pkgdesc="Some description"
url="https://spotter.vm/"
arch="noarch"
license="GPL"
depends="python3"
options="!check !strip"
build() {
return 0
}
package() {
mkdir -p ${pkgdir}
cp -rp mydir ${pkgdir}
}
```
The directive `options="!check !strip"` requests Abuild not to run post-build checks and not to strip binaries. The `build()` function is mandated by the Abuild documentation and simply returns an exit code without doing anything. Finally, in the `package()` function, the desired existing files are copied to `${pkgdir}` (a variable automatically set by Abuild) and packaged.
Such an `APKBUILD` recipe is then executed using the `abuild` command. Abuild normally uses `fakeroot` to isolate the build environment and discourages packaging as the root user, however our build instance is highly specialized for this purpose, so we package as root anyway. Any user (including root) needs to be a member of the `abuild` group in order to perform the task. For our root user this is again handled by `install-toolchain.sh`. If you do the packaging as root, you need to run `abuild -F`, as seen in `build-all.sh`.
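A minimal sketch of the corresponding build invocation, assuming the recipe above is saved as `/srv/build/somepackage/APKBUILD` (the path is illustrative):
```bash
cd /srv/build/somepackage
# -F allows packaging as root; the resulting .apk is written to the configured packages directory
abuild -F
```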

13
_doc/toolchain/index.rst Normal file

@ -0,0 +1,13 @@
VM building and packaging
=========================
.. toctree::
:maxdepth: 2
vm-creation
abuild
lxc-overview
lxc-build
lxc-pack
pkgmgr
vmmgr-hooks

200
_doc/toolchain/lxc-build.md Normal file

@ -0,0 +1,200 @@
# Building LXC containers
## Overview
The `lxc-build` utility creates an LXC container based on a build recipe and a build context path given as a command line parameter. If a filename is given, the build recipe is loaded from that file and the directory in which the file resides is taken as the build context, i.e. all relative paths are resolved from it. If a directory path is passed as the parameter, the directory is used as the build context and a file called `lxcfile` inside it is used as the build recipe.
### Usage
```bash
lxc-build <buildpath>
where the buildpath can be either a specific lxcfile or a directory containing one
```
## Directives used in lxcfile
The *lxcfile* syntax is designed to resemble the *Dockerfile* syntax in order to ease the potential transition. Since LXC operates on a much lower level of abstraction than Docker, some principles are applied more explicitly and verbosely. The major difference between Docker and *lxc-build* is that every directive in a *Dockerfile* creates a new filesystem layer, whereas layers in *lxc-build* are managed manually.
### IMAGE
- **Usage:** `IMAGE <name>`
- **Description:** Sets the container name. Every *lxcfile* needs to have one, otherwise no LXC config can be written and no `RUN` scripts can be run. The LXC configuration file is written to `/var/lib/lxc/<name>/config`
- **Docker equivalent:** `-t` in `docker build` command line parameters
- **Populates LXC field:** `lxc.uts.name`
### LAYER
- **Usage:** `LAYER <path>`
- **Description:** Includes an OverlayFS layer. Unlike a *Dockerfile*'s `FROM`, all layers need to be defined in the *lxcfile*. The `LAYER` directives are given from the lowermost to the uppermost layer, where the lowermost is usually the basic operating system and the uppermost is the working layer in which all subsequent `RUN` commands and other actions take place.
- **Docker equivalent:** `FROM`
- **Populates LXC field:** `lxc.rootfs.path`
### FIXLAYER
- **Usage:** `FIXLAYER <scriptname>`
- **Description:** Runs `<scriptname>` on the LXC host and passes all layer paths to the script as parameters. This helps to resolve conflicts in cases where multiple OverlayFS layers with overlapping files are mixed, e.g. a package manager cache. The idea is that all layers are read separately by the `<scriptname>` script and the fixed result is written back to the uppermost layer.
- **Docker equivalent:** None
- **Populates LXC field:** None
### RUN
- **Usage:**
```bash
RUN <label>
<commands>
<label>
```
- **Description:** Executes a shell script in the currently built container. The `<label>` is an arbitrary user-defined string which needs to be given as the first parameter and repeated at the end of the script block. The shell script between the labels is passed as-is, including comments and empty lines, to a POSIX shell with the `-e` and `-v` parameters set. Basically, the following *lxcfile* entry:
```bash
RUN EOF
# Comment
command1
command2
EOF
```
translates to the following script:
```bash
#!/bin/sh
set -ev
# Comment
command1
command2
```
The command chaining via `&&`, which is required in a *Dockerfile*, is optional in *lxc-build*.
- **Docker equivalent:** `RUN`
- **Populates LXC field:** None
### COPY
- **Usage:** `COPY <source> [destination]`
- **Description:** Recursively copies `<source>` files into `<destination>`. The source path is relative to the build context directory, the destination path is relative to the container root directory. The files are copied as `root:root`. Permissions can be changed by a subsequent `RUN` command.
The `<source>` can also be given as an http:// or https:// URL, in which case a gzip, bzip2 or xz tar archive is expected to be downloaded and unpacked into the `<destination>`. This is commonly used for creating the basic root filesystem of the container in a similar fashion to Docker's `FROM scratch` (see the example below).
- **Docker equivalent:** `COPY` or `ADD`
- **Populates LXC field:** None
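For illustration, a hypothetical bottom-layer bootstrap using an upstream Alpine miniroot tarball could look like the line below (the URL and release number are examples only, not taken from an existing recipe):
```bash
# hypothetical example: download and unpack an Alpine minirootfs tarball as the container's base filesystem
COPY https://dl-cdn.alpinelinux.org/alpine/v3.9/releases/x86_64/alpine-minirootfs-3.9.2-x86_64.tar.gz
```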
### MOUNT
- **Usage:** `MOUNT DIR|FILE <source> <destination>`
- **Description:** Creates a directory or file mount for the container. The `<source>` is usually given as absolute path existing on the LXC host, the `<destination>` is a path relative to the container root directory. If the file doesn't exist in any of the container layers, it is automatically created on container startup.
- **Docker equivalent:** `VOLUME`
- **Populates LXC field:** `lxc.mount.entry`
### USER
- **Usage:** `USER <uid> <gid>`
- **Description:** Sets UID/GID of the container init process to `<uid>` and `<gid>`. The default UID/GID is `0:0 (root:root)`.
- **Docker equivalent:** `USER`
- **Populates LXC field:** `lxc.init.uid` and `lxc.init.gid`
### CMD
- **Usage:** `CMD <command> [parameters...]`
- **Description:** Sets the init process of the container. This is the process which is automatically started after the container is launched. The default command is `/bin/true` which immediately terminates with return code 0.
- **Docker equivalent:** `CMD`
- **Populates LXC field:** `lxc.init.cmd`
### ENV
- **Usage:** `ENV <variable> <value>`
- **Description:** Populates environment variable `<variable>` with `<value>` which is then passed to the init process when the container is launched.
- **Docker equivalent:** `ENV`
- **Populates LXC field:** `lxc.environment`
### WORKDIR
- **Usage:** `WORKDIR <dirname>`
- **Description:** Sets working directory of the container init process to `<dirname>`. The default working directory is the container's root directory.
- **Docker equivalent:** `WORKDIR`
- **Populates LXC field:** `lxc.init.cwd`
### HALT
- **Usage:** `HALT <signal>`
- **Description:** Sets container stop signal to `<signal>`. The default signal is SIGINT.
- **Docker equivalent:** `--signal` in `docker kill` command line parameters
- **Populates LXC field:** `lxc.signal.halt`
## LXC config
Although the *lxcfile* populates some LXC config fields, there are a lot of defaults which remain unchanged. The template into which *lxc-build* fills the values looks as follows:
```bash
# Image name
lxc.uts.name = {name}
# Network
lxc.net.0.type = veth
lxc.net.0.link = lxcbr0
lxc.net.0.flags = up
# Volumes
lxc.rootfs.path = {rootfs}
# Mounts
lxc.mount.entry = shm dev/shm tmpfs rw,nodev,noexec,nosuid,relatime,mode=1777,create=dir 0 0
lxc.mount.entry = /etc/hosts etc/hosts none bind,create=file 0 0
lxc.mount.entry = /etc/resolv.conf etc/resolv.conf none bind,create=file 0 0
{mounts}
# Init
lxc.init.cmd = {cmd}
lxc.init.uid = {uid}
lxc.init.gid = {gid}
lxc.init.cwd = {cwd}
# Environment
lxc.environment = PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
{env}
# Halt
lxc.signal.halt = {halt}
# Log
lxc.console.size = 1MB
lxc.console.logfile = /var/log/lxc/{name}.log
# Other
lxc.arch = x86_64
lxc.cap.drop = sys_admin
lxc.hook.pre-start = /usr/bin/vmmgr prepare-container
lxc.hook.start-host = /usr/bin/vmmgr register-container
lxc.hook.post-stop = /usr/bin/vmmgr unregister-container
lxc.include = /usr/share/lxc/config/common.conf
```
For an explanation of the hooks and the overall container integration and behavior, refer to the [VMMgr hooks](vmmgr-hooks) page.
## Example lxcfile
The following is an example *lxcfile* for *Redis*:
```bash
IMAGE redis
LAYER shared/alpine
LAYER redis/redis
RUN EOF
# Create OS user (which will be picked up later by apk add)
addgroup -S -g 6379 redis
adduser -S -u 6379 -h /var/lib/redis -s /bin/false -g redis -G redis redis
# Install Redis
apk --no-cache add redis
EOF
MOUNT FILE /srv/redis/conf/redis.conf etc/redis.conf
MOUNT DIR /srv/redis/data var/lib/redis
USER 6379 6379
CMD redis-server /etc/redis.conf
```
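Assuming the recipe above is saved as `redis/lxcfile` inside the build context (the path is illustrative), the container could then be built and test-started like this:
```bash
lxc-build redis/lxcfile   # builds the layers and writes /var/lib/lxc/redis/config
lxc-start redis           # start the container using the generated config
lxc-ls -f                 # verify that it is running
```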


@ -0,0 +1,71 @@
# LXC containers overview
All user-installable applications run in LXC containers. A *container* is defined by a configuration file with the following settings:
- Network type and interface configuration
- OverlayFS storage layers
- Mountpoints to store / load persistent data
- Functional user and binary to be executed on startup
- Environment variables propagated to the container namespace
- Signal used to stop the container
- TTY / console logging
- Syscall capability restrictions
- Event hooks
The container must have at least one storage layer defined. The term *layer* is used because the storage is handled by the OverlayFS filesystem, consisting of groups of files overlaid over each other. This allows having a layer with the basic operating system, another layer with the Python runtime, and a final layer with the application using both, so it's not necessary to duplicate functionality in every container and waste disk space.
Each layer is then packaged into a separate installable package and declared as a dependency of the packages which require it. Packages with the final application layer also contain the container configuration and installation scripts to set up the application and interface it with other components installed on the host and in other containers.
## Why LXC
There are several container runtimes, with *Docker* probably being the most popular nowadays. There are a number of reasons why LXC was eventually selected instead.
First and foremost, Docker contains a huge set of tools for use with various orchestrators and large-scale applications. The premise of Docker is to run multiple instances of application containers where the individual instances are configured at runtime via command line parameters. The Docker daemon and its shim processes contain a lot of abstraction, effectively obstructing the visibility of what is actually going on under the hood. LXC, on the other hand, keeps things close to the bare minimum and transparently uses the container techniques, syscalls and namespaces exposed by the linux kernel. Containers in LXC are fully defined via configuration files and don't require any additional configuration at runtime. This can arguably be achieved even on Docker via *docker-compose*, but that adds yet another layer of abstraction and generally is not suitable for scenarios where the container images need to be added or removed on the fly.
Docker is written in the Go language, which is designed to create runtime-safe statically linked executables. With the sheer amount of Docker capabilities, this unfortunately means that the whole Docker infrastructure occupies roughly 200 MB on the VM hard drive. The basic virtual machine image is designed to be as small as possible, so having a 200 MB container host on an operating system which alone occupies roughly 40 MB does not seem ideal. The LXC runtime, written in C/C++, on the other hand occupies roughly 4 MB and doesn't need any other dependencies besides *cgroupfs*, which, for performance reasons, is good to have installed anyway.
Due to Docker's approach, storage overlay layers cannot be easily managed by the container builder and instead depend on the number and order of directives in the *Dockerfile* recipe. This often leads to duplication of layers just because they are in a slightly different order than in another container. So if one container has the layer order *system* -> *python* -> *java* and another has *system* -> *java* -> *nodejs*, only the *system* layer will be shared and the *java* layer will be duplicated. This of course makes sense if reordering the layers would make the final content inconsistent, however this is not the case with Alpine linux (there is one specific case where it is a problem, but it can be circumvented), so with LXC we have full control over what goes into a single layer and in which order the layers are overlaid.
Finally, the Docker maintainers explicitly refuse to implement the possibility of restricting the Docker daemon to private Docker repositories (registries) in the community edition of Docker. It is possible to add custom and even private registries, but it is not possible to deactivate the default public *Docker Hub*.
The downsides of using LXC are that it requires a bit more knowledge about how linux containers actually work, and that most 3rd party applications are distributed using a `Dockerfile`, which needs rewriting for LXC. The latter is simplified by the [`lxc-build`](lxc-build) tool, which aims to automate LXC container building using a *Dockerfile*-like syntax.
## Container interfaces
Since an LXC container has all environment variables, mounts, layers, the init command and everything else it needs for starting, running and stopping in its `config` file, you are completely free to build the container in any way possible. The only requirement imposed by the host (virtual machine) infrastructure is that containers with a user-accessible web interface, which is proxied over HTTP(S) by the nginx HTTP server, must be reachable via plain HTTP on port 8080/TCP.
The container itself is normally handled as a service via the host's OpenRC init scripts, calling `lxc-start` and `lxc-stop`. Should the application in the container be user-accessible, it needs to register itself in the host's nginx HTTP proxy server via the hooks described in [VMMgr hooks](vmmgr-hooks). A full example of an application container init script follows:
```bash
#!/sbin/openrc-run
description="CKAN container"
depend() {
need ckan-datapusher postgres redis solr
}
start() {
lxc-start ckan
}
start_post() {
vmmgr register-proxy ckan
}
stop_pre() {
vmmgr unregister-proxy ckan
}
stop() {
lxc-stop ckan
}
```
See [`openrc-run(8)`](http://manpages.org/openrc-run/8) manual page for reference.
## Recommended tools and practices
If the application itself doesn't support connections via plain HTTP (e.g. it is a CGI/WSGI application), the container also needs to contain a web server which will proxy the connection. The recommended web server for this purpose is the *nginx* HTTP server, which is lightweight and can proxy all commonly used gateway interfaces. Bear in mind that in some cases the application needs to be able to infer its own HTTP host, so some header tuning for both the HTTP and CGI/WSGI protocols might be in order.
In case there are multiple components or services running within the same container (e.g. the nginx HTTP server and PHP-FPM), it is advised to have them spawned and supervised by some lightweight init daemon. The recommended init system for LXC is *s6*, however if you are familiar with *daemontools* or *runit*, feel free to use them as well. In the worst case you can use *OpenRC*. Systems like *Upstart*, *systemd* or *SysV init* are not recommended due to their complexity or inability to properly supervise the spawned processes.
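As a minimal sketch, an s6 service for an in-container nginx could use a run script along these lines (the service layout and paths are assumptions, not taken from an existing container):
```bash
#!/bin/sh
# s6 run script: start nginx in the foreground so the supervisor can track it
exec nginx -g 'daemon off;'
```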


@ -0,0 +1,78 @@
# Packaging LXC containers
## Overview
The `lxc-pack` utility creates `.tar.xz` archives based on package metadata and manages the `packages` repository metadata file. If a filename is passed to `lxc-pack` as a command line parameter, the metadata are loaded from that file. In case a directory path is given, the metadata are loaded from a file called `pkg` in that directory. All metadata files are in JSON format.
The product of the *lxc-build* command described in the LXC building documentation can be used either in its entirety, i.e. both the filesystem layer and the configuration, or only as a dependency, in which case the container configuration is omitted and only the filesystem layer is used. Apart from that, the package can contain installation, upgrade and uninstallation scripts and data, all of which are optional. The accepted names are
- `install.sh` file and `install` directory for post-install scripts.
- `upgrade.sh` file and `upgrade` directory for post-upgrade scripts.
- `uninstall.sh` file and `uninstall` directory for post-uninstall scripts.
`lxc-pack` reads the metadata file, creates a tarball with the contents of the given directory under `/var/lib/lxc`, adds the install/upgrade/uninstall scripts and compresses the tarball into a `.tar.xz` archive. Then it calculates the size of the package and its SHA512 hash and adds this information to the rest of the metadata, which are then stored as a JSON dictionary in the repository-wide metadata file `packages`. Finally, it creates another SHA512 hash of the `packages` file and signs it using an ECDSA key to ensure the integrity and tamper-resistance of the repository data. The signature is stored in the `packages.sig` file. The public key for signature verification is pre-imported on the LXC hosts as part of the basic VM installation. For more details on the package manager, see the Package manager documentation.
## Usage
```bash
lxc-pack <buildpath>
where the buildpath can be either a specific meta file or a directory containing one
```
## Keys used in meta file
The `meta` file is in JSON format. All values are strings, except for `depends`, which is an array of strings, and `size`, which is an integer. A complete example is shown at the end of this section.
### title
- **Usage:** `"title": "<title>"`
- **Description:** Sets the human readable package name. It also distinguishes whether the package is user-installable or a dependency / component of another application. All packages which have a title set will show up in the list of user-installable packages in the VMMgr web GUI.
- **Mandatory:** Only for full user-installable container packages.
### desc-xx
- **Usage:** `"desc-<lang>": "<description>"`
- **Description:** Sets a human readable long description of the package. The language code `lang` is an ISO 639-1 two-letter code.
- **Mandatory:** Only for full user-installable container packages.
### lxcpath
- **Usage:** `"lxcpath": "<directory>[/subdirectory]"`
- **Description:** Sets the source path for `lxc-pack` and subsequently also for VMMgr. The `directory` is a relative path under `/var/lib/lxc`. In case only the directory is given, `lxc-pack` takes all subdirectories and files in the directory, usually resulting in a full container package (both filesystem layer and configuration). If a `/subdirectory` is given, then only the subdirectory is packaged, resulting in filesystem layer-only package, usually used as a shared dependency for other containers.
- **Mandatory:** Yes.
### version
- **Usage:** `"version": "<version>"`
- **Description:** Sets the package version. This should correspond to the actual version of the packaged product.
- **Mandatory:** Yes.
### release
- **Usage:** `"release": "<release>"`
- **Description:** Sets the package release version. Used when the same basic version of the packaged product needs to be repacked with updated base layers or install/upgrade/uninstall scripts.
- **Mandatory:** Yes.
### license
- **Usage:** `"license": "<license>"`
- **Description:** Sets the license of the packaged product.
- **Mandatory:** Yes.
### depends
- **Usage:** `"depends: ["<dependency1>", "<dependency2>", ...]`
- **Description:** Sets the package dependencies which needs to be installed before this package.
- **Mandatory:** Yes (can be empty for the basic OS filesystem layer).
### size
- **Usage:** `"size": "<bytes>"`
- **Description:** Sets the package archive size.
- **Mandatory:** Populated automatically by `lxc-pack`.
### sha512
- **Usage:** `"sha512": "<hash>"`
- **Description:** Sets the package archive SHA512 hash.
- **Mandatory:** Populated automatically by `lxc-pack`.
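Putting the keys together, a hypothetical `meta` file for a user-installable application could look as follows (all values are illustrative; `size` and `sha512` are added by `lxc-pack` itself):
```
{
    "title": "Some Application",
    "desc-en": "Example application packaged as an LXC container",
    "lxcpath": "someapp",
    "version": "1.2.3",
    "release": "0",
    "license": "GPL",
    "depends": ["alpine3.9-python3.6", "postgres"]
}
```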

73
_doc/toolchain/pkgmgr.md Normal file

@ -0,0 +1,73 @@
# Package manager
## Why custom package manager
Why use custom package management instead of the native APK package manager?
The native packaging toolchain [`abuild`](abuild) is designed for automated bulk package building. It doesn't support building packages from pre-existing directories without considerable customization and requires that a full build takes place as part of the packaging process. That includes steps like binary stripping, symlink resolution and dependency tracing. It also requires running under a non-root user inside `fakeroot`, which is problematic when LXC containers are to be packaged. Most of the limitations can be worked around (run as root using `-F`, spoof the build process by bind-mounting an existing directory to the packaging directory, skip dependency tracing using `options="!tracedeps"` in the `APKBUILD` and omit the majority of the build process by running only the `build package prepare_metafiles create_apks index clean` abuild actions), however there is no real benefit in (ab)using the native tools this way.
Furthermore, when the `apk` package manager installs a package, it first unpacks it, then runs the post-install script, and only once all packages are installed does it modify the permissions and ownership of the files to the original values contained in the package. This means that it's not possible to run the container setup as part of the post-install script, as most applications require the permissions to already be correct. Every single file, including its ownership and permissions along with a hash, is recorded in `/lib/apk/db/installed`, which only unnecessarily bloats the database of locally installed packages (e.g. the basic python 3 layer contains ~6500 files).
With the custom package manager, the whole download, unpacking and installation process can be observed directly, keeping the VMMgr web GUI user informed about the currently ongoing step, as opposed to the mere download percentage offered by bare `apk`. Finally, APK packages are only gzipped, whereas the custom solution uses xz (LZMA2), allowing for up to 70% smaller packages.
## How does it work
The package manager is integrated into the VMMgr application and can be invoked only via the VMMgr web GUI. The entry point is the `/setup-apps` URL, and all configuration related to the repository settings (URL, username and password) can be adjusted on the same page. The URL should point to the directory where all content previously created by the repository maintainer using `lxc-pack` commands is uploaded (i.e. `packages`, `packages.sig` and all `*.tar.xz` files). Once the user opens this page, VMMgr tries to contact the repository using the configured values and attempts to download the `packages` file with all the package metadata. If it fails, it checks the reason for the failure (either a connection exception or an HTTP status code) and displays an appropriate error message to the user.
If `packages` is successfully downloaded, the package manager immediately also downloads `packages.sig`, which contains an ECDSA-signed SHA512 hash of the `packages` file, and verifies the signature using the public key preloaded in `/etc/vmmgr/packages.pub`. If the signature matches, it parses the JSON contents of the `packages` file and displays the list of installable packages.
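Conceptually, the signature check is equivalent to the following openssl invocation (assuming the metadata files sit in the current directory; the actual verification is performed by VMMgr itself):
```bash
# verify the ECDSA signature of the packages metadata against the pre-imported public key
openssl dgst -sha512 -verify /etc/vmmgr/packages.pub -signature packages.sig packages
```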
Information about installed packages, including their metadata, is stored in the local metadata file `/etc/vmmgr/config.json` along with the VMMgr settings for the local virtual machine. The local metadata file is also in JSON format and the metadata are simply copied into it from the remote repository during installation.
All package manager actions (install / upgrade / uninstall) as well as stop / start actions are handled by the VMMgr queue manager. The queue manager processes the actions sequentially in the order in which they were enqueued (FIFO), so multiple package installations can never run simultaneously or interfere with stop / start actions. In the event of an unexpected failure or VM shutdown, it is possible to safely repeat the failed or unfinished actions, as the install / upgrade / uninstall methods are designed to ensure the sanity of the environment.
The whole idea is generally the same as with any other packaging system - e.g. *rpm* or *dpkg* on linux, *homebrew* on Mac or *Chocolatey* on Windows, except this packaging system is highly customised for use with LXC containers and VMMgr web GUI.
### Anatomy of a package
The files in the package are structured as follows:
```
*.tar.xz
├─ srv/
│ └─ <package>/
│ ├─ install/
│ ├─ install.sh
│ ├─ uninstall/
│ ├─ uninstall.sh
│ ├─ upgrade/
│ └─ upgrade.sh
└─ var/
└─ lib/
└─ lxc/
└─ <lxcpath>/
```
This structure is extracted directly to the root directory of the virtual machine, as it would be with any other package manager. Every package may contain the subdirectories `install`, `upgrade` and `uninstall` and the files `install.sh`, `upgrade.sh`, `uninstall.sh`, which are invoked during the respective actions. Their presence and the contents under `/var/lib/lxc` depend on the type of the package. If the package contains only a shared LXC OverlayFS layer, it doesn't contain a `config` file with the LXC container definition and it likely won't contain any of the install / upgrade / uninstall scripts and directories either, as they are not needed in this context.
### Installing a package
First, the installation method builds and flattens a dependency tree using the metadata from the repository and compares it with the list of currently installed packages taken from the local metadata file, resulting in a list of packages to be downloaded and installed, ordered by dependency requirements (i.e. packages with already satisfied dependencies are installed first, satisfying the dependencies of the subsequent ones).
All packages in this list are then downloaded as `*.tar.xz` archives from the repository and stored in the temporary directory `/var/cache/vmmgr` as `*.tar.xz.partial`. Once a package is downloaded, its SHA512 hash is calculated and verified against the value in the cryptographically signed `packages` metadata file. If the hashes don't match, the whole installation process is interrupted and an error message informing about the mismatch is displayed to the user. If the hashes match, the `*.tar.xz.partial` is renamed to `*.tar.xz`. Therefore, in the event of an unexpected VM shutdown or connection interruption, all `*.tar.xz` archives in `/var/cache/vmmgr` can be considered verified and don't need to be downloaded again when the user decides to retry the installation.
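The per-archive check is conceptually a plain SHA512 comparison, e.g. (the package name and the `$expected` variable are placeholders):
```bash
# $expected holds the sha512 value taken from the signed packages metadata
actual="$(sha512sum /var/cache/vmmgr/somepackage.tar.xz.partial | cut -d' ' -f1)"
if [ "$actual" = "$expected" ]; then
    mv /var/cache/vmmgr/somepackage.tar.xz.partial /var/cache/vmmgr/somepackage.tar.xz
fi
```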
Once all the packages are downloaded and their checksums are verified, the installation method unpacks them. Prior to unpacking, the method ensures the sanity of the filesystem by purging the directories and files (if they exist) which are to be used by the packages being installed. This includes `/var/lib/lxc/<lxcpath>`, `/srv/<package>` and `/var/log/lxc/<package>.log`. The `*.tar.xz` archive is deleted right after decompression.
After all the package archives are unpacked, the `uninstall.sh` script is run (if it is present) to ensure the sanity of other components. This script attempts to remove objects and interfaces installed within components which are not part of the currently installing package (databases and database users, Solr cores, MQ definitions...). This requires that the `uninstall.sh` script is written in a defensive manner (e.g. `DROP DATABASE IF EXISTS...`) and must not exit with a non-zero code even if no objects and interfaces for this package exist yet.
Next, the `install.sh` script is run, which sets up all the objects and interfaces which need to be installed in other components (databases, database users) and performs all the post-installation steps for the package being installed, such as the creation of the persistent configuration and data directory under `/srv/<package>` on the VM. In the case of user-installable application packages, the very last command in the `install.sh` script is the `vmmgr register-app` [VMMgr hook](vmmgr-hooks), which creates a definition for the VMMgr web GUI, including the administrator credentials and the subdomain on which the application will be accessible.
Finally, the package itself with its metadata, stripped of the `size` and `sha512` keys automatically added by `lxc-pack` during packaging, is added to the local repository metadata in `/etc/vmmgr/config.json`. After this, the package is considered fully installed and can be used by the users or other applications.
### Upgrading a package
The upgrade process is not yet implemented in the package manager. The idea is that VMMgr simply compares the version and release from the repository metadata with the local metadata and offers an upgrade if the versions don't match. The dependency list build, download and verification parts will be the same as during installation. The upgrade process will purge only the LXC data and LXC log, but will leave the configuration and data under `/srv/<package>` unchanged. It will then overwrite the install / upgrade / uninstall scripts and directories and run the `upgrade.sh` script. Finally, it will re-register the package metadata in the local repository metadata file `/etc/vmmgr/config.json`.
### Uninstalling a package
The uninstallation process first compiles a dependency list in a similar fashion to the first step of installation, except this time it checks which packages are recorded as dependencies and will become unused (and therefore unnecessary) after the current package is uninstalled.
For every package in this list, the `uninstall.sh` script is run, removing objects and interfaces installed within components which are not part of the package being uninstalled (databases and database users, Solr cores, MQ definitions...).
After `uninstall.sh` finishes, all files related to the package being uninstalled are deleted. This includes `/var/lib/lxc/<lxcpath>`, `/srv/<package>` and `/var/log/lxc/<package>.log`.
As the final step, the package metadata are unregistered (removed) from the local repository metadata file `/etc/vmmgr/config.json`.


@ -0,0 +1,72 @@
# Virtual machine creation
## Virtual machine specifications
- **Memory:** 4 GB
- **CPU:** 1 processor, 2 cores
- **Hard Disk:** SCSI, 300 MB
- **CD/DVD**: IDE
- **Network Adapter**: Bridged
In case you're setting up a VMware virtual machine, select the OS type *Other Linux 3.x kernel 64-bit*, and after the VM is created, manually edit the `*.vmx` file using a text editor and add `mem.hotadd = "FALSE"`. Failing to do so will result in a system which is unable to boot. Other hypervisors don't need this adjustment.
## Virtual machine installation
Download **Alpine Virtual 3.9.0 x86_64** from <https://alpinelinux.org/downloads/> and boot from it. At the login prompt, use the root user without password to log in.
```bash
# Set up interfaces (leave the default choices)
setup-interfaces
ifup eth0
# Download and launch the setup script
wget dl.dasm.cz/_vm.sh
sh _vm.sh
```
The script will perform the installation and configuration of Alpine linux, LXC and the whole VMMgr platform. The virtual machine is protected by LUKS-on-LVM disk encryption. The encryption password, which is also the password for the VMMgr web administration interface, will be asked for at the beginning of the script execution. Root access is disabled.
After the script finishes and shuts down the virtual machine, remove the CD/DVD drive from the virtual machine settings and extend the HDD to 80 GB. Installing on a small disk and extending it only afterwards minimizes the overall size of the OVA (Open Virtual Appliance) file to which the VM will be exported.
## Build environment installation
Follow the VM creation steps above, but comment out the line disabling root login in the `_vm.sh` file before executing it. After the script finishes and the HDD size is adjusted, add a serial port in the virtual machine settings and set it to use a Unix socket or named pipe (e.g. `\\.\pipe\alpine`). Then start the VM and connect to the serial port using *screen*, *PuTTY* or any other terminal client.
### First time setup
```bash
# Install git and OpenSSH client
apk --no-cache add git openssh-client
# Create SSH key
ssh-keygen -t ecdsa
```
Assign the newly generated key to your GitLab account.
```bash
# Clone the repository
git clone --recurse-submodules ssh://git@gitlab.dasm.cz:2222/Spotter-Cluster/Spotter-Cluster.git
# Install the build toolchain
Spotter-Cluster/_build/install-toolchain.sh
```
After the script finishes, it is possible to connect via SSH (provided you have your personal key in `authorized_keys`), as the toolchain also sets up *openssh-server*. The serial connection will work only until it is disconnected. If it should keep working indefinitely, run
```bash
sed -i 's/ttyS0::once/ttyS0::respawn/' /etc/inittab
kill -1 1
```
### Building the packages
There are 3 distinct packaging systems.
1. Just a plain tar for basic OS setup used by `_vm.sh` installation script.
2. [Abuild](abuild) for the native Alpine linux packages (APK) used for ACME client and VMMgr packaging.
3. [`lxc-build`](lxc-build) / [`lxc-pack`](lxc-pack) for LXC container building and packaging.
Before any building and packaging can be started, the build toolchain, including the signing keys, needs to be set up. This is done via the `install-toolchain.sh` script.
Once the setup is complete, building and packaging can be done manually on a per-container basis, or a full build of all components can be run via the `build-all.sh` script. The full build process takes a considerable amount of time, so it is advised to keep a persistent build VM.
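For a manual per-container run, the two tools are simply chained, e.g. (the path inside the repository is illustrative):
```bash
# build the container layers and LXC config, then package the result
lxc-build lxc/redis
lxc-pack lxc/redis
```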


@ -0,0 +1,90 @@
# VMMgr command line hooks
VMMgr is mostly a WSGI web GUI application, but there are a few specific cases when it is invoked via the command line.
## Installation hooks
### register-app
This hook is invoked by the [Package manager](pkgmgr), specifically by the `install.sh` script of the package being installed. It is invoked as
```
vmmgr register-app <application> <subdomain> [admin-username] [admin-password]
```
Where `application` is the internal application name, the same as used in the package name, `subdomain` is the subdomain on which the application will be accessible, and `admin-username` and `admin-password` are optional admin credentials created by `install.sh` during the application setup. These parameters will show up as part of the application definition in `/etc/vmmgr/conf.json` and will be visible / usable via the web GUI.
### unregister-app
Counterpart to `register-app`. This hook is invoked by `uninstall.sh` and simply removes the application definition from `/etc/vmmgr/conf.json`, rendering it invisible / unusable via the web GUI, which is expected as part of the uninstallation process. It is invoked as
```
vmmgr unregister-app <application>
```
Where `application` is the internal application name, the same as used in the package name.
## LXC hooks
LXC hooks set various environment variables prior to calling the defined executables. For an overview of the native LXC hooks, see the section *Container hooks* in the official [lxc.container.conf(5) documentation](https://linuxcontainers.org/lxc/manpages/man5/lxc.container.conf.5.html). All hooks mentioned in this chapter are hardcoded in the container configuration via the template used by [`lxc-build`](lxc-build).
### prepare-container
This hook is invoked by LXC's `lxc.hook.pre-start`, which is called in the host environment (i.e. in the virtual machine, not in the container) before the container's ttys, consoles or mounts are up. This hook removes the contents of the container's ephemeral OverlayFS layer in case of a previous unclean container shutdown. It then calls the application's `update-conf.sh` script (if it is present) with environment variables populated from the configuration in `/etc/vmmgr/conf.json`, allowing for the correct configuration of the application in components which are not part of the starting container, e.g. the application URL in a database or a persistent configuration file. It is invoked as
```
vmmgr prepare-container
```
The hook expects the `LXC_NAME` environment variable to be populated.
### register-container
This hook is invoked by LXC's `lxc.hook.start-host`, which is called in the host environment (i.e. in the virtual machine, not in the container) after the container has been set up, immediately before launching the container's init process. This hook sets up the container's network settings (IP address, netmask, default route). The network settings are managed via a *poor man's DHCP server*, which simply assigns the first unassigned IP address from the range 172.17.0.0/16 and holds the lease table in the host's `/etc/hosts`, which is also mounted in all containers, keeping the network settings consistent across container stops / starts. The hook is invoked as
```
vmmgr register-container
```
The hook expects the `LXC_NAME` and `LXC_PID` environment variables to be populated, so it can record the value of `LXC_NAME` in the `/etc/hosts` lease table and then `nsenter` the container namespace via `LXC_PID` and set the leased address.
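A heavily simplified sketch of what the hook does (the real logic lives in VMMgr; the address, interface name and lease selection are illustrative):
```bash
# pick a free address from 172.17.0.0/16 and record the lease in the shared /etc/hosts
IP="172.17.0.5"
echo "${IP} ${LXC_NAME}" >> /etc/hosts
# enter the container's network namespace and apply the leased settings
nsenter -t "${LXC_PID}" -n ip link set eth0 up
nsenter -t "${LXC_PID}" -n ip addr add "${IP}/16" dev eth0
nsenter -t "${LXC_PID}" -n ip route add default via 172.17.0.1
```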
### unregister-container
This hook is invoked by LXC's `lxc.hook.post-stop`, which is called in the host environment (i.e. in the virtual machine, not in the container) after the container has been shut down. This hook releases the IP address leased by `register-container` and then removes the contents of the container's ephemeral OverlayFS layer. It is invoked as
```
vmmgr unregister-container
```
The hook expects the `LXC_NAME` environment variable to be populated.
## Init hooks
### rebuild-issue
This hook is called by `/sbin/vmtty`, which is the default *login program* used by `/sbin/getty` defined in `/etc/inittab`. This tty contains a banner with branding, legal notice and information about VM network settings and URL, stored in `/etc/issue`. The URL needs to be generated based on the settings in `/etc/vmmgr/conf.json`, which is exactly what this hook does. On every invocation, it regenerates the contents of `/etc/issue` with up-to-date information. The hook is invoked as-is with no parameters or environment variables.
```
vmmgr rebuild-issue
```
### register-proxy
This hook is invoked by `/etc/init.d/<application>` OpenRC init script as part of `start_post()` function. It is invoked as
```
vmmgr register-proxy <application>
```
The hook creates an HTTP proxy configuration file for the VM's nginx HTTP server for the given `application`, based on the settings in `/etc/vmmgr/conf.json`. It then reloads the nginx service to put the configuration into effect. The proxy configuration requires a valid IP/hostname in `/etc/hosts`, which exists only after the container has been started and the `register-container` hook has been called, hence the post-start invocation.
### unregister-proxy
Counterpart to `register-proxy`. This hook is called by `/etc/init.d/<application>` OpenRC init script as part of `stop_pre()` function. It is invoked as
```
vmmgr unregister-proxy <application>
```
The hook simply removes the configuration file for the VM's nginx HTTP server for the given `application` and reloads the nginx service so the configuration is no longer in effect.