Removes outdated documentation

pierreozoux 2015-01-29 11:11:12 +00:00
parent 0f3a92f626
commit a06a40e917
15 changed files with 2 additions and 637 deletions


@@ -1,60 +1,6 @@
## IndieHosters
This repository contains the configuration and scripts we use to control our servers.
It can run inside Vagrant or [deploy to a server](doc/getting-started-as-a-hoster.md).
This repository contains the configuration and scripts I use to control my servers.
## Prerequisites for running this code with Vagrant:
- [vagrant](http://www.vagrantup.com/)
- [virtualbox](https://www.virtualbox.org/)
- nfs
  - linux: run `apt-get install nfs-kernel-server`, or your OS equivalent
- [vagrant-hostsupdater](https://github.com/cogitatio/vagrant-hostsupdater)
  - run `vagrant plugin install vagrant-hostsupdater` to install
There is no documentation yet, work in progress. If you want to use this repo, please send me an email.
## Get started:
```bash
vagrant up
```
Wait for the provisioning to finish (~40mins), and go to your browser: https://indiehosters.dev
If the process fails, for instance due to network problems, you can retry by running `vagrant provision`.
### Set up a domain:
```bash
vagrant ssh core-1
sudo mkdir -p /data/import/example.dev/TLS
sudo cp /data/indiehosters/scripts/unsecure-certs/example.dev.pem /data/import/example.dev/TLS
sudo systemctl enable static@example.dev
sudo systemctl start static@example.dev
```
Check https://example.dev in your browser!
### Cleaning up
To clean up leftover state from previous runs of your VM, you can do:
```bash
vagrant destroy
vagrant up
```
## Tests
```bash
vagrant destroy
vagrant up
# Set up example.dev as above, and test https://example.dev in your browser
vagrant ssh core-1
sudo su
/data/indiehosters/tests/start.sh
exit
exit
vagrant reload --provision
vagrant ssh core-1
sudo su
/data/indiehosters/tests/finish.sh
```

Vagrantfile

@@ -1,81 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
Vagrant.require_version ">= 1.5.0"

# Size of the CoreOS cluster created by Vagrant
$num_instances = 1

# Official CoreOS channel from which updates should be downloaded
$update_channel = 'stable'

# Settings for VirtualBox VMs
$vb_memory = 1024
$vb_cpus = 1

BASE_IP_ADDR = ENV['BASE_IP_ADDR'] || "192.168.65"
HOSTNAME = ENV['HOSTNAME'] || "indiehosters.dev"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = "coreos-%s" % $update_channel
  config.vm.box_version = ">= 308.0.1"
  config.vm.box_url = "http://%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json" % $update_channel

  config.vm.define "backup" do |backup|
    backup.vm.provider :virtualbox do |vb|
      vb.memory = 512
      vb.cpus = 1
      vb.check_guest_additions = false
      vb.functional_vboxsf = false
    end
    # plugin conflict
    if Vagrant.has_plugin?("vagrant-vbguest") then
      backup.vbguest.auto_update = false
    end
    backup.vm.hostname = "backup.dev"
    backup.vm.network :private_network, ip: "192.168.65.100"
  end

  (1..$num_instances).each do |i|
    config.vm.define "core-#{i}" do |core|
      core.vm.provider :virtualbox do |vb|
        vb.memory = $vb_memory
        vb.cpus = $vb_cpus
        # On VirtualBox, we don't have guest additions or a functional vboxsf
        # in CoreOS, so tell Vagrant that so it can be smarter.
        vb.check_guest_additions = false
        vb.functional_vboxsf = false
      end
      # plugin conflict
      if Vagrant.has_plugin?("vagrant-vbguest") then
        core.vbguest.auto_update = false
      end
      core.vm.hostname = HOSTNAME
      core.hostsupdater.aliases = ["example.dev"]
      core.vm.network :private_network, ip: "#{BASE_IP_ADDR}.#{i+1}"
      core.vm.synced_folder ".", "/data/indiehosters", id: "coreos-indiehosters", :nfs => true, :mount_options => ['nolock,vers=3,udp']
      core.vm.provision :file, source: "./cloud-config", destination: "/tmp/vagrantfile-user-data"

      $install_insecure_keys = <<SCRIPT
mkdir ~/.ssh
wget https://raw.githubusercontent.com/mitchellh/vagrant/master/keys/vagrant.pub -O ~/.ssh/id_rsa.pub
wget https://raw.githubusercontent.com/mitchellh/vagrant/master/keys/vagrant -O ~/.ssh/id_rsa
chmod 600 ~/.ssh/id_rsa
SCRIPT
      core.vm.provision :shell, inline: $install_insecure_keys
      core.vm.provision :shell, inline: "mkdir -p /data/runtime/haproxy/approved-certs; cp /data/indiehosters/scripts/unsecure-certs/*.pem /data/runtime/haproxy/approved-certs"
      core.vm.provision :shell, path: "./scripts/setup.sh", args: [HOSTNAME]

      $start_indiehosters_dev = <<SCRIPT
sudo mkdir -p /data/import/indiehosters.dev/TLS
sudo cp /data/indiehosters/scripts/unsecure-certs/indiehosters.dev.pem /data/import/indiehosters.dev/TLS
sudo systemctl enable static@indiehosters.dev
sudo systemctl start static@indiehosters.dev
SCRIPT
      core.vm.provision :shell, inline: $start_indiehosters_dev
    end
  end
end


@@ -1,27 +0,0 @@
#cloud-config
hostname: k1
coreos:
  update:
    reboot-strategy: best-effort
  etcd:
    addr: 172.17.42.1:4001
    bind-addr: 172.17.42.1:4001
    peer-addr: 172.17.42.1:7001
    peer-bind-addr: 172.17.42.1:7001
  units:
    - name: etcd.service
      command: start
write_files:
  - path: /etc/environment
    permissions: 0600
    owner: root
    content: |
      ENCRYPT_KEY=""
      BACKUP_DESTINATION="core@backup.dev"
ssh_authorized_keys:
  - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key
users:
  - name: backup
    ssh-authorized-keys:
      # this should be the ssh key of the root user of the other server
      - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key


@@ -1,36 +0,0 @@
# Adding an application
There are two types of application: server-wide and per-user. Right now, the server-wide applications are the postfix-forwarder
and the haproxy; the available per-user applications are wordpress, static, and static-git.
# Adding a server-wide application
To add a server-wide application, first make sure it can run on one IPv4 address for multiple domains. If it can, then:
* Package it into one Dockerfile per process, and add these to the [dockerfiles](https://github.com/indiehosters/dockerfiles) repo.
* Add `docker pull`, `systemctl enable` and `systemctl start` commands to `scripts/setup.sh`.
* Make it take user data from /data/PROCESS/; data that is needed at runtime but should not be backed up can
go into /data/runtime/PROCESS
* Create a systemd unit file for each process in the `unit-files/` folder
* Either add functionality to `scripts/add-site.sh` to configure domains on this new service, or describe the manual steps for this
in [deploying-a-server.md](deploying-a-server.md).
* Double-check that all the documentation is still correct.
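For illustration, the lines you add to `scripts/setup.sh` might look roughly like this (the "mta" process name and image are made up; substitute your real ones):
```bash
# Hypothetical additions to scripts/setup.sh for a new server-wide process
# called "mta" (name and image are illustrative):
docker pull indiehosters/mta     # fetch the image at setup time
systemctl enable mta.service     # make it start on every boot
systemctl start mta.service      # and start it right away
```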
# Adding a per-user application
To add a per-user application, first make sure it can run behind an SNI offloader. For instance, for WordPress to work behind an SNI
offloader, we had to activate the "https" plugin. If it can, then:
* First study how an existing application (for instance WordPress) works in this repo; the easiest approach is to copy it as a starting point.
* Package it into one Dockerfile per process, and add these to the [dockerfiles](https://github.com/indiehosters/dockerfiles) repo.
* Add `docker pull` commands to `scripts/setup.sh`.
* Make it take user data from /data/domains/DOMAIN.COM/PROCESS/; data that is needed at runtime but should not be backed up can
go into /data/runtime/domains/DOMAIN.COM/PROCESS.
* Create a systemd unit file for each process in the `unit-files/` folder, ending in "@.service", so they get symlinked for each domain.
* If there are cronjobs to be added, then this is done with "@.timer" files.
* Link the processes together, using docker linking and systemd dependencies.
* Add import logic, and if there is any pre-backup action necessary to make sure all relevant data is under /data/domain, then add this as
a pre-step to the backup unit.
* Either add functionality to `scripts/add-site.sh` to configure domains on this new service, or describe the manual steps for this
in [deploying-a-server.md](deploying-a-server.md).
* Double-check that all the documentation is still correct.
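As a starting point, a new per-user unit file might look roughly like this sketch (all names here are illustrative, not actual units from this repo; compare the existing nginx@.service for the real pattern):
```bash
# Sketch of a templated per-user unit file; systemd substitutes the domain
# name for %i when you run e.g. `systemctl start myapp@example.com`.
cat <<'EOF' > unit-files/myapp@.service
[Unit]
Description=myapp for %i
Requires=myapp-importer@%i.service myapp-discovery@%i.service
After=myapp-importer@%i.service

[Service]
ExecStartPre=-/usr/bin/docker rm -f myapp-%i
ExecStart=/usr/bin/docker run --rm --name myapp-%i \
  -v /data/domains/%i/myapp:/data \
  indiehosters/myapp
ExecStop=/usr/bin/docker stop myapp-%i

[Install]
WantedBy=multi-user.target
EOF
```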


@@ -1,51 +0,0 @@
# Architecture based on systemd, docker, haproxy, and some bash scripts
Our architecture revolves around a
[set of systemd unit files](https://github.com/indiehosters/indiehosters/tree/master/unit-files). They come in various types:
## Server-wide processes
The haproxy.* and postfix.* unit files correspond to two server wide processes. They run Docker containers from images in the
[server-wide/ folder of our dockerfiles repo](https://github.com/indiehosters/dockerfiles/tree/master/server-wide).
The haproxy-confd service unit starts the configuration service for haproxy. It monitors `etcdctl ls /services` to see if any new backends were created, and updates the haproxy configuration files. These files live in `/data/runtime/haproxy/` on the host system. It is required by the haproxy.* unit. That means that when you run `systemctl start haproxy`, and then run `docker ps` or `systemctl list-units`, you will see that systemd not only started the haproxy container, but also the haproxy-confd container.
There is currently no similar service for updating `/data/runtime/postfix/`, so you will have to update the configuration files in that folder manually, and then run `systemctl restart postfix`.
Etcd is configured in the `cloud-config` file. The `scripts/setup.sh` script takes care of enabling and starting the haproxy and postfix services, and the haproxy-confd service, which listens for changes in the backends configuration in [etcd](https://github.com/coreos/etcd). New backends are automatically added to the haproxy configuration as soon as their private docker IP address is written into etcd.
## HAProxy backends: static, static-git, wordpress
A per-user process is a haproxy backend for a specific domain name. For now, we have three applications available: static, static-git, and wordpress. But we are working on adding more. Please, if you want, you can create more :)
You will notice there are also some other units in the `unit-files/` folder of this repository, like the gitpuller and mysql ones.
Whenever you start a wordpress unit, it requires a mysql service.
Whenever you start a static-git unit, it wants a static-git-puller unit.
In all three cases, an -importer unit and a -discovery unit are required.
This works through a
[`Requires=` directive](https://github.com/indiehosters/indiehosters/blob/0.1.0/unit-files/nginx@.service#L6-L7) which systemd interprets, so that if you start one service, its dependencies are also started (you can see that in `systemctl list-units`).
## Discovery
The `-discovery` units find out the local IP address of the backend, check whether it is up by doing a `curl`, and if so, write the IP address into etcd. The `haproxy-confd` service notices this and updates the haproxy config.
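In shell terms, one iteration of a `-discovery` unit boils down to something like this sketch (the container name and etcd key are illustrative):
```bash
# Find the backend's private docker IP, check it responds, publish to etcd.
IP=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' static-example.com)
if curl --silent --fail "http://$IP/" > /dev/null; then
  etcdctl set "/services/example.com" "$IP"   # haproxy-confd picks this up
fi
```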
## Import
The `-import` units check whether data exists; if it doesn't, they try to import it from the `/data/import` folder and create the initial data state, for instance by doing a git clone, or by untarring a vanilla wp-content for a wordpress installation.
Note that some initialization is also done by the Docker images themselves - for instance the wordpress image runs a [shell script](https://github.com/pierreozoux/tutum-docker-wordpress-nosql/blob/master/run-wordpress.sh) at container startup, that creates the initial mysql database if it didn't exist yet.
We are working on merging everything inside the docker image.
## Gitpuller
The `-gitpuller` unit is scheduled to run every 10 minutes by the .timer file. When it runs, it does a git pull to update the website content at one of the haproxy backends from the git repository mentioned in the GITURL file.
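The scheduling side is plain systemd; a .timer of roughly this shape (a sketch, not the actual file from `unit-files/`) fires the matching service every 10 minutes:
```bash
cat <<'EOF' > unit-files/static-git-puller@.timer
[Unit]
Description=periodic git pull for %i

[Timer]
OnCalendar=*:0/10

[Install]
WantedBy=timers.target
EOF
```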
## Scripts
There is one important script you can run on your server. You can also run the commands it contains manually, using it as a cheatsheet of how to [set up a new server](https://github.com/indiehosters/indiehosters/tree/master/scripts/setup.sh).
There are also deploy scripts which do the same from a jump box, so you can orchestrate multiple servers from one central vantage point. They are in the
[deploy/](https://github.com/indiehosters/indiehosters/tree/master/deploy)
folder of this repo, and they are the scripts referred to in the 'how to deploy a server' document. They basically run the scripts from the scripts/ folder over ssh.
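In essence (simplified, with an illustrative hostname; see `deploy/deploy.sh` for the real arguments), they boil down to:
```bash
# Copy the scripts to the target host and run the setup there over ssh.
scp -r scripts core@k3:/tmp/indiehosters-scripts
ssh core@k3 'sudo /tmp/indiehosters-scripts/setup.sh indiehosters.example'
```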


@@ -1,97 +0,0 @@
# Deploying a server
## Before you start
Make sure you read [getting started](getting-started-as-a-hoster.md) first.
### Prepare your servers
#### with CoreOS
* Get 2 CoreOS servers, for instance from [RackSpace](rackspace.com), [Vultr](vultr.com), or [Hetzner](http://serverboerse.de/).
  * let's call them k1 and k2
  * they will be backups of each other
* Modify the cloud-config according to your needs
  * make sure the backup user gets the ssh public key of the root of the other server
#### other linuxes
* If you prefer another operating system, you can also run our Docker images [using just Docker and bash](using-just-docker-and-bash.md).
* If you didn't add your public ssh key during the order process (e.g. through your IaaS control panel or a cloud-config file),
scp your laptop's public ssh key (probably in `~/.ssh/id_rsa.pub`) to `.ssh/authorized_keys` for the remote user
you will be ssh-ing and scp-ing as (the default remote user of our deploy scripts is 'core').
* Give the new server a name (in this example, we call the server 'k3')
* Add k3 to your /etc/hosts with the right IP address
* If you have used this name before, run `ssh-keygen -R k3`
* Ssh into your server, and run `ssh-keygen -t rsa` (use all the default settings, empty passphrase)
* Set up a backups server at an independent location (at least a different data center, but preferably also a different IaaS provider, the bu25 plan of https://securedragon.net/ is a good option at 3 dollars per month).
* Set up a git server by following http://www.git-scm.com/book/en/v2/Git-on-the-Server-Setting-Up-the-Server (no need to set up any repos like 'project.git' yet). Let's call the backup server 'bu25' (add this to /etc/hosts on k3).
* Add the ssh key from k3:.ssh/id_rsa.pub to the authorized_keys for the git user (not the root user) on bu25.
* Check that you can `ssh git@bu25` from k3.
* Exit from the double ssh back to your laptop, and from the root folder of this repository, run `sh ./deploy/deploy.sh k3 git@bu25 master root`
* The rest should be automatic! (ignore the warning about backup.dev, and note that haproxy will not start as long as there are no websites on your server).
### Adding an existing website
* The IndieHosters architecture is migration-oriented, so it aims to make moving a domain from one server to another very easy.
* If you already have a domain in backups, say example.com, then it's easy to add it to this new server.
* Say domain example.com runs the 'static' image. Then ssh into k3, and run:
```bash
systemctl enable static@example.com
systemctl start static@example.com
```
* This will automatically do the following things:
  * Clone the backup repo from bu25
  * Set up an hourly backup job for the user data (which will live in `/data/domains/example.com` on k3)
  * Start an nginx container
  * Note its IP address in etcd
  * Rewrite the haproxy configuration
  * (Re)start haproxy
* If the domain runs wordpress (and similarly if it runs Known), then:
  * the git clone from backup will fail because of [issue 46](https://github.com/indiehosters/indiehosters/issues/46), so make sure to follow the workaround steps from there.
  * initial wordpress setup will be impossible with the redirect because of [Dockerfiles issue 39](https://github.com/indiehosters/dockerfiles/issues/39), so make sure to follow the workaround steps from there.
  * Indiewebifying the WordPress instance is still a manual process, so make sure you know the user's preferred username (you can use this for the SiteName as well), and preferred language.
  * Configure an arbitrarily long password, and save it in /data/domains/domain.com/wordpress/login.txt. The user will be logging in with IndieAuth, but it's good to make a note of which password you set.
  * Activate the IndieWeb plugins/themes/widget as per [Dockerfiles issue 40](https://github.com/indiehosters/dockerfiles/issues/40).
* Make sure to edit the files in /data/runtime/postfix/ to set up email forwarding. Also, ask another indiehoster to add the same forwarding and act as a fallback MX.
### Adding a new website to your server
* For each site you want to deploy on the server, e.g. example.com, do the following:
* Does example.com already exist as a domain name?
  * If yes, then find out to what extent it's currently in use (and needs to be migrated with care). There are a few options:
    * Transfer the domain into your DNR account
    * Set up DNS hosting for it and ask the owner to set authoritative DNS to the DNS servers you control
    * Ask the user to keep DNR and DNS control where it is currently, but to switch DNS when it's ready at the new server, and every time
      you add or remove an IP address (not a good idea, unless the user insists that they prefer this option)
  * In any case, you will probably need access to the hostmaster@example.com email address, for the StartSSL process *before*
    the final DNS switch. You could also ask them to tell you the verification code that arrives there, but that has to be done
    in real time, immediately when you click 'verify' in the StartSSL UI. If they forward the email the next day, then the token
    will already have expired.
  * If no, register it (at Namecheap or elsewhere).
* Decide which image to run as the user's main website software (in version 0.2.2, 'static', 'static-git', 'wordpress', and 'known' are supported)
* For the 'wordpress' image, all you need is the TLS certificate. Use the 'static-git' image if you already have some static
content that should go on there, and which you can put in a public git repository somewhere.
* Unless you already have a TLS certificate for example.com, get one
(from StartSSL if you own the domain in question, from namecheap or elsewhere if you do not), and concatenate the certificate
and its unencrypted private key into one file.
* Make sure the TLS certificate is valid (use `scripts/check-cert.sh` for this), and scp it to `/data/import/example.com/TLS/example.com.pem` on k3.
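For example (filenames illustrative, and assuming `check-cert.sh` takes the .pem path as its argument):
```bash
# Build the combined .pem, verify it, and ship it to the server.
cat example.com.crt intermediate-ca.crt example.com.key > example.com.pem
sh scripts/check-cert.sh example.com.pem   # sanity-check before shipping it
scp example.com.pem core@k3:/tmp/
ssh core@k3 'sudo mkdir -p /data/import/example.com/TLS \
  && sudo mv /tmp/example.com.pem /data/import/example.com/TLS/'
```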
* Now ssh into k3, and if for instance 'wordpress' is the image you chose, run:
```bash
systemctl enable wordpress@example.com
systemctl start wordpress@example.com
```
* In case you're going for the 'static-git' image, store the git url with the content in `/data/domains/example.com/static-git/GITURL`.
* In case you're going for the 'static' image, store the html content under `/data/domains/example.com/static/www-content`.
* Test the site using your /etc/hosts. You should see the data from the git repo, or the static content, or a wordpress start page
on both http and https.
* If all looks well, switch DNS and monitoring.
* If not, check what happened by looking at what's in `/data/domains/example.com`, `/data/runtime/domains/example.com`, and `/data/runtime/haproxy` on k3. Note that this part of our scripts is currently a bit complex; we will clean this up in a next version. There are six different scripts that try to initialize the contents of `/data/domains/example.com`:
  * The git clone from the backup service (will try to initialize an empty git repository)
  * The local data import process (will try to move the data from `/data/import/example.com` into place)
  * The wordpress image (which we used from the wordpress-stackable Dockerfile published by Tutum)
  * The mysql image (which we used from the mysql Dockerfile published by Tutum)
  * The wordpress importer (a one-time systemd task)
  * The mysql importer (a one-time systemd task)
* It might help to remove /data/domains/example.com, /data/runtime/domains/example.com, and /etc/systemd/system/*/*example.com*, and remove the git@bu25:example.com repo from the backup server, make sure /data/import/example.com/TLS/example.com.pem exists, make sure /data/BACKUP_DESTINATION contains git@bu25 (and not core@backup.dev), run `shutdown -r now`, and retry.
* If you're setting up a fresh wordpress site, you have to access the admin panel over http first (e.g. run it with `-p 80:80`), and activate the [https plugin](https://wordpress.org/plugins/wordpress-https/) before it will work behind the haproxy SSL-offloader.


@@ -1,29 +0,0 @@
# Developing Dockerfiles and infrastructure
## Developing Dockerfiles
To develop Dockerfiles, you can use a server that's not serving any live domains, use `docker` locally on your laptop, or use the `vagrant up` instructions to run the infrastructure inside vagrant.
## Developing infrastructure
To develop the infrastructure, create a branch on this repo and specify that branch at the end of the deploy command, for instance:
```bash
sh ./deploy/deploy.sh k4 dev
```
That will deploy a server at whatever IP address "k4" points to in your /etc/hosts, using the "dev" branch of https://github.com/indiehosters/indiehosters.
## Testing new Dockerfiles in the infrastructure
To test the infrastructure with a changed Dockerfile, you need to take several steps:
* Develop the new Dockerfiles as described above at "Developing Dockerfiles"
* When you're happy with the result, publish this new Dockerfile onto the docker hub registry under your own username (e.g. michielbdejong/haproxy-with-http-2.0)
* Now create a branch on the infrastructure repo (e.g. "dev-http-2.0")
* In this branch, grep for the Dockerfile you are updating, and replace its name with the experimental one everywhere (see the sketch after this list):
* the `docker pull` statement in scripts/setup.sh
* the `docker run` statement in the appropriate systemd service file inside unit-files/
* Push the branch to the https://github.com/indiehosters/indiehosters repo (if you don't have access to that, you will have to edit
`deploy/onServer.sh` to use a different repo, to which you do have access).
* Now deploy a server from your experimental infrastructure branch (which references your experimental Docker image), as described above at "Developing infrastructure"
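For instance, using the example image name from above, the replacement could be done with one hypothetical one-liner (review the diff before committing):
```bash
# Swap the image name in setup.sh and the unit files in one pass.
grep -rl 'indiehosters/haproxy' scripts/ unit-files/ \
  | xargs sed -i 's|indiehosters/haproxy|michielbdejong/haproxy-with-http-2.0|g'
```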


@@ -1,87 +0,0 @@
Getting started as an IndieHosters hoster
===========
# Prerequisites
Each IndieHoster is an entirely autonomous operator, without any infrastructural ties to other IndieHosters.
These scripts and docs will help you run and manage servers and services as an IndieHoster, whether you're
certified as a branch of the IndieHosters franchise or not.
# Hoster data
If you're used to working with git as a versioning tool, then it's a good idea to make `hoster-data` and
`billing` into (private!) git repos where you keep track of what you're doing, including e.g. TLS certificates, so
that you can track changes over time, and search the history to resolve mysteries when they occur. You may also use a different
versioning system, or just take weekly and daily backups (but then it's probably a good idea to retain the weeklies for a couple
of years, and even then it will not be as complete as a history in a versioning system).
The hoster-data is about what each one of your servers *should* be doing at this moment.
This is fed into CoreOS (systemd -> etcd -> confd -> docker) to make sure the server actually starts and keeps doing these things,
and also into monitoring, to make sure you get alerted when a server misbehaves.
You probably also want to keep track of Domain Name Registration, Transport
Layer Security, Monitoring, and Domain Name System services which you are probably getting from
third-party service providers, alongside the services which
you run on your own servers.
Note that although it's probably inevitable that you resell DNR and TLS services from some third party, and your monitoring would ideally
also run on a system that's decoupled from your actual servers, you might not be reselling DNS
hosting. If you host DNS for your customers on server-wide bind services that directly read data from files in the per-domain data folders,
then DNS data will be considered user-data for you.
# User data
User data is data owned by one of your users. Which human owns which site is something you can administer
by hand somehow in the `billing` folder.
All user data is *untrusted* from your point of view, it is not owned by you as a hoster,
and users may change it at any time (and then probably contact you for a backup whenever they mess up!). It makes sense to give users
only read-only access to this data by default, and have a big "Are you sure? Warranty will be void!" warning before they can activate
write-access to their own data (and then probably trigger an extra backup run just before allowing them to edit their own raw data).
This is how some operating systems on user devices also deal with this.
But in the end, the user, and not you, owns this data, and they can do with it what they want, at their own risk.
Just like a mailman is not supposed to open and read letters, you also should treat each user's data as a closed envelope
which you never open up, unless in the following cases:
* There may be things you need to import from specific files on there (like a user-supplied TLS certificate or DNS zone)
* When running backups, you sometimes can't avoid seeing some of the modified filenames flying by (depending on the backup software)
* After explicit permission of the user, when this is useful for tech support (e.g. fix a corrupt mysql database for them)
In version 0.1 no user data exists because the TLS cert is part of the hoster-data, and so are the secondary email address to forward
to and the git repository to pull the website content from. We don't need to back up users' websites, because they are already versioned
and backed up in the git repository from which we're pulling it in.
# Backups
Your user-data, hoster-data, and billing folders together contain all the critical data
of your operations as an IndieHoster, from start to finish, so make sure you don't
ever lose it, no matter what calamity may strike. Once a month, put a copy of it on a USB stick, and put that in a physically safe place.
You may give a trusted person an emergency key to your infrastructure, in case you walk under a bus. Think about the risk of data loss and
establish an emergency recovery plan for when, for instance, the hard disk of your laptop or of one of your servers dies.
Make sure you often rsync the live data from each of your servers to somewhere else, and store snapshots of it
regularly. Users *will* contact you sooner or later asking for "the backup from last Tuesday"
and they will expect you to have one.
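One possible shape for such a routine, with illustrative hosts and paths:
```bash
# Pull the live data, then keep a hard-linked snapshot per day.
DATE=$(date +%F)
rsync -az core@k3:/data/domains/ /backups/k3/latest/
cp -al /backups/k3/latest /backups/k3/"$DATE"   # cheap daily snapshot
```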
# Basic digital hygiene
At the same time, be careful who may obtain access to your critical data. Is your laptop really safe? Does the NSA have access to the servers you run?
Someone may plant a Trojan on a computer in an internet cafe from where you access your Facebook account, access your gmail account
for which you use the same password, reset your RackSpace password and restore a backup from your Cloud Files to somewhere else.
Make a diagram of how your laptop talks to your USB sticks and your servers. Then make a diagram of the services you use and to which
email addresses they send password reset emails. Draw a perimeter of trust in both diagrams, and start taking some basic measures to
keep your daily operations secure.
Don't mix accounts and email addresses which you may
use from other computers, and keep your IndieHosters passwords and accounts separate from your other passwords and accounts, and reset
them every few months. It might even
make sense to dual-boot your laptop or boot from a live disk which resets on boot to make sure everything you do with IndieHosters data
is done in a sterile environment. Ubuntu has a 'guest account' option on the login screen which may also be handy for this.
Also: lock your screen when walking away from your laptop, and think about what someone could do with it if they were to steal your bag,
or your smartphone.
# Do I have to use this?
As an IndieHoster you can of course use any infrastructure and scripts you want, as long as it doesn't change the format of each user-data folder, so that
your customers can still migrate at will between you and other IndieHosters. However, you might find some of the scripts in this repo
helpful at some point, and we hope you contribute any features you add to it back upstream to this repo.
Thanks for taking the time to read through these general considerations - the next topic is [deploying a server](deploying-a-server.md)! :)


@@ -1,40 +0,0 @@
# IndieHosters migration format
## Deprecated
Deprecated by the [web app migration procedure](../proc/webapp.md)
## Version 0.2.2 (deprecated)
When a user exports their data for domain.com, they get a zip or tar file that contains different files, depending on which application is
running on their domain:
### If using the 'static' application
* TLS/domain.com.pem - Concatenation of the unencrypted private and public key of the TLS certificate, and intermediate CA cert if applicable.
* static/www-content - static content to be placed in the web root
### If using the 'static-git' application
* TLS/domain.com.pem - Concatenation of the unencrypted private and public key of the TLS certificate, and intermediate CA cert if applicable.
* static-git/GITURL - git url to pull the static website content from
### If using the 'WordPress' application
* TLS/domain.com.pem - Concatenation of the unencrypted private and public key of the TLS certificate, and intermediate CA cert if applicable.
* mysql/dump.sql - the dump of all their MySQL databases
* mysql/.env - contains the MySQL password
* wordpress/.env - contains the MySQL password
* wordpress/login.txt - username and password for the WordPress admin panel
* wordpress/.htaccess - htaccess file for WordPress
* wordpress/wp-content - php files to be placed in the web root
### If using the 'Known' application
* TLS/domain.com.pem - Concatenation of the unencrypted private and public key of the TLS certificate, and intermediate CA cert if applicable.
* mysql/dump.sql - the dump of all their MySQL databases
* mysql/.env - contains the MySQL password
* known/ - php files to be placed in the web root
* known/.env - contains the MySQL password
* known/login.txt - email address and password for the Known admin panel


@@ -1,31 +0,0 @@
# Migration procedure version 0.3
The IndieHosters network aims to allow users to easily migrate their hosted services from one hosting provider to another.
To this goal, we describe a number of services which a hosting provider may offer, and a migration procedure for each of these.
In this document, we will say the "user" migrates "services" from the "old hoster" to the "new hoster".
We distinguish two types of migration: full migrations, and partial migrations. A full migration includes the migration of
the domain name registration, DNS hosting, and all other hosted applications from the old hoster to the new hoster, and the user
will no longer be a customer of old hoster once the full migration is complete.
In a partial migration, only some services are migrated, and others are not. For instance, the IndieHosters product "Known hosting 0.3"
consists of:
* a domain name registration at a well-known registrar
* DNS hosting
* email forwarding
* a redirect from http to https on port 80
* a TLS certificate on port 443
* version 0.6.5-mysql of the Known application running behind that
If the old hoster offers this product, but the new hoster does not offer email forwarding, then only a partial migration is
possible. The user will then have to accept that their email forwarding will stop working. Presumably, the user is OK with
that, since they picked the new hoster themselves. But it's worth mentioning that this is then only a partial migration.
Migration procedures:
* [Domain name registration](../proc/dnr.md)
* [DNS hosting](../proc/dns.md)
* [Email forwarding](../proc/email.md)
* [TLS certificate](../proc/tls.md)
* [Web application](../proc/webapp.md)


@@ -1,29 +0,0 @@
# Migration procedure version 0.3
## Domain name registration
How to migrate a domain name from one hosting provider to another depends on the extension, and even for a given extension, there
are several possibilities. In version 0.3 of this migration procedure, we will only consider one basic case, which is quite easy
to deal with:
* the domain name registration is in an account at a well-known registrar (e.g. NameCheap)
* this registrar account is under control of the old hoster (not of the user directly)
* the new hoster also has an account at this same well-known registrar, or is willing to create one
* the registrar offers a "Transfer to another account" option
The migration process is then as follows:
* user has a service of type "domain name registration" with old hoster. The registrar is well-known and all the points above apply
* old hoster is listed in the IndieHosters migration network as supporting emigrations with type 'domain name registration'
* new hoster is listed in the IndieHosters migration network as supporting immigrations with
  * type: 'domain name registration'
  * registrar: 'NameCheap' (or whichever well-known registrar)
* user contacts old hoster, stating clearly and unmistakably that they want to migrate to new hoster
* old hoster contacts new hoster to:
  * confirm they will accept the migration
  * agree on compensation for registration fee for months left until the domain is up for renewal at registrar
  * agree on possible transfer of user's prepaid credit, e.g. if they were paying yearly at old hoster
  * double check the new hoster's account identifier at the well-known registrar
* old hoster transfers the domain name registration into the new hoster's account at the well-known registrar
* old hoster notifies new hoster and user that this has been done, to reconfirm to the user what the next upcoming renewal date
is for the domain name registration, and if any account credit was transferred


@@ -1,6 +0,0 @@
# Migration procedure version 0.3
### DNS hosting
The migration of DNS hosting will usually happen automatically when transferring a domain name registration and/or other hosted
services. However, if the user had custom DNS records, then these may be transferred as a text file.


@@ -1,5 +0,0 @@
# Migration procedure version 0.3
### Email forwarding
The old hoster tells the new hoster what the user's forwarding address is.


@@ -1,15 +0,0 @@
# Migration procedure version 0.3
## TLS certificate
### Without a passphrase
The old hoster sends the certificate to the new hoster over a secure channel, as one or more .pem files.
### With a passphrase
The old hoster sends the certificate to the new hoster as one or more .pem files, where the .pem file containing the private key is
encrypted with a passphrase.
The old hoster sends the passphrase over a different and secure medium, in an unrelated message. For instance, if the .pem files were sent via
scp, the passphrase may be sent via PGP-encrypted email.
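For instance, the private key can be encrypted with a passphrase using openssl before it is sent (filenames illustrative; openssl prompts for the passphrase):
```bash
# Produce a passphrase-protected copy of the private-key .pem.
openssl rsa -aes256 -in example.com.key.pem -out example.com.key.encrypted.pem
```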


@@ -1,47 +0,0 @@
# Migration procedure version 0.3
## Web application (simplistic procedure)
* The old hoster puts the site in read-only mode by changing the permissions of the database user to read-only
* The old hoster creates the migration archive as per the IndieHosters migration format (see below)
* The old hoster sends the migration archive to the new hoster
* The new hoster imports the migration archive
* Once DNR, DNS, and TLS have also been migrated, the old hoster terminates the service.
## Web application (advanced procedure)
* The TLS certificate is sent ahead first
* The old hoster programmatically creates the migration archive, and immediately *posts* it to the new hoster via a webhook
* The webhook programmatically imports the migration archive, and returns the IP address
* The old hoster programmatically configures the new hoster's public IP address into their load balancer
* The old hoster's load balancer now forwards (at the TCP level) all traffic to this new IP address
* Once DNR and DNS transfer are complete, the old hoster terminates the TCP forwarding service.
## IndieHosters migration format, version 0.3
### General
An IndieHosters migration archive is a directory structure (probably packaged up as a tar file or zip file).
There should be an 'indiehosters.json' file in the root of the archive. It should contain at least the following fields:
* format: the URL of this spec (probably https://indiehosters.net/spec/0.3)
* application: a string, which determines what the rest of the folder contents should be imported into.
### Known
When migrating a Known application, the 'indiehosters.json' file should furthermore contain the following fields:
* application: 'known'
* version: the version of Known as a string, for instance '0.6.5'
* database:
  * engine: the database engine used, either 'mysql' or 'mongodb'
  * name: the database name inside the dump file, for instance 'known'
  * file: the database dump file inside the archive, for instance 'dump.sql'
* uploads: the uploads folder name inside the archive, for instance 'uploads/'
* plugins: the folder with any non-standard plugins, for instance 'plugins/'
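Putting the fields above together, a hypothetical `indiehosters.json` for a Known migration could look like this (all concrete values are illustrative):
```bash
# Write an example migration manifest into the root of the archive.
cat > indiehosters.json <<'EOF'
{
  "format": "https://indiehosters.net/spec/0.3",
  "application": "known",
  "version": "0.6.5",
  "database": {
    "engine": "mysql",
    "name": "known",
    "file": "dump.sql"
  },
  "uploads": "uploads/",
  "plugins": "plugins/"
}
EOF
```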
### WordPress
(to be determined)