diff --git a/.forgejo/workflows/playing-around.yaml b/.forgejo/workflows/playing-around.yaml deleted file mode 100644 index 8dff946..0000000 --- a/.forgejo/workflows/playing-around.yaml +++ /dev/null @@ -1,63 +0,0 @@ -on: - push: - branches: - - stage - - prod - -jobs: - build: - runs-on: docker - #environment: ${{ env.FORGEJO_REF_NAME }} - - container: - image: node:22 - - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Cache node modules - uses: actions/cache@v4 - with: - path: ~/.npm - key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }} - restore-keys: | - ${{ runner.os }}-node- - - - name: Install dependencies - run: npm ci - - - name: Build Astro project - run: npm run build - - - name: Deploy dist to dist-${{ env.FORGEJO_REF_NAME }} - run: | - git config user.name "forgejo-actions[bot]" - git config user.email "forgejo-actions[bot]@users.noreply.local" - - git checkout --orphan temp - git rm -rf . - - git add dist - git commit -m "Update dist for ${{ env.FORGEJO_REF_NAME }}" - - git push \ - "https://x-access-token:${{ secrets.FORGEJO_TOKEN }}@git.workaround.org/${{ github.repository }}.git" \ - temp:dist-${{ env.FORGEJO_REF_NAME }} \ - --force - - - name: Trigger deployment webhook - env: - DEPLOY_WEBHOOK: ${{ secrets.COOLIFY_DEPLOY_WEBHOOK }} - DEPLOY_TOKEN: ${{ secrets.COOLIFY_DEPLOY_TOKEN }} - run: | - if [ "${{ env.FORGEJO_REF_NAME }}" = "stage" ]; then - DEPLOY_WEBHOOK="${{ secrets.COOLIFY_DEPLOY_WEBHOOK_STAGE }}" - DEPLOY_TOKEN="${{ secrets.COOLIFY_DEPLOY_TOKEN_STAGE }}" - else - DEPLOY_WEBHOOK="${{ secrets.COOLIFY_DEPLOY_WEBHOOK_PROD }}" - DEPLOY_TOKEN="${{ secrets.COOLIFY_DEPLOY_TOKEN_PROD }}" - fi - - curl -v "$DEPLOY_WEBHOOK" \ - --header "Authorization: Bearer $DEPLOY_TOKEN" diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 1bfc5f2..0000000 --- a/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# build output -dist/ -# generated types -.astro/ - -# dependencies -node_modules/ - -# logs -npm-debug.log* -yarn-debug.log* -yarn-error.log* -pnpm-debug.log* - - -# environment variables -.env -.env.production - -# macOS-specific files -.DS_Store -drawio/.$big-picture.drawio.bkp diff --git a/.prettierrc b/.prettierrc deleted file mode 100644 index d7f548d..0000000 --- a/.prettierrc +++ /dev/null @@ -1,6 +0,0 @@ -{ - "tabWidth": 2, - "useTabs": false, - "printWidth": 120, - "proseWrap": "always" -} \ No newline at end of file diff --git a/.vscode/extensions.json b/.vscode/extensions.json deleted file mode 100644 index 22a1505..0000000 --- a/.vscode/extensions.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "recommendations": ["astro-build.astro-vscode"], - "unwantedRecommendations": [] -} diff --git a/.vscode/launch.json b/.vscode/launch.json deleted file mode 100644 index d642209..0000000 --- a/.vscode/launch.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "version": "0.2.0", - "configurations": [ - { - "command": "./node_modules/.bin/astro dev", - "name": "Development server", - "request": "launch", - "type": "node-terminal" - } - ] -} diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index afc122c..0000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "editor.defaultFormatter": "esbenp.prettier-vscode", - "[javascript]": { - "editor.defaultFormatter": "esbenp.prettier-vscode" - }, - "[astro]": { - "editor.defaultFormatter": "astro-build.astro-vscode" - } -} diff --git a/README.md b/README.md deleted file mode 100644 index 26e60e8..0000000 --- a/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# 
ISPmail guide - -This repository contains the ISPmail guide as found on workaround.org. - -The web site is built the static site generator [Starlight](https://starlight.astro.build) which creates beautiful -documentation with table of contents, a search function and customizable widgets. - -The illustrations were created using DrawIO. The flip-book-style diagrams on the _big picture_ page were created using: - -- https://github.com/pascal-brand38/astro-splide -- https://pascal-brand38.github.io/astro-dev/packages/astro-splide/ -- https://splidejs.com/guides/options/ - -## Contributing - -Feel free to clone this repository (the _prod_ branch) and make changes. You will need NodeJS to create the HTML from -the sources: - -```sh -apt install nodejs -npm i -npm run dev -``` - -Merge requests are always welcome. Typo fixes and rephrasing are always welcome because I am not a native english -speaker. If you want to contribute or change larger sections please talk to me first. - -Or just create an _issue_ here on Github so that I know what needs fixing. - -…[Christoph](mailto:ispmail@christoph-haas.de) diff --git a/astro.config.mjs b/astro.config.mjs deleted file mode 100644 index 38ba6f9..0000000 --- a/astro.config.mjs +++ /dev/null @@ -1,84 +0,0 @@ -// @ts-check -import { defineConfig } from "astro/config"; -import starlight from "@astrojs/starlight"; -import sitemap from "@astrojs/sitemap"; -import remarkSmartypants from "remark-smartypants"; - -// https://astro.build/config -export default defineConfig({ - site: "https://workaround.org", - redirects: { - "/ispmail-trxie/imap/": "/ispmail-trixie/imap/", - "/ispmail-trxie/anti-spoofing-dkim-spf": "/ispmail-trixie/anti-spoofing-dkim-spf", - "/ispmail-trxie/catch-all": "/ispmail-trixie/catch-all", - "/ispmail-trxie/quotas": "/ispmail-trixie/quotas", - "/ispmail-trxie/going-live": "/ispmail-trixie/going-live", - }, - integrations: [ - starlight({ - head: [ - { - tag: "script", - attrs: { - src: "https://rybbit.workaround.org/api/script.js", - "data-site-id": "1", - async: true, - defer: true, - }, - }, - ], - expressiveCode: { - frames: { - removeCommentsWhenCopyingTerminalFrames: false, // keep the commented lines when copying shell snippets - }, - }, - lastUpdated: true, - title: "ISPmail Guide", - social: [ - { icon: "seti:git", label: "Git", href: "https://git.workaround.org/chaas/ispmail-workaround-org" }, - { icon: "matrix", label: "Matrix", href: "https://riot.im/app/#/room/#ispmail:matrix.org" }, - { - icon: "rss", - label: "Feed", - href: "https://comentario.workaround.org/api/rss/comments?domain=0f111a27-fbfa-48af-8beb-ab12e612d92f", - }, - ], - // https://expressive-code.com/key-features/word-wrap/#configuration - components: { - Footer: "./src/components/Footer.astro", - Banner: "./src/components/Banner.astro", - }, - customCss: ["./src/styles/custom.css"], - sidebar: [ - { - label: "ISPmail for Debian 13", - // slug: "ispmail-trixie", - autogenerate: { directory: "ispmail-trixie" }, - }, - { - label: "ISPmail for Debian 12", - // slug: "ispmail-bookworm", - autogenerate: { directory: "ispmail-bookworm" }, - }, - { - label: "Misc articles", - autogenerate: { directory: "articles" }, - }, - ], - logo: { - light: "./src/assets/logo.svg", - dark: "./src/assets/logo-dark.svg", - replacesTitle: true, - }, - }), - sitemap(), - ], - - markdown: { - remarkPlugins: [ - // remove the substitution of -- to – - // @ts-ignore - [remarkSmartypants, { dashes: false }], - ], - }, -}); diff --git a/dist/404.html b/dist/404.html new file mode 100644 
index 0000000..4020ebc --- /dev/null +++ b/dist/404.html @@ -0,0 +1,56 @@ +
Reference pages are ideal for outlining how things work in terse and clear terms. Less concerned with telling a story or +addressing a specific use case, they should give a comprehensive outline of what you’re documenting.
+How long has it been since you last backed up your Linux system? Let me guess – you tried various backup systems and hate all of them? Let me show you how to use rsnapshot and an inexpensive external USB drive to back up precious data easily.
+ +I'm a sysadmin in my day job. How could I not care about half-decent backups at home? For years I have been running Bacula which served me half well. An old AIT drive, a couple of tapes, my trusted Adaptec 2940 card and a PostgreSQL-driven Bacula installation worked moderately well but became increasingly cumbersome and fragile. The server (a retired desktop computer) crashed randomly during backups (some ancient SCSI component started to die). Or I forgot to change one of the three needed tapes (as I lacked a changer) in time so that the backup job timeout killed the running backup. Then I had to declare the tapes as free again because cancelling a backup doesn't make Bacula free the tapes on its own. Or I played with PostgreSQL and inadvertently killed the director process. So maybe one backup every two weeks really ran through. And restoring files took minutes until the database finally got me the list of files. Finally one of my tapes got stuck in the drive and the drive refused to eject it. Of course the emergency ejection screw did nothing. Enough was enough. So I thought I could use an external USB drive instead of tapes but Bacula did not actually support that. An ancient shell script (vchanger) was supposed to emulate a tape changer with USB disk drives. I was too far off from KISS. What in theory sounded like decent hard- and software failed me.
+ +I decided to spend 50€ (the price of one AIT tape) on a 500 GB external USB disk drive and learn about rsnapshot. And in no time I had a simple backup running where I didn’t have to worry about a huge index database and could instantly access any files backed up. What I did:
+After plugging in the disk for the first time I ran “dmesg” to find out which device the disk was occupying:
+[219991.641225] scsi 12:0:0:0: Direct-Access Seagate Portable 0130 PQ: 0 ANSI: 4
[219991.641765] sd 12:0:0:0: Attached scsi generic sg4 type 0
[219991.642462] sd 12:0:0:0: [sdc] 976773168 512-byte logical blocks: (500 GB/465 GiB)
[219991.643080] sd 12:0:0:0: [sdc] Write Protect is off
[219991.643083] sd 12:0:0:0: [sdc] Mode Sense: 2f 08 00 00
[219991.643085] sd 12:0:0:0: [sdc] Assuming drive cache: write through
[219991.644964] sd 12:0:0:0: [sdc] Assuming drive cache: write through
[219991.646599] sdc: sdc1
[219991.694834] sd 12:0:0:0: [sdc] Assuming drive cache: write through
[219991.695212] sd 12:0:0:0: [sdc] Attached SCSI disk

So the disk was at /dev/sdc1. I formatted the disk using
+mkfs.ext4 /dev/sdc1

and read the UUID (a unique identifier assigned to each disk while formatting) using
+tune2fs -l /dev/sdc1 | grep UUID

which gave me
+Filesystem UUID: 44449456-2b13-47df-bfcf-9c5eedf3b287

You will want to have your USB drive mounted automatically when you plug it in and use it. On a server there is no plug-and-play like that by default. But the "autofs" software does that well. Install it:
+apt-get install autofs

Edit the /etc/auto.master file and add this line:
+/var/autofs/removable /etc/auto.usbdrive --timeout=2,sync,nodev,nosuid

Also create the /etc/auto.usbdrive file (that you just pointed to) and add this line to it:
+usbdrive -fstype=auto UUID=44449456-2b13-47df-bfcf-9c5eedf3b287

And finally restart the autofs process:
+/etc/init.d/autofs restart

This does not yet mount the disk though. But if you change into the /var/autofs/removable/usbdrive directory then autofs will look for a disk with the given UUID and mount it there on-the-fly. Try it:
+cd /var/autofs/removable/usbdrive

You may notice a short delay while autofs mounts the disk. Then you should find yourself on the mounted USB drive. Type "df ." to see the filesystem. It should look like:
+Filesystem            Size  Used Avail Use% Mounted on
/dev/sdc1             459G  198M  435G   1% /var/autofs/removable/usbdrive

Install the rsnapshot package:
+apt-get install rsnapshot

The default configuration file is located at /etc/rsnapshot.conf. Edit it. But beware that all elements have to be separated by actual tabs. I'm using VIM and in my default settings I used "expandtab" which automatically turned my tabs into spaces. You don't want that.
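If you are not sure whether your editor silently converted the tabs to spaces, one quick way to check is to make the whitespace visible. The "cat -A" command prints every tab as ^I:

```
# every snapshot_root/retain/backup/exclude line should show ^I between the fields, not spaces
cat -A /etc/rsnapshot.conf | grep -E '^(snapshot_root|retain|backup|exclude)'
```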
+In that file configure “snapshot_root” to point to your autofs directory:
+snapshot_root /var/autofs/removable/usbdrive

Unless you are happy with the default backup times you will want to change the "interval" section. Make sure that you edit /etc/cron.d/rsnapshot, too, or else rsnapshot won't run automatically at all. I found the intervals a bit tricky but the "man rsnapshot" manpage helped me understand them. You can use different names for the different frequencies of backups you run. But names like "hourly" or "daily" do not mean anything by themselves. rsnapshot doesn't have any association of "hourly" with 60 minutes for example.
+My configuration reads:
+retain daily 7
retain weekly 4

This is much less magical than you might imagine. It just means that if you run "rsnapshot daily" then it will create backups called daily.0 to daily.6 and rotate the numbers on every rsnapshot run. You won't have more than 7 "daily" directories though, which is what you specify in the "retain" line. And you need to make sure that you call "rsnapshot daily" through a crontab. As you can imagine I'm running 7 daily backups (up to one week) and 4 weekly backups (up to one month). So my /etc/cron.d/rsnapshot file has these lines:
+30 2 * * *   root   /usr/bin/rsnapshot daily
0 4 * * 1    root   /usr/bin/rsnapshot weekly

Are you unfamiliar with crontab entries? It's quite easy. You specify the times when you want a certain command run. The columns stand for minute, hour, day of month, month and day of the week. So my daily job runs at 2:30 at night every day. And the weekly job runs at 4:00 at night every Monday. See "man 5 crontab" for a reference.
+Back to your /etc/rsnapshot.conf. Define which directories you want to back up and which you want to have excluded. This is what I use:
+backup /var/ myserver/
backup /home/ myserver/
backup /etc/ myserver/
exclude /home/*/tmp/
exclude /home/*/.local/share/Trash/
exclude /home/*/.cache/
exclude /var/lib/mysql/
exclude /var/lib/postgresql/
exclude /var/tmp/
exclude /var/log/
exclude /var/cache/apt/archives/

Of course you can decide to back up your entire server and just exclude evil mount points like /mnt, /dev, /sys, /media and /proc. But in a case of total emergency I'd rather reinstall Debian, install the packages and restore the files. I'm excluding the database directories for MySQL and PostgreSQL here because I cannot just copy the files but need to run a proper backup.
+What I also do: back up a list of installed Debian packages in case I need to reinstall:
+backup_script /usr/bin/dpkg --get-selections > packages.txt installed-packages/

And I back up the databases:
+backup_script /usr/bin/mysqldump --opt --databases mailserver mysql | gzip > mysqldump mysql/

I have the MySQL root password stored in /root/.my.cnf so I don't need to mention it here.
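For reference, such a /root/.my.cnf looks roughly like this (the password is of course a placeholder) so that mysqldump can authenticate without putting the password on the command line:

```
[client]
user = root
password = your-mysql-root-password
```

Keep it readable by root only, e.g. chmod 600 /root/.my.cnf.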
+To make sure your configuration is correct run
+rsnapshot configtest

Fix any errors until rsnapshot is happy and shows "Syntax OK".
+You can simulate a daily backup by running:
+rsnapshot -t daily

It will print out the commands that rsnapshot would run.
+If you want to access the files that rsnapshot backed up this is as simple as could be. In /var/autofs/removable/usbdrive/… you will find directories for hourly, daily and weekly backups. Since rsnapshot cleverly uses hardlinks unchanged files barely take up any space. You can just browse around in the respective subdirectories and access your files.
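Restoring a file is therefore just a copy. A minimal example (the user name and file are made up; pick whichever daily.N or weekly.N directory holds the version you want):

```
cp -a /var/autofs/removable/usbdrive/daily.1/myserver/home/john/.bashrc /home/john/.bashrc
```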
+Since the backups are just files on a disk, you can even buy a second external USB disk drive and put the first disk off-site in case your house burns down, gets burgled or your cat pees on the first disk.
+Of course if you lost that one external disk then all your backups would be gone. So I suggest you get a second external disk and swap them once a month. Depending on your paranoia you can lock the off-site disk in your bank's deposit box or give it to your mother-in-law. As opposed to other backup solutions you can just use the second disk without much configuration. Make sure autofs knows about it and plug it in.
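A sketch of preparing that second disk (the device name /dev/sdd1 is a placeholder, check dmesg after plugging it in):

```
mkfs.ext4 /dev/sdd1
tune2fs -l /dev/sdd1 | grep UUID
```

Then either swap the UUID in the existing /etc/auto.usbdrive entry whenever you rotate disks, or add a second map entry with its own key for the new UUID.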
+Kudos to Jochen R. who recommended rsnapshot to me.
For less experienced Squid administrators the concept of ACLs can be confusing at first. But they offer a great way of controlling who is allowed to access which web pages when.
+ +First you need to define certain criteria like accesses from the marketing department, accesses to google.com or the need to authenticate. There are certain types of ACLs for that purpose. The complete list of ACL types can be found at http://www.visolve.com/squid/squid24s1/access_controls.php
+The syntax of an acl is:
+acl name type definition1 definition2 definition3 ...

Examples:
+acl accesses_to_google dstdomain .google.com
acl accesses_to_search_engines dstdomain .yahoo.com .google.com .vivisimo.com
acl accesses_from_marketing_department src 10.52.0.0/16
acl need_to_authenticate proxy_auth

You can also use lists of definitions that are stored in files on your hard disk. Let's assume you have a list of search engine URLs that you want to allow:
+/etc/squid/search-engines-urls.txt:

.google.com
.yahoo.com
.altavista.com
.vivisimo.com

Then the ACL for that file would look like:
+acl accesses_to_search_engines dstdomain "/etc/squid/search-engines-urls.txt"

The quotes are important here to tell Squid it needs to look up definitions in that file.
+Defining the ACLs alone does not actually block anything – it's just a definition. ACLs can be used in various places of your squid.conf. The most useful feature is the http_access statement. It works similarly to the way a firewall handles rules. For each request that Squid receives it will look through all the http_access statements in order until it finds a line that matches. It then either accepts or denies the request depending on your setting. The remaining rules are ignored.
+The general syntax of an http_access line is:
+http_access (allow|deny) acl1 acl2 acl3 ...

Example:
+http_access allow accesses_from_admins
http_access deny accesses_to_porn_urls
http_access allow accesses_during_lunchtime
http_access deny all

This would allow access from the admins (whatever that ACL looks like – probably a src ACL pointing to the subnet the admin workstations are in). For everyone else it will deny accesses to porn URLs. Then it would allow accesses from everyone to every web site during lunch time. And finally all other accesses would be denied.
+Often you need to combine ACLs. Let's say you want to allow access to google.com only for the back office. This combines two ACLs with an AND. It would look like this:
+http_access allow accesses_to_google.com accesses_from_back_office

If you wanted to use an OR instead and say either accesses from the back office or accesses to google.com are allowed, the lines would look like this:
+http_access allow accesses_to_google.com
http_access allow accesses_from_back_office

To summarize: AND means putting the conditions in one line. OR means using separate lines.
+By default when you deny access the user gets the error page that is stored in the ERR_ACCESS_DENIED file. But luckily you can define your own custom error pages and display them when you deny certain accesses. A simple example:
+acl google dstdomain google.com
deny_info error-google google
http_access deny google

Put an error page into the directory where the HTML files are stored (look for error_directory in your squid.conf) and name it error-google. If the user tries to access www.google.com the access is denied and your error page is shown.
Careful when you combine ACLs on an http_access line. Example:
+acl google dstdomain google.com
acl admin src 10.0.5.16
deny_info error-google google
http_access deny admin google

This will deny access only for the user from the IP address 10.0.5.16 when www.google.com is accessed. As you can see I have combined the ACLs admin and google. In such a combination the last ACL in the line is taken into account for lookups of deny_info. So it's important that you define a deny_info for the google ACL.
Usually when a user is authenticated at the proxy you cannot “log out” and re-authenticate. The user has to close and re-open the browser windows to be able to re-login at the proxy. A simple configuration will probably look like this:
+acl my_auth proxy_auth REQUIRED
http_access allow my_auth
http_access deny all

Now there is a tricky change that was introduced in Squid 2.5.10. It allows you to control when the user is prompted to authenticate. It is now possible to force the user to re-authenticate although the username and password are still correct. Example configuration:
+acl my_auth proxy_auth REQUIRED
acl google dstdomain .google.com
http_access allow my_auth
http_access deny google my_auth
http_access deny all

In this case if the user requests www.google.com then the second http_access line matches and triggers re-authentication. Remember: it's always the last ACL on an http_access line that "matches". If the matching ACL has to do with authentication a re-authentication is triggered. If you didn't want that you would need to switch the order of the ACLs so that you get http_access deny my_auth google.
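That reordered line, with the authentication ACL no longer in the last position, would simply be:

```
http_access deny my_auth google
```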
You might also run into an authentication loop if you are not careful. Assume that you use LDAP group lookups and want to deny access based on an LDAP group (e.g. only members of a certain LDAP group are allowed to reach certain web sites). In this case you may trigger re-authentication although you don’t intend to. This config is likely wrong for you:
+acl ldap-auth proxy_auth REQUIRED
acl ldapgroup-allowed external LDAP_group PROXY_ALLOWED
+http_access deny !ldap-auth
http_access deny !ldapgroup-allowed
http_access allow all

The second http_access line would force the user to re-authenticate time and again if he/she is not a member of the PROXY_ALLOWED group. This is perhaps not what you want. You rather wanted to deny access to non-members. So you need to rewrite this http_access line so that an ACL matches that has nothing to do with authentication. This is the correct example:
+acl ldap-auth proxy_auth REQUIRED
acl ldapgroup-allowed external LDAP_group PROXY_ALLOWED
acl dummy src 0.0.0.0/0.0.0.0
+http_access deny !ldap-auth
http_access deny !ldapgroup-allowed dummy
http_access allow all

This way the second http_access line still matches. But it's the dummy ACL which is now last in the line. Since dummy is a static ACL (that always matches) and has nothing to do with authentication you will find that the access is just denied.
LVM is a neat feature that some system administrators still shy away from. But it’s really not that hard to learn. And these are some awesome features you get:
+LVM is just a thin layer of software between the disks on your system and the partitions. On a Debian system you just “apt install lvm2” and you are ready to go.
+Three terms are commonly used:

- PV (physical volume): a disk or partition that LVM uses as raw storage
- VG (volume group): a pool made up of one or more PVs
- LV (logical volume): a fraction of a VG that you put a file system on
+A diagram is worth a thousand words so let’s use an illustration:
+
On the left you see your three hard disks. Your computer has found them and made them accessible as /dev/sda, /dev/sdb and /dev/sdc. Usually you would create partitions on them (e.g. using cfdisk), put a file system on the partitions (mkfs) and mount them into your file system (mount /dev/sda1 /home).
+But this time we create a volume group from it. So first we turn the disks into PVs so that LVM recognizes them:
+pvcreate /dev/sda
pvcreate /dev/sdb
pvcreate /dev/sdc

All this does is write a little meta-data onto each disk.
+You can use the "pvs" command to list the PVs you have just created.
+When you take a close look at a PV (e.g. with the "pvdisplay" command) you will notice terms like "PE size", "Free PE" or "Allocated PE". PE is short for physical extent. Such an extent is the smallest data size that LVM handles. By default it's set to 4 MiB. That means you can grow or shrink a logical volume only in steps of 4 MiB. Using "lvextend" you can specify the number of extents using "-l …" (lowercase L) instead of the size "-L …" (uppercase L). Further down on this page you will find a tip on replacing a small hard disk with a larger one. That essentially moves the extents from one disk to another.
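A quick illustration of the difference, using the lvhome volume that gets created further down this page:

```
lvextend -L +1G /dev/vg1/lvhome    # grow by one gigabyte
lvextend -l +10 /dev/vg1/lvhome    # grow by 10 physical extents = 40 MiB at the default PE size
```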
+Next we create a new volume group (VG) from these three disks:
+vgcreate vg1 /dev/sda /dev/sdb /dev/sdc

Now you have a VG called "vg1" consisting of the three disks. The "vgs" command shows you an overview:
+VG   #PV #LV #SN Attr   VSize VFree
vg1    3   0   0 wz--n-  <6t   <6t

So you see that there is one VG called "vg1" which consists of 3 PVs (disks). And so far no LVs are using it. We will get to that in a moment. Its size is roughly 6 TiB and all of that is free to use.
+Using the “vgdisplay” command shows you even more information about it.
+The final step is to bite chunks out of the VG. Check out the diagram above. We want a partition for “/home” with a size of 100 GiB. So the command to create your LV is:
+lvcreate -n lvhome -L 100G vg1

Pretty simple. The "-n" parameter sets the name of the new LV. "-L" is the size you want to use. And "vg1" is the name of the VG you want to cut a piece out of.
+The “lvs” command will show you an overview of your LVs.
+LV     VG  Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
lvhome vg1 -wi-ao---- 100,00g

There is also an "lvdisplay" command showing more verbose information about the LV.
+Finally we have something to put a file system on. You have probably used partitions on devices like /dev/sda1 before. But now you are using LVM. And the device for your “lvhome” is “/dev/vg1/lvhome”. Right, it’s “dev” + VG + LV. You could also use “/dev/mapper/vg1-lvhome”.
+Put an EXT4 file system onto it:
+mkfs.ext4 /dev/vg1/lvhome

And mount that file system:
+mount /dev/vg1/lvhome /home

There are PVs (disks), VGs (groups of disks) and LVs (fractions of a VG).
+To use LVM first turn disks into PVs (pvcreate), then join them to a VG (vgcreate), then take a fraction of that (lvcreate) and finally create a file system on that (/dev/vgfoo/lvbar).
+Every part has a list and a display command. These are:

- PV: "pvs" and "pvdisplay"
- VG: "vgs" and "vgdisplay"
- LV: "lvs" and "lvdisplay"
+You may not be impressed yet. LVM just made your life more complicated. Of course there is a reason for it because now begins the fun part. These are some common features:
+Oh, no. Your /home partition is 99% full? With LVM this is easy to solve. If you have free space left in your VG (check with "vgs") you can just extend the logical volume. No need to unmount anything. No downtime. Let's give the partition 20 GiB more space:
+lvextend -L +20G -r /dev/vg1/lvhome

The "-r" parameter not only extends the LV but also the file system that lives on top. That allows you to enlarge a partition without taking it offline. This is the neatest feature that LVM delivers in my opinion.
+If your volume group is also out of space then you could add another disk (physical volume) and use “pvcreate” and “vgextend” to enlarge it.
+No problem either. Let’s assume that one of your disks (physical volumes) on /dev/sda was 2 TB and you just bought a shiny new 10 TB disk (found on /dev/sdg). Now you want to move the data over to the new disk. As usual you need to turn /dev/sdg into a PV:
+pvcreate /dev/sdg

And now you can just move all blocks (aka physical extents) to the new disk:
+pvmove /dev/sda /dev/sdg

And finally you can remove the PV from your VG:
+vgreduce vg1 /dev/sda

By the way: once a disk is a PV it doesn't matter whether your system finds it on /dev/sdb, /dev/sdc or any other device. As long as all the necessary PVs are found somewhere the VG will work. However, if your boot sector was written to /dev/sda you may need to re-install it when you replace that disk.
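If you do boot from one of these disks, and assuming a classic BIOS/MBR setup with GRUB, reinstalling the boot loader onto the new disk is a one-liner:

```
grub-install /dev/sdg
```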
+A snapshot is like taking a photo with your camera. You get an image of a situation at a certain point in time. Reality will continue to alter the world but your photo will always show that specific moment. You can still take a pen and draw something on the photo, so it's not read-only. (It used to be read-only in LVM 1.x.) I commonly use this technique to get consistent database snapshots of large MySQL/MariaDB databases.
+Let's just say that you have a huge 1 TiB-sized LV called "lvmysql" that is mounted to /var/lib/mysql. Running a backup of those files takes an hour. And while you back up one file after another the SQL database is accessing the various files making arbitrary changes. Your backup would contain inconsistent garbage. Some files were from minute 5 while others might be from minute 30. Such a backup is unusable.
+Now let’s instead use snapshots. Briefly stop the database and take a snapshot:
+lvcreate -n mysnap -L 20G -s /dev/vg1/lvmysql

Note that we use "lvcreate" to take the /dev/vg1/lvmysql LV and create a new /dev/vg1/mysnap LV. It's just that the latter is a snapshot.
+You can start your database again. With a bit of luck this has just taken a few seconds. And now you have a perfectly consistent copy of the MySQL data directory. You can mount this snapshot anywhere in your file system:
+mount /dev/vg1/mysnap /mnt/mysnap

Now you can take your time and just make a backup of /mnt/mysnap. It won't change.
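Once the backup has finished you would typically unmount the snapshot and remove it again so it stops consuming copy-on-write space:

```
umount /mnt/mysnap
lvremove /dev/vg1/mysnap
```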
+However the magic comes at a price. Have you noticed the "-L 20G" parameter? That does not mean that the snapshot has a size of 20 GiB. After all we started with a 1 TiB LV. So why did we specify a size at all?
+The answer lies in the way that snapshots work. Once you started MySQL again the data directory was changed. LVM needs to provide you with your snapshot but at the same time allow MySQL to continue doing its work. That works through a mechanism called copy-on-write. If the original LV never changed, it would stay identical to the snapshot. If however the files on the LV are changed then LVM needs to keep a copy of the snapshotted state. The more changes you make the more space for those copies you will need. And that's what is meant by "-L 20G". It gives your snapshot a 20 GiB storage area to track the changes.
+The size depends on how much change you expect while you want to use the snapshot. If the backup takes an hour and the database typically changes 100 GiB during that period then you should give the snapshot at least that much space. The "lvs" command shows you how much of that space has been used already. So you should keep the snapshot no longer than needed for a backup. Should you hit the 100% mark then your snapshot becomes unusable and all you can do is remove it. Fortunately that won't affect the original LV. So you won't break your database.
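To keep an eye on it during the backup, the Data% column of "lvs" shows how much of the snapshot's change area is already used:

```
lvs vg1/mysnap
```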
+Another use case for snapshots would be to try out things on the snapshot. And if you like what you did then merge the changes back into the original LV. That can be done using "lvconvert --merge /dev/vg1/mysnap". But I suggest you consult the man page of "lvconvert" before you do that.
+Using LVs for all partitions used to be a problem in the past. Debian created a separate ext2 partition for /boot to make sure the system boots. This has been obsolete for quite a while. You can use LVs everywhere and Debian will happily boot the system.
+ +By default LVM uses RAID-0. That is the RAID level that makes you lose everything if a single disk fails. LVM supports RAID levels 1 and 5 though. Besides the LVM man pages I mainly found this web page describing it.
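As a sketch, a mirrored LV could be created like this (the name is an example; RAID 1 with one mirror needs at least two PVs in the VG):

```
lvcreate --type raid1 -m 1 -n lvimportant -L 50G vg1
```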
-| Which files shall be backed up? | show filesets | I=Included, E=Excluded |
| What’s the server doing? | status dir | |
| What’s the status of a certain job? | status jobid=xx | |
| What’s the client doing? | status client | |
| What’s the streamer doing? | status storage | |
| Anything new? | messages |
| Start a backup | run | …and choose the backup job |
| Label a new tape | label | …and run mount afterwards |
| Last jobs | list jobs | …or list jobid=xx for a specific job |
| Statistics about last jobs | list jobtotal | |
| Which files were backed up? | list files jobid=xx |
| Status | means… |
| T | Terminated normally |
| C | Created but not yet running |
| R | Running |
| B | Blocked |
| E | Terminated in Error |
| e | Non-fatal error |
| f | Fatal error |
| D | Verify Differences |
| A | Canceled by the user |
| F | Waiting on the File daemon |
| S | Waiting on the Storage daemon |
| m | Waiting for a new Volume to be mounted |
| M | Waiting for a Mount |
| s | Waiting for Storage resource |
| j | Waiting for Job resource |
| c | Waiting for Client resource |
| d | Waiting for Maximum jobs |
| t | Waiting for Start Time |
| p | Waiting for higher priority job to finish |
| W | Terminated with warnings |
| Which tapes are in the pool? | list media |
| Remove a tape | delete media |
| Which pools are defined? | list pools |
| Which tapes are/were used for a certain job? | list jobmedia |
| Assign a tape to a certain pool | add |
| Change parameters of a tape | update volume |
| Erase a label on the tape | mt rewind && mt weof && mt rewind |
-postconf virtual_alias_maps=mysql:/etc/postfix/mysql-virtual-alias-maps.cf,mysql:/etc/postfix/mysql-email2email.cf -- -The order of the two mappings is not important here. Postfix will check all ‘cf’ files anyway and merges what it finds. - -You did it! All mappings are set up and the database is generally ready to be filled with domains and users. Make sure that only ‘root’ and the ‘postfix’ user can read the “.cf” files – after all your database password is stored there: -``` -chgrp postfix /etc/postfix/mysql-*.cf -chmod u=rw,g=r,o= /etc/postfix/mysql-*.cf -``` diff --git a/src/content/docs/ispmail-bookworm/120-setting-up-dovecot.mdx b/src/content/docs/ispmail-bookworm/120-setting-up-dovecot.mdx deleted file mode 100644 index a1fd9c7..0000000 --- a/src/content/docs/ispmail-bookworm/120-setting-up-dovecot.mdx +++ /dev/null @@ -1,261 +0,0 @@ ---- -title: Setting up Dovecot -lastUpdated: 2023-10-04 -slug: ispmail-bookworm/setting-up-dovecot -sidebar: - order: 120 ---- - -import { Aside } from "@astrojs/starlight/components"; - -This chapter of our journey leads us to Dovecot – the software that… - -- gets emails destined to your users from Postfix and saves them to disk -- executes user-based _sieve_ filter rules (can be used to e.g. move emails to different folders based on certain - criteria or to send automated vacation responses) -- allows the user to fetch emails using POP3 or IMAP - -Before we get to the actual configuration for security reasons I recommend that you create a new system user that will -own all virtual mailboxes. The following shell commands will create a system group “vmail” with GID (group ID) 5000 and -a system user “vmail” with UID (user ID) 5000. (Make sure that UID and GID are not yet used or choose another – the -number can be anything between 1000 and 65000 that is not yet used): - -``` -groupadd -g 5000 vmail -useradd -g vmail -u 5000 vmail -d /var/vmail -m -``` - -If the /var/vmail directory was already there because you assigned it a dedicated mount point then you should make sure -that the permissions are set correctly: - -``` -chown -R vmail:vmail /var/vmail -``` - -The configuration files for Dovecot are found in `/etc/dovecot/conf.d`. All these files are loaded by Dovecot. This is -done by this magical line at the end of the `/etc/dovecot/dovecot.conf` file: - -``` -!include conf.d/*.conf -``` - -It loads all files in `/etc/dovecot/conf.d/` that end in “.conf” in alphanumerical order. So “10-auth.conf” is loaded -first and “90-sieve-extprograms.conf” is loaded last. The big advantage is that you can edit or replace parts of the -configuration without having to overwrite the entire configuration. The main `/etc/dovecot/dovecot.conf` file does not -require any changes. Those other files in conf.d/ however do. - -## conf.d/ - -### 10-auth.conf - -The most common -[authentication mechanism](https://doc.dovecot.org/configuration_manual/authentication/authentication_mechanisms/#authentication-authentication-mechanisms) -is called _PLAIN_. However if you have Outl\*\*k users then you may need to add the _LOGIN_ mechanism, too.: - -``` -auth_mechanisms = plain login -``` - -These two mechanisms would ask for a password without enforcing encryption to secure the password. But don’t worry. By -default Dovecot sets `disable_plaintext_auth = yes` which ensures that authentication is only accepted over -TLS-encrypted connections. - -At the end of this file you will find various authentication backends that Dovecot ships with. 
By default it will use -system users (those from the /etc/passwd). But we want to use the MariaDB database backend so go ahead and change this -block to: - -``` -#!include auth-system.conf.ext -!include auth-sql.conf.ext -#!include auth-ldap.conf.ext -#!include auth-passwdfile.conf.ext -#!include auth-checkpassword.conf.ext -#!include auth-static.conf.ext -``` - -### 10-mail.conf - -Change the mail_location setting to: - -``` -mail_location = maildir:~/Maildir -``` - -This is the directory where Dovecot will look for the emails of a specific user. The tilde character (~) means the -user’s _home directory_. That does not make sense yet. But further down on this page we will tell Dovecot what the _home -directory_ is supposed to mean. For example `john@example.org` will have his home directory in -/var/vmail/example.org/john. - -Further down in the 10-mail.conf file you will find sections defining the -[namespaces](https://doc.dovecot.org/configuration_manual/namespace/). Those are folder structures that your email -program sees when connecting to the mail server. If you use POP3 you can only access the “inbox” – which is where all -incoming email is stored. Using the IMAP protocol you get access to a hierarchy of folders and subfolders. And you can -even share folders between users. Or use a public folder that can be accessed by anyone – even anonymously. So IMAP is -generally to be preferred. - -Also edit the “mail*plugins” line to enable the \_quota* plugin we will configure later and turn it into: - -``` -mail_plugins = quota -``` - - - -### 10-master.conf - -This configuration file deals with typical service ports like IMAP or POP3. - - - -So most settings are sane here and do not have to be changed. However one change is required in the “service auth” -section because we want Postfix to allow Dovecot as an authentication service. Make it look like this: - -``` -# Postfix smtp-auth -unix_listener /var/spool/postfix/private/auth { - mode = 0660 - user = postfix - group = postfix -} -``` - -Well, Postfix runs in a chroot environment located at /var/spool/postfix. It can't access anything outside of that -directory. So to allow communication with Postfix we tell Dovecot to place a communication socket into that chroot. - -### 10-ssl.conf - -Earlier in this guide you created both a key and a certificate file to encrypt the communication with POP3, IMAPs and -HTTPS between the users and your mail server. You need to tell Dovecot where to find these files: - -``` -ssl\_cert = \ - Ending a line with a backslash (\) means that it is continued on the next line. It keeps the configuration more - readable when it is split over multiple lines. - - -What these lines mean: - -- driver: the kind of database. MariaDB is the same kind as MySQL. -- connect: where to find the MySQL database and how to access it (username, password) -- user_query: an SQL query that returns the user name (=the email address), the quota, the home directory, user ID and - group ID. -- password_query: this SQL query just gets the password hash from the database -- iterate_query: ‘doveadm’ uses this query to get a list of all users. That allows you to use the “doveadm user ‘\*'” - command later. - -The _user_query_ gets several pieces of information from the database. Let’s look at it one by one: - -- email AS user - It gets the the _email_ field from the database which corresponds to the user name. Dovecot expects it in the _user_ - field so we set an alias to _“user”._ -- userdb_quota_rule - This is the user’s quota in bytes. 
Think of it as the maximum possible space on disk that the user can occupy. As - [documented](https://doc.dovecot.org/configuration_manual/quota/#per-user-quota) Dovecot expects the quota in a - special format like “\*:bytes=10000” if the user should not be able to store more than 10,000 bytes. That’s why we - begin the string with ‘\*:bytes=’. -- userdb_home - This leads to the directory where all emails and various control files for this user are located. The placeholder - ‘%d’ replaces the domain and ‘%n’ the user part. So for John that makes it “/var/vmail/example.org/john”. -- userdb*uid and userdb_gid - Those are the user ID and group ID of \_vmail* user – 5000 for both. Dovecot uses it to set the permissions of files - it creates. As all users share the same system user “vmail” this is just a static number.s - -## Fix permissions - -Make sure that only root can access the SQL configuration file so nobody else is reading your database access passwords: - -``` -chown root:root /etc/dovecot/dovecot-sql.conf.ext -chmod go= /etc/dovecot/dovecot-sql.conf.ext -``` - -Restart Dovecot from the shell: - -``` -systemctl restart dovecot -``` - -Look at your /var/log/mail.log logfile. You should see: - -``` -... Dovecot v2.3.13 (f79e8e7e4) starting up for imap, lmtp, sieve, pop3 (core dumps disabled) -``` - -If you get any error messages please double-check your configuration files. diff --git a/src/content/docs/ispmail-bookworm/130-postfix-send-to-dovecot.mdx b/src/content/docs/ispmail-bookworm/130-postfix-send-to-dovecot.mdx deleted file mode 100644 index eefde7f..0000000 --- a/src/content/docs/ispmail-bookworm/130-postfix-send-to-dovecot.mdx +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Let Postfix send emails to Dovecot -lastUpdated: 2023-10-04 -slug: ispmail-bookworm/let-postfix-send-emails-to-dovecot -sidebar: - order: 130 ---- - -import { Aside } from "@astrojs/starlight/components"; - -I hope you haven’t lost your mind yet. If you are unsure how Postfix and Dovecot work together take a moment and go back to the _big picture_ page. - -In a previous chapter we made sure that Postfix knows which emails it is allowed to receive. Now what to do with the email? It has to be saved to disk into the mailbox of the mail user who is eagerly waiting for it. You could let Postfix handle that using its built-in mail delivery agent (MDA) called “_virtual_“. However compared to the capabilities that Dovecot provides like server-based sieve rules or quotas the Postfix delivery agent is pretty basic. We are using Dovecot anyway to provide the IMAP (and optionally POP3) service. So let’s use its _delivery agent_. - -How can we make Postfix hand over the email to Dovecot? There are generally two ways to establish that link. - -1. Using the _dovecot-lda_ (local delivery agent) process. It can process one email at a time. And it starts up a new process for every email. This was for long the default way. But as you can imagine that it does not scale well. -2. The better option is to use [LMTP (local mail transport protocol)](https://en.wikipedia.org/wiki/Local_Mail_Transfer_Protocol) that was conceived for this purpose. It can handle multiple recipients at the same time and has a permanently running process which provides a better performance than using the LDA. In short, LMTP is a variant of SMTP with fewer features. It is meant for email communication between internal services that trust each other. - -You guessed it already – we will go for the second option. You installed the _dovecot-lmtpd_ package earlier. 
So let’s configure it. - -## Tell Dovecot where to listen for LMTP connections from Postfix - -Edit Dovecot’s configuration file that deals with the LMTP daemon – you can find it at `/etc/dovecot/conf.d/10-master.conf`. Look for the “service lmtp” section and edit it so that it looks like: - -``` -service lmtp { - unix_listener /var/spool/postfix/private/dovecot-lmtp { - group = postfix - mode = 0600 - user = postfix - } -} -``` - -This makes Dovecot’s _lmtp daemon_ create a UNIX socket at /var/spool/postfix/private/dovecot-lmtp. Just like in the section dealing with setting up Dovecot we make it put a socket into the /var/spool/postfix chroot directory because Postfix is restricted to that directory and cannot access anything outside of it. So from Postfix’s point of view the socket is located at “/private/dovecot-lmtp”. - -Restart Dovecot… -``` -systemctl restart dovecot -``` -Check if dovecot accepted that change: -``` -systemctl status dovecot -``` -The output should contain “Active: active (running)”. - - -## Tell Postfix to deliver emails to Dovecot using LMTP - -This is even easier. The “_virtual\_transport_” in Postfix defines the service to use for delivering emails to the local system. Dovecot has created a socket file and is ready to listen to incoming LMTP connections. We just need to tell Postfix to send emails there: - -``` -postconf virtual_transport=lmtp:unix:private/dovecot-lmtp -``` -The syntax looks crazy, but it’s actually simple. You just told Postfix to use the LMTP protocol. And that we want to use a UNIX socket on the same system (instead of a TCP connection). And the socket file is located at `/var/spool/postfix/private/dovecot-lmtp`. - -(You will find further information on these steps in the [Dovecot configuration on Postfix integration](https://doc.dovecot.org/configuration_manual/howto/postfix_dovecot_lmtp/).) - -## Enable server-side mail rules - -One of my favorite features of Dovecot are automatic rules for incoming email that are processed on the server. You can sort away your mailing list emails into special folders. You can reject certain senders. Or you can set up vacation auto-responders. No need to have a mail client running – it all happens automatically on the server even when your mail users are not connected. - -The open standard (RFC 5228) for such rules is called Sieve. Basically, Sieve is a way to manage server-side email rules. A rule consists of conditions and actions. For example if the sender address matches `steve@example.org` you could tell Dovecot to move such emails to your “steve” folder automatically. These rules are stored on the Dovecot server and executed automatically. Whether you connect from your smartphone your laptop or use the webmail access – the rules always work and require no configuration on the client side. - -As we use LMTP that’s where we need to tell the lmtp service that we want to use Dovecot’s “sieve” plugin. 
Edit the file `/etc/dovecot/conf.d/20-lmtp.conf` and within the “protocol lmtp” section change the “mail\_plugins” line to: - -``` -mail_plugins = $mail_plugins sieve -``` -Restart Dovecot and you are done: -``` -systemctl restart dovecot -``` diff --git a/src/content/docs/ispmail-bookworm/140-quotas.mdx b/src/content/docs/ispmail-bookworm/140-quotas.mdx deleted file mode 100644 index 65eeb72..0000000 --- a/src/content/docs/ispmail-bookworm/140-quotas.mdx +++ /dev/null @@ -1,201 +0,0 @@ ---- -title: Quotas -lastUpdated: 2023-10-04 -slug: ispmail-bookworm/quotas -sidebar: - order: 140 ---- - -import { Aside } from "@astrojs/starlight/components"; - - - -Quotas are size limits for users. You can make sure that users do not waste arbitrary amounts of disk space but are -forced to clean up old emails every now and then. - -The magic happens in two places: - -1. Postfix needs to reject new emails if the user’s mailbox is over quota. -2. Dovecot needs to keep track of the quota and how much the user has already used up of it. - -### Dovecot quota policy service - -Let’s start with Dovecot. Find the file `/etc/dovecot/conf.d/90-quota.conf` and edit it. There are several `plugin {}` -sections. Take one and make it look like: - -``` -plugin { - quota = count:User quota - quota_vsizes = yes - - quota_status_success = DUNNO - quota_status_nouser = DUNNO - quota_status_overquota = "452 4.2.2 Mailbox is full and cannot receive any more emails" -} -``` - -The first line defines that you want to calculate the used space in a user’s _maildir_. There are several -[backends](https://doc.dovecot.org/configuration_manual/quota_plugin/) like that but the -_[count](https://doc.dovecot.org/configuration_manual/quota/quota_count/#quota-backend-count)_ is the best choice in -this context. (Previous guides used _maildir_ here.) The string “User quota” is just an arbitrary string that may be -queried from a mail user agent. - -The lines starting with “`quota_status_…`” set return values for the service that you will set up in a minute. It will -tell Postfix that it will not interfere (_DUNNO_ – colloquial way to say “I don’t know”). And it will return a string -with a return code 452 if the user is over quota. Codes starting with “4” mean temporary errors. It will tell the -sending party that it is worth retrying at a later time. However if the user does not resolve the issue it will lead to -a _bounce_ error email after three days. - -In the same file (_90-quota.conf_) add another section: - -``` -service quota-status { - executable = /usr/lib/dovecot/quota-status -p postfix - unix_listener /var/spool/postfix/private/quota-status { - user = postfix - } -} -``` - -This creates a new [Dovecot service](https://doc.dovecot.org/configuration_manual/service_configuration/) responding to -requests from other processes. You surely recognize that we put it into the jail that Postfix runs in -(_/var/spool/postfix_), so that Postfix can access it. - -Time to restart Dovecot: - -``` -systemctl restart dovecot -``` - -Take a look at the /var/spool/postfix/private directory. If all went as intended you will find a socket file called -`quota-status` there. Otherwise please check the `/var/log/mail.log` file for errors. - -### Postfix recipient restrictions - -If we stopped here, then Dovecot would reject emails for users who have no space left. However Postfix would still -happily receive new emails and attempt to forward them to Dovecot via LMTP. Dovecot however will deny that. 
It will then -keep the email in its queue and retry for a while. In the end it will send a _bounce_ back to the sender telling them -about the problem. So why is this bad? - -1. The sender will assume that the email was delivered while it is stuck in the queue for up to three days. -2. Spam emails use forged senders. So at the time that Postfix generates the _bounce email_ it will likely send it to an - innocent person. This is called _backscatter_ and considered a mail server misconfiguration. Such a problem may get - your mail server blacklisted. You don’t want that. - -So the next logical step is to make Postfix check whether a mailbox is over quota whenever a new email arrives. Let’s -hook up into the “RCPT TO” phase of the SMTP dialog when a new email comes in. Postfix checks its -_smtpd_recipient_restrictions_ configuration at this stage. Run this command in the shell: - -``` -postconf smtpd_recipient_restrictions=reject_unauth_destination, \ - "check_policy_service unix:private/quota-status" -``` - -This adds two checks: - -1. `reject_unauth_destination` checks whether the mail server is the final destination for the recipient’s email - address. This is pretty much the default behavior if you do not define any restrictions. -2. `check_policy_service` connects to the socket file at `/var/spool/postfix/private/quota-status` that was put there by - Dovecot. It will use it to ask Dovecot whether the user is over quota in which case the email would get rejected. - -### Test it - -If you are curious to see this working, then set John’s mailbox quota to 5 KB: - -```sql -# mariadb mailserver -mysql> update virtual_users set quota=4000 where email='john@example.org'; -``` - -Send him a few emails using the ‘swaks’ tool: - -``` -swaks --server localhost --to john@example.org -``` - -After a few emails you will see the rejection message: - -``` --> RCPT TO:john@example.org - <** 452 4.2.2 john@example.org: Recipient address rejected: Mailbox is full and cannot receive any more emails -``` - -### Troubleshooting - -These are things you should consider if quotas do not seem to work properly: - -- Check if you have enabled “quota” in the “mail_plugins” in the 10-mail.conf file. -- Your users may complain that they have deleted many emails but are still over quota. Let them check if they actually - emptied the _Trash_ folder. Of course emails in that folder also contribute to the disk space usage. Once the Trash - folder is expunged the problem should be gone. You may also allow your users more space in the Trash folder. That’s - explained in the [Dovecot documentation](https://doc.dovecot.org/configuration_manual/quota/#quota-rules). -- If you directly remove files from a user’s Maildir instead of properly accessing the mailbox using IMAP then you will - screw up the quota calculation. In that case let Dovecot recalculate the quota: - `doveadm quota recalc -u john@example.org` - -### Automatic warning emails - -The last step is to inform the poor users if they accidentally went over quota. After all they do not necessarily -recognize that on their own. Let’s do that by sending them an email with a warning. Yes, we will make sure that the -email gets through even if the quota is reached. - -Edit the `90-quota.conf` file again. 
Add this section to the file (derived from the -[Dovecot documentation](https://doc.dovecot.org/configuration_manual/quota/#quota-warning-scripts)): - -``` -plugin { - quota_warning = storage=95%% quota-warning 95 %u - quota_warning2 = storage=80%% quota-warning 80 %u -} -service quota-warning { - executable = script /usr/local/bin/quota-warning.sh - unix_listener quota-warning { - user = vmail - group = vmail - mode = 0660 - } -} -``` - -This section defines two automatic quota warnings. The first (quota_warning) is triggered if the user reaches 95% of the -quota. The second (quota_warning2) at 80%. These lines follow this schema: - -- **Trigger** (e.g. “storage=95%”). The “%” sign needs to be used twice if you want to emit a literal percent sign. So - this is not a typo. -- The **socket** you want to call in that case. Our socket is the “service quota-warning” that calls a shell script. -- Additional **parameters** that are passed to the shell script in our case. They tell the script the percentage that - has been reached (e.g. 95) and the address of the user who should get the warning. - -Apparently we need the script to run. So please create a new file at `/usr/local/bin/quota-warning.sh` and put these -lines into it: - -``` -#!/bin/sh -PERCENT=$1 -USER=$2 -cat << EOF | /usr/lib/dovecot/dovecot-lda -d $USER -o "plugin/quota=maildir:User quota:noenforcing" -From: postmaster@webmail.example.org -Subject: Quota warning - $PERCENT% reached - -Your mailbox can only store a limited amount of emails. -Currently it is $PERCENT% full. If you reach 100% then -new emails cannot be stored. Thanks for your understanding. -EOF -``` - -Make this file executable: - -``` -chmod +x /usr/local/bin/quota-warning.sh -``` - -Time to restart Dovecot again: - -``` -systemctl restart dovecot -``` - -Dovecot’s quota limits can be configured in many ways. If you have special needs then give -[their documentation](https://doc.dovecot.org/configuration_manual/quota/) a look. diff --git a/src/content/docs/ispmail-bookworm/150-testing-imap.mdx b/src/content/docs/ispmail-bookworm/150-testing-imap.mdx deleted file mode 100644 index 3ba30b2..0000000 --- a/src/content/docs/ispmail-bookworm/150-testing-imap.mdx +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Testing IMAP -lastUpdated: 2023-10-04 -slug: ispmail-bookworm/testing-imap -sidebar: - order: 150 ---- - -You have already completed the configuration of Dovecot. So fetching emails via IMAP should already work. Let’s give it -a try using a simple-looking but powerful IMAP client: _mutt_. - -``` -mutt -f imaps://john@example.org@webmail.example.org -``` - -The connection URL may look a little confusing because of the two “@” characters. Usually _mutt_ expects the format -`imaps://user@server`. And as we use the email address as the “user” part you get this look. - -You should get prompted for the password which we set to “summersun”. If you get any certificate warnings then check if -you used the correct server name to connect to and if you completed the certificate/LetsEncrypt part earlier in this -guide. - -After logging in you will see an empty inbox: - - - -Or if you have played around with quotas in the previous section you will see a couple of emails plus the quota -warnings: - - - -Very good – IMAP connections and authentication works. That’s all we wanted to test. Exit _mutt_ by pressing “q”. - -Congratulations. At this point your server can already receive emails. 
diff --git a/src/content/docs/ispmail-bookworm/160-webmail-using-roundcube.mdx b/src/content/docs/ispmail-bookworm/160-webmail-using-roundcube.mdx deleted file mode 100644 index 30f89b7..0000000 --- a/src/content/docs/ispmail-bookworm/160-webmail-using-roundcube.mdx +++ /dev/null @@ -1,156 +0,0 @@ ---- -title: Webmail using Roundcube -lastUpdated: 2023-10-04 -slug: ispmail-bookworm/webmail-using-roundcube -sidebar: - order: 160 ---- - -import { Aside } from "@astrojs/starlight/components"; - - - -Power users may still want to use a mail client like Thunderbird. But most users nowadays seem to prefer reading their emails in the web browser. Let us install a web application for that purpose: [Roundcube](https://roundcube.net/). Roundcube is the software that was also used in the previous versions of this guide. So if your users are used to it… just stay with it. - -## Installation - -Start by installing the software packages: - -``` -apt install -y roundcube roundcube-plugins \ - roundcube-plugins-extra roundcube-mysql -``` - -Roundcube stores user settings in the database. So you will get asked to set up database access: - - - -Choose Yes. - -When asked for a password just press _ENTER_. - - - -## Configure Apache - -Do you remember that earlier in this guide I asked you how want to name your mail server? Whether you want to use one common name like “webmail.example.org” for all your domains? Or if you prefer different host names for each domain like “webmail.domain1.com” and “webmail.domain2.com”? If you want to use just more then you will have to create one virtual host configuration per domain. The following instructions will just deal with one common host name. - -To get Apache to serve the Roundcube application you need to edit the /etc/apache2/sites-available/**webmail.example.org**-https.conf file. I suggest you change the `DocumentRoot` line to: -``` -DocumentRoot /var/lib/roundcube/public_html -``` -All URLs are relative to that directory. So if you go to `https://webmail.example.com/` then files are looked up in that directory. - -Also add this line within the same `VirtualHost` section to add a couple of prepared security settings: -``` -Include /etc/roundcube/apache.conf -``` - -And as usual Apache needs to be restarted after the configuration change: -``` -systemctl restart apache2 -``` -Check that Apache is running properly: -``` -systemctl status apache2 -``` -In case of a problem run “`apache2ctl configtest`” to find the cause. - -## Limit access to localhost - -The main configuration file of Roundcube is located at `/etc/roundcube/config.inc.php`. Feel free to customize the file. Fortunately nowadays the basic settings are already as we need them. However these two settings need to be changed by you: - -``` -$config['imap_host'] = "tls://webmail.example.org:143"; -$config['smtp_host'] = 'tls://webmail.example.org:587'; -``` -So now when your users enter `https://webmail.example.org/` in their browser they should get the Roundcube login form: - - - -Keep in mind that we are using the email address as the account name of the user. So when logging in please enter the email address as the user name. E.g. ‘john@example.org’ and password ‘summersun’. - - - - - -## Plugins - -Roundcube comes with various plugins that you can offer your users. I recommend at least these two: - -- password: Let the user change their access password. -- managesieve: Let the user manage rules that apply to incoming email. They can move mails to specific folders automatically for example. 
- -Again edit the `/etc/roundcube/config.inc.php` file and look for the _plugins_ configuration. To enable the recommended plugins change it to: - -``` -$config['plugins'] = array( - 'managesieve', - 'password', - ); -``` - -### password plugin - -Plugins are configured through files located in the `/etc/roundcube/plugins` directory. Let’s begin with the password plugin. Edit the `/etc/roundcube/plugins/password/config.inc.php` file. - -Oops, that file looks pretty empty. But it refers us to an example file at `/usr/share/roundcube/plugins/password/config.inc.php.dist`. There are many different methods to let users change their passwords. As we store that information in the SQL database, that is the part we need to set up. - - - -Remove the empty definition line of $config from your `config.inc.php` file. Let’s go through the required settings one by one: - -- `$config['password_driver'] = 'sql';`\ - Simple. Use SQL as a backend. -- `$config['password_minimum_length'] = 12;`\ - Allow no passwords shorter than 12 characters. I consider longer passwords more secure than short passwords with weird characters. You can even choose a larger minimum. -- `$config['password_force_save'] = true;`\ - This will overwrite the password in the database even if it hasn’t changed. It helps us improve the strength of the password hash by re-encoding it with a better algorithm even if the user chooses to keep his old password. -- `$config['password_algorithm'] = 'blowfish-crypt';`\ - The cryptographic algorithm to encode the password. This one is considered very secure and supported by Dovecot. -- `$config['password_algorithm_prefix'] = '{CRYPT}';`\ - Prepend every password with this string so that Dovecot knows how we encrypted the password. -- `$config['password_db_dsn'] = 'mysql://mailadmin:gefk6lA2brMOeb8eR5WYaMEdKDQfnF@localhost/mailserver';`\ - Connection information for the local database. Use your own password for the _mailadmin_ (!) database user here. We cannot use the restricted _mailserver_ user because we have to write to the database if the user changes his password. -- `$config['password_query'] = "UPDATE virtual_users SET password=%P WHERE email=%u";`\ - The SQL query that is run to write the new password hash into the database. %P is a placeholder for the new password hash. And %u is the logged-in user and conveniently matches the email address. - -Make sure that this config file is not world-readable: - -``` -chown root:www-data /etc/roundcube/plugins/password/config.inc.php -chmod u=rw,g=r,o= /etc/roundcube/plugins/password/config.inc.php -``` -Try it. Log into Roundcube as `john@example.org` with password ‘summersun’. Go to the _Settings_. Choose _Password_. Enter a new password twice. You should get a success message at the bottom right. Now logout and login with the new password. Does it work? Great. - -### sieve plugin - -[Sieve](https://en.wikipedia.org/wiki/Sieve_\(mail_filtering_language\)) is a simple programming language to be used for server-side rules. Dovecot executes these rules every time a new email comes in. There are global rules that are executed for every email. And of course every user/mailbox can have its own rules. To manage sieve rules Dovecot offers the _managesieve_ interface that you enabled earlier. So we just need to tell Roundcube how to access it. - -The configuration file for Roundcube’s _managesieve_ plugin is found at `/etc/roundcube/plugins/managesieve/config.inc.php`. Edit the file and again remove the empty or comment the `$config` line. 
You can again find all possible configuration options in the `/usr/share/roundcube/plugins/managesieve/config.inc.php.dist` file. - -This time just one setting is required to tell Roundcube which server to talk to: -``` -$config['managesieve_host'] = 'localhost'; -``` -Sieve rules are stored in a special syntax on the server. This is an example that moves all incoming emails to the _test_ folder that have “test” in the subject: -``` -require ["fileinto"]; -if header :contains "subject" "test" -{ - fileinto "INBOX/test"; -} -``` -You do not need to learn this syntax though. Roundcube’s sieve rule editor is way more user-friendly. - -Try adding a sieve rule for `john@example.org` in Roundcube. That feature is located in Settings/Filters. You will find the machine-readable sieve code at `/var/vmail/example.org/john/sieve/roundcube.sieve`. - -The rule editor looks like this: - - diff --git a/src/content/docs/ispmail-bookworm/170-testing-email-delivery.mdx b/src/content/docs/ispmail-bookworm/170-testing-email-delivery.mdx deleted file mode 100644 index 5d7e841..0000000 --- a/src/content/docs/ispmail-bookworm/170-testing-email-delivery.mdx +++ /dev/null @@ -1,168 +0,0 @@ ---- -title: Testing email delivery -lastUpdated: 2023-10-04 -slug: ispmail-bookworm/testing-email-delivery -sidebar: - order: 170 ---- - -So far you have spent considerable time with theory and configuration. Are you worried whether all you did actually leads to a working mail server? Before we do the final steps let’s take a break and verify that everything you did so far works as expected. - -At this point the /var/vmail directory should be empty or maybe contain an “example.org” directory if you played with the john@example.org account previously. You can get a list of all files and directories within by running: -``` -find /var/vmail -``` -Although there are not actually any emails on the server yet, you may still get something along the lines of: -``` -/var/vmail -/var/vmail/example.org -/var/vmail/example.org/john -/var/vmail/example.org/john/.dovecot.sieve -/var/vmail/example.org/john/sieve -/var/vmail/example.org/john/sieve/roundcube.sieve -/var/vmail/example.org/john/sieve/tmp -/var/vmail/example.org/john/Maildir -/var/vmail/example.org/john/Maildir/subscriptions -/var/vmail/example.org/john/Maildir/maildirfolder -/var/vmail/example.org/john/Maildir/new -/var/vmail/example.org/john/Maildir/cur -/var/vmail/example.org/john/Maildir/.INBOX.test -/var/vmail/example.org/john/Maildir/.INBOX.test/maildirfolder -/var/vmail/example.org/john/Maildir/.INBOX.test/new -/var/vmail/example.org/john/Maildir/.INBOX.test/dovecot.index.log -/var/vmail/example.org/john/Maildir/.INBOX.test/cur -/var/vmail/example.org/john/Maildir/.INBOX.test/dovecot-uidlist -/var/vmail/example.org/john/Maildir/.INBOX.test/tmp -/var/vmail/example.org/john/Maildir/dovecot-uidvalidity.5ddc842b -/var/vmail/example.org/john/Maildir/tmp -``` -Basically the schema you see here is /var/vmail/DOMAIN/USER/Maildir/… - -Eeach IMAP mail folder has three subdirectories: - -- `new` – every file here is an email that was stored in this mail folder but not yet read -- `cur` – the same but for email that has been read already -- `tmp` – for temporary files from the mail server - -Nested folders (folders within folders) will be separated by a dot like this: - -- …/Maildir/new/… – the main inbox -- …/Maildir/.INBOX.reddit/new/… – the “reddit” mail folder below the inbox -- …/Maildir/.INBOX.servers.inga/new/… – the “servers”/”inga” mail folder below the inbox - -## Check 
Postfix - -To check for obvious configuration error in Postfix please run: -``` -postfix check -``` -Did you get the error… -“_error: open database /etc/aliases.db: No such file or directory_“? Don’t worry. Just run the “newaliases” command to create a new machine-readable file from what aliases were defined in the `/etc/aliases`. - -Very likely you will get this error: “_postfix/postfix-script: warning: symlink leaves directory: /etc/postfix/./makedefs.out_“. That is a [harmless bug of the Debian package](https://bugs.debian.org/926331) that can safely be ignored. - -## Send a test email - -It is time to send a new email into the system. Open a new terminal window and run -``` -tail -f /var/log/mail.log -``` -to see what the mail server is doing. Now let’s send an email to John. My favorite tool for mail tests is _swaks_ that you installed earlier. In a second terminal run: -``` -swaks --to john@example.org --server localhost -``` -If all works as expected, your mail.log will show a lot of technical information about the email delivery. Let me explain what happens at each stage. - -- `postfix/smtpd[29225]: connect from localhost.localdomain[127.0.0.1]` - Postfix receives an incoming SMTP connection. -- `postfix/smtpd[29225]: 8BA46A0A3A: client=localhost.localdomain[127.0.0.1]` - Postfix assigns a unique identifier (8BA46A0A3A) to this connection so that you see which log lines belong together. This is especially important with busy mail servers where multiple mails are handled in parallel. -- `postfix/cleanup[29233]: 8BA46A0A3A: *message-id=20191126153053.029243@webmail.example.org` - _swaks_ created a unique [message id](https://en.wikipedia.org/wiki/Message-ID) to the email which helps you identify specific mails in the log file. -- `postfix/qmgr[13667]: 8BA46A0A3A: from=root@webmail.example.org, size=485, nrcpt=1 (queue active)` - The sender was root@webmail.example.org. This is logged after _swaks_ sent the “MAIL FROM” line during the SMTP dialog. -- `postfix/smtpd[29225]: disconnect from localhost.localdomain[127.0.0.1] ehlo=1 mail=1 rcpt=1 data=1 quit=1 commands=5` - The SMTP communication ends. Postfix has now received and queued the email. -- `dovecot: lmtp(29237): Connect from local` - Postfix connects to Dovecot to hand over the email via the LMTP interface. -- `dovecot: lmtp(john@example.org)<29237><2PJTIh033V01cgAARGEcaw>: sieve: msgid=20191126153053.029243@webmail.example.org: stored mail into mailbox 'INBOX.test'` - Dovecot received the email and even evaluated John’s sieve rule which made the email get stored to the ‘test’ folder of his mailbox. -- `dovecot: lmtp(29237): Disconnect from local: Client has quit the connection (state=READY)` - The LMTP connection between Postfix and Dovecot is closed. -- `postfix/lmtp[29236]: 8BA46A0A3A: to=john@example.org, relay=webmail.example.org[private/dovecot-lmtp], delay=0.01, delays=0/0/0/0.01, dsn=2.0.0, status=sent (250 2.0.0 john@example.org 2PJTIh033V01cgAARGEcaw Saved)` - This is the one of the most interesting lines in your mail log. It tells you what happened with a certain email. In this case it says that it was handed over to _dovecot-lmtp_ and that the delivery was successful (_status=sent_). The status codes like 2.0.0 are defined in [RFC 3463](https://tools.ietf.org/html/rfc3463) and work similar to status codes in HTTP. Codes beginning with ‘2’ are good. Those with ‘4’ are temporary errors. And ‘5’ stands for a permanent failure. - -Your output may look slightly differently. 
Just focus on the parts that are printed in **bold letters**. If everything worked as expected Postfix has accepted the
email and forwarded it to Dovecot which in turn wrote the email in John’s maildir. If you get any errors in the log
file then try to understand the error message and find the cause of the problem before you proceed.

Look again:

```
find /var/vmail
```

Dovecot has now created a directory structure for John and created a new file:

```
/var/vmail/
[…]
/var/vmail/example.org/john/Maildir/new/1515485447.M404984P2636.mail,S=510,W=522
[…]
```

The file will have a different name on your system – that’s okay. It is the only file in the “new” folder.

## Accessing the email as a file on disk

The file just contains the email:

```
Return-Path:
```
```
<VirtualHost *:80>
    ServerName **webmail.example.org**
    DocumentRoot /var/www/**webmail.example.org**
</VirtualHost>
```

The simple configuration makes Apache handle HTTP requests (on the standard TCP port 80) if a certain line in the
request header from the browser reads “Host: webmail.example.org”. So the browser actually tells your Apache web server
which server name it is looking for. That allows for multiple web sites on a single IP address. (Thanks to
[Server Name Indication](https://en.wikipedia.org/wiki/Server_Name_Indication) as explained earlier this works well for
HTTPS, too.)

Enable the site:
```
a2ensite **webmail.example.org**-http
```

You will be told:

```
To activate the new configuration, you need to run:
  systemctl reload apache2
```

Do that.

Let’s check if the configuration works. Put a test file into your web root directory:
echo "Just a test" > /var/www/**webmail.example.org**/test- -Now when you open the URL http://**webmail.example.org**/test in your browser you should see the text “Just a test”. - -This is enough setup to make LetsEncrypt issue a certificate for you. - -## Getting a LetsEncrypt certificate - -Now you can use the _certbot_ tool to request an encryption certificate from LetsEncrypt. What will happen? - -- certbot creates a _private key_ and a _certificate request_. It sends the _certificate request_ to the LetsEncrypt - server. -- the LetsEncrypt server replies with a _challenge_/_token_. -- certbot puts that token into a file in the /var/www/**webmail.example.org**/.well-known/acme-challenge directory. -- the LetsEncrypt server does an HTTP connection to `http://webmail.example.org/.well-known/acme-challenge/…` and - expects to find that token. This verifies that you are in charge of the domain and the web server. -- If all works well the LetsEncrypt server signs your _certificate request_ and thus creates the actual _certificate_. -- certbot receives the certificate and puts it into /etc/letsencrypt/archive/**webmail.example.org**/ - -To get a certificate for your domain run: - -
```
certbot certonly --webroot --webroot-path /var/www/**webmail.example.org** -d **webmail.example.org**
```

You can use multiple occurrences of “-d” here to get a certificate valid for multiple domains. For example: “-d
webmail.example.org -d something-else.example.org”. (See also:
[https://eff-certbot.readthedocs.io/en/stable/using.html#webroot](https://eff-certbot.readthedocs.io/en/stable/using.html#webroot))

The first time you do that you will get asked for your email address so LetsEncrypt can send you reminders when your
certificate is about to expire. You will also have to agree to their terms of service.

If everything worked well you should get output like:

```
Requesting a certificate for webmail.example.org
Successfully received certificate.
Certificate is saved at: /etc/letsencrypt/live/webmail.example.org/fullchain.pem
Key is saved at: /etc/letsencrypt/live/webmail.example.org/privkey.pem
This certificate expires on 2024-01-02.
These files will be updated when the certificate renews.
Certbot has set up a scheduled task to automatically renew this certificate in the background.
```

In /etc/letsencrypt/live/**webmail.example.org** you will find a couple of files now:

- cert.pem: the certificate file
- chain.pem: the _chaining_ or _intermediate_ certificate. This certificate provides information on how the LetsEncrypt
  certificates are linked to other known certificate authorities. It is generally a good idea to always send this
  certificate along with your own for clients who may not know LetsEncrypt properly yet.
- fullchain.pem: this file contains a concatenation of the _cert.pem_ and the _chain.pem_. This is the preferred file to
  use when a piece of software asks where to find the _certificate_.
- privkey.pem: the private key file. Keep it secret.

## Add HTTPS

Now that you have a valid certificate you can finally enable HTTPS for your web server. Create a new file
/etc/apache2/sites-available/**webmail.example.org**-https.conf containing:
```
<VirtualHost *:443>
    ServerName **webmail.example.org**
    DocumentRoot /var/www/**webmail.example.org**
    SSLEngine on
    SSLCertificateFile /etc/letsencrypt/live/**webmail.example.org**/fullchain.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/**webmail.example.org**/privkey.pem
</VirtualHost>
```

This virtual host configuration looks suspiciously similar to the HTTP virtual host above. It just listens on port 443
(the standard port for HTTPS) instead of port 80. And it uses the “SSLEngine” that handles encryption and gets
information about the certificate for your web server (that is shown to your users) and the private key (that the web
server uses to decrypt the user’s communication).

Enable the SSL module in Apache:
```
a2enmod ssl
```

Then enable the virtual host for HTTPS:
```
a2ensite **webmail.example.org**-https
```

And _restart_ the web server. A _reload_ is not sufficient this time because you added a module.

```
systemctl restart apache2
```

Now when you point your web browser to **webmail.example.org**, your browser should tell you that it trusts the web
site’s certificate:

(Yes, sorry, this is not **webmail.example.org**. But I do not own the example.org domain and thus cannot get a valid
certificate for it. This is my own site.)

So should you keep the HTTP virtual host? Yes. First for the HTTP->HTTPS redirection. And second to keep _certbot_
working.

## Redirect HTTP to HTTPS

Sometimes users forget to enter https://… when accessing your webmail service. So they access the HTTP web site. We
obviously don’t want them to send their password over HTTP. So we should redirect all HTTP connections to HTTPS.

One exception though. Let’s Encrypt will use HTTP to verify your challenge token. So we need to serve files at
http://**webmail.example.org**/.well-known/acme-challenge/… directly while redirecting all other requests to HTTPS. You
can accomplish that by putting these lines inside the <VirtualHost> section of your
`/etc/apache2/sites-available/webmail.example.org-http.conf` file:

```
RewriteEngine On
RewriteCond %{REQUEST_URI} !.well-known/acme-challenge
RewriteRule ^(.*)$ https://%{SERVER_NAME}$1 [R=301,L]
```

This requires the _rewrite_ module to be enabled in Apache. That is simple though:

```
a2enmod rewrite
systemctl restart apache2
```

So now entering http://**webmail.example.org** will redirect you to https://**webmail.example.org**.

## Automatic certificate renewal

The *certbot* package automatically adds a timed job that runs twice a day at random times. The random part is
important to avoid millions of servers hammering the LetsEncrypt service at the same second.

So the renewal already happens automatically. Should it fail, LetsEncrypt will start sending you reminder emails that
your certificate should be renewed. That’s a clear sign that something went wrong with the automatic renewal.

There is one puzzle piece missing though. Even if the renewal worked it will only update the certificate files. But the
software components – Postfix, Dovecot and Apache – will not notice the change. So we need to add a so-called
_post-hook_ to certbot that triggers a restart of all those processes thereafter.

For that purpose edit the /etc/letsencrypt/cli.ini file and add:

```
post-hook = systemctl restart postfix dovecot apache2
```

Well done. You have implemented Let’s Encrypt for all your services now. Let’s go on.
diff --git a/src/content/docs/ispmail-bookworm/90-prepare-database.mdx b/src/content/docs/ispmail-bookworm/90-prepare-database.mdx deleted file mode 100644 index 93e4676..0000000 --- a/src/content/docs/ispmail-bookworm/90-prepare-database.mdx +++ /dev/null @@ -1,219 +0,0 @@
---
title: Preparing the database
lastUpdated: 2023-10-03
slug: ispmail-bookworm/prepare-the-database
sidebar:
  order: 90
---

import { Aside } from "@astrojs/starlight/components";
import { Tabs, TabItem } from '@astrojs/starlight/components';

Now it’s time to prepare the MariaDB database that stores the information that controls your mail server. In the process you will have to enter [SQL](http://en.wikipedia.org/wiki/SQL) queries – the language of relational database servers. You may enter them in a terminal window using the ‘mysql’ command.
But if you are less experienced with SQL you may prefer using a web interface. That’s what you installed _[Adminer](https://www.adminer.org/)_ for. - - - - -## Setting up Adminer - -Basically Adminer is just a couple of PHP files served from your Apache web server. The setup is simple. Edit your /etc/apache2/sites-available/**webmail.example.org**-https.conf file and put this line anywhere between the <VirtualHost> and the </VirtualHost> tags: -``` -Alias /adminer /usr/share/adminer/adminer -``` -Reload the Apache process: -``` -systemctl reload apache2 -``` - - - -You will not be able to login yet. The only available database user is ‘root’, but it is only usable from the shell by default – not over a network. - - -## Generate two random passwords - -In this section you will create the basic database “mailserver” and two users. One user (“mailadmin”) will be able to change the data in the database and is meant for you. The other user (“mailserver”) can only read from the database and is meant for the server processes. - -Use the _pwgen_ tool to create two random passwords for these users: -``` -pwgen -s1 30 2 -``` -Take a note of the passwords or store them somewhere safe. - - -## Create the ‘mailserver’ database - -This step is simple. Connect to the database using the ‘mysql’ command: -``` -mysql -``` -You should see the MariaDB prompt that allows you to enter further SQL commands: -``` -MariaDB [(none)]> -``` - -Now you are expected to speak SQL. To create a new database for our needs enter: - -``` -CREATE DATABASE mailserver; -``` -You will be told that your query was OK and that one new row was added. - - -## Create the database users - -Now you have an empty database. Let us give the “mailadmin” database user the required privileges to manage it. - -You are still connected to the database, right? To create a user with full permissions enter this SQL command. Please use the **first** password you just generated instead of mine: -``` -grant all privileges on mailserver.* to 'mailadmin'@'localhost' identified by 'gefk6lA2brMOeb8eR5WYaMEdKDQfnF'; -``` - -Also create the read-only user that will grant Postfix and Dovecot database access later (use your **second** random password here). - -``` -grant select on mailserver.* to 'mailserver'@'127.0.0.1' identified by 'x893dNj4stkHy1MKQq0USWBaX4ZZdq'; -``` - - - -Now you can use _Adminer_ to log in using the _mailadmin_ account and the **first** password: - - - -You should get logged in and see the “mailserver” database: - - - -## Creating the database tables - -Do you remember that I introduced three Postfix _mappings_ earlier? One for _virtual domains_, one for _virtual aliases_ and another for _virtual users_? Each of the mappings needs a database table that you will create now. Feel free to use _Adminer_. I will however also show the SQL statement to create the tables that you can enter on the ‘mysql’ command-line tool. Below you can click on either \[Adminer\] or \[SQL\] to choose. - -The first table to create is… - -## virtual\_domains - -This table just holds the list of domains that you will use as _virtual\_mailbox\_domains_ in Postfix. - -
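If you would rather stay on the ‘mysql’ command line, such a table can be created with a single CREATE TABLE statement.
The following is only a minimal sketch; the column names (an auto-incrementing `id` and a `name` column holding the
domain) are assumptions for illustration, so follow the exact schema used in the remainder of this guide if it differs:

```
USE mailserver;

CREATE TABLE IF NOT EXISTS virtual_domains (
  id int unsigned NOT NULL AUTO_INCREMENT,
  name varchar(255) NOT NULL,
  PRIMARY KEY (id)
) ENGINE=InnoDB;
```

Postfix will later look up incoming recipient domains in this table (through the _virtual\_mailbox\_domains_ mapping)
to decide whether it is responsible for them.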