#!/bin/sh
#
# export wiki pages to the cryptobox development tree
# this creates static and integrated pages
#

set -ue

# root directory of the cryptobox development environment
ROOT_DIR="$(dirname "$0")/.."
# retrieve these pages from the wiki
PAGES="CryptoBox CryptoBoxUser CryptoBoxUserGettingStarted CryptoBoxUserConfiguration CryptoBoxUserDailyUse CryptoBoxDev CryptoBoxDevPreparation CryptoBoxDevCustomBuild CryptoBoxDevWorkFlow CryptoBoxDevValidation CryptoBoxDevCustomConfigure CryptoBoxDevBackground CryptoBoxDevKnownProblems"
#PAGES="CryptoBox"
# base URL
WIKI_HOST="https://systemausfall.org"
# the trailing slash is important
WIKI_URL=/trac/cryptobox/wiki/
CBOX_CGI="/cryptobox?action=doc\&page="
LANGUAGES="de en"
DEST_DIR="$ROOT_DIR/cbox-tree.d/usr/share/doc/cryptobox/html"
OFFLINE_DIR="$ROOT_DIR/cbox-tree.d/_offline/doc"
IMAGE_DIR="$ROOT_DIR/cbox-tree.d/var/www/cryptobox-img"
TMP_DIR=/tmp/$(basename "$0")-$$.d
HEADER_FILE=doc_header.inc
FOOTER_FILE=doc_footer.inc

[ ! -e "$DEST_DIR" ] && echo "$DEST_DIR does not exist" >&2 && exit 1

for LANG in $LANGUAGES; do
	for PAGE in $PAGES; do
		PAGE_SRC="$WIKI_HOST$WIKI_URL$PAGE/$LANG"
		echo "Importing $PAGE/$LANG:"
		# replace sub-page-style '/' like moin does it (by '_2f')
		TMP_FILE=$TMP_DIR/${PAGE}.html
		mkdir -p "$TMP_DIR"
		echo " downloading the page ..."
		wget --quiet --output-document="$TMP_FILE" "$PAGE_SRC" || \
			{ echo "Downloading ($PAGE_SRC) failed!" >&2; exit 1; }
		# check if this moin page exists (by looking for the template selection)
		if grep -q "^describe $PAGE/$LANG here$" "$TMP_FILE"
		 then
			rm "$TMP_FILE"
			PAGE_SRC=$(dirname "$PAGE_SRC")
			echo " trying to download the default language page instead"
			wget --quiet --output-document="$TMP_FILE" "$PAGE_SRC" || \
				{ echo "Downloading ($PAGE_SRC) failed!" >&2; exit 1; }
			# check whether there is no default page either
			grep -q "^describe $PAGE here$" "$TMP_FILE" && \
				echo "This page ($PAGE_SRC) was not found!" >&2 && exit 1
		 fi
		echo " removing header and footer ..."
		# break lines before start of content
		sed -i 's#