initial checkin

This commit is contained in:
account for www-related files and proxied applications 2015-03-08 22:11:15 +00:00
commit a814ca0cd8
28 changed files with 1053 additions and 0 deletions

15
blog.conf Normal file
View File

@ -0,0 +1,15 @@
# vim: set syntax=yaml :
url: https://maff.scot/
name: colourful words and phrases
tagline: techy tangents and general life chatter from a tired sysadmin.
author: Maff
about: A dorky dude who spends too much time with perl.
keywords: sysadmin,unix,bsd,freebsd,linux,software,web,developer,perl,bash,shell,gentoo,maff,matthew,connelly,dundee,scotland,furry,blog
posturlprepend: wrote/
config:
indexable: 1
per_page: 6
date_format: "%H:%M on %A, %d/%m/%y"
links:
Twitter: https://twitter.com/maffsie
Github: https://github.com/MaffC

173
blogalba Executable file
View File

@ -0,0 +1,173 @@
#!/usr/bin/env perl
# BlogAlba - no-frills markdown blogging system
package App::BlogAlba;
use strict;
use warnings;
use feature qw/say/;
use POSIX qw/strftime/;
use Date::Parse qw/str2time/;
use File::Spec;
use HTML::Template;
use Text::MultiMarkdown qw/markdown/;
use Unicode::Normalize;
use YAML;
use Dancer2;
# Resolve the directory this script lives in; config, layouts, posts and
# pages are all loaded relative to it.
my $basedir=File::Spec->rel2abs(__FILE__);$basedir=~s/blogalba$//;
my $cfg="$basedir/blog.conf";
my $blog=YAML::LoadFile($cfg) or die "Couldn't load $cfg!";
$blog->{url} .= '/' unless $blog->{url} =~ /\/$/; # canonical base URL always ends in /
# Shared app state: current template object, cached posts/pages, default
# template params, counters, and the last cache-rebuild timestamp.
my ($page,@posts,@pages,%defparams);
my $nposts=0;my $npages=1;my $lastcache=0;
# Read a markdown file with YAML front matter (delimited by lines of ---).
# $psh selects where the parsed entry is pushed: 1 => @posts (default),
# 2 => @pages. Returns the metadata hash (with the body rendered to HTML),
# or 0 when the file cannot be opened.
sub readpost {
	my $file = shift;
	my $psh = shift || 1;
	my ($postb, $postmm) = ('', '');
	# Lexical handle + 3-arg open (was a bareword 2-arg open).
	open my $fh, '<', $file or warn "Couldn't open $file!" and return 0;
	my $status = 0; # 0 = before front matter, 1 = inside it, 2 = in the body
	while (<$fh>) {
		$postb .= $_ if $status==2;
		/^-{3,}$/ and not $status==2 and $status = $status==1? 2 : 1;
		$postmm .= $_ if $status==1;
	}
	close $fh;
	my %postm = %{YAML::Load($postmm)};
	# Base filename (sans .md) doubles as the identifier get_page and the
	# nav links use. NOTE(review): this line was corrupted in the source
	# dump; reconstructed as {fname} to match its use for page lookups.
	$postm{fname} = $1 if $file =~ /(?:^|\/)([a-zA-Z0-9\-]*)\.md$/;
	$postm{body} = markdown($postb);
	if (defined $postm{date}) { # only dated entries get post-style metadata
		$postm{slug} = slugify($postm{title}) unless $postm{slug}; #we allow custom slugs to be defined
		$postm{excerpt} = $1 if $postm{body} =~ /(<p>.*?<\/p>)/s;
		$postm{time} = str2time($postm{date});
		$postm{datetime} = timefmt($postm{date},'datetime');
		$postm{permaurl} = $blog->{url}.$blog->{posturlprepend}.timefmt($postm{time},'permalink').$postm{slug};
	}
	push @posts,{%postm} if $psh==1;
	push @pages,{%postm} if $psh==2;
	return %postm;
}
# Turn a post title into a URL-safe slug: Unicode-decompose, lowercase,
# drop non-ASCII and punctuation, then collapse whitespace/hyphen runs
# into single hyphens.
sub slugify {
	my ($text) = @_;
	$text = NFKD($text);       # decompose accents (Unicode::Normalize)
	$text = lc $text;
	$text =~ tr/\000-\177//cd; # discard anything outside ASCII
	$text =~ s/[^\w\s-]//g;    # keep only word chars, whitespace, hyphens
	chomp $text;
	$text =~ s/[-\s]+/-/g;     # no spaces, no doubled hyphens
	return $text;
}
# Format an epoch (or, for 'readpost'/'datetime', a date string that is
# first run through str2time) according to $context:
#   datetime  -> ISO-8601-style stamp for <time datetime=...> attributes
#   writepost -> "YYYY-MM", used to match posts against /wrote/:yyyy/:mm
#   permalink -> "YYYY/MM/" path fragment for permalinks
#   (other)   -> treated as a literal strftime format string
#   (none)    -> the blog's configured date_format
sub timefmt {
	my ($epoch,$context)=@_;
	$context //= ''; # single-arg calls previously warned "uninitialized value" below
	$epoch=str2time $epoch if $context eq 'readpost' or $context eq 'datetime';
	return strftime "%Y-%m-%dT%H:%M%z",localtime $epoch if $context eq 'datetime';
	return strftime "%Y-%m",localtime $epoch if $context eq 'writepost';
	return strftime "%Y/%m/",localtime $epoch if $context eq 'permalink';
	return strftime $context, localtime $epoch if $context;
	return strftime $blog->{config}->{date_format},localtime $epoch;
}
# Recompute the total page count from the cached post count: ceiling
# division by per_page, with a floor of one page even for zero posts.
sub pagination_calc {
	my $per = $blog->{config}->{per_page};
	$npages = int(($nposts + $per - 1) / $per);
	$npages = 1 if $npages < 1;
}
# Render the index layout over the given list of post hashrefs and return
# the finished HTML.
sub get_index {
	$page->param(
		pagetitle => $blog->{name},
		INDEX     => 1,
		POSTS     => [@_],
	);
	return $page->output;
}
# Render index page $pagenum of the reverse-chronological post list,
# setting prev/next navigation flags for the template. The final page may
# hold fewer than per_page posts.
sub paginate {
	my $pagenum = shift;
	my $per = $blog->{config}->{per_page};
	my $offset = ($pagenum-1)*$per;
	my $offset_to = $offset+($per-1);
	$offset_to = $#posts if $offset_to > $#posts; # clamp to the last post
	$page->param(
		PAGINATED => 1,
		prevlink => ($pagenum>1? 1 : 0),       prevpage => $pagenum-1,
		nextlink => ($pagenum<$npages? 1 : 0), nextpage => $pagenum+1,
	);
	# The original recomputed the clamped upper bound inline instead of
	# using $offset_to; the two expressions are equivalent for integers.
	return get_index @posts[$offset..$offset_to];
}
# (Re)build the HTML::Template object from the base layout and seed it
# with the cached default parameters. Called before every request.
sub page_init {
	$page = HTML::Template->new(
		filename          => "$basedir/layout/base.html",
		die_on_bad_params => 0,
		utf8              => 1,
		global_vars       => 1,
	);
	$page->param(%defparams);
}
# Find the cached post matching the given year, month and slug. On a hit,
# load its fields into the page template and return true; undef on a miss.
sub get_post {
	my ($year,$month,$slug) = @_;
	for my $ref (@posts) {
		my %entry = %$ref;
		# Slug must match, and the post's YYYY-MM must match the URL.
		next unless $entry{slug} eq $slug
			and timefmt($entry{time},'writepost') eq "$year-$month";
		$page->param(pagetitle => "$entry{title} - $blog->{name}",%entry);
		return 1;
	}
	return undef;
}
# Find a cached static page by its filename identifier (the .md basename
# stored as fname by readpost). On a hit, load its fields into the page
# template and return true; undef on a miss.
sub get_page {
	my $pname = shift;
	for my $r (@pages) {
		my %cpage = %$r;
		# NOTE(review): this comparison was corrupted in the source dump;
		# reconstructed as {fname}, the key readpost derives from the filename.
		next unless $cpage{fname} eq $pname;
		$page->param(pagetitle => "$cpage{title} - $blog->{name}",%cpage);
		return 1;
	}
	return undef;
}
# Rebuild the in-memory post/page caches from disk, at most once an hour.
# Repopulates @posts (sorted newest-first), @pages, the nav list and the
# default template parameters, then recalculates pagination.
sub do_cache {
	return if $lastcache > (time - 3600); # cache is still fresh
	$lastcache = time;
	# Reset ALL derived state before recounting. The original used
	# `undef @posts and undef @pages`, which short-circuits (undef returns
	# false) so @pages was never cleared, and it never reset $nposts, so
	# every re-cache doubled the post count.
	@posts = ();
	@pages = ();
	$nposts = 0;
	opendir my $pdir, "$basedir/posts/" or die "Couldn't open posts directory $basedir/posts/";
	while(readdir $pdir) {
		next if /^\./ or /draft$/; # skip dotfiles and drafts
		say "Error reading post $_" and next unless readpost "$basedir/posts/$_";
		$nposts++;
	}
	closedir $pdir;
	# Schwartzian transform: sort posts newest-first by epoch time.
	@posts = map {$_->[1]} sort {$b->[0] <=> $a->[0]} map {[$_->{time},$_]} @posts;
	opendir my $gdir, "$basedir/pages/" or die "Couldn't open pages directory $basedir/pages/";
	while(readdir $gdir) {
		next if /^\./ or /draft$/;
		say "Error reading page $_" and next unless readpost("$basedir/pages/$_",2);
	}
	closedir $gdir;
	my @nav;
	# Nav: static pages first (keyed by filename), then configured links.
	# `keys $blog->{links}` (hashref autoderef) is a fatal error on
	# Perl >= 5.24; dereference explicitly.
	push @nav, {navname => $_->{title}, navurl => "$blog->{url}$_->{fname}",} for @pages;
	push @nav, {navname => $_, navurl => $blog->{links}->{$_},} for keys %{$blog->{links}};
	%defparams = (
		INDEX => 0, NAV => [@nav], url => $blog->{url}, recent => [@posts[0 .. ($#posts > 7? 7 : $#posts)]],
		about => $blog->{about}, author => $blog->{author}, name => $blog->{name}, tagline => $blog->{tagline}, keywords => $blog->{keywords},
		robots => $blog->{config}->{indexable}? '<meta name="ROBOTS" content="INDEX, FOLLOW" />' : '<meta name="ROBOTS" content="NOINDEX, NOFOLLOW" />',
	);
	pagination_calc;
}
# Build the caches and the template object once at startup.
do_cache;
page_init;
# Bind to loopback only; per the commit message this app sits behind a
# reverse proxy.
set server => '127.0.0.1';
set port => 10420;
# Rebuild the template object before every request so parameters set by a
# previous request never leak into the next one.
# NOTE(review): do_cache is never re-run per request, so its hourly expiry
# check can never trigger after startup — confirm whether that's intended.
hook 'before' => sub {
page_init;
};
# Index: a single page of posts, or page 1 of the paginated listing.
get '/' => sub {
return get_index @posts if $npages==1;
return paginate 1;
};
# Paginated index. Non-numeric or out-of-range ids fall through (pass);
# page 1 (or any id when there is only one page) redirects to /.
get '/page/:id' => sub {
pass unless params->{id} =~ /^[0-9]+$/ and params->{id} <= $npages;
return redirect '/' unless $npages > 1 and params->{id} > 1;
return paginate params->{id};
};
# Permalink for a single dated post: /wrote/YYYY/MM/slug.
get '/wrote/:yyyy/:mm/:slug' => sub {
pass unless params->{yyyy} =~ /^[0-9]{4}$/ and params->{mm} =~ /^(?:0[1-9]|1[0-2])$/ and params->{slug} =~ /^[a-z0-9\-]+$/i;
$page->param(ISPOST => 1);
get_post params->{yyyy}, params->{mm}, params->{slug} or pass;
return $page->output;
};
# Static pages (about, contact, ...) addressed by their .md basename.
get '/:extpage' => sub {
pass unless params->{extpage} =~ /^[a-z0-9\-]+$/i;
$page->param(ISPOST => 0);
get_page params->{extpage} or pass;
return $page->output;
};
start;

9
layout/base.html Normal file
View File

@ -0,0 +1,9 @@
<!-- Base layout: header/nav, then either the post loop (index views) or a
     single post body, then the footer which closes the grid. -->
<TMPL_INCLUDE NAME="head.inc">
<TMPL_IF NAME="INDEX">
<TMPL_LOOP NAME="POSTS">
<TMPL_INCLUDE NAME="post.inc">
</TMPL_LOOP>
<TMPL_ELSE>
<TMPL_INCLUDE NAME="post.inc">
</TMPL_IF>
<TMPL_INCLUDE NAME="foot.inc">

12
layout/foot.inc Normal file
View File

@ -0,0 +1,12 @@
<!-- Footer: prev/next pagination controls (paginated index views only),
     then closes the container/row divs opened in head.inc. -->
<TMPL_IF NAME="PAGINATED">
<div class='col col-sm-9'><div id='pagination'>
<TMPL_IF NAME="prevlink"><p style='float: left'><a href="/page/<TMPL_VAR NAME="prevpage">">Go forth in time (page <TMPL_VAR NAME="prevpage">)</a></p></TMPL_IF>
<TMPL_IF NAME="nextlink"><p style='float: right'><a href="/page/<TMPL_VAR NAME="nextpage">">Plunge further into posts of yore (page <TMPL_VAR NAME="nextpage">)</a></p></TMPL_IF>
</div></div>
</TMPL_IF>
</div>
</div>
<script src="//cdnjs.cloudflare.com/ajax/libs/jquery/2.1.3/jquery.min.js"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.2/js/bootstrap.min.js"></script>
</body>
</html>

63
layout/head.inc Normal file
View File

@ -0,0 +1,63 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta http-equiv="Content-Type" content="text/html;charset=UTF-8" />
<meta name="keywords" content="<TMPL_VAR NAME="keywords">" />
<meta name="author" content="<TMPL_VAR NAME="author">" />
<meta name="description" content="<TMPL_VAR NAME="tagline">" />
<meta property="og:title" content="<TMPL_VAR NAME="name">" />
<meta property="og:site_name" content="<TMPL_VAR NAME="name">" />
<meta property="og:description" content="<TMPL_VAR NAME="tagline">" />
<title><TMPL_VAR NAME="pagetitle"></title>
<TMPL_VAR NAME="robots">
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1" />
<link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/bootswatch/3.3.2/custom/bootstrap.min.css" />
<link rel="stylesheet" href="/main.css" />
</head>
<body>
<nav class="navbar navbar-static">
<div class="container">
<a class="navbar-toggle" data-toggle="collapse" data-target=".nav-collapse">
</a>
<!-- Fixed typo: class was "collase", which broke Bootstrap's collapse styling. -->
<div class="nav-collapse collapse">
<ul class="nav navbar-nav">
<li><a href="<TMPL_VAR NAME="url">">Home</a></li>
<TMPL_LOOP NAME="NAV">
<li><a href="<TMPL_VAR NAME="navurl">"><TMPL_VAR NAME="navname"></a></li>
</TMPL_LOOP>
</ul>
<ul class="nav navbar-right navbar-nav">
</ul>
</div>
</div>
</nav>
<header class="masthead">
<div class="container">
<div class="row">
<div class="col col-sm-6">
<h1><a href="<TMPL_VAR NAME="url">" title="<TMPL_VAR NAME="name">"><TMPL_VAR NAME="name"></a>
<p class="lead"><TMPL_VAR NAME="tagline"></p></h1>
</div>
</div>
</div>
</header>
<div class="container">
<div class="row">
<div class="col col-sm-3">
<div id="sidebar">
<ul class="nav nav-stacked">
<TMPL_IF NAME="ISPOST">
<li><h3 class="highlight">This Post</h3></li>
<!-- Fixed: this block contained leftover Template Toolkit ([% ... %])
     markup, which HTML::Template emits verbatim. Replaced with the
     date/tags fields the app passes in from the post front matter. -->
<li>Published: <TMPL_VAR NAME="date"></li>
<li>Tags: <TMPL_VAR NAME="tags"></li>
</TMPL_IF>
<li><h3 class="highlight">Posts</h3></li>
<TMPL_LOOP NAME="recent">
<li><a href="<TMPL_VAR NAME="permaurl">"><TMPL_VAR NAME="title"></a></li>
</TMPL_LOOP>
</ul>
</div>
</div>

11
layout/post.inc Normal file
View File

@ -0,0 +1,11 @@
<div class="col col-sm-9">
<div class="panel">
<a href="<TMPL_VAR NAME="permaurl">"><h1><TMPL_VAR NAME="title"></h1></a>
<TMPL_IF NAME="INDEX">
<TMPL_VAR NAME="excerpt">
<p><a href="<TMPL_VAR NAME="permaurl">">Read more</a></p>
<TMPL_ELSE>
<TMPL_VAR NAME="body">
</TMPL_IF>
</div>
</div>

5
pages/about.md Normal file
View File

@ -0,0 +1,5 @@
---
title: About Me
---
I'm some huge meganerd.

5
pages/contact.md Normal file
View File

@ -0,0 +1,5 @@
---
title: Contact
---
I'm some huge meganerd.

13
posts/post1.md Normal file
View File

@ -0,0 +1,13 @@
---
layout: post
title: Start as you mean to go on
date: 2012-02-12 20:09:00
tags: introduction, life, linux
---
So for the millionth time, I'm starting a blog. Hopefully, this time I'll actually have stuff to write about. As a bit of an introduction, I'm Maff. I'm a computer science student, sysadmin and server support drone from Scotland. I'm also a bit of a designer, a developer and a security dork. I also love videogames, music and a whole host of other things.
I'm starting this blog in part because I want somewhere to document my pursuits (software or otherwise), but also as a place to post about the funny things that happen in real life, and as a place to rant. I may also post about the software I write, from time to time - I'm a semi-active developer over at [github][1], and I'm the gentoo package maintainer for [Monitorix][2].
[1]: http://github.com/maffc/
[2]: http://monitorix.org

14
posts/post10.md Normal file
View File

@ -0,0 +1,14 @@
---
layout: post
title: File descriptor counting
date: 2012-07-01 04:03:00
tags: bash, linux, one-liners, servers
---
Yes I know I still haven't done part two of that mail server post. I'll get it done soon, I promise.
While chatting on IRC, someone mentioned that they were having a problem with a process going mental and creating a bunch of file descriptors in linux, eventually hitting the "max FD limit" linux has. They couldn't figure out which process it was and they couldn't find a program that would list a count of how many FDs a process has open. A few minutes later I'd thrown together this bash one-liner for him. I'm posting it here just in case someone else might find it useful.
`echo "$(for pid in $(ls -a /proc|egrep '^([0-9])*$'|sort -n 2>/dev/null); do if [ -e /proc/$pid/fd ]; then FHC=$(ls -l /proc/$pid/fd|wc -l); if [ $FHC -gt 0 ]; then PNAME="$(cat /proc/$pid/comm)"; echo "$FHC files opened by $pid ($PNAME)"; fi; fi; done)"|sort -r -n|head -n4`
To explain: It loops through every file/folder in /proc that is a process ID, then checks that there's a file descriptor folder. Then it gets a count of all the FDs that process currently holds, gets the process name and outputs how many file descriptors that process has open, as well as the process name. This is then reverse-sorted and cut down to only the four processes with the most FDs open.

47
posts/post11.md Normal file
View File

@ -0,0 +1,47 @@
---
layout: post
title: UnrealIRCd and SANICK
date: 2012-07-22 03:17:00
tags: C, development, irc, servers, unrealircd
---
I know, I still haven't done that mail server post. It's coming.
I just wanted to quickly write a post about a module I've just finished working on for UnrealIRCd 3.2.x.
Unreal has the `m_svsnick` module, which is specifically meant to be used by U-lined servers, such as IRC services. This module facilitates the forced changing of nicks. Other IRCd's have a command like that built-in and usable by regular opers, but not Unreal. The reasoning for this is that such a command could easily be abused, and the management of nicks is Services' job anyway. This is all fine and dandy as some IRC services packages such as Anope come with an SVSNICK command available in operserv by default, and other services packages may have 3rd-party modules to add such a command. However, what if you're running UnrealIRCd and Atheme 6.x? Unreal has no user-accessable SVSNICK command, and atheme doesn't have such a command either. That's where the subject of this post comes in.
I've just finished working on `m_sanick.c`, a module which adds a SANICK command, accessible by IRC opers, which force-changes a given user's nick.
### Download
You can download the module source here: [m_sanick.c][1]
Source is licensed under the GPL, original code by Mouse, some code used by this module was pulled from `m_svsnick.c`.
### Installation
* Download the module to `src/modules/m_sanick.c` in the Unreal3.2 source directory
* `./Config` and `make` unreal as normal, if you haven't already done so or if the source directory is completely clean
* Run `make custommodule MODULEFILE="m_sanick"`
* Copy the resulting .so (which will be at `src/modules/m_sanick.so`) to the Unreal modules directory. If you installed unreal as a package, it'll probably be `/usr/lib/unrealircd/modules`. If you compiled unreal yourself, the modules directory will be in the unreal configuration directory you set during configuration. This is usually inside the source directory or at `/etc/unrealircd/modules`.
* Add `loadmodule "/your/module/directory/m_sanick.so";` to your unrealircd.conf
* `unreal rehash` your server.
If you run an Unreal-based network with multiple servers, you'll need to repeat this process in full on every server.
### Usage
Once installed, run `/sanick TargetNick NewNick` where TargetNick is the nick you wish to change, and NewNick is the nick you wish to change the user's nick to.
If this does not work, `/quote sanick TargetNick NewNick` should work. In irssi, you will have to use this command instead. Alternatively, in irssi, you can use [dispatch.pl][2] or add sanick as an alias with the following command: `/alias sanick quote sanick`
### Disclaimer
The original source was written by Mouse, and was modified by myself in order for it to compile and function correctly. The "CHGNICK" function was removed. Parts of this module are copyright to Mouse and the UnrealIRCd dev team. I am not liable under any circumstance for any damage, service disruption or any other issues that may arise during the installation or use of this module. This module contains no malicious code and is freely downloadable and may be modified by anyone. This module is in use on at least one production IRC network, however no guarantees are made as to the module's stability. Use at your own risk. This module will not work on other IRCd's and may not work on older (3.2.8 or older) versions of UnrealIRCd.
This module is an unofficial third-party module and is unsupported. However if you experience issues while compiling or using this module, please [email me][3] and I'll try to help as best I can.
[1]: https://pub.maff.scot/code/m_sanick.c
[2]: http://scripts.irssi.org/scripts/dispatch.pl
[3]: https://maff.scot/contact

38
posts/post13.md Normal file
View File

@ -0,0 +1,38 @@
---
layout: post
title: Why I love lighttpd
date: 2012-09-04 00:35:00
---
It's a well-known fact that I absolutely adore lighttpd. Why, you ask?
I run lighttpd on a small cluster of web servers that serve a mix of static and dynamic content. One web server is dedicated to serving static content, and the other two are dedicated to serving dynamic content. The average memory use across all three is around 3.5MB. Average CPU use is almost zero.
But it's not just the low resource usage that I love about lighttpd. It's the downright gorgeous configuration syntax.
We recently began rolling out proper SSL to all of our client-accessible services at work. We're primarily an Apache shop at work, but one server runs lighttpd. Forcing all connections to run over SSL was as simple as:
server.modules += ("mod_redirect")
$HTTP["scheme"] == "http" {
$HTTP["host"] =~ "(.*)" {
url.redirect = ("^/(.*)" => "https://%1/$1")
}
}
And then there's the fact that managing vhosts is just brilliant. Adding a new vhost is as simple as `mkdir /var/www/new-vhost` and adding the following to my lighttpd config file:
$HTTP["host"] =~ "^new\.vhost\.net$" {
server.document-root = var.basedir + "/new-vhost"
accesslog.filename = var.logdir + "/access-new-vhost.log"
}
There's a lot more to love about lighttpd, though. I'll be updating this post with more tips and config snippets as I go.
Adding on to the earlier config snippet for forcing SSL across all vhosts, it might even be possible to have a single block for all vhosts (I haven't personally tested this, use at your own risk):
$HTTP["host"] =~ "(.*)" {
server.document-root = var.basedir + "/$1"
accesslog.filename = var.logdir + "/access-$1.log"
}
This would, in theory, accept any vhost as long as it has a corresponding folder in /var/www.

19
posts/post14.md Normal file
View File

@ -0,0 +1,19 @@
---
layout: post
title: Why I never (or rarely) place phone calls
date: 2013-01-19 22:35:00
---
If I want or need to contact someone, typically the first thing I check is my instant messenger. I use [Trillian][1], which keeps me logged into every IM service I use regularly (AIM, MSN and Gtalk). I also log into Skype on it if I need to. If the person I want to talk to isn't online or available there, I check Steam. If they're not available there, I text them. If I don't have their number or something, I message them on Facebook.
Calling people is, for me, an absolute last resort.
This is entirely because, while I have way more minutes and texts on my phone contract than I will reasonably use in a month, I grew up with SMS and instant messaging as my primary form of communication. I know that, if I send a message to someone, they're not required to act upon it immediately and can respond when they're ready to. Phone calls don't afford that luxury. If someone phones you, you have to decide to answer or decline the phone call right there. If you miss the call, you have to contact the person later to find out why they were calling you, unless they left a voicemail (and who does that?).
So when I want to phone someone, I have to take into consideration how busy that person is and how likely it is that they'll be available to both answer the phone and have a chat. I'm not sure if this is simply a problem I have or something, but I generally have no idea what time is a good time to call someone (which is why, in the years that she's been living away from home, I've barely called my sister even once.) I rarely even call my best friend for this reason.
This isn't to say I think phone calls are bad. Not at all, I'll defend the importance of phone calls to the ends of the earth. I simply think they could be improved. Skype (and other internet-based voice calling systems) give you a good indicator, by whether or not the person: A) is online, and B) has their status set to "Available".
Yes, I know I haven't posted anything in quite a while. I have quite a few draft posts, and for the most part I _just can't be bothered finishing them._ I will eventually, don't worry. I also have a lot more stuff I want to talk about.
[1]: http://trillian.im

20
posts/post15.md Normal file
View File

@ -0,0 +1,20 @@
---
layout: post
title: Gone, but never forgotten
date: 2013-02-22 00:29:00
tags: personal
---
Just a short post today.
![][1]
Yesterday (Wednesday the 20th), we took our dog to the vet for what we thought was a stomach bug. He's had diabetes since about July of last year, and can't have insulin without food, so we were concerned that he wasn't eating. He was diagnosed with Pancreatitis, which is treated through starving him for a few days until the pancreas heals a bit, however due to his diabetes, this could have an extremely adverse effect - he'd lost 1.5 stone over the course of a day or so already, and was in a tremendous amount of pain.
The vet informed us that, even if he pulled through and his pancreas got better, he'd be in a terribly fragile state and would have a long time to go before he regains weight - and there's a high chance of recurrence, meaning he could be in and out of the vet very often, and he was in so much pain due to it.
He was put to sleep yesterday afternoon. We were with him the whole time, and right to the end, he was wagging his tail because he was so happy to see us.
Here's to you, Max. You were a better friend and companion than anyone could ever have asked for.
[1]: https://pub.maff.scot/files/max.png

12
posts/post16.md Normal file
View File

@ -0,0 +1,12 @@
---
layout: post
title: On life and issues faced
date: 2014-09-22 13:12:00
tags: life, personal, short
---
I've been, to varying degrees, missing or unavailable for the better half of two months now. It's probably best I explain why.
For some time, I've been considerably depressed - an issue I've been dealing with for a number of years. Early this year I began seeking professional help to deal with it, with the hope that it would affect my personal and professional life less, as I realised my issues had compounded in essentially failing my last year of university. Unfortunately that hasn't been the case yet. I've been on anti-depressants for a few months now, however near the end of July I became severely depressed, withdrew socially and essentially dropped off the face of the internet. Thankfully, my friends have been very supportive and I've been improving in the past few weeks thanks to one in particular. To all those who worried, thank you. To all those who my depressive episode affected, I apologise.
I'd imagined this post might be a bit longer, but I suppose I had less to say than I thought. Ah well.

26
posts/post2.md Normal file
View File

@ -0,0 +1,26 @@
---
layout: post
title: The case for not using Arch
date: 2012-02-28 17:04:00
tags: arch, arch linux, linux, rant, sysadmin
---
I run a number of different servers from a number of different providers. I also run servers for friends. In this post, I'll be discussing one server in particular, a friend's server that's primarily used for IRC, web hosting and minecraft. This server runs Arch.
Now I'm going to start by saying I don't always make fantastic decisions and I'm not always known for making sure everything's perfect before rebooting a server. I have, in the past, screwed servers up. But this case was a little different.
Yesterday I log into the server to update a firewall rule, and discover that, because I've never used the nat table in iptables before on that box, the module was never loaded. Of course, the box has fantastic uptime and hasn't been rebooted in over 160 days. Now I'll stop for a second to mention how Arch handles kernel upgrades. When the kernel's upgraded, the previous kernel is left on the system, and all kernels older than that are removed completely. This includes the currently running kernel, and all modules. And this box hadn't been rebooted since kernel 3 was released. I have the box set up to be as zero-maintenance as possible. Emails whenever anything happens, cronjobs taking care of updates and removal of old packages from the cache, scripts to reboot any services that have crashed, gone down or have stopped responding. But as I discovered during a routine check a week prior, the Arch box hadn't been upgrading any of its packages due to a [ recent change][1] to the filesystem package. I manually started the update process, it informed me that it couldn't upgrade the system because of a file conflict (as the news article mentioned). No problem, I force-installed the update to filesystem and then upgraded the rest of the system as usual.
Now fast-forward back to yesterday, I'm telling my friend I need to reboot his server to apply the firewall rule. He gives the okay, and I reboot the server. Emails flood in saying that various services are down and that the server's offline, as always. But then a few minutes pass, there's no email that everything's back online again. I ping the server, nothing. Log into the server host's control panel, it's listed as online, so I VNC into it to see what the issue is. It can't find the harddrive and has thrown itself into a recovery terminal. What.
I figure a kernel change has messed with the partitions, so I boot the server into my usual recovery system - the gentoo live CD. Nothing seems out of place with grub or the fstab, so I look at the next culprit - the config file for mkinitcpio. It's blank.
Somehow, pacman disregarded the usual rules about protecting config files against being overwritten and messed with the config file, so the initramfs Arch needs in order to boot up properly was completely broken. No problem, I'll just chroot in -- OH WAIT. The Gentoo LiveCD runs kernel 2.6.31, and arch refuses to do /absolutely anything/ unless the kernel is new enough (In this case it had to be 2.6.32 or newer). The server host isn't exactly good, and doesn't provide any recent install media. Cue ten minutes of me googling for the kernel versions of everything in the media list, eventually settling for a Fedora 13 disk that had a recent-enough kernel. I get chrooted in, fix the mkinitcpio config and start generating the images. It complains that /dev isn't mounted. Okay. I look at the source for mkinitcpio, discover it's trying to access /dev/fd, which somehow doesn't exist. I symlink it over from /proc/self/fd and start it again. Everything seems to work, so I reboot.
This time, it recognises the hard drive, but the partition device names have changed. It's now seeing the disk as /dev/xvda. Bizarre, but has happened before. I boot the gentoo livecd again, edit grub's menu.lst and fstab, reboot back into Arch. It boots! But doesn't have a network connection. By this point, I'm close to pulling out my hair. I google around and find that because I included a bunch of xen drivers that mkinitcpio forgot before, everything's working a little differently. Specifically, it now works closer with Xen, and is trying to unplug everything at boot. No problem, I'll put `xen-emul-unplug=none` in the kernel boot line. Reboot the server and the harddrive device name has changed again. I boot into gentoo, change menu.lst and fstab to use /dev/sda again, and reboot.
Finally, the server's booted and has a network connection. And this is why I no longer install arch on any of my boxen. I can't trust it to reboot without throwing a hissy fit and killing itself.
**TL;DR Arch package issue sends me on an hour-long crusade to make a box boot again.**
[1]: http://www.archlinux.org/news/filesystem-upgrade-manual-intervention-required/

14
posts/post3.md Normal file
View File

@ -0,0 +1,14 @@
---
layout: post
title: s/keyboard/leopard/
date: 2012-04-08 03:06:00
tags: short
---
A while ago I installed [this script][1] which was inspired by [this xkcd comic][2]. I keep forgetting it's installed, but that itself makes for some hilarious double-take moments when browsing.
![][3]
[1]: http://userscripts.org/scripts/show/128626
[2]: http://xkcd.com/1031/
[3]: http://s.fag.wf/Ok.png

18
posts/post4.md Normal file
View File

@ -0,0 +1,18 @@
---
layout: post
title: The Journey of a Thousand Frustrations Begins with a Single Step
date: 2012-04-16 13:45:00
tags: bash, gentoo, linux, one-liners, rant
---
There are times when linux frustrates me. Not with issues specific to one distro, but software packages in general which are written for linux.
My prime example here is Watch. In Gentoo, it's included in the [procps package][1]. I recently was confused to find that my ident daemon, which I keep running because I'm an avid IRC user, was being flooded with traffic near constantly. Netstat told me it was because of two IRC servers I ran. So I logged in, checked netstat there, and sure enough, it was them. But I had no idea what process on the servers was actually creating the connections.
I assumed it would be the ircd itself, but I wanted proof before investigating further. No problem, I thought, I'll just run `watch --differences=cumulative -n 0.1 'lsof +M -i4|grep auth'`, which according to watch's manpage, would show what's changed in a command's output, rather than clearing the screen and displaying the output every .1 seconds. It did do this, in a way, however, because the program creating the connection to my ident server only kept that connection for a fraction of a second, the output vanished, and thanks to the unhelpful way that watch handles output which only shows up once, all I got was some white blocks showing that there had at one point been text there.
My solution? Throw together a bash one-liner which looped infinitely until the offending program was identified: `while true; do UNR=$(lsof -M -i4|grep auth); if [ -n "$UNR" ]; then echo "$UNR"; break; fi; done`
This did eventually work, and it turned out to be a runaway process on my personal box constantly creating connections to both IRC servers.
[1]: http://packages.gentoo.org/package/sys-process/procps

24
posts/post5.md Normal file
View File

@ -0,0 +1,24 @@
---
layout: post
title: Google Drive, or Why People Need To Stop Causing Unnecessary Drama
date: 2012-04-27 04:17:00
tags: drama, google, google drive, idiots, rant, terms of service
---
So there's been some ruckus lately because, following the launch of Google Drive, people took to the internet to compare the Terms & Conditions of Google Drive, to that of Dropbox and SkyDrive. The main point seems to be that people see Google Drive's T&Cs as being too unrestrictive. Case in point: [https://twitter.com/#!/jmacdonald/s...][1]
What I have a big problem with, is that people don't seem to have properly read the relevant portion of Google's terms and conditions. The full text of this portion is as follows:
> Some of our Services allow you to submit content. You retain ownership of any intellectual property rights that you hold in that content. In short, what belongs to you stays yours.
>
> When you upload or otherwise submit content to our Services, you give Google (and those we work with) a worldwide license to use, host, store, reproduce, modify, create derivative works (such as those resulting from translations, adaptations or other changes we make so that your content works better with our Services), communicate, publish, publicly perform, publicly display and distribute such content. The rights you grant in this license are for the limited purpose of operating, promoting, and improving our Services, and to develop new ones. This license continues even if you stop using our Services (for example, for a business listing you have added to Google Maps). Some Services may offer you ways to access and remove content that has been provided to that Service. Also, in some of our Services, there are terms or settings that narrow the scope of our use of the content submitted in those Services. Make sure you have the necessary rights to grant us this license for any content that you submit to our Services.
What this essentially states is "You retain full rights to all content you submit to a google service. When you do this, you grant us and our partners the right to store, copy, modify (This is for things like converting document formats), create derivative works (This is things like thumbnails, scaled/rotated/edits that you perform to photos in Picasa's photo editor), communicate (Transmit over the internet), publish (Display on a blog post, for instance), publicly display (Displaying things on the internet is a public display) and distribute the content (Transmitting your data through various google services). This license you grant us, only allows us to use the agreed rights when operating our services, promoting our services (Remember, this is not a google drive specific Terms & Conditions, this is for all of google's services, so this can include things like reviews on Google Maps), improving our services, and creating new services."
The main issue that people seem to have is they don't read the line that states google can only use the agreed rights when operating/improving/promoting their services, or when creating new services. It also states that there are "terms or settings" which further narrow down what google is actually allowed to do with your data, and chances are there is, or soon will be, a supplementary Terms & Conditions page specific to Google Drive.
The jist of this is, you retain full rights to all content you put on google drive. Google is only allowed to do things like convert your files between data formats (Word Document to RTF, for instance, or JPG to PNG), or transfer your data between their servers. The license includes stuff like that because you clicking "Convert my document to Word please" does not constitute you giving google the right to actually perform that action, they have to have the rights in the license to avoid any legal issues. Google does not, contrary to what everyone seems to believe, reserve the right to take your holiday photos from Google Drive and use it as the background on the main google home page.
Calm down, people. It's not nearly as bad as you claim.
[1]: https://twitter.com/#!/jmacdonald/status/195184740209401856/photo/1 "https://twitter.com/#!/jmacdonald/status/195184740209401856/photo/1"

86
posts/post6.md Normal file
View File

@ -0,0 +1,86 @@
---
layout: post
title: Virtual Servers and their providers
date: 2012-04-28 02:48:00
tags: benchmarks, bhost, rant, review, servers, simplexwebs, thrustvps, VPS
---
Since I figure it's bad form to have a blog and /only/ use it for ranting, here's a somewhat useful post. I've been with [a][1] [number][2] [of][3] [different][4] [hosts][5] over the past year or two, and I figure it'd be useful for others to know why I like or don't like them.
### **[SimplexWebs][6]**
SimplexWebs has been in the hosting business for quite a while now, and do enterprise webhosting, online radio hosting and domains, as well as VPS (Xen, powered by OnApp) hosting. I was lucky enough to grab one of their limited birthday sale servers, which gave you 256MB RAM, decent CPU speed, 20GB disk and 100GB monthly bandwidth, for £25 a year.
This server's basically been my primary server/main workhorse - despite initially purchasing it to run a VPN server, and has held up ridiculously well. Uptime has been fantastic - my server's only been down a few times over the last half a year or so, including migrations between their old SolusVM platform to their new OnApp platform, and including the recent downtime when they moved datacentres.
Server and network speed are _really_ good. I think this is one of the fastest servers I've ever used, and that includes the brief time I was with Linode. Support is fantastic, their team is always quick to respond and very helpful, and you definitely get the feeling that they actually care about their customers. Overall, they're one of the best hosts I've ever been with, and I don't feel I'll ever need to move to another host.
Here's some benchmarks:
Network speed:
~ wget cachefly.cachefly.net/100mb.test -O /dev/null
--2012-04-28 02:34:32-- http://cachefly.cachefly.net/100mb.test
Resolving cachefly.cachefly.net... 140.99.94.175
Connecting to cachefly.cachefly.net|140.99.94.175|:80... connected.
HTTP request sent, awaiting response... 200 OK
Length: 104857600 (100M) [application/octet-stream]
Saving to: `/dev/null'
100%[======>] 104,857,600 11.8M/s in 8.7s
2012-04-28 02:34:41 (11.5 MB/s) - `/dev/null' saved [104857600/104857600]
Disk I/O:
~ dd if=/dev/zero of=/tmp/disktest bs=64k count=16k
16384+0 records in
16384+0 records out
1073741824 bytes (1.1 GB) copied, 4.52632 s, 237 MB/s
### **[Bhost][7]**
Bhost are a UK-centric VPS provider, primarily dealing with OpenVZ servers although they've recently launched Xen PV plans. I should mention their servers are some of the cheapest you can get, the cheapest OpenVZ server being £4.70 (before VAT) and including 512MB RAM and 512MB burst RAM.
Their support is pretty great, considering they're a budget host, and the servers themselves are quite speedy. I do have some issues with the OS templates they have available - their Arch images are broken due to the host kernel being too old, although I'd say that's an Arch issue, and their Gentoo image is too old to actually use. Debian works fine though, so that's what I'm using. Overall, network and server speed is _really_ good. Uptime is fantastic, in the entire time I've been with them (a number of months now), my server's almost never been down. Definitely one of the most reliable hosts I've been with.
Here's some benchmarks, since I still have a server with them: Network speed:
# wget cachefly.cachefly.net/100mb.test -O /dev/null
--2012-04-28 02:30:40-- http://cachefly.cachefly.net/100mb.test
Resolving cachefly.cachefly.net... 205.234.175.175
Connecting to cachefly.cachefly.net|205.234.175.175|:80... connected.
HTTP request sent, awaiting response... 200 OK
Length: 104857600 (100M) [application/octet-stream]
Saving to: `/dev/null'
100%[======>] 104,857,600 10.2M/s in 9.8s
2012-04-28 02:30:49 (10.2 MB/s) - `/dev/null' saved [104857600/104857600]
Disk I/O:
# dd if=/dev/zero of=/tmp/disktest bs=64k count=16k
16384+0 records in
16384+0 records out
1073741824 bytes (1.1 GB) copied, 8.31944 s, 129 MB/s
### **[ThrustVPS][8]**
This was actually the first VPS host I'd ever been with. I found them after browsing [LowEndBox][9], and was interested by their very low pricing. Their website design made me feel like they were a good host, and so I bought a server there (Xen HVM, 512MB RAM, 1TB bandwidth, four cores). The setup time really should've indicated something, though, as it took somewhere between a day or two before my server actually got provisioned.
The speed of the server itself was reasonably good - network speed was great (Less ThrustVPS's doing and more that it's just a good datacentre in general), disk I/O and general performance was good. It never felt _speedy_, but it got the job done without delay. Uptime was, for the most part, good. It wasn't often down, and when it was, it was down for less than an hour at most. I quickly learned (through talking with others in their IRC channel - #thrustvps on OFTC) that this was a rarity, and that I was on one of their "good" nodes. Complaints of servers being down were oddly common. Their support was nothing really to write home about. It was neither good nor bad. Your issues did get resolved (eventually - by someone who usually had less than stellar English skills), but you never got the feel that they actually cared.
My main issues with ThrustVPS started in September, when I got an email stating that the node my server was on, had experienced some issues and they were investigating it. After it came back online, my server was left without internet connectivity. Nothing was wrong as far as I could see, so I emailed their support asking what was wrong. Eventually I was told that the connectivity issue had been resolved, though no explanation about what happened was ever given. 9 days later, the same thing happened again - Node went down, my server had no connectivity when it came back. I emailed support again, and was eventually told that I'd been DDOSed and that my server's IP had been null-routed. I explained to them that I had done nothing to warrant an attack, and asked if they were sure - as I could find absolutely no evidence that this was actually the case. Their support claimed that what they were saying was true, and refused to restore connectivity until I reinstalled my server's operating system. So I backed up the data on my server to a separate drive, quick-installed Debian, got them to restore connectivity and then sent my backup to another server.
In summary, while the servers and prices themselves are good, uptime is a bit unpredictable, support is unpleasant and they seem to have a habit of restricting your server without explanation, or without supplying any actual evidence to back up their claims. I wouldn't recommend them to anyone.
[1]: http://thrustvps.com
[2]: http://linode.com
[3]: http://bhost.net
[4]: http://simplexwebs.com
[5]: http://vps6.net
[6]: http://www.simplexwebs.com
[7]: http://www.bhost.net
[8]: http://www.thrustvps.com
[9]: http://lowendbox.com

46
posts/post8.md Normal file
View File

@ -0,0 +1,46 @@
---
layout: post
title: How to set up effective mail systems, pt. 1
date: 2012-06-11 00:31:00
tags: gentoo, guide, howto, linux, mail, postfix, servers, sysadmin
---
So a few months ago, I moved my primary mail hosting to my own VPS. Over the months since then, I've been tweaking and adding to my mail system, and I figure it'd help both myself and others if I documented what I've done, so I'll start with a list of all the software I use.
### Main Software
* **Gentoo** My VPS runs **Gentoo**. I personally prefer it over other distros, as it's both lighter and less screwed up.
* **postfix** I was originally going to use **Exim**, but found it strangely difficult to configure, plus **postfix** seems to universally have lower transaction times.
* **dovecot 2** I'm switching away from **Gmail**, so obviously the things I would've missed most would be things like push email and mail filters. Dovecot supports IMAP IDLE, and has sieve/managesieve, so it was easy to port my Gmail filters over.
* **saslauthd** While dovecot has its own SASL authentication, I prefer to use this when authenticating over SMTP. **EDIT:** I have since switched from **saslauthd** to Dovecot 2 for SASL authentication. Dovecot works well enough for it that I questioned why I actually needed **saslauthd**.
* **Mutt** Mutt is my primary MUA. I'll also be discussing configuration changes I made to **Mutt**, and ways I made it work more like **Gmail**.
### Extra Software
* **Spamassassin** This should be obvious. Does a great deal to cut down on spam. Plus, with a filter set up with **dovecot**'s sieve, I have a spam folder like before. **EDIT:** When I first published this post, I had only just set up **Postgrey** and had no idea what kind of impact it would have on incoming spam/junk mail. It had a massive impact - I haven't received a single spam email since. **Spamassassin** and **Amavisd** may actually be unnecessary when using **Postgrey** (Unless of course you plan to host mail for others, or if you receive a \*lot\* of spam).
* **Postgrey** This does a fantastic job of cutting down on spam.
* **Amavisd** Somewhat necessary for making **postfix** work with **SpamAssassin**, but also makes it easy to offload the antispam part of the mail system to another server. **Amavisd** can also be used for integrating antivirus systems into your mail scanning process, but I don't need that.
* **OpenDKIM** Used for signing outgoing mail with my DKIM key, and for validating incoming signed mail. This does a good job of ensuring that mail sent from my domain is actually coming from one of my servers.
* **policyd-spf** Originally, I used **pypolicyd-spf**, but it quite literally breaks every time there's an update to python, it's since been replaced with [this perl equivalent][1] which has never had any issues. This uses SPF to validate incoming mail, and ensure that the sending server is actually authorised to send mail for the given domain.
* **fail2ban** This isn't strictly part of the process I go through when setting mail up, but **fail2ban** helps cut down load a lot when bots are trying (and failing) to use a server as an unauthenticated relay.
In the near future, I'll write a second post detailing how I linked all this together, including config excerpts, but in this post I'm just discussing the software I used, as well as why I use each package. I'll also leave you with a list of extremely good tips.
* **Get an SSL certificate.** This makes setting secure mail up a **lot** easier, especially if you plan to send or receive mail remotely with stuff like IMAP.
* If you do get an SSL certificate, **disable or firewall unencrypted mail ports**. Obviously leave port 25 in place, but if you're sending or receiving mail remotely, disable the unencrypted IMAP/POP3 ports (143 for IMAP and 110 for POP3), and set your MTA up to only accept submission mail through 465.
* **Set up SPF records for your domain appropriately**. SPF does a good job of telling other mail systems who is or is not allowed to send mail for your domain.
* **Generate a DKIM key, and add it to your domain's DNS**. As with SPF, DKIM (DomainKeys Identified Mail) does a fantastic job of indicating to other mail systems whether an email is actually legitimate or not.
* **Use blacklists**. There's a large number of DNS-based blacklists which indicate whether a given IP address is known for sending spam or for attempting to compromise servers. This can go a long way in preventing spam.
* **Report any spam you receive.** Reporting received spam to places like **SpamCop** not only reduces the chance of you receiving similar spam in the future, but it helps others too. It helps identify servers that send spam (Contributing to blacklists), helps identify possible domains used for spam (again, contributing to blacklists), and can contribute to the accuracy of antispam systems like **SpamAssassin**.
* **Monitor your services extensively**. This is definitely a big one. It's not easy to monitor your server by looking at logs, and often unless you've got systems set up to email you when anything out of the ordinary happens, you just plain don't know what's happening with your server. Packages like [**Monitorix**][2] (disclaimer: I'm the package maintainer for **monitorix** on **Gentoo**), do a fantastic job of showing you at-a-glance whether anything abnormal is happening, so it's easy to see if and when your mail server is rejecting mail. This can also be great for indicating when you've misconfigured something.
* **Use external monitoring services**. Services like [**MXToolbox**][3] have free accounts, and you can use them to set up checks so that you get an email if your server's IP is on any IP blacklists. Services like [**Pingdom**][4] are also great for monitoring both uptime and external availability.
* **Make sure your forward and reverse DNS match** and that your reverse DNS is your primary mail domain (or what your server actually identifies itself as). This is definitely a good way of ensuring your mail isn't identified as spam.
One final thing to note, the guide and so on will discuss **my** current mail setup. This means it assumes you'll be using sockets for things like **Postgrey**. Please read everything carefully before making configuration changes to your own mail setup, as what works for me may not work for you.
That's all for now, but I'll be adding to this list, and writing a second post documenting how I actually set my mail system up, very soon. **EDIT:** Disregard that. Second part of this post will arrive _eventually_ but I am tremendously lazy.
[1]: https://launchpad.net/postfix-policyd-spf-perl/
[2]: http://www.monitorix.org/
[3]: http://mxtoolbox.com/
[4]: http://www.pingdom.com/

32
posts/post9.md Normal file
View File

@ -0,0 +1,32 @@
---
layout: post
title: I hate the term 'unlimited'.
date: 2012-06-26 01:40:00
tags: bullshit, fair use, mobile phones, rant, tariffs, ThreeUK, Virgin Mobile
---
Okay yes I know I said the second part of my mail server post was incoming, it still is, but for now I'd like to break out and complain about something again.
Virgin Media recently announced a new 'Premiere' tariff for Virgin Mobile. This tariff gives you unlimited calls to landlines, 2500 minutes to mobiles, unlimited texts and unlimited data, for as low as £21 for existing Virgin Media customers.
As soon as I read "all-you-can-eat data", I was skeptical. Very few mobile operators offer truly unlimited data (and I'm with Three UK, who are one of maybe two operators here that offer proper unlimited data - their policies state that customers have an effective data cap of 1TB per month). So I did what anyone else does, and I looked up their policies (which you can find [here][1]), which state that:
> **Unlimited Mobile internet for daily use by Pay Monthly customers:** We'll monitor how much mobile internet you use each month so that we can protect the network we use for all of our customers. **If we consider your use to be excessive**, we won't charge you any more, **but we may restrict your access to the mobile web depending on how often and how excessive we think your usage is. As a rule of thumb, we are likely to consider any usage over 1GB per month to be excessive.** Unlimited use is within the UK and is for your personal, non-commercial use only. It doesn't include making internet phone or video calls, peer to peer file sharing, using your phone as a modem, or while you are abroad.
Virgin Mobile are basically stating that they'll either restrict or throttle you if you use more than 1GB of data a month. That's hardly unlimited.
Then you've got the unlimited texts and landline minutes. But Maff, you may cry. How can Virgin screw up unlimited texts and landline calls? Quite easy. From the same document on virgin's website:
> **Unlimited texts:** Unlimited texts are **subject to a fair use allowance of 3000 texts per month**. If your usage exceeds this amount then **we reserve the right to charge you for the excessive element of your usage** at the text rate for other mobile networks for your tariff outlined in our Tariff Table. Unlimited use is within the UK and is for your personal, non-commercial use only. It **doesnt include texts to shortcode services, group text, or picture messages** and any of these uses will be charged at the text rate for other mobile networks for your tariff outlined in our Tariff Table.
>
> **Unlimited landline minutes:** Unlimited landline minutes are **subject to a fair use allowance of 9000 minutes per quarter (3 months).** If your usage exceeds this amount then **we reserve the right to charge you for the excessive element of your usage at the rate for calls to landlines** for your tariff outlined in our Tariff Table. Unlimited use is only for UK originating calls from the eligible Virgin Media phone to UK landlines (01,02 and 03 numbers). All other call types will be charged at the rates indicated in the Tariff Table and are not included in the allowance of minutes. Unlimited minutes are for your personal, non-commercial use only.
Right there. "Unlimited texts" actually means "3000 texts per month and we'll probably charge you standard rates if you go over that". "Unlimited landline minutes" actually means "9000 minutes per quarter year, and we'll probably charge you standard rates if you go over that".
Now it's fair to assume that most people won't use more than 3000 minutes or texts per month, but please, don't call that unlimited. It's misleading. As for Virgin's so-called unlimited data, I really don't understand how it can be called "unlimited data" when Virgin considers usage exceeding 1GB per month "excessive".
I really have nothing against Virgin, they were one of the first operators I was with (The other was BT CELLNET, now o2), and I'm sure their 3G network is great (I last used Virgin's network several years before 3G was even a thing in the UK so I have no idea, but the 2G network they had was good and coverage was always great), but it just rubs me up the wrong way when an ISP or mobile network provider claims that a service is "unlimited" - I did the same thing with BT when I discovered their top-tier "Unlimited 8mbps" broadband plan was actually subject to a fair use cap of 100GB. We didn't find this out until BT emailed us saying we'd hit 80GB that month and that we'd be billed for any usage past 100GB.
I'm happy to change this if I just found the wrong policies page on Virgin's website. If I pointed out the wrong policies and Virgin's "Premiere" tariff is subject to different policies, please point it out and I'll happily reflect that in this post.
[1]: http://www.virginmobile.com/vm/genericContent.do?contentId=our.service.footer.sm068#Our%20Fair%20Use%20Policies

5
public/bootstrap.css vendored Normal file

File diff suppressed because one or more lines are too long

6
public/bootstrap.min.js vendored Executable file

File diff suppressed because one or more lines are too long

52
public/key.asc Executable file
View File

@ -0,0 +1,52 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1
mQINBFSsb9oBEADaQRtH5POB2VAb0NJuWCmpy0Un9vwscVFYNJF5tnlNkHsS8tye
LGamkLkJ8MRgFcbaB0OxaEVxjVeNixCtKe/R4m7gqEcB6/ikqof+fEjVxVPjnDEl
dEjE82xAfYD9/4TJmeCXGAxgSpT/LQGwW8+EqvgLj+y++ROJvRxJH4kAQ+f27+px
/yURS81ZJMkUC21sAsVvHzjqhW5/1XPS2cZTgHrYjPMMkXWn6PI+N2oTNHzrV/00
R80kwHaIojXtxf3iG/HjD6Pkp9Y2Mt3Ewj6NdLdhCYksBoZkwRV+0bc0PbhrAc8o
sOIFvXAzhXhG929T3HbNa/dbtQ2zRRePJTn+1WF7iwAOc2MSvnvkQOpDCUXRtyKy
iiDXqbsnvZLwy4V6YRfJZiZZI5OY3jCK2iRDbhnqO5ZQKQ3cJ2ZXBt5JEkumjNg7
WdG/eMOQ244NUSAWNptSRvBaz2ZbrpCDGarcnQcsIppJmcoIJL0FScGHJesIzWzM
GhYMdlUwnuKYGRSWeWSICckuPuSaxXhUE4eApD6+nUasqntkm8D/s/v6Oj3a6cr+
b/vpzU9mbbeskfCc+S9c6h95G0yhJ1xFZZ900ZBdKZJMPWaxbnGhpTCwLGmk4Qpp
n748gpI3I9V6Y7YxBRtMfJdrmfn49YEe+7uG7/Ixg3OE0FFIVxazM4cacwARAQAB
tChNYXR0aGV3IENvbm5lbGx5IChNYWZmKSA8bWFmZkBtYWZmLnNjb3Q+iQI+BBMB
AgAoBQJUrG/aAhsDBQkDwmcABgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRB7
ikV9KCt8tos9D/oDMUavNt06fvC2eUZ5/MPDVn+9cTRTCpcIxP7jc/eQaAQ1QoMk
spIBz8rL6F+m0KlUGYJ4QLiiJG3b+ppj+PWFzWH1kZ8owGhubN/No1U+0If+oqBD
Lrcn6DiSqXMrgjktl3u9tiUZANK4jNtbjOioMIkVROsoxR/EBoXLf2258+Q9isx1
GfcIHv2KLwCdOHkUdE73TvozELClpmWDjZ2NQTb/OC5xR4infwEc2d1w+zxELyzS
NngRBH61B+t9fyOFxU+lFLwq/0H8MALdfn7M6rD6iYGOFw5W8l+wpkqI3BmTY/7y
eV7TbjUvAxA9Fc8G8XvIQJWEVJ4zmpKIyIzXYczyoHjqNP218rLEYoyTDJpJpXGp
6dfKx0jOZVo7o6Iy51CvFYkpSsg2Cus18Km0BXSxNhauHmNTnHPvuFAGwJVpfeh9
lijJ5qydD/yZ+Hc8cwCaPJvKZkjePvMrtJudRJf2NcAhCHn8LqgGXlZDGJVO1DFo
hxn4WnHvF9gA2WwTrdL8TBkP1eIRiXTG0WjpJQhEoA41P3zGw4dwtjp3Wkhogu1B
uGmZ0JNU0A65Ha1ouY6ncdeyJ9JrLfGKKTu/LGYzxVCgCU2qKfA8cLAa0DZEF1Xa
a39KO0fxT5/arDZcW1HnpWgPi9C/3ndjKp5y24JP0gSioz/k7YjiBaocVbkCDQRU
rG/aARAA133HSRcaPx4aX80xgwNYlf1hi4uBAgPA95Q/tlr9gV+CtU/fhjuk97EF
ZTpoj5vj+wi/awKc5/Bj2xVPqW8WqoAF3viMwzovRx70U8gkLQ3/zd/VuN2cQr6o
+PTuqRq3XUG8RGJRnn1NzCh2s5E/q+4nhlyYfFeiZc+uP4BIJFnrT6bIbeV6Eg9y
sVh5si2xwNNzjBIOAFicoi++r265mvWGFOrjA8y3mFElX3mJYUjp1sHLiUOReM96
tpstnVmOX2d7uxlpFn7yq3BLDsImBayl/PN1uGEfXuhw/EjydioHbituMkUkFIDo
ImacLdQnDxyAesItuvZC7IzgFU4O3ziUhR9elWXKcrjmavzuADywWpo/umMMvmt7
rgl6qzjZRvHlseVN48pOuhdVHONincYSKZXy5OuaOtIJUEy0ZFq5zRoNxIu3uyu+
uP++wHHHgD5cA0G783Cj4o8fITGubg7TJ/a4Dsekm/NbKObeyT1d2G0EDMjsx5Im
931IBAiTfNJLVNESedC+pFs5tF+1kcABOieYLMTe+mcTkwbGFtrx3przymEqrCYh
G3u7DYwDhoPm+ojNSEp7xWrRGzuXDA8ilei9t8oyHiBXwj3XK8y7MpVH3b0rU1UL
EW3uFzbD2/IxjXxuZpKkW2/M94mHd794i88+CpjysbN5BeDnHJEAEQEAAYkCJQQY
AQIADwUCVKxv2gIbDAUJA8JnAAAKCRB7ikV9KCt8to44D/4zIX4WTlewtk4y8OQX
YH+uwdzRTXLhwIgd+NvUGDExAfS3aZG+cWYW4zr5v2hEHlnFm+WbhgJZEhrxjGPk
N0yebfkXNSVAIx//uu4RTRihtolCEh7db6fwAbR8bOwI/dir1zrDF5RXZy82BJD+
5IZEuMUnI+yYMOI/7VcM4UHbjbYaNZqDhY2ZD/eWyNdOOPlAbUtOTmyjvLawZU5N
5KyfmsivprjsoSa6S+pjCXJSDe61UqNnli/jqP7h938J8tc27BLP+D4ADQ7qRRIi
86sdm/AvMtQRYuRAq+59hX7Hn/N4T/jTIwaSa+boaM0VZ56f3ElW74ZtCoYyE3O8
x+wDGyK8YQiFVZGHhS9OcqLnZjEteAR9KKbSZTiXan8k+jHV0pZFKjJZnc2kNWCm
ZVfiiOs6SXA93/sn9WolLpCcCUHiBUmy0uLQ+SMhHDOIroT5jS3oHinEvxusGqzP
hQ7dJL8FcgGptWZgqAr30FsZjM96FefHH95TDP59aoKg7duj01xADGJI6OBluJVh
rZqYIqKAVDtDpzAAf4XQksNp+N80lq8WL7hRiL3nGZJc4A/ZjZ+M9pqbwBN7lAxX
GIAbgizODsz32rUzW2p7M75SI16TCXQ+CnfTcaZiVM8TO5iaW+86wenXoSPTvCg9
UDQS87gXqrhrAVmPCRBVMLNS0Q==
=opB4
-----END PGP PUBLIC KEY BLOCK-----

80
public/keybase.txt Executable file
View File

@ -0,0 +1,80 @@
==================================================================
https://keybase.io/maff
--------------------------------------------------------------------
I hereby claim:
* I am an admin of https://maff.scot
* I am maff (https://keybase.io/maff) on keybase.
* I have a public key with fingerprint 174B D339 05B9 BDF0 E392 C0DB 7B8A 457D 282B 7CB6
To claim this, I am signing this object:
{
"body": {
"key": {
"fingerprint": "174bd33905b9bdf0e392c0db7b8a457d282b7cb6",
"host": "keybase.io",
"key_id": "7b8a457d282b7cb6",
"kid": "0101173cbe7c5c238b56d0b5342e502f42e3819eb3efe3758402ebe01309ffb1e86d0a",
"uid": "16ecf0e834040844c37cb87fccf07a00",
"username": "maff"
},
"revoke": {
"sig_ids": [
"6a735cf8438516b1145fc2108a2e9e56c7145826586afeb8c90c8884c7e28bf70f"
]
},
"service": {
"hostname": "maff.scot",
"protocol": "https:"
},
"type": "web_service_binding",
"version": 1
},
"ctime": 1420589618,
"expire_in": 157680000,
"prev": "558fab3b83e1744abd4f611b1c436e2802b35a82d44e49157f350913f3c63360",
"seqno": 14,
"tag": "signature"
}
with the aforementioned key, yielding the PGP signature:
-----BEGIN PGP MESSAGE-----
Version: Keybase OpenPGP v2.0.1
Comment: https://keybase.io/crypto
yMNwAnicbVJrTFRHGN2laOsiqTx+GCRpe6UCSt1779zH3BVpbEOMUmqlPkqx0Dt3
58LlsbvsS1ZcoYVoA1oQBFpKKr41oMFEC9KiQmktbNBoaoktbSB9xMhDRLRKfXQu
aZP+6Pz5MmfOd86Zb6Y39BmDyVhckeyPW7b9C2Ogu8Fj2NC6jS2mkN3qoyzFVB6e
Lapmy8ZOh1OzuSkLxYgcsgIg0TySkFWlMZBYhbYiEUGZ40UrC1kkKkigEqgcu0vv
IDJIduHlmp1gZJOlWQn6P/y82QOaoRlGBArCosIrLICIF6w04gHHYp5mVVIAZCSM
AFYxEHnI0SxGmGYALakqYjAkdJnIeWblGAErJCUEHM3RkOMUQNygqCoEFWWa1oku
7LTJBZiwC2RVpfwJlBN77XlYv71LyyaBXZQlgxJkEfCKCjkAeUZADMPxqsIyNJRZ
LGFeUESCQFbgoSCrGEFFohUIIaeImIVIFWmVeo9oEzevpsyK6xP6j/Nyl2J3k0AO
p91tV+z5BM5xux0uix7J7XPovK0YZf2jkIU0m5U8DunwYqdLs9soC0OYilvTJRmO
pXkoCQxMoHCRQ3PiLE1n8KIAabJ0H+wlkjwPVRkBBAEmr8vJyMqpAsMgRuGAQKLT
LAK8DFkrx2FOIv0q4GmJASpQBAAEfYQuXGiz65Ykp5xNNMnYbLLb48SUv6d7S7DB
aDLMnROk/y+Dad6Cf39dQuECw4HY8sRjbz/yZx6b0/LzoV/M2UFmY2tEVfxus/bp
g4WlvoV+10zvwRvBO1/U8ph1i9w1E2GHPG3e0YbUxE2XuOF8idoYdcbDV10N71+/
M+jIypha+enEt6d3hARXp8TMf3PLwI298k9zq9vlussxxm86zlYcp1B+5IOytc2N
F8/cpQM1wx88Gx418Li7NXXEWl7WotxqzDDH+47H7po31OA722nusfq3TZsSwwfG
XLe6hl+SDIGpz1+NuGxaMTLUjofmi3e0Uxf6r+xuWvl+xA/K3XO/1weu5ewZks69
e8jg/TI3UFpwp7XAfF5u33f0yeFO8MmV2sa7g6aRdypPno579MKazHvpe6o2N89E
l6L9LcrasILDS/3RhWdOvfVbIPBx1Mz6vso4x9KWTpoNbZZzn6vfFRnrr1i3eqNx
tD++uO7H2q9C+uI3pH3XtWRw0JH5+uTY1CQEXaFH6tacbzrRe+EPcUdS2Vh605M3
nv91SQvuDRvO3fTXh5PJptBXxqfubS3KuW2TLeXXud7V19meyenEsb4Vw/sj2yJi
D5YZr4FLNzc2XqyOHu9O57ZvTvP8WRJtTpNyJ06AksSa0Y82Z2ScTEoJe9m3rGrZ
axNHV42fNrMHVrExRR2LwcPP9rWllFbETd/fu7jE8vT+1STOWxkz0nF4dFHy43rn
wO2Qm997Kh+mfN3kS/0bNn7Awg==
=ldGo
-----END PGP MESSAGE-----
And finally, I am proving ownership of this host by posting or
appending to this document.
View my publicly-auditable identity here: https://keybase.io/maff
==================================================================

137
public/main.css Executable file
View File

@ -0,0 +1,137 @@
/* bootstrap 3 helpers */
.navbar-form input, .form-inline input {
width:auto;
}
/* end */
/* custom theme */
/* Page header: fixed minimum height so the layout doesn't jump. */
header {
min-height:140px;
margin-bottom:5px;
}
/* Sidebar pinning: only sticky on wide (desktop) viewports. */
@media (min-width: 979px) {
#sidebar.affix-top {
position: static;
}
#sidebar.affix {
position: fixed;
top: 0;
width:21.2%;
}
}
/* Default (narrow viewport): affix classes degrade to static flow. */
.affix,.affix-top {
position:static;
}
/* theme */
/* Base palette: grey text on a light grey page, pink (#e8449f) accents. */
body {
color:#828282;
background-color:#eee;
}
a,a:hover {
color:#e8449f;
text-decoration:none;
}
a:hover {
text-decoration: underline;
}
/* Pink accent bar/label helpers. */
.highlight-bk {
background-color:#e8449f;
padding:1px;
width:100%;
}
.highlight {
color:#e8449f;
}
h3.highlight {
padding-top:6px;
padding-bottom:14px;
border-bottom:2px solid #e8449f;
}
/* Navbar: flat pink bar, white links separated by darker-pink rules. */
.navbar {
background-color:#e8449f;
color:#ffffff;
border:0;
border-radius:0;
}
.navbar-nav > li > a {
color:#fff;
padding-left:20px;
padding-right:20px;
border-left:1px solid #d7338e;
}
.navbar-nav > li > a:hover, .navbar-nav > li > a:focus {
color:#666666;
}
.navbar-nav > li:last-child > a {
border-right:1px solid #d7338e;
}
.navbar-nav > .active > a, .navbar-nav > .active > a:hover, .navbar-nav > .active > a:focus {
color: #ffffff;
background-color:transparent;
}
.navbar-nav > .open > a, .navbar-nav > .open > a:hover, .navbar-nav > .open > a:focus {
color: #f0f0f0;
background-color:transparent;
opacity:.9;
border-color:#e8449f;
}
.nav .open > a {
border-color:#777777;
border-width:0;
}
.accordion-group {
border-width:0;
}
.dropdown-menu {
min-width: 250px;
}
.accordion-heading .accordion-toggle, .accordion-inner, .nav-stacked li > a {
padding-left:1px;
}
.caret {
color:#fff;
}
/* Mobile nav toggle: white, borderless, inverts on hover. */
.navbar-toggle {
color:#fff;
border-width:0;
}
.navbar-toggle:hover {
background-color:#fff;
}
/* Main content column sits to the right of the sidebar. */
.col-sm-9 {
float:right;
}
.pagination {
clear: both;
text-align: center;
}
.panel {
padding-left:27px;
padding-right:27px;
}
/* end theme */

71
scripts/old-rss-import-script Executable file
View File

@ -0,0 +1,71 @@
#!/usr/bin/env perl
# blog.Alba - no-frills markdown blogging system
package Maff::Blog::Import;
use strict;
use warnings;
use diagnostics;
use feature qw/say/;
use POSIX qw/strftime/;
use Date::Parse qw/str2time/;
use Encode qw/encode/;
use File::Spec;
use HTML::Entities qw/decode_entities/;
use HTML::WikiConverter;
use LWP::Simple qw/get/;
use URI::Escape qw/uri_unescape/;
use XML::RSS::Parser::Lite;
# Package-level state. NOTE(review): neither %blog nor %links is populated
# anywhere in this script -- presumably filled in by the config loaded below;
# confirm before removing.
our (%blog,%links);
# Base directory = directory containing this script (strip the script name).
# NOTE(review): the substitution strips a trailing "import.pl", but this file
# is installed as "old-rss-import-script" -- if so the regex never matches and
# $basedir keeps the full script path. Confirm the intended filename.
our $basedir=File::Spec->rel2abs(__FILE__);$basedir=~s/import\.pl$//;
my $cfg="$basedir/blog.conf";
# Load the config by executing it as Perl source via do().
# NOTE(review): blog.conf elsewhere in this repo is YAML (the main blogalba
# script loads it with YAML::LoadFile); do() expects Perl and would fail on
# YAML -- confirm which config format this legacy script expects.
unless(my $ret = do $cfg) {
die "Couldn't parse config file $cfg!" if $@;
die "Couldn't load config file $cfg!" unless defined $ret and $ret;
}
# Source feed for the import: the old Wordpress blog's RSS2 feed.
my $blogRSS="http://blog.maff.me.uk/feed/rss2";
sub writepost {
    # Convert one RSS item into a Markdown post file under post/.
    # $file is the output basename (the caller passes the item's index);
    # $post is a parsed feed item responding to ->get('title'|'category'|
    # 'pubDate'|'url'|'description'). Returns 1 on success, 0 on failure
    # (after warning), matching the original contract.
    my ($file,$post) = @_;
    my $tplpath = "$basedir/post/.tpl.md";
    # Three-arg open with a lexical handle (original used a 2-arg bareword open).
    open my $tpl, '<', $tplpath or warn "Couldn't open post $tplpath!" and return 0;
    my ($title,$tags) = ($post->get('title'), $post->get('category'));
    my $date = timefmt($post->get('pubDate'));
    # BUGFIX: the original wrote `my $slug = lc uri_unescape $1 if COND;`.
    # `my EXPR if COND` is documented as undefined behaviour (perlsyn) and
    # leaves $slug unset when the URL doesn't match, so every later s///
    # would warn/fail on undef. Declare first, then conditionally assign.
    my $slug = '';
    $slug = uri_unescape($1) if $post->get('url') =~ /^.*(?:(?:\d{2})\/)+([\w\d%\.\/\-_\(\)]+)$/i;
    $slug =~ s/[%\d\/\.,\(\)]/-/g;  # escape remnants, digits, punctuation -> dashes
    $slug =~ s/\-\-+/-/g;           # collapse dash runs
    $slug =~ s/\-*$//;              # strip trailing dashes
    $slug = lc $slug;               # (original lowercased twice; once suffices)
    my $html2md = HTML::WikiConverter->new(dialect => 'Markdown', encoding => 'utf8', escape_entities => 0);
    my $body = $html2md->html2wiki(encode 'utf8', decode_entities $post->get('description'));
    $body =~ s/^ / /gm; # NOTE(review): pattern and replacement appear identical here; original intent (indent fixup?) unclear -- confirm against the pre-import source
    my $md = "";
    # Fill template placeholders line by line; the |pbody| placeholder line is
    # dropped because the converted body is appended after the template.
    while (<$tpl>) {
        s/\|ptitle\|/$title/;
        s/\|ptags\|/$tags/;
        s/\|pdate\|/$date/;
        s/\|slug\|/$slug/;
        next if /\|pbody\|/;
        $md .= $_;
    }
    close $tpl;
    $md .= $body;
    open my $out, '>', "$basedir/post/$file.md" or warn "Couldn't open post $basedir/post/$file.md" and return 0;
    print {$out} $md;
    # Check close on the write handle: buffered write errors surface here.
    close $out or warn "Couldn't close $basedir/post/$file.md: $!" and return 0;
    say "..converted to Markdown and saved to post/$file.md";
    return 1;
}
sub timefmt {
    # Parse an RFC-822-ish date string (e.g. an RSS pubDate) and reformat it
    # as "YYYY-MM-DD HH:MM" in local time, the format the post template uses.
    # $context is accepted for interface compatibility but currently unused.
    my ($stamp, $context) = @_;
    my $epoch = str2time($stamp);
    return strftime('%Y-%m-%d %H:%M', localtime $epoch);
}
# --- main: fetch the old feed, then convert each item oldest-first ---
print "Fetching $blogRSS..";
my $feed_xml = get($blogRSS) or die "Couldn't fetch $blogRSS";
# Merge adjacent <category> tags into one comma-separated list so the
# template's tags field comes out as "a, b, c".
$feed_xml =~ s/<\/category><category>/, /g;
my $rss = XML::RSS::Parser::Lite->new;
$rss->parse($feed_xml);
# Keep decode_entities' list-operator scope identical to the original: it
# consumed the description concatenated with the trailing banner text.
say "found blog '" . $rss->get('title') . "'\nDescription: "
    . decode_entities($rss->get('description') . "\nConverting to blog.Alba Markdown using template $basedir/post/.tpl.md.\n");
# Items arrive newest-first; walk them from the end so posts are written in
# chronological order, numbered 1..N.
my $total = $rss->count();
for my $idx (1 .. $total) {
    my $entry = $rss->get($total - $idx);
    print 'Found post ' . $entry->get('title');
    writepost $idx, $entry;
}
say "Import complete. Please check all posts in $basedir/post/ and then run gen.pl to generate your new blog.";