Compare commits


1 Commit

| Author | SHA1 | Message | Date |
| :--- | :--- | :--- | :--- |
| Cadey Ratio | e697886ba7 | Create mara-sh0rk-of-justice-2020-12-28.markdown | 2020-12-28 15:42:11 -05:00 |
56 changed files with 963 additions and 2306 deletions


@@ -15,4 +15,24 @@ jobs:
      - uses: cachix/cachix-action@v7
        with:
          name: xe
      - run: nix build --no-link
      - run: nix-build
      - name: Log into GitHub Container Registry
        if: github.ref == 'refs/heads/main'
        run: echo "${{ secrets.CR_PAT }}" | docker login https://ghcr.io -u ${{ github.actor }} --password-stdin
      - name: Docker push
        if: github.ref == 'refs/heads/main'
        run: |
          docker load -i result
          docker tag xena/christinewebsite:latest ghcr.io/xe/site:$GITHUB_SHA
          docker push ghcr.io/xe/site
      - name: deploy
        if: github.ref == 'refs/heads/main'
        run: ./scripts/release.sh
        env:
          DIGITALOCEAN_ACCESS_TOKEN: ${{ secrets.DIGITALOCEAN_TOKEN }}
          MI_TOKEN: ${{ secrets.MI_TOKEN }}
          PATREON_ACCESS_TOKEN: ${{ secrets.PATREON_ACCESS_TOKEN }}
          PATREON_CLIENT_ID: ${{ secrets.PATREON_CLIENT_ID }}
          PATREON_CLIENT_SECRET: ${{ secrets.PATREON_CLIENT_SECRET }}
          PATREON_REFRESH_TOKEN: ${{ secrets.PATREON_REFRESH_TOKEN }}
          DHALL_PRELUDE: https://raw.githubusercontent.com/dhall-lang/dhall-lang/v17.0.0/Prelude/package.dhall

.github/workflows/rust.yml vendored Normal file (+25 lines)

@@ -0,0 +1,25 @@
name: Rust
on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
env:
  CARGO_TERM_COLOR: always
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Build
        run: cargo build --all
      - name: Run tests
        run: |
          cargo test
          (cd lib/jsonfeed && cargo test)
          (cd lib/patreon && cargo test)
        env:
          PATREON_ACCESS_TOKEN: ${{ secrets.PATREON_ACCESS_TOKEN }}
          PATREON_CLIENT_ID: ${{ secrets.PATREON_CLIENT_ID }}
          PATREON_CLIENT_SECRET: ${{ secrets.PATREON_CLIENT_SECRET }}
          PATREON_REFRESH_TOKEN: ${{ secrets.PATREON_REFRESH_TOKEN }}

Cargo.lock generated (987 lines changed)

File diff suppressed because it is too large.


@@ -1,6 +1,6 @@
[package]
name = "xesite"
version = "2.2.0"
version = "2.1.0"
authors = ["Christine Dodrill <me@christine.website>"]
edition = "2018"
build = "src/build.rs"
@@ -14,43 +14,40 @@ chrono = "0.4"
comrak = "0.9"
envy = "0.4"
glob = "0.3"
hyper = "0.14"
hyper = "0.13"
kankyo = "0.3"
lazy_static = "1.4"
log = "0.4"
mime = "0.3.0"
prometheus = { version = "0.11", default-features = false, features = ["process"] }
prometheus = { version = "0.10", default-features = false, features = ["process"] }
rand = "0"
reqwest = { version = "0.11", features = ["json"] }
sdnotify = { version = "0.1", default-features = false }
serde_dhall = "0.9.0"
serde_dhall = "0.8.0"
serde = { version = "1", features = ["derive"] }
serde_yaml = "0.8"
sitemap = "0.4"
thiserror = "1"
tokio = { version = "1", features = ["full"] }
tokio = { version = "0.2", features = ["macros"] }
tracing = "0.1"
tracing-futures = "0.2"
tracing-subscriber = { version = "0.2", features = ["fmt"] }
warp = "0.3"
warp = "0.2"
xml-rs = "0.8"
url = "2"
uuid = { version = "0.8", features = ["serde", "v4"] }
# workspace dependencies
cfcache = { path = "./lib/cfcache" }
go_vanity = { path = "./lib/go_vanity" }
jsonfeed = { path = "./lib/jsonfeed" }
mi = { path = "./lib/mi" }
patreon = { path = "./lib/patreon" }
[build-dependencies]
ructe = { version = "0.13", features = ["warp02"] }
ructe = { version = "0.12", features = ["warp02"] }
[dev-dependencies]
pfacts = "0"
serde_json = "1"
eyre = "0.6"
reqwest = { version = "0.10", features = ["json"] }
pretty_env_logger = "0"
[workspace]


@ -1,4 +1,4 @@
Copyright (c) 2017-2021 Christine Dodrill <me@christine.website>
Copyright (c) 2017-2020 Christine Dodrill <me@christine.website>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
@@ -16,4 +16,4 @@ freely, subject to the following restrictions:
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
3. This notice may not be removed or altered from any source distribution.


@@ -1,229 +0,0 @@
---
title: "</kubernetes>"
date: 2021-01-03
---
# &lt;/kubernetes&gt;
Well, since I posted [that last post](/blog/k8s-pondering-2020-12-31) I have had
an adventure. A good friend pointed out a server host that I had missed when I
was looking for other places to use, and now I have migrated my blog to this new
server. As of yesterday, I now run my website on a dedicated server in Finland.
Here is the story of my journey to migrate 6 years of cruft and technical debt
to this new server.
Let's talk about this goliath of a server. This server is an AX41 from Hetzner.
It has 64 GB of ram, a 512 GB NVMe drive, three 2 TB drives, and a Ryzen 3600. For
all practical concerns, this beast is beyond overkill and rivals my workstation
tower in everything but the GPU power. I have named it `lufta`, which is the
word for feather in [L'ewa](https://lewa.within.website/dictionary.html).
## Assimilation
For my server setup process, the first step is to assimilate it. In this step I
get a base NixOS install on it somehow. Since I was using Hetzner, I was able to
boot into a NixOS install image using the process documented
[here](https://nixos.wiki/wiki/Install_NixOS_on_Hetzner_Online). Then I decided
that it would also be cool to have this server use
[zfs](https://en.wikipedia.org/wiki/ZFS) as its filesystem to take advantage of
its legendary subvolume and snapshotting features.
So I wrote up a bootstrap system definition like the Hetzner tutorial said and
ended up with `hosts/lufta/bootstrap.nix`:
```nix
{ pkgs, ... }:
{
  services.openssh.enable = true;

  users.users.root.openssh.authorizedKeys.keys = [
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPg9gYKVglnO2HQodSJt4z4mNrUSUiyJQ7b+J798bwD9 cadey@shachi"
  ];

  networking.usePredictableInterfaceNames = false;
  systemd.network = {
    enable = true;
    networks."eth0".extraConfig = ''
      [Match]
      Name = eth0

      [Network]
      # Add your own assigned ipv6 subnet here!
      Address = 2a01:4f9:3a:1a1c::/64
      Gateway = fe80::1

      # optionally you can do the same for ipv4 and disable DHCP (networking.dhcpcd.enable = false;)
      Address = 135.181.162.99/26
      Gateway = 135.181.162.65
    '';
  };

  boot.supportedFilesystems = [ "zfs" ];

  environment.systemPackages = with pkgs; [ wget vim zfs ];
}
```
Then I fired up the kexec tarball and waited for the server to boot into a NixOS
live environment. A few minutes later I was in. I started formatting the drives
according to the [NixOS install
guide](https://nixos.org/manual/nixos/stable/index.html#sec-installation) with
one major difference: I added a `/boot` ext4 partition on the SSD. This allows
me to have the system root device on zfs. I added the disks to a `raidz1` pool
and created a few volumes. I also added the SSD as a log device so I get SSD
caching.
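
Roughly, the pool creation looked like the following sketch. The device paths,
pool name and dataset names here are hypothetical stand-ins, not the exact ones
used on `lufta`:

```shell
# three 2 TB drives in one raidz1 vdev, with a partition of the NVMe drive
# attached as a log device
zpool create -o ashift=12 rpool raidz1 /dev/sda /dev/sdb /dev/sdc log /dev/nvme0n1p3

# carve out a few volumes; legacy mountpoints let NixOS manage the mounting
zfs create -o mountpoint=legacy rpool/root
zfs create -o mountpoint=legacy rpool/home
```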
From there I installed NixOS as normal and rebooted the server. It booted
normally. I had a shiny new NixOS server in the cloud! I noticed that the server
had booted into NixOS unstable as opposed to NixOS 20.09 like my other nodes. I
thought "ah, well, that probably isn't a problem" and continued to the
configuration step.
[That's ominous...](conversation://Mara/hmm)
## Configuration
Now that the server was assimilated and I could SSH into it, the next step was
to configure it to run my services. While I was waiting for Hetzner to provision
my server I ported a bunch of my services over to Nixops services [a-la this
post](/blog/nixops-services-2020-11-09) in [this
folder](https://github.com/Xe/nixos-configs/tree/master/common/services) of my
configs repo.
Now that I had them, it was time to add this server to my Nixops setup. So I
opened the [nixops definition
folder](https://github.com/Xe/nixos-configs/tree/master/nixops/hexagone) and
added the metadata for `lufta`. Then I added it to my Nixops deployment with
this command:
```console
$ nixops modify -d hexagone -n hexagone *.nix
```
Then I copied over the autogenerated config from `lufta`'s `/etc/nixos/` folder
into
[`hosts/lufta`](https://github.com/Xe/nixos-configs/tree/master/hosts/lufta) and
ran a `nixops deploy` to add some other base configuration.
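
The deploy itself is then a single command against that deployment:

```console
$ nixops deploy -d hexagone
```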
## Migration
Once that was done, I started enabling my services and pushing configs to test
them. After I got to a point where I thought things would work I opened up the
Kubernetes console and started deleting deployments on my kubernetes cluster as
I felt "safe" to migrate them over. Then I saw the deployments come back. I
deleted them again and they came back again.
Oh, right. I enabled that one Kubernetes service that made it intentionally hard
to delete deployments. One clever set of scale-downs and kills later and I was
able to kill things with wild abandon.
I copied over the gitea data with `rsync` running in the kubernetes deployment.
Then I killed the gitea deployment, updated DNS and reran a whole bunch of gitea
jobs to resanify the environment. I did a test clone on a few of my repos and
then I deleted the gitea volume from DigitalOcean.
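
The copy step looked roughly like this sketch; it assumes rsync and an SSH
client exist inside the pod, that the pod lives in the same `apps` namespace
the ping job uses, and that the paths are as shown, so treat the names as
illustrative:

```shell
# push the gitea data volume from inside the pod to the new server
kubectl exec -n apps deploy/gitea -- \
  rsync -avz --progress /data/ root@lufta:/srv/gitea/
```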
Moving over the other deployments from Kubernetes into NixOS services was
somewhat easy, however I did need to repackage a bunch of my programs and static
sites for NixOS. I made the
[`pkgs`](https://github.com/Xe/nixos-configs/tree/master/pkgs) tree a bit more
fleshed out to compensate.
[Okay, packaging static sites in NixOS is beyond overkill, however a lot of them
need some annoyingly complicated build steps and throwing it all into Nix means
that we can make them reproducible and use one build system to rule them
all. Not to mention that when I need to upgrade the system, everything will
rebuild with new system libraries to avoid the <a
href="https://blog.tidelift.com/bit-rot-the-silent-killer">Docker bitrot
problem</a>.](conversation://Mara/hacker)
## Reboot Test
After a significant portion of the services were moved over, I decided it was
time to do the reboot test. I ran the `reboot` command and then...nothing.
My continuous ping test was timing out. My phone was blowing up with downtime
messages from NodePing. Yep, I messed something up.
I was able to boot the server back into a NixOS recovery environment using the
kexec trick, and from there I was able to prove the following (rough commands are sketched after this list):
- The zfs setup is healthy
- I can read some of the data I migrated over
- I can unmount and remount the ZFS volumes repeatedly
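
Those checks map to a handful of commands, roughly like this (pool and dataset
names are again hypothetical):

```shell
zpool import -f rpool        # import the pool inside the recovery environment
zpool status -v              # confirm the raidz1 vdev and log device are healthy
mount -t zfs rpool/root /mnt # mount a dataset and spot-check the migrated data
umount /mnt                  # unmount, then repeat to prove remounting works
```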
I was confused. This shouldn't be happening. After half an hour of
troubleshooting, I gave in and ordered an IPKVM to be installed in my server.
Once that was set up (and I managed to trick MacOS into letting me boot a .jnlp
web start file), I rebooted the server so I could see what error I was getting
on boot. I missed it the first time around, but on the second time I was able to
capture this screenshot:
![The error I was looking
for](https://cdn.christine.website/file/christine-static/blog/Screen+Shot+2021-01-03+at+1.13.05+AM.png)
Then it hit me. I did the install on NixOS unstable. My other servers use NixOS
20.09. I had downgraded zfs and the older version of zfs couldn't mount the
volume created by the newer version of zfs in read/write mode. One more trip to
the recovery environment later to install NixOS unstable in a new generation.
Then I switched my tower's default NixOS channel to the unstable channel and ran
`nixops deploy` to reactivate my services. After the NodePing uptime
notifications came in, I ran the reboot test again while looking at the console
output to be sure.
It booted. It worked. I had a stable setup. Then I reconnected to IRC and passed
out.
## Services Migrated
Here is a list of all of the services I have migrated over from my old dedicated
server, my kubernetes cluster and my dokku server:
- aerial -> discord chatbot
- goproxy -> go modules proxy
- lewa -> https://lewa.within.website
- hlang -> https://h.christine.website
- mi -> https://mi.within.website
- printerfacts -> https://printerfacts.cetacean.club
- xesite -> https://christine.website
- graphviz -> https://graphviz.christine.website
- idp -> https://idp.christine.website
- oragono -> ircs://irc.within.website:6697/
- tron -> discord bot
- withinbot -> discord bot
- withinwebsite -> https://within.website
- gitea -> https://tulpa.dev
- other static sites
Doing this migration is a bit of an archaeology project as well. I was
continuously discovering services that I had littered over my machines with very
poorly documented requirements and configuration. I hope that this move will
make the next migration of this kind a lot easier by comparison.
I still have a few other services to move over, however the ones that are left
are much more annoying to set up properly. This migration lets me deprovision
five servers, leaves me with this stupidly powerful goliath of a server to do
whatever I want with, and cuts my monthly server costs by over half.
I am very close to being able to turn off the Kubernetes cluster and use NixOS
for everything. A few services that are still on the Kubernetes cluster are
resistant to being nixified, so I may have to use the Docker containers for
that. I was hoping to be able to cut out Docker entirely, however we don't seem
to be that lucky yet.
Sure, there is some added latency with the server being in Europe instead of
Montreal, however if this ever becomes a practical issue I can always launch a
cheap DigitalOcean VPS in Toronto to act as a DNS server for my WireGuard setup.
Either way, I am now off Kubernetes for my highest traffic services. If services
of mine need to use the disk, they can now just use the disk. If I really care
about the data, I can add the service folders to the list of paths to back up to
`rsync.net` (I have a post about how this backup process works in the drafting
stage) via [borgbackup](https://www.borgbackup.org/).
Let's hope it stays online!
---
Many thanks to [Graham Christensen](https://twitter.com/grhmc), [Dave
Anderson](https://twitter.com/dave_universetf) and everyone else who has been
helping me along this journey. I would be lost without them.


@@ -1,178 +0,0 @@
---
title: "How to Set Up Borg Backup on NixOS"
date: 2021-01-09
series: howto
tags:
- nixos
- borgbackup
---
# How to Set Up Borg Backup on NixOS
[Borg Backup](https://www.borgbackup.org/) is an encrypted, compressed,
deduplicated backup program for multiple platforms including Linux. This
combined with the [NixOS options for configuring
Borg Backup](https://search.nixos.org/options?channel=20.09&show=services.borgbackup.jobs.%3Cname%3E.paths&from=0&size=30&sort=relevance&query=services.borgbackup.jobs)
allows you to backup on a schedule and restore from those backups when you need
to.
Borg Backup works with local files, remote servers and there are even [cloud
hosts](https://www.borgbackup.org/support/commercial.html) that specialize in
hosting your backups. In this post we will cover how to set up a backup job on a
server using [BorgBase](https://www.borgbase.com/)'s free tier to host the
backup files.
## Setup
You will need a few things:
- A free BorgBase account
- A server running NixOS
- A list of folders to back up
- A list of folders to NOT back up
First, we will need to create an SSH key for root to use when connecting to
BorgBase. Open a shell as root on the server and make a `borgbackup` folder in
root's home directory:
```shell
mkdir borgbackup
cd borgbackup
```
Then create an SSH key that will be used to connect to BorgBase:
```shell
ssh-keygen -f ssh_key -t ed25519 -C "Borg Backup"
```
Leave the SSH key passphrase empty, because at this time the automated Borg
Backup job doesn't allow the use of password-protected SSH keys.
Now we need to create an encryption passphrase for the backup repository. Run
this command to generate one using [xkcdpass](https://pypi.org/project/xkcdpass/):
```shell
nix-shell -p python39Packages.xkcdpass --run 'xkcdpass -n 12' > passphrase
```
[You can do whatever you want to generate a suitable passphrase, however
xkcdpass is proven to be <a href="https://xkcd.com/936/">more random</a> than
most other password generators.](conversation://Mara/hacker)
## BorgBase Setup
Now that we have the basic requirements out of the way, let's configure BorgBase
to use that SSH key. In the BorgBase UI click on the Account tab in the upper
right and open the SSH key management window. Click on Add Key and paste in the
contents of `./ssh_key.pub`. Name it after the hostname of the server you are
working on. Click Add Key and then go back to the Repositories tab in the upper
right.
Click New Repo and name it after the hostname of the server you are working on.
Select the key you just created to have full access. Choose the region of the
backup volume and then click Add Repository.
On the main page copy the repository path with the copy icon next to your
repository in the list. You will need this below. Attempt to SSH into the backup
repo in order to have ssh recognize the server's host key:
```shell
ssh -i ./ssh_key o6h6zl22@o6h6zl22.repo.borgbase.com
```
Then accept the host key and press control-c to terminate the SSH connection.
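
If you would rather skip the interactive step, `ssh-keyscan` can pre-seed
root's `known_hosts` instead; substitute your own repo hostname:

```shell
ssh-keyscan o6h6zl22.repo.borgbase.com >> ~/.ssh/known_hosts
```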
## NixOS Configuration
In your `configuration.nix` file, add the following block:
```nix
services.borgbackup.jobs."borgbase" = {
  paths = [
    "/var/lib"
    "/srv"
    "/home"
  ];
  exclude = [
    # very large paths
    "/var/lib/docker"
    "/var/lib/systemd"
    "/var/lib/libvirt"

    # temporary files created by cargo and `go build`
    "**/target"
    "/home/*/go/bin"
    "/home/*/go/pkg"
  ];
  repo = "o6h6zl22@o6h6zl22.repo.borgbase.com:repo";
  encryption = {
    mode = "repokey-blake2";
    passCommand = "cat /root/borgbackup/passphrase";
  };
  environment.BORG_RSH = "ssh -i /root/borgbackup/ssh_key";
  compression = "auto,lzma";
  startAt = "daily";
};
```
Customize the paths and exclude lists to your needs. Once you are satisfied,
rebuild your NixOS system using `nixos-rebuild`:
```shell
nixos-rebuild switch
```
And then you can fire off an initial backup job with this command:
```shell
systemctl start borgbackup-job-borgbase.service
```
Monitor the job with this command:
```shell
journalctl -fu borgbackup-job-borgbase.service
```
The first backup job will always take the longest to run. Every incremental
backup after that will get smaller and smaller. By default, the system will
create new backup snapshots every night at midnight local time.
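
You can confirm that schedule by asking systemd about the timer that NixOS
generates for the job:

```shell
systemctl list-timers | grep borgbackup
```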
## Restoring Files
To restore files, first figure out when you want to restore the files from.
NixOS includes a wrapper script for each Borg job you define. You can mount your
backup archive using this command:
```shell
mkdir mount
borg-job-borgbase mount o6h6zl22@o6h6zl22.repo.borgbase.com:repo ./mount
```
Then you can explore the backup (and with it each incremental snapshot) to
your heart's content and copy files out manually. You can look through each
folder and copy out what you need.
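
The wrapper also passes through borg's other subcommands, so you can list the
snapshots in the repository before deciding which one to dig into:

```shell
borg-job-borgbase list o6h6zl22@o6h6zl22.repo.borgbase.com:repo
```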
When you are done you can unmount it with this command:
```shell
borg-job-borgbase umount /root/borgbase/mount
```
---
And that's it! You can get more fancy with nixops using a setup [like
this](https://github.com/Xe/nixos-configs/blob/master/common/services/backup.nix).
In general though, you can get away with this setup. It may be a good idea to
copy down the encryption passphrase onto paper and put it in a safe place such
as a safety deposit box.
For more information about Borg Backup on NixOS, see [the relevant chapter of
the NixOS
manual](https://nixos.org/manual/nixos/stable/index.html#module-borgbase) or
[the list of borgbackup
options](https://search.nixos.org/options?channel=20.09&query=services.borgbackup.jobs)
that you can pick from.
I hope this is able to help.


@@ -1,78 +0,0 @@
---
title: hlang in 30 Seconds
date: 2021-01-04
series: h
tags:
- satire
---
# hlang in 30 Seconds
hlang (the h language) is a revolutionary new use of WebAssembly that enables
single-paradigm programming without any pesky state or memory accessing. The
simplest program you can use in hlang is the h world program:
```
h
```
When run in [the hlang playground](https://h.christine.website/play), you can
see its output:
```
h
```
To get more output, separate multiple h's by spaces:
```
h h h h
```
This returns:
```
h
h
h
h
```
## Internationalization
For internationalization concerns, hlang also supports the Lojbanic h `'`. You can
mix h and `'` to your heart's content:
```
' h '
```
This returns:
```
'
h
'
```
Finally an easy solution to your pesky Lojban internationalization problems!
## Errors
For maximum understandability, compiler errors are provided in Lojban. For
example this error tells you that you have an invalid character at the first
character of the string:
```
h: gentoldra fi'o zvati fe li no
```
Here is an interlinear gloss of that error:
```
h: gentoldra fi'o zvati fe li no
grammar-wrong existing-at second-place use-number 0
```
And now you are fully fluent in hlang, the most exciting programming language
since sliced bread.


@@ -1,160 +0,0 @@
---
title: Kubernetes Pondering
date: 2020-12-31
tags:
- k8s
- kubernetes
- soyoustart
- kimsufi
- digitalocean
- vultr
---
# Kubernetes Pondering
Right now I am using a freight train to mail a letter when it comes to hosting
my web applications. If you are reading this post on the day it comes out, then
you are connected to one of a few replicas of my site code running across at
least 3 machines in my Kubernetes cluster. This certainly _works_, however it is
not very ergonomic and ends up being quite expensive.
I think I made a mistake when I decided to put my cards into Kubernetes for my
personal setup. It made sense at the time (I was trying to learn Kubernetes and
I am cursed into learning by doing), however I don't think it is really the best
choice available for my needs. I am not a large company. I am a single person
making things that are really targeted for myself. I would like to replace this
setup with something more at my scale. Here are a few options I have been
exploring combined with their pros and cons.
Here are the services I currently host on my Kubernetes cluster:
- [this site](/)
- [my git server](https://tulpa.dev)
- [hlang](https://h.christine.website)
- A few personal services that I've been meaning to consolidate
- The [olin demo](https://olin.within.website/)
- The venerable [printer facts server](https://printerfacts.cetacean.club)
- A few static websites
- An IRC server (`irc.within.website`)
My goal in evaluating other options is to reduce cost and complexity. Kubernetes
is a very complicated system and requires a lot of hand-holding and rejiggering
to make it do what you want. NixOS, on the other hand, is a lot simpler overall
and I would like to use it for running my services where I can.
Cost is a huge factor in this. My Kubernetes setup is a money pit. I want to
prioritize cost reduction as much as possible.
## Option 1: Do Nothing
I could do nothing about this and eat the complexity as a cost of having this
website and those other services online. However over the year or so I've been
using Kubernetes I've had to do a lot of hacking at it to get it to do what I
want.
I set up the cluster using Terraform and Helm 2. Helm 3 is the current
(backwards-incompatible) release, and all of the things that are managed by Helm
2 have resisted being upgraded to Helm 3.
I'm going to say something slightly controversial here, but YAML is a HORRIBLE
format for configuration. I can't trust myself to write unambiguous YAML. I have
to reference the spec constantly to make sure I don't have an accidental
Norway/Ontario bug. I have a Dhall package that takes away most of the pain,
however it's not flexible enough to describe the entire scope of what my
services need to do (IE: pinging Google/Bing to update their indexes on each
deploy), and I don't feel like putting in the time to make it that flexible.
[This is the regex for determining what is a valid boolean value in YAML:
`y|Y|yes|Yes|YES|n|N|no|No|NO|true|True|TRUE|false|False|FALSE|on|On|ON|off|Off|OFF`.
This can bite you eventually. See the <a
href="https://hitchdev.com/strictyaml/why/implicit-typing-removed/">Norway
Problem</a> for more information.](conversation://Mara/hacker)
I have a tor hidden service endpoint for a few of my services. I have to use an
[unmaintained tool](https://github.com/kragniz/tor-controller) to manage these
on Kubernetes. It works _today_, but the Kubernetes operator API could change at
any time (or the API this uses could be deprecated and removed without much
warning) and leave me in the dust.
I could live with all of this, however I don't really think it's the best idea
going forward. There's a bunch of services that I added on top of Kubernetes
that are dangerous to upgrade and very difficult (if not impossible) to
downgrade when something goes wrong during the upgrade.
One of the big things that I have with this setup that I would have to rebuild
in NixOS is the continuous deployment setup. However I've done that before and
it wouldn't really be that much of an issue to do it again.
NixOS fixes all the jank I mentioned above by making my specifications not have
to include the version numbers of everything the system already provides. You
can _actually trust the package repos to have up to date packages_. I don't
have to go around and bump the versions of shims and pray they work, because
with NixOS I don't need them anymore.
## Option 2: NixOS on top of SoYouStart or Kimsufi
This is a doable option. The main problem here would be doing the provision
step. SoYouStart and Kimsufi (both are offshoot/discount brands of OVH) have
very little in terms of customization of machine config. They work best when you
are using "normal" distributions like Ubuntu or CentOS and leave them be. I
would want to run NixOS on it and would have to do several trial and error runs
with a tool such as [nixos-infect](https://github.com/elitak/nixos-infect) to
assimilate the server into running NixOS.
With this option I would get the most storage of any option by far. 4
TB is a _lot_ of space. However, SoYouStart and Kimsufi run decade-old hardware
at best. I would end up paying a lot for very little in the CPU department. For
most things I am sure this would be fine, however some of my services can have
CPU needs that might exceed what second-generation Xeons can provide.
SoYouStart and Kimsufi have weird kernel versions though. The last SoYouStart
dedi I used ran Fedora and was gimped with a grsec kernel by default. I had to
end up writing [this gem of a systemd service on
boot](https://github.com/Xe/dotfiles/blob/master/ansible/roles/soyoustart/files/conditional-kexec.sh)
which did a [`kexec`](https://en.wikipedia.org/wiki/Kexec) to boot into a
non-gimped kernel on boot. It was a huge hack and somehow worked every time. I
was still afraid to reboot the machine though.
Sure is a lot of ram for the cost though.
## Option 3: NixOS on top of Digital Ocean
This shares most of the problems as the SoYouStart or Kimsufi nodes. However,
nixos-infect is known to have a higher success rate on Digital Ocean droplets.
It would be really nice if Digital Ocean let you upload arbitrary ISO files and
go from there, but that is apparently not the world we live in.
8 GB of ram would be _way more than enough_ for what I am doing with these
services.
## Option 4: NixOS on top of Vultr
Vultr is probably my top pick for this. You can upload an arbitrary ISO file,
kick off your VPS from it and install it like normal. I have a little shell
server shared between some friends built on top of such a Vultr node. It works
beautifully.
The fact that it has the same cost as the Digital Ocean droplet just adds to the
perfection of this option.
## Costs
Here is the cost table I've drawn up while comparing these options:
| Option | Ram | Disk | Cost per month | Hacks |
| :--------- | :----------------- | :------------------------------------ | :-------------- | :----------- |
| Do nothing | 6 GB (4 GB usable) | Not really usable, volumes cost extra | $60/month | Very Yes |
| SoYouStart | 32 GB | 2x2TB SAS | $40/month | Yes |
| Kimsufi | 32 GB | 2x2TB SAS | $35/month | Yes |
| Digital Ocean | 8 GB | 160 GB SSD | $40/month | On provision |
| Vultr | 8 GB | 160 GB SSD | $40/month | No |
I think I am going to go with the Vultr option. I will need to modernize some of
my services to support being deployed in NixOS in order to do this, however I
think that I will end up creating a more robust setup in the process. At least I
will create a setup that allows me to more easily maintain my own backups rather
than just relying on DigitalOcean snapshots and praying like I do with the
Kubernetes setup.
Thanks farcaller, Marbles, John Rinehart and others for reviewing this post
prior to it being published.


@@ -40,7 +40,7 @@ The following table is a history of my software career by title, date and salary
| Software Engineer | August 24, 2016 | November 22, 2016 | 90 days | 21 days | $105,000/year | Terminated |
| Consultant | February 13, 2017 | November 13, 2017 | 273 days | 83 days | don't remember | Hired |
| Senior Software Engineer | November 13, 2017 | March 8, 2019 | 480 days | 0 days | $150,000/year | Voluntary quit |
| Senior Site Reliability Expert | May 6, 2019 | October 27, 2020 | 540 days | 48 days | CAD$115,000/year (about USD$ 80k and change) | Voluntary quit |
| Senior Site Reliability Expert | May 6, 2019 | October 27, 2020 | 540 days | 48 days | CAD$115,000/year (about USD$ 80k and change) | n/a |
| Software Designer | December 14, 2020 | *current* | n/a | n/a | CAD$135,000/year (about USD$ 105k and change) | n/a |
Even though I've been fired three times, I don't regret my career as it's been


@@ -1,38 +0,0 @@
---
title: New PGP Key Fingerprint
date: 2021-01-15
---
# New PGP Key Fingerprint
This morning I got an encrypted email, and in the process of trying to decrypt
it I discovered that I had _lost_ my PGP key. I have no idea how I lost it. As
such, I have created a new PGP key and replaced the one on my website with it.
I did the replacement in [this
commit](https://github.com/Xe/site/commit/66233bcd40155cf71e221edf08851db39dbd421c),
which you can see is verified with a subkey of my new key.
My new PGP key ID is `803C 935A E118 A224`. The key with the ID `799F 9134 8118
1111` should not be used anymore. Here are all the subkey fingerprints:
```
Signature key ....: 378E BFC6 3D79 B49D 8C36 448C 803C 935A E118 A224
created ....: 2021-01-15 13:04:28
Encryption key....: 8C61 7F30 F331 D21B 5517 6478 8C5C 9BC7 0FC2 511E
created ....: 2021-01-15 13:04:28
Authentication key: 7BF7 E531 ABA3 7F77 FD17 8F72 CE17 781B F55D E945
created ....: 2021-01-15 13:06:20
General key info..: pub rsa2048/803C935AE118A224 2021-01-15 Christine Dodrill (Yubikey) <me@christine.website>
sec> rsa2048/803C935AE118A224 created: 2021-01-15 expires: 2031-01-13
card-no: 0006 03646872
ssb> rsa2048/8C5C9BC70FC2511E created: 2021-01-15 expires: 2031-01-13
card-no: 0006 03646872
ssb> rsa2048/CE17781BF55DE945 created: 2021-01-15 expires: 2031-01-13
card-no: 0006 03646872
```
I don't really know what the proper way is to go about revoking an old PGP key.
It probably doesn't help that I don't use PGP very often. I think this is the
first encrypted email I've gotten in a year.
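
For the record, the textbook way to retire an old key is to publish a
revocation certificate, which unfortunately requires the very secret key that
went missing. A sketch for the less unlucky:

```shell
gpg --output revoke.asc --gen-revoke 0x799F913481181111
gpg --import revoke.asc
gpg --keyserver keys.openpgp.org --send-keys 0x799F913481181111
```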
Let's hope that I don't lose this key as easily!


@@ -1,332 +0,0 @@
---
title: Encrypted Secrets with NixOS
date: 2021-01-20
series: nixos
tags:
- age
- ed25519
---
# Encrypted Secrets with NixOS
One of the best things about NixOS is the fact that it's so easy to do
configuration management using it. The Nix store (where all your packages live)
has a huge flaw for secret management though: everything in the Nix store is
globally readable. This means that anyone logged into or running code on the
system could read any secret in the Nix store without any limits. This is
sub-optimal if your goal is to keep secret values secret. There have been a few
approaches to this over the years, but I want to describe how I'm doing it.
Here are my goals and implementation for this setup and how a few other secret
management strategies don't quite pan out.
At a high level I have these goals:
* It should be trivial to declare new secrets
* Secrets should never be globally readable in any useful form
* If I restart the machine, I should not need to take manual human action to
ensure all of the services come back online
* GPG should be avoided at all costs
As a side goal being able to roll back secret changes would also be nice.
The two biggest tools that offer a way to help with secret management on NixOS
that come to mind are NixOps and Morph.
[NixOps](https://github.com/NixOS/nixops) is a tool that helps administrators
operate NixOS across multiple servers at once. I use NixOps extensively in my
own setup. It calls deployment secrets "keys" and they are documented
[here](https://hydra.nixos.org/build/115931128/download/1/manual/manual.html#idm140737322649152).
At a high level they are declared like this:
```nix
deployment.keys.example = {
  text = "this is a super sekrit value :)";
  user = "example";
  group = "keys";
  permissions = "0400";
};
```
This will create a new secret in `/run/keys` that will contain our super secret
value.
[Wait, isn't `/run` an ephemeral filesystem? What happens when the system
reboots?](conversation://Mara/hmm)
Let's make an example system and find out! So let's say we have that `example`
secret from earlier and want to use it in a job. The job definition could look
something like this:
```nix
# create a service-specific user
users.users.example.isSystemUser = true;

# without this group the secret can't be read
users.users.example.extraGroups = [ "keys" ];

systemd.services.example = {
  wantedBy = [ "multi-user.target" ];
  after = [ "example-key.service" ];
  wants = [ "example-key.service" ];

  serviceConfig.User = "example";
  serviceConfig.Type = "oneshot";

  script = ''
    stat /run/keys/example
  '';
};
```
This creates a user called `example` and gives it permission to read deployment
keys. It also creates a systemd service called `example.service` and runs
[`stat(1)`](https://linux.die.net/man/1/stat) to show the permissions of the
service and the key file. It also runs as our `example` user. To avoid systemd
thinking our service failed, we're also going to mark it as a
[oneshot](https://www.digitalocean.com/community/tutorials/understanding-systemd-units-and-unit-files#the-service-section).
Altogether it could look something like
[this](https://gist.github.com/Xe/4a71d7741e508d9002be91b62248144a). Let's see
what `systemctl` has to report:
```console
$ nixops ssh -d blog-example pa -- systemctl status example
● example.service
Loaded: loaded (/nix/store/j4a8f6mnaw3v4sz7dqlnz95psh72xglw-unit-example.service/example.service; enabled; vendor preset: enabled)
Active: inactive (dead) since Wed 2021-01-20 20:53:54 UTC; 37s ago
Process: 2230 ExecStart=/nix/store/1yg89z4dsdp1axacqk07iq5jqv58q169-unit-script-example-start/bin/example-start (code=exited, status=0/SUCCESS)
Main PID: 2230 (code=exited, status=0/SUCCESS)
IP: 0B in, 0B out
CPU: 3ms
Jan 20 20:53:54 pa example-start[2235]: File: /run/keys/example
Jan 20 20:53:54 pa example-start[2235]: Size: 31 Blocks: 8 IO Block: 4096 regular file
Jan 20 20:53:54 pa example-start[2235]: Device: 18h/24d Inode: 37428 Links: 1
Jan 20 20:53:54 pa example-start[2235]: Access: (0400/-r--------) Uid: ( 998/ example) Gid: ( 96/ keys)
Jan 20 20:53:54 pa example-start[2235]: Access: 2021-01-20 20:53:54.010554201 +0000
Jan 20 20:53:54 pa example-start[2235]: Modify: 2021-01-20 20:53:54.010554201 +0000
Jan 20 20:53:54 pa example-start[2235]: Change: 2021-01-20 20:53:54.398103181 +0000
Jan 20 20:53:54 pa example-start[2235]: Birth: -
Jan 20 20:53:54 pa systemd[1]: example.service: Succeeded.
Jan 20 20:53:54 pa systemd[1]: Finished example.service.
```
So what happens when we reboot? I'll force a reboot in my hypervisor and we'll
find out:
```console
$ nixops ssh -d blog-example pa -- systemctl status example
● example.service
Loaded: loaded (/nix/store/j4a8f6mnaw3v4sz7dqlnz95psh72xglw-unit-example.service/example.service; enabled; vendor preset: enabled)
Active: inactive (dead)
```
The service is inactive. Let's see what the status of `example-key.service` is:
```console
$ nixops ssh -d blog-example pa -- systemctl status example-key
● example-key.service
Loaded: loaded (/nix/store/ikqn64cjq8pspkf3ma1jmx8qzpyrckpb-unit-example-key.service/example-key.service; linked; vendor preset: enabled)
Active: activating (start-pre) since Wed 2021-01-20 20:56:05 UTC; 3min 1s ago
Cntrl PID: 610 (example-key-pre)
IP: 0B in, 0B out
IO: 116.0K read, 0B written
Tasks: 4 (limit: 2374)
Memory: 1.6M
CPU: 3ms
CGroup: /system.slice/example-key.service
├─610 /nix/store/kl6lr3czkbnr6m5crcy8ffwfzbj8a22i-bash-4.4-p23/bin/bash -e /nix/store/awx1zrics3cal8kd9c5d05xzp5ikazlk-unit-script-example-key-pre-start/bin/example-key-pre-start
├─619 /nix/store/kl6lr3czkbnr6m5crcy8ffwfzbj8a22i-bash-4.4-p23/bin/bash -e /nix/store/awx1zrics3cal8kd9c5d05xzp5ikazlk-unit-script-example-key-pre-start/bin/example-key-pre-start
├─620 /nix/store/kl6lr3czkbnr6m5crcy8ffwfzbj8a22i-bash-4.4-p23/bin/bash -e /nix/store/awx1zrics3cal8kd9c5d05xzp5ikazlk-unit-script-example-key-pre-start/bin/example-key-pre-start
└─621 inotifywait -qm --format %f -e create,move /run/keys
Jan 20 20:56:05 pa systemd[1]: Starting example-key.service...
```
The service is blocked waiting for the keys to exist. We have to populate the
keys with `nixops send-keys`:
```console
$ nixops send-keys -d blog-example
pa> uploading key example...
```
Now when we check on `example.service`, we get the following:
```console
$ nixops ssh -d blog-example pa -- systemctl status example
● example.service
Loaded: loaded (/nix/store/j4a8f6mnaw3v4sz7dqlnz95psh72xglw-unit-example.service/example.service; enabled; vendor preset: enabled)
Active: inactive (dead) since Wed 2021-01-20 21:00:24 UTC; 32s ago
Process: 954 ExecStart=/nix/store/1yg89z4dsdp1axacqk07iq5jqv58q169-unit-script-example-start/bin/example-start (code=exited, status=0/SUCCESS)
Main PID: 954 (code=exited, status=0/SUCCESS)
IP: 0B in, 0B out
CPU: 3ms
Jan 20 21:00:24 pa example-start[957]: File: /run/keys/example
Jan 20 21:00:24 pa example-start[957]: Size: 31 Blocks: 8 IO Block: 4096 regular file
Jan 20 21:00:24 pa example-start[957]: Device: 18h/24d Inode: 27774 Links: 1
Jan 20 21:00:24 pa example-start[957]: Access: (0400/-r--------) Uid: ( 998/ example) Gid: ( 96/ keys)
Jan 20 21:00:24 pa example-start[957]: Access: 2021-01-20 21:00:24.588494730 +0000
Jan 20 21:00:24 pa example-start[957]: Modify: 2021-01-20 21:00:24.588494730 +0000
Jan 20 21:00:24 pa example-start[957]: Change: 2021-01-20 21:00:24.606495751 +0000
Jan 20 21:00:24 pa example-start[957]: Birth: -
Jan 20 21:00:24 pa systemd[1]: example.service: Succeeded.
Jan 20 21:00:24 pa systemd[1]: Finished example.service.
```
This means that NixOps secrets require _manual human intervention_ in order to
repopulate them on server boot. If your server went offline overnight due to an
unexpected issue, your services using those keys could be stuck offline until
morning. This is undesirable for a number of reasons. This plus the requirement
for the `keys` group (which at time of writing was undocumented) to be added to
service user accounts means that while they do work, they are not very
ergonomic.
[You can read secrets from files using something like
`deployment.keys.example.text = "${builtins.readFile ./secrets/example.env}"`,
but it is kind of a pain to have to do that. It would be better to just
reference the secrets by filesystem paths in the first
place.](conversation://Mara/hacker)
On the other hand [Morph](https://github.com/DBCDK/morph) gets this a bit
better. It is sadly even less documented than NixOps is, but it offers a similar
experience via [deployment
secrets](https://github.com/DBCDK/morph/blob/master/examples/secrets.nix). The
main differences that Morph brings to the table are taking paths to secrets and
allowing you to run an arbitrary command on the secret being uploaded. Secrets
are also able to be put anywhere on the disk, meaning that when a host reboots it
will come back up with the most recent secrets uploaded to it.
However, like NixOps, Morph secrets don't have the ability to be rolled back.
This means that if you mess up a secret value you better hope you have the old
information somewhere. This violates what you'd expect from a NixOS machine.
So given these examples, I thought it would be interesting to explore what the
middle path could look like. I chose to use
[age](https://github.com/FiloSottile/age) for encrypting secrets in the Nix
store as well as using SSH host keys to ensure that every secret is decryptable
at runtime by _that machine only_. If you get your hands on the secret
cyphertext, it should be unusable to you.
One of the harder things here will be keeping a list of all of the server host
keys. Recently I added a
[hosts.toml](https://github.com/Xe/nixos-configs/blob/master/ops/metadata/hosts.toml)
file to my config repo for autoconfiguring my WireGuard overlay network. It was
easy enough to add all the SSH host keys for each machine using a command like
this to get them:
[We will cover how this WireGuard overlay works in a future post.](conversation://Mara/hacker)
```console
$ nixops ssh-for-each -d hexagone -- cat /etc/ssh/ssh_host_ed25519_key.pub
firgu....> ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB8+mCR+MEsv0XYi7ohvdKLbDecBtb3uKGQOPfIhdj3C root@nixos
chrysalis> ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGDA5iXvkKyvAiMEd/5IruwKwoymC8WxH4tLcLWOSYJ1 root@chrysalis
lufta....> ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMADhGV0hKt3ZY+uBjgOXX08txBS6MmHZcSL61KAd3df root@lufta
keanu....> ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGDZUmuhfjEIROo2hog2c8J53taRuPJLNOtdaT8Nt69W root@nixos
```
age lets you use SSH keys for decryption, so I added these keys to my
`hosts.toml` and ended up with something like
[this](https://github.com/Xe/nixos-configs/commit/14726e982001e794cd72afa1ece209eed58d3f38#diff-61d1d8dddd71be624c0d718be22072c950ec31c72fded8a25094ea53d94c8185).
Now we can encrypt secrets on our own machine and safely put them in the Nix
store, because each target machine will be able to read them back at runtime
with a command like this:
```shell
age -d -i /etc/ssh/ssh_host_ed25519_key -o $dest $src
```
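
Encryption is the mirror image of that command: you encrypt to the host's SSH
public key as the recipient, so only that host's private key can open the
result. A sketch using `lufta`'s host key from the output above:

```shell
age -a \
  -r "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMADhGV0hKt3ZY+uBjgOXX08txBS6MmHZcSL61KAd3df" \
  -o example.env.age example.env
```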
From here it's easy to make a function that we can use for generating new
encrypted secrets in the Nix store. First we need to import the host metadata
from the toml file:
```nix
let
  cfg = config.within.secrets;
  metadata = lib.importTOML ../../ops/metadata/hosts.toml;

  mkSecretOnDisk = name:
    { source, ... }:
    pkgs.stdenv.mkDerivation {
      name = "${name}-secret";
      phases = "installPhase";
      buildInputs = [ pkgs.age ];
      installPhase =
        let key = metadata.hosts."${config.networking.hostName}".ssh_pubkey;
        in ''
          age -a -r "${key}" -o $out ${source}
        '';
    };
```
And then we can generate systemd oneshot jobs with something like this:
```nix
mkService = name:
  { source, dest, owner, group, permissions, ... }: {
    description = "decrypt secret for ${name}";
    wantedBy = [ "multi-user.target" ];

    serviceConfig.Type = "oneshot";

    script = with pkgs; ''
      rm -rf ${dest}
      ${age}/bin/age -d -i /etc/ssh/ssh_host_ed25519_key -o ${dest} ${
        mkSecretOnDisk name { inherit source; }
      }

      chown ${owner}:${group} ${dest}
      chmod ${permissions} ${dest}
    '';
  };
```
And from there we just need some [boring
boilerplate](https://github.com/Xe/nixos-configs/blob/master/common/crypto/default.nix#L8-L38)
to define a secret type. Then we declare the secret type and its invocation:
```nix
in {
  options.within.secrets = mkOption {
    type = types.attrsOf secret;
    description = "secret configuration";
    default = { };
  };

  config.systemd.services = let
    units = mapAttrs' (name: info: {
      name = "${name}-key";
      value = (mkService name info);
    }) cfg;
  in units;
}
```
And we have ourself a NixOS module that allows us to:
* Trivially declare new secrets
* Make secrets in the Nix store useless without the key
* Make every secret be transparently decrypted on startup
* Avoid the use of GPG
* Roll back secrets like any other configuration change
Declaring new secrets works like this (as stolen from [the service definition
for the website you are reading right now](https://github.com/Xe/nixos-configs/blob/master/common/services/xesite.nix#L35-L41)):
```nix
within.secrets.example = {
  source = ./secrets/example.env;
  dest = "/var/lib/example/.env";
  owner = "example";
  group = "nogroup";
  permissions = "0400";
};
```
Barring some kind of cryptographic attack against age, this should allow the
secrets to be stored securely. I am working on a way to make this more generic.
This overall approach was inspired by [agenix](https://github.com/ryantm/agenix)
but made more specific for my needs. I hope this approach will make it easy for
me to manage these secrets in the future.


@@ -1,12 +0,0 @@
---
title: "Tailscale on NixOS: A New Minecraft Server in Ten Minutes"
date: 2021-01-19
tags:
- link
redirect_to: https://tailscale.com/blog/nixos-minecraft/
---
# Tailscale on NixOS: A New Minecraft Server in Ten Minutes
Check out this post [on the Tailscale
blog](https://tailscale.com/blog/nixos-minecraft/)!


@@ -1,69 +0,0 @@
---
title: "Site Update: RSS Bandwidth Fixes"
date: 2021-01-14
tags:
- devops
- optimization
---
# Site Update: RSS Bandwidth Fixes
Well, so I think I found out where my Kubernetes cluster cost came from. For
context, this blog gets a lot of traffic. Since the last deploy, my blog has
served its RSS feed over 19,000 times. I have some pretty naive code powering
the RSS feed. It basically looked something like this:
- Write RSS feed content-type and beginning of feed
- For every post I have ever made, include its metadata and content
- Write end of RSS feed
This code was _fantastically simple_ to develop, however it was very expensive
in terms of bandwidth. When you add all this up, my RSS feed used to be more
than a _one megabyte_ response. It was also only getting larger as I posted more
content.
This is unsustainable, so I have taken multiple actions to try and fix this from
several angles.
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">Rationale: this is my
most commonly hit and largest endpoint. I want to try and cut down its size.
<br><br>current feed (everything): 1356706 bytes<br>20 posts: 177931 bytes<br>10
posts: 53004 bytes<br>5 posts: 29318 bytes <a
href="https://t.co/snjnn8RFh8">pic.twitter.com/snjnn8RFh8</a></p>&mdash; Cadey
A. Ratio (@theprincessxena) <a
href="https://twitter.com/theprincessxena/status/1349892662871150594?ref_src=twsrc%5Etfw">January
15, 2021</a></blockquote> <script async
src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
[Yes, that graph is showing in _gigabytes_. We're so lucky that bandwidth is
free on Hetzner.](conversation://Mara/hacker)
First I finally set up the site to run behind Cloudflare. The Cloudflare
settings are set very permissively, so your RSS feed reading bots or whatever
should NOT be affected by this change. If you run into any side effects as a
result of this change, [contact me](/contact) and I can fix it.
Second, I also now set cache control headers on every response. By default the
"static" pages are cached for a day and the "dynamic" pages are cached for 5
minutes. This should allow new posts to show up quickly as they have previously.
Thirdly, I set up
[ETags](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag) for the
feeds. Each of my feeds will send an ETag in a response header. Please use this
tag in future requests to ensure that you don't ask for content you already
have. From what I recall most RSS readers should already support this, however
I'll monitor the situation as reality demands.
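
You can verify the ETag behavior from a shell; the feed path here is an
assumption about the site's RSS route:

```shell
# grab the current tag
curl -sI https://christine.website/blog.rss | grep -i etag

# replay it; a compliant server answers 304 Not Modified with an empty body
curl -s -o /dev/null -w '%{http_code}\n' \
  -H 'If-None-Match: "<tag from above>"' https://christine.website/blog.rss
```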
Lastly, I adjusted the
[ttl](https://cyber.harvard.edu/rss/rss.html#ltttlgtSubelementOfLtchannelgt) of
the RSS feed so that compliant feed readers should only check once per day. I've
seen some feed readers request the feed up to every 5 minutes, which is very
excessive. Hopefully this setting will gently nudge them into behaving.
As a nice side effect I should have slightly lower ram usage on the blog server
too! Right now it's sitting at about 58 and a half MB of ram, however with fewer
copies of my posts sitting in memory this should fall by a significant amount.
If you have any feedback about this, please [contact me](/contact) or mention me
on Twitter. I read my email frequently and am notified about Twitter mentions
very quickly.


@@ -1,57 +1,23 @@
{ sources ? import ./nix/sources.nix, pkgs ? import sources.nixpkgs { } }:
with pkgs;
{ system ? builtins.currentSystem }:
let
rust = pkgs.callPackage ./nix/rust.nix { };
sources = import ./nix/sources.nix;
pkgs = import sources.nixpkgs { inherit system; };
callPackage = pkgs.lib.callPackageWith pkgs;
site = callPackage ./site.nix { };
srcNoTarget = dir:
builtins.filterSource
(path: type: type != "directory" || builtins.baseNameOf path != "target")
dir;
dockerImage = pkg:
pkgs.dockerTools.buildLayeredImage {
name = "xena/christinewebsite";
tag = "latest";
naersk = pkgs.callPackage sources.naersk {
rustc = rust;
cargo = rust;
};
dhallpkgs = import sources.easy-dhall-nix { inherit pkgs; };
src = srcNoTarget ./.;
contents = [ pkgs.cacert pkg ];
xesite = naersk.buildPackage {
inherit src;
doCheck = true;
buildInputs = [ pkg-config openssl git ];
remapPathPrefix = true;
};
config = {
Cmd = [ "${pkg}/bin/xesite" ];
Env = [ "CONFIG_FNAME=${pkg}/config.dhall" "RUST_LOG=info" ];
WorkingDir = "/";
};
};
config = stdenv.mkDerivation {
pname = "xesite-config";
version = "HEAD";
buildInputs = [ dhallpkgs.dhall-simple ];
phases = "installPhase";
installPhase = ''
cd ${src}
dhall resolve < ${src}/config.dhall >> $out
'';
};
in pkgs.stdenv.mkDerivation {
inherit (xesite) name;
inherit src;
phases = "installPhase";
installPhase = ''
mkdir -p $out $out/bin
cp -rf ${config} $out/config.dhall
cp -rf $src/blog $out/blog
cp -rf $src/css $out/css
cp -rf $src/gallery $out/gallery
cp -rf $src/signalboost.dhall $out/signalboost.dhall
cp -rf $src/static $out/static
cp -rf $src/talks $out/talks
cp -rf ${xesite}/bin/xesite $out/bin/xesite
'';
}
in dockerImage site

k8s/job.yml Normal file (+31 lines)

@@ -0,0 +1,31 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: christinewebsite-ping
  namespace: apps
  labels:
    app: christinewebsite
spec:
  template:
    spec:
      containers:
        - name: ping-bing
          image: xena/alpine
          command:
            - "busybox"
            - "wget"
            - "-O"
            - "-"
            - "-q"
            - "https://www.bing.com/ping?sitemap=https://christine.website/sitemap.xml"
        - name: ping-google
          image: xena/alpine
          command:
            - "busybox"
            - "wget"
            - "-O"
            - "-"
            - "-q"
            - "https://www.google.com/ping?sitemap=https://christine.website/sitemap.xml"
      restartPolicy: Never
  backoffLimit: 4


@@ -1,20 +0,0 @@
[package]
name = "cfcache"
version = "0.1.0"
authors = ["Christine Dodrill <me@christine.website>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
reqwest = { version = "0.11", features = ["json"] }
serde_json = "1"
serde = { version = "1", features = ["derive"] }
thiserror = "1"
tracing = "0.1"
tracing-futures = "0.2"
[dev-dependencies]
eyre = "0.6.5"
kankyo = "0.3"
tokio = { version = "1", features = ["full"] }


@@ -1,15 +0,0 @@
use eyre::Result;
#[tokio::main]
async fn main() -> Result<()> {
kankyo::init()?;
let key = std::env::var("CF_TOKEN")?;
let zone_id = std::env::var("CF_ZONE_ID")?;
let cli = cfcache::Client::new(key, zone_id)?;
cli.purge(vec!["https://christine.website/.within/health".to_string()])
.await?;
Ok(())
}


@@ -1,64 +0,0 @@
use reqwest::header;
use tracing::instrument;
pub type Result<T = ()> = std::result::Result<T, Error>;
#[derive(thiserror::Error, Debug)]
pub enum Error {
#[error("json error: {0}")]
Json(#[from] serde_json::Error),
#[error("request error: {0}")]
Request(#[from] reqwest::Error),
#[error("invalid header value: {0}")]
InvalidHeaderValue(#[from] reqwest::header::InvalidHeaderValue),
}
pub struct Client {
zone_id: String,
cli: reqwest::Client,
}
static USER_AGENT: &str = concat!(
"xesite ",
env!("CARGO_PKG_NAME"),
"/",
env!("CARGO_PKG_VERSION")
);
impl Client {
pub fn new(api_key: String, zone_id: String) -> Result<Self> {
let mut headers = header::HeaderMap::new();
headers.insert(
header::AUTHORIZATION,
header::HeaderValue::from_str(&format!("Bearer {}", api_key))?,
);
let cli = reqwest::Client::builder()
.user_agent(USER_AGENT)
.default_headers(headers)
.build()?;
Ok(Self { zone_id, cli })
}
#[instrument(skip(self), err)]
pub async fn purge(&self, urls: Vec<String>) -> Result {
#[derive(serde::Serialize)]
struct Files {
files: Vec<String>,
}
self.cli
.post(&format!(
"https://api.cloudflare.com/client/v4/zones/{}/purge_cache",
self.zone_id
))
.json(&Files { files: urls })
.send()
.await?
.error_for_status()?;
Ok(())
}
}


@@ -1,6 +1,6 @@
[package]
name = "go_vanity"
version = "0.2.0"
version = "0.1.0"
authors = ["Christine Dodrill <me@christine.website>"]
edition = "2018"
build = "src/build.rs"
@@ -8,8 +8,8 @@ build = "src/build.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
mime = "0.3"
warp = "0.3"
mime = "0.3.0"
warp = "0.2"
[build-dependencies]
ructe = { version = "0.13", features = ["warp02"] }
ructe = { version = "0.12", features = ["warp02"] }


@@ -1,12 +1,12 @@
use crate::templates::RenderRucte;
use warp::{http::Response, Rejection, Reply};
use crate::templates::{RenderRucte};
include!(concat!(env!("OUT_DIR"), "/templates.rs"));
pub async fn gitea(pkg_name: &str, git_repo: &str, branch: &str) -> Result<impl Reply, Rejection> {
Response::builder().html(|o| templates::gitea_html(o, pkg_name, git_repo, branch))
pub async fn gitea(pkg_name: &str, git_repo: &str) -> Result<impl Reply, Rejection> {
Response::builder().html(|o| templates::gitea_html(o, pkg_name, git_repo))
}
pub async fn github(pkg_name: &str, git_repo: &str, branch: &str) -> Result<impl Reply, Rejection> {
Response::builder().html(|o| templates::github_html(o, pkg_name, git_repo, branch))
pub async fn github(pkg_name: &str, git_repo: &str) -> Result<impl Reply, Rejection> {
Response::builder().html(|o| templates::github_html(o, pkg_name, git_repo))
}


@@ -1,11 +1,11 @@
@(pkg_name: &str, git_repo: &str, branch: &str)
@(pkg_name: &str, git_repo: &str)
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
<meta name="go-import" content="@pkg_name git @git_repo">
<meta name="go-source" content="@pkg_name @git_repo @git_repo/src/@branch@{/dir@} @git_repo/src/@branch@{/dir@}/@{file@}#L@{line@}">
<meta name="go-source" content="@pkg_name @git_repo @git_repo/src/master@{/dir@} @git_repo/src/master@{/dir@}/@{file@}#L@{line@}">
<meta http-equiv="refresh" content="0; url=https://godoc.org/@pkg_name">
</head>
<body>


@@ -1,11 +1,11 @@
@(pkg_name: &str, git_repo: &str, branch: &str)
@(pkg_name: &str, git_repo: &str)
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
<meta name="go-import" content="@pkg_name git @git_repo">
<meta name="go-source" content="@pkg_name @git_repo @git_repo/tree/@branch@{/dir@} @git_repo/blob/@branch@{/dir@}/@{file@}#L@{line@}">
<meta name="go-source" content="@pkg_name @git_repo @git_repo/tree/master@{/dir@} @git_repo/blob/master@{/dir@}/@{file@}#L@{line@}">
<meta http-equiv="refresh" content="0; url=https://godoc.org/@pkg_name">
</head>
<body>


@@ -1,7 +1,7 @@
use std::default::Default;
use errors::*;
use feed::{Attachment, Author, Feed};
use feed::{Feed, Author, Attachment};
use item::{Content, Item};
/// Feed Builder
@@ -160,7 +160,7 @@ impl ItemBuilder {
match self.content {
Some(Content::Text(t)) => {
self.content = Some(Content::Both(i.into(), t));
}
},
_ => {
self.content = Some(Content::Html(i.into()));
}
@@ -172,10 +172,10 @@ impl ItemBuilder {
match self.content {
Some(Content::Html(s)) => {
self.content = Some(Content::Both(s, i.into()));
}
},
_ => {
self.content = Some(Content::Text(i.into()));
}
},
}
self
}
@@ -197,7 +197,8 @@ impl ItemBuilder {
date_modified: self.date_modified,
author: self.author,
tags: self.tags,
attachments: self.attachments,
attachments: self.attachments
})
}
}


@@ -1,6 +1,7 @@
use serde_json;
error_chain! {
error_chain!{
foreign_links {
Serde(serde_json::Error);
}
}


@@ -1,7 +1,7 @@
use std::default::Default;
use builder::Builder;
use item::Item;
use builder::Builder;
const VERSION_1: &'static str = "https://jsonfeed.org/version/1";
@@ -145,9 +145,9 @@ pub struct Hub {
#[cfg(test)]
mod tests {
use super::*;
use serde_json;
use std::default::Default;
use super::*;
#[test]
fn serialize_feed() {
@@ -168,16 +168,18 @@ mod tests {
#[test]
fn deserialize_feed() {
let json =
r#"{"version":"https://jsonfeed.org/version/1","title":"some title","items":[]}"#;
let json = r#"{"version":"https://jsonfeed.org/version/1","title":"some title","items":[]}"#;
let feed: Feed = serde_json::from_str(&json).unwrap();
let expected = Feed {
version: "https://jsonfeed.org/version/1".to_string(),
title: "some title".to_string(),
items: vec![],
..Default::default()
title: "some title".to_string(),
items: vec![],
..Default::default()
};
assert_eq!(feed, expected);
assert_eq!(
feed,
expected
);
}
#[test]
@ -206,7 +208,10 @@ mod tests {
size_in_bytes: Some(1),
duration_in_seconds: Some(1),
};
assert_eq!(attachment, expected);
assert_eq!(
attachment,
expected
);
}
#[test]
@ -224,15 +229,17 @@ mod tests {
#[test]
fn deserialize_author() {
let json =
r#"{"name":"bob jones","url":"http://example.com","avatar":"http://img.com/blah"}"#;
let json = r#"{"name":"bob jones","url":"http://example.com","avatar":"http://img.com/blah"}"#;
let author: Author = serde_json::from_str(&json).unwrap();
let expected = Author {
name: Some("bob jones".to_string()),
url: Some("http://example.com".to_string()),
avatar: Some("http://img.com/blah".to_string()),
};
assert_eq!(author, expected);
assert_eq!(
author,
expected
);
}
#[test]
@ -255,7 +262,10 @@ mod tests {
type_: "some-type".to_string(),
url: "http://example.com".to_string(),
};
assert_eq!(hub, expected);
assert_eq!(
hub,
expected
);
}
#[test]

View File

@ -1,11 +1,11 @@
use std::default::Default;
use std::fmt;
use std::default::Default;
use feed::{Author, Attachment};
use builder::ItemBuilder;
use feed::{Attachment, Author};
use serde::de::{self, Deserialize, Deserializer, MapAccess, Visitor};
use serde::ser::{Serialize, SerializeStruct, Serializer};
use serde::ser::{Serialize, Serializer, SerializeStruct};
use serde::de::{self, Deserialize, Deserializer, Visitor, MapAccess};
/// Represents the `content_html` and `content_text` attributes of an item
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
@ -61,8 +61,7 @@ impl Default for Item {
impl Serialize for Item {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
where S: Serializer
{
let mut state = serializer.serialize_struct("Item", 14)?;
state.serialize_field("id", &self.id)?;
@ -79,15 +78,15 @@ impl Serialize for Item {
Content::Html(ref s) => {
state.serialize_field("content_html", s)?;
state.serialize_field("content_text", &None::<Option<&str>>)?;
}
},
Content::Text(ref s) => {
state.serialize_field("content_html", &None::<Option<&str>>)?;
state.serialize_field("content_text", s)?;
}
},
Content::Both(ref s, ref t) => {
state.serialize_field("content_html", s)?;
state.serialize_field("content_text", t)?;
}
},
};
if self.summary.is_some() {
state.serialize_field("summary", &self.summary)?;
@ -118,9 +117,8 @@ impl Serialize for Item {
}
impl<'de> Deserialize<'de> for Item {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'de>
{
enum Field {
Id,
@ -137,12 +135,11 @@ impl<'de> Deserialize<'de> for Item {
Author,
Tags,
Attachments,
}
};
impl<'de> Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
where D: Deserializer<'de>
{
struct FieldVisitor;
@ -154,8 +151,7 @@ impl<'de> Deserialize<'de> for Item {
}
fn visit_str<E>(self, value: &str) -> Result<Field, E>
where
E: de::Error,
where E: de::Error
{
match value {
"id" => Ok(Field::Id),
@ -190,8 +186,7 @@ impl<'de> Deserialize<'de> for Item {
}
fn visit_map<V>(self, mut map: V) -> Result<Item, V::Error>
where
V: MapAccess<'de>,
where V: MapAccess<'de>
{
let mut id = None;
let mut url = None;
@ -215,93 +210,99 @@ impl<'de> Deserialize<'de> for Item {
return Err(de::Error::duplicate_field("id"));
}
id = Some(map.next_value()?);
}
},
Field::Url => {
if url.is_some() {
return Err(de::Error::duplicate_field("url"));
}
url = map.next_value()?;
}
},
Field::ExternalUrl => {
if external_url.is_some() {
return Err(de::Error::duplicate_field("external_url"));
}
external_url = map.next_value()?;
}
},
Field::Title => {
if title.is_some() {
return Err(de::Error::duplicate_field("title"));
}
title = map.next_value()?;
}
},
Field::ContentHtml => {
if content_html.is_some() {
return Err(de::Error::duplicate_field("content_html"));
}
content_html = map.next_value()?;
}
},
Field::ContentText => {
if content_text.is_some() {
return Err(de::Error::duplicate_field("content_text"));
}
content_text = map.next_value()?;
}
},
Field::Summary => {
if summary.is_some() {
return Err(de::Error::duplicate_field("summary"));
}
summary = map.next_value()?;
}
},
Field::Image => {
if image.is_some() {
return Err(de::Error::duplicate_field("image"));
}
image = map.next_value()?;
}
},
Field::BannerImage => {
if banner_image.is_some() {
return Err(de::Error::duplicate_field("banner_image"));
}
banner_image = map.next_value()?;
}
},
Field::DatePublished => {
if date_published.is_some() {
return Err(de::Error::duplicate_field("date_published"));
}
date_published = map.next_value()?;
}
},
Field::DateModified => {
if date_modified.is_some() {
return Err(de::Error::duplicate_field("date_modified"));
}
date_modified = map.next_value()?;
}
},
Field::Author => {
if author.is_some() {
return Err(de::Error::duplicate_field("author"));
}
author = map.next_value()?;
}
},
Field::Tags => {
if tags.is_some() {
return Err(de::Error::duplicate_field("tags"));
}
tags = map.next_value()?;
}
},
Field::Attachments => {
if attachments.is_some() {
return Err(de::Error::duplicate_field("attachments"));
}
attachments = map.next_value()?;
}
},
}
}
let id = id.ok_or_else(|| de::Error::missing_field("id"))?;
let content = match (content_html, content_text) {
(Some(s), Some(t)) => Content::Both(s.to_string(), t.to_string()),
(Some(s), _) => Content::Html(s.to_string()),
(_, Some(t)) => Content::Text(t.to_string()),
(Some(s), Some(t)) => {
Content::Both(s.to_string(), t.to_string())
},
(Some(s), _) => {
Content::Html(s.to_string())
},
(_, Some(t)) => {
Content::Text(t.to_string())
},
_ => return Err(de::Error::missing_field("content_html or content_text")),
};
@ -362,12 +363,7 @@ mod tests {
banner_image: Some("http://img.com/blah".into()),
date_published: Some("2017-01-01 10:00:00".into()),
date_modified: Some("2017-01-01 10:00:00".into()),
author: Some(
Author::new()
.name("bob jones")
.url("http://example.com")
.avatar("http://img.com/blah"),
),
author: Some(Author::new().name("bob jones").url("http://example.com").avatar("http://img.com/blah")),
tags: Some(vec!["json".into(), "feed".into()]),
attachments: Some(vec![]),
};
@ -391,12 +387,7 @@ mod tests {
banner_image: Some("http://img.com/blah".into()),
date_published: Some("2017-01-01 10:00:00".into()),
date_modified: Some("2017-01-01 10:00:00".into()),
author: Some(
Author::new()
.name("bob jones")
.url("http://example.com")
.avatar("http://img.com/blah"),
),
author: Some(Author::new().name("bob jones").url("http://example.com").avatar("http://img.com/blah")),
tags: Some(vec!["json".into(), "feed".into()]),
attachments: Some(vec![]),
};
@ -420,12 +411,7 @@ mod tests {
banner_image: Some("http://img.com/blah".into()),
date_published: Some("2017-01-01 10:00:00".into()),
date_modified: Some("2017-01-01 10:00:00".into()),
author: Some(
Author::new()
.name("bob jones")
.url("http://example.com")
.avatar("http://img.com/blah"),
),
author: Some(Author::new().name("bob jones").url("http://example.com").avatar("http://img.com/blah")),
tags: Some(vec!["json".into(), "feed".into()]),
attachments: Some(vec![]),
};
@ -451,12 +437,7 @@ mod tests {
banner_image: Some("http://img.com/blah".into()),
date_published: Some("2017-01-01 10:00:00".into()),
date_modified: Some("2017-01-01 10:00:00".into()),
author: Some(
Author::new()
.name("bob jones")
.url("http://example.com")
.avatar("http://img.com/blah"),
),
author: Some(Author::new().name("bob jones").url("http://example.com").avatar("http://img.com/blah")),
tags: Some(vec!["json".into(), "feed".into()]),
attachments: Some(vec![]),
};
@ -479,12 +460,7 @@ mod tests {
banner_image: Some("http://img.com/blah".into()),
date_published: Some("2017-01-01 10:00:00".into()),
date_modified: Some("2017-01-01 10:00:00".into()),
author: Some(
Author::new()
.name("bob jones")
.url("http://example.com")
.avatar("http://img.com/blah"),
),
author: Some(Author::new().name("bob jones").url("http://example.com").avatar("http://img.com/blah")),
tags: Some(vec!["json".into(), "feed".into()]),
attachments: Some(vec![]),
};
@ -507,15 +483,11 @@ mod tests {
banner_image: Some("http://img.com/blah".into()),
date_published: Some("2017-01-01 10:00:00".into()),
date_modified: Some("2017-01-01 10:00:00".into()),
author: Some(
Author::new()
.name("bob jones")
.url("http://example.com")
.avatar("http://img.com/blah"),
),
author: Some(Author::new().name("bob jones").url("http://example.com").avatar("http://img.com/blah")),
tags: Some(vec!["json".into(), "feed".into()]),
attachments: Some(vec![]),
};
assert_eq!(item, expected);
}
}
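
`Item` needs this hand-written `Deserialize` because `content_html` and `content_text` collapse into a single `Content` value, which `#[derive(Deserialize)]` cannot express. The same Visitor/MapAccess pattern, reduced to a self-contained toy (the names here are illustrative, not from the crate):

    use std::fmt;

    use serde::de::{self, Deserialize, Deserializer, MapAccess, Visitor};

    // Folds two optional JSON fields into one value, mirroring the
    // content_html/content_text handling above.
    struct Folded(String);

    impl<'de> Deserialize<'de> for Folded {
        fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
        where
            D: Deserializer<'de>,
        {
            struct V;
            impl<'de> Visitor<'de> for V {
                type Value = Folded;

                fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result {
                    f.write_str("an object with `html` and/or `text`")
                }

                fn visit_map<M>(self, mut map: M) -> Result<Folded, M::Error>
                where
                    M: MapAccess<'de>,
                {
                    let mut html: Option<String> = None;
                    let mut text: Option<String> = None;
                    while let Some(key) = map.next_key::<String>()? {
                        match key.as_str() {
                            "html" => html = Some(map.next_value()?),
                            "text" => text = Some(map.next_value()?),
                            // Skip unknown keys instead of erroring.
                            _ => { map.next_value::<de::IgnoredAny>()?; }
                        }
                    }
                    html.or(text)
                        .map(Folded)
                        .ok_or_else(|| de::Error::missing_field("html or text"))
                }
            }
            deserializer.deserialize_map(V)
        }
    }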

View File

@ -2,7 +2,7 @@
//! instead of XML
//!
//! This crate can serialize and deserialize between JSON Feed strings
//! and Rust data structures. It also allows for programmatically building
//! and Rust data structures. It also allows for programmatically building
//! a JSON Feed
//!
//! Example:
@ -40,20 +40,18 @@
//! ```
extern crate serde;
#[macro_use]
extern crate error_chain;
#[macro_use]
extern crate serde_derive;
#[macro_use] extern crate error_chain;
#[macro_use] extern crate serde_derive;
extern crate serde_json;
mod builder;
mod errors;
mod feed;
mod item;
mod feed;
mod builder;
pub use errors::*;
pub use feed::{Attachment, Author, Feed};
pub use item::*;
pub use feed::{Feed, Author, Attachment};
use std::io::Write;
@ -118,16 +116,14 @@ pub fn to_vec_pretty(value: &Feed) -> Result<Vec<u8>> {
/// Serialize a Feed to JSON and output to an IO stream
pub fn to_writer<W>(writer: W, value: &Feed) -> Result<()>
where
W: Write,
where W: Write
{
Ok(serde_json::to_writer(writer, value)?)
}
/// Serialize a Feed to pretty-printed JSON and output to an IO stream
pub fn to_writer_pretty<W>(writer: W, value: &Feed) -> Result<()>
where
W: Write,
where W: Write
{
Ok(serde_json::to_writer_pretty(writer, value)?)
}
@ -141,7 +137,10 @@ mod tests {
fn from_str() {
let feed = r#"{"version": "https://jsonfeed.org/version/1","title":"","items":[]}"#;
let expected = Feed::default();
assert_eq!(super::from_str(&feed).unwrap(), expected);
assert_eq!(
super::from_str(&feed).unwrap(),
expected
);
}
#[test]
fn from_reader() {
@ -149,27 +148,39 @@ mod tests {
let feed = feed.as_bytes();
let feed = Cursor::new(feed);
let expected = Feed::default();
assert_eq!(super::from_reader(feed).unwrap(), expected);
assert_eq!(
super::from_reader(feed).unwrap(),
expected
);
}
#[test]
fn from_slice() {
let feed = r#"{"version": "https://jsonfeed.org/version/1","title":"","items":[]}"#;
let feed = feed.as_bytes();
let expected = Feed::default();
assert_eq!(super::from_slice(&feed).unwrap(), expected);
assert_eq!(
super::from_slice(&feed).unwrap(),
expected
);
}
#[test]
fn from_value() {
let feed = r#"{"version": "https://jsonfeed.org/version/1","title":"","items":[]}"#;
let feed: serde_json::Value = serde_json::from_str(&feed).unwrap();
let expected = Feed::default();
assert_eq!(super::from_value(feed).unwrap(), expected);
assert_eq!(
super::from_value(feed).unwrap(),
expected
);
}
#[test]
fn to_string() {
let feed = Feed::default();
let expected = r#"{"version":"https://jsonfeed.org/version/1","title":"","items":[]}"#;
assert_eq!(super::to_string(&feed).unwrap(), expected);
assert_eq!(
super::to_string(&feed).unwrap(),
expected
);
}
#[test]
fn to_string_pretty() {
@ -179,19 +190,28 @@ mod tests {
"title": "",
"items": []
}"#;
assert_eq!(super::to_string_pretty(&feed).unwrap(), expected);
assert_eq!(
super::to_string_pretty(&feed).unwrap(),
expected
);
}
#[test]
fn to_value() {
let feed = r#"{"version":"https://jsonfeed.org/version/1","title":"","items":[]}"#;
let expected: serde_json::Value = serde_json::from_str(&feed).unwrap();
assert_eq!(super::to_value(Feed::default()).unwrap(), expected);
assert_eq!(
super::to_value(Feed::default()).unwrap(),
expected
);
}
#[test]
fn to_vec() {
let feed = r#"{"version":"https://jsonfeed.org/version/1","title":"","items":[]}"#;
let expected = feed.as_bytes();
assert_eq!(super::to_vec(&Feed::default()).unwrap(), expected);
assert_eq!(
super::to_vec(&Feed::default()).unwrap(),
expected
);
}
#[test]
fn to_vec_pretty() {
@ -201,7 +221,10 @@ mod tests {
"items": []
}"#;
let expected = feed.as_bytes();
assert_eq!(super::to_vec_pretty(&Feed::default()).unwrap(), expected);
assert_eq!(
super::to_vec_pretty(&Feed::default()).unwrap(),
expected
);
}
#[test]
fn to_writer() {
@ -226,3 +249,4 @@ mod tests {
assert_eq!(result, feed);
}
}
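
The crate's public API is symmetric: `from_str`/`from_reader`/`from_slice`/`from_value` on the input side, `to_string`/`to_vec`/`to_writer` (plus `_pretty` variants) on the output side, all returning the crate's `Result`. A round-trip sketch using only items exercised in the tests above:

    fn round_trip() -> jsonfeed::Result<()> {
        // Feed derives PartialEq, so equality checks the whole structure.
        let feed = jsonfeed::Feed::default();
        let s = jsonfeed::to_string(&feed)?;
        assert_eq!(jsonfeed::from_str(&s)?, feed);
        Ok(())
    }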

View File

@ -9,7 +9,7 @@ edition = "2018"
[dependencies]
chrono = { version = "0.4", features = ["serde"] }
color-eyre = "0.5"
reqwest = { version = "0.11", features = ["json"] }
reqwest = { version = "0.10", features = ["json"] }
serde_json = "1.0"
serde = { version = "1", features = ["derive"] }
thiserror = "1"
@ -17,6 +17,6 @@ tracing = "0.1"
tracing-futures = "0.2"
[dev-dependencies]
tokio = { version = "1", features = ["macros"] }
tokio = { version = "0.2", features = ["macros"] }
envy = "0.4"
pretty_env_logger = "0"

View File

@ -34,7 +34,7 @@ impl Client {
})
}
#[instrument(skip(self), err)]
#[instrument(skip(self))]
pub async fn mentioners(&self, url: String) -> Result<Vec<WebMention>> {
Ok(self
.cli
@ -46,16 +46,6 @@ impl Client {
.json()
.await?)
}
#[instrument(skip(self), err)]
pub async fn refresh(&self) -> Result<()> {
self.cli
.post("https://mi.within.website/api/blog/refresh")
.send()
.await?
.error_for_status()?;
Ok(())
}
}
#[derive(Debug, Deserialize, Eq, PartialEq, Clone)]
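
The `refresh` method is the library-side twin of the `curl … /api/blog/refresh -XPOST` call that scripts/release.sh (added below) performs. A hypothetical call site, matching how poke.rs constructs the client elsewhere in this diff:

    // MI_TOKEN comes from the environment, as in the rest of the site.
    async fn poke_mi() -> color_eyre::eyre::Result<()> {
        let cli = mi::Client::new(
            std::env::var("MI_TOKEN")?,
            "xesite".to_string(), // user agent; the real code passes APPLICATION_NAME
        )?;
        cli.refresh().await?;
        Ok(())
    }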

View File

@ -8,7 +8,7 @@ edition = "2018"
[dependencies]
chrono = { version = "0.4", features = ["serde"] }
reqwest = { version = "0.11", features = ["json"] }
reqwest = { version = "0.10", features = ["json"] }
serde_json = "1.0"
serde = { version = "1", features = ["derive"] }
thiserror = "1"
@ -16,6 +16,6 @@ tracing = "0.1"
tracing-futures = "0.2"
[dev-dependencies]
tokio = { version = "1", features = ["macros"] }
tokio = { version = "0.2", features = ["macros"] }
envy = "0.4"
pretty_env_logger = "0"

View File

@ -4,7 +4,7 @@ let
pkgs =
import sources.nixpkgs { overlays = [ (import sources.nixpkgs-mozilla) ]; };
channel = "nightly";
date = "2021-01-14";
date = "2020-11-25";
targets = [ ];
chan = pkgs.rustChannelOfTargets channel date targets;
chan = pkgs.latest.rustChannels.stable.rust;
in chan

View File

@ -5,10 +5,10 @@
"homepage": "",
"owner": "justinwoo",
"repo": "easy-dhall-nix",
"rev": "eae7f64c4d6c70681e5a56c84198236930ba425e",
"sha256": "1y2x15v8a679vlpxazjpibfwajp6zph60f8wjcm4xflbvazk0dx7",
"rev": "8f0840b7c94dfba4d5fbbb8709ba6df2fe6a2158",
"sha256": "0rf0fx80kh2825sa07hg36d98r1gnjwrfdw2n2d9y8kkhvd03ppx",
"type": "tarball",
"url": "https://github.com/justinwoo/easy-dhall-nix/archive/eae7f64c4d6c70681e5a56c84198236930ba425e.tar.gz",
"url": "https://github.com/justinwoo/easy-dhall-nix/archive/8f0840b7c94dfba4d5fbbb8709ba6df2fe6a2158.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
},
"naersk": {
@ -17,10 +17,10 @@
"homepage": "",
"owner": "nmattia",
"repo": "naersk",
"rev": "a76924cbbb17c387e5ae4998a4721d88a3ac95c0",
"sha256": "09b5g2krf8mfpajgz2bgapkv3dpimg0qx1nfpjafcrsk0fhxmqay",
"rev": "8d562105f960c487cfc013c0e666c1951b03d392",
"sha256": "16a1k5wamp67sqc341rlbmppm2xldfl207k9yhksgcvd4rw3iy20",
"type": "tarball",
"url": "https://github.com/nmattia/naersk/archive/a76924cbbb17c387e5ae4998a4721d88a3ac95c0.tar.gz",
"url": "https://github.com/nmattia/naersk/archive/8d562105f960c487cfc013c0e666c1951b03d392.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
},
"niv": {
@ -29,10 +29,10 @@
"homepage": "https://github.com/nmattia/niv",
"owner": "nmattia",
"repo": "niv",
"rev": "94dadba1a3a6a2f0b8ca2963e49daeec5d4e3098",
"sha256": "1y2h9wl7w60maa2m4xw9231xdr325xynzpph8xr4j5vsznygv986",
"rev": "ba57d5a29b4e0f2085917010380ef3ddc3cf380f",
"sha256": "1kpsvc53x821cmjg1khvp1nz7906gczq8mp83664cr15h94sh8i4",
"type": "tarball",
"url": "https://github.com/nmattia/niv/archive/94dadba1a3a6a2f0b8ca2963e49daeec5d4e3098.tar.gz",
"url": "https://github.com/nmattia/niv/archive/ba57d5a29b4e0f2085917010380ef3ddc3cf380f.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
},
"nixpkgs": {

scripts/release.sh Executable file
View File

@ -0,0 +1,12 @@
#!/usr/bin/env nix-shell
#! nix-shell -p doctl -p kubectl -p curl -i bash
#! nix-shell -I nixpkgs=https://releases.nixos.org/nixpkgs/nixpkgs-21.03pre252431.4f3475b113c/nixexprs.tar.xz
nix-env -if ./nix/dhall-yaml.nix
doctl kubernetes cluster kubeconfig save kubermemes
dhall-to-yaml-ng < ./site.dhall | kubectl apply -n apps -f -
kubectl rollout status -n apps deployment/christinewebsite
kubectl apply -f ./k8s/job.yml
sleep 10
kubectl delete -f ./k8s/job.yml
curl --http1.1 -H "Authorization: $MI_TOKEN" https://mi.within.website/api/blog/refresh -XPOST

View File

@ -5,7 +5,7 @@ let
dhall-yaml = dhallpkgs.dhall-yaml-simple;
dhall = dhallpkgs.dhall-simple;
xepkgs = import sources.xepkgs { inherit pkgs; };
rust = pkgs.callPackage ./nix/rust.nix { };
rust = import ./nix/rust.nix { };
in with pkgs;
with xepkgs;
mkShell {

View File

@ -244,34 +244,4 @@ in [ Person::{
, gitLink = "https://github.com/nasirhm"
, twitter = "https://twitter.com/_nasirhm_"
}
, Person::{
, name = "Eliot Partridge"
, tags =
[ "python"
, "linux"
, "typescript"
, "javascript"
, "docker"
, "c#"
, "dotnet"
, "php"
]
, gitLink = "https://github.com/BytewaveMLP"
}
, Person::{
, name = "İlteriş Eroğlu"
, tags =
[ "linux"
, "javascript"
, "node.js"
, "bash"
, "nfc"
, "python"
, "devops"
, "networking"
, "bgp"
]
, gitLink = "https://github.com/linuxgemini"
, twitter = "https://twitter.com/linuxgemini"
}
]

site.dhall Normal file
View File

@ -0,0 +1,45 @@
let kms = https://tulpa.dev/cadey/kubermemes/raw/branch/master/k8s/package.dhall
let kubernetes =
https://raw.githubusercontent.com/dhall-lang/dhall-kubernetes/master/1.15/package.dhall
let tag = env:GITHUB_SHA as Text ? "latest"
let image = "ghcr.io/xe/site:${tag}"
let vars
: List kubernetes.EnvVar.Type
= [ kubernetes.EnvVar::{ name = "PORT", value = Some "3030" }
, kubernetes.EnvVar::{ name = "RUST_LOG", value = Some "info" }
, kubernetes.EnvVar::{
, name = "PATREON_CLIENT_ID"
, value = Some env:PATREON_CLIENT_ID as Text
}
, kubernetes.EnvVar::{
, name = "PATREON_CLIENT_SECRET"
, value = Some env:PATREON_CLIENT_SECRET as Text
}
, kubernetes.EnvVar::{
, name = "PATREON_ACCESS_TOKEN"
, value = Some env:PATREON_ACCESS_TOKEN as Text
}
, kubernetes.EnvVar::{
, name = "PATREON_REFRESH_TOKEN"
, value = Some env:PATREON_REFRESH_TOKEN as Text
}
, kubernetes.EnvVar::{
, name = "MI_TOKEN"
, value = Some env:MI_TOKEN as Text
}
]
in kms.app.make
kms.app.Config::{
, name = "christinewebsite"
, appPort = 3030
, image
, replicas = 2
, domain = "christine.website"
, leIssuer = "prod"
, envVars = vars
}

site.nix Normal file
View File

@ -0,0 +1,51 @@
{ sources ? import ./nix/sources.nix, pkgs ? import sources.nixpkgs { } }:
with pkgs;
let
srcNoTarget = dir:
builtins.filterSource
(path: type: type != "directory" || builtins.baseNameOf path != "target")
dir;
naersk = pkgs.callPackage sources.naersk { };
dhallpkgs = import sources.easy-dhall-nix { inherit pkgs; };
src = srcNoTarget ./.;
xesite = naersk.buildPackage {
inherit src;
buildInputs = [ pkg-config openssl git ];
remapPathPrefix = true;
};
config = stdenv.mkDerivation {
pname = "xesite-config";
version = "HEAD";
buildInputs = [ dhallpkgs.dhall-simple ];
phases = "installPhase";
installPhase = ''
cd ${src}
dhall resolve < ${src}/config.dhall >> $out
'';
};
in pkgs.stdenv.mkDerivation {
inherit (xesite) name;
inherit src;
phases = "installPhase";
installPhase = ''
mkdir -p $out $out/bin
cp -rf ${config} $out/config.dhall
cp -rf $src/blog $out/blog
cp -rf $src/css $out/css
cp -rf $src/gallery $out/gallery
cp -rf $src/signalboost.dhall $out/signalboost.dhall
cp -rf $src/static $out/static
cp -rf $src/talks $out/talks
cp -rf ${xesite}/bin/xesite $out/bin/xesite
'';
}

View File

@ -5,7 +5,6 @@ use std::{fs, path::PathBuf};
use tracing::{error, instrument};
pub mod markdown;
pub mod poke;
#[derive(Clone, Deserialize)]
pub struct Config {
@ -87,8 +86,6 @@ pub async fn init(cfg: PathBuf) -> Result<State> {
everything.sort();
everything.reverse();
let everything: Vec<Post> = everything.into_iter().take(20).collect();
let mut jfb = jsonfeed::Feed::builder()
.title("Christine Dodrill's Blog")
.description("My blog posts and rants about various technology things.")

View File

@ -1,86 +0,0 @@
use color_eyre::eyre::Result;
use std::{env, time::Duration};
use tokio::time::sleep as delay_for;
#[instrument(err)]
pub async fn the_cloud() -> Result<()> {
info!("waiting for things to settle");
delay_for(Duration::from_secs(10)).await;
info!("purging cloudflare cache");
cloudflare().await?;
info!("waiting for the cloudflare cache to purge globally");
delay_for(Duration::from_secs(45)).await;
info!("poking mi");
mi().await?;
info!("poking bing");
bing().await?;
info!("poking google");
google().await?;
Ok(())
}
#[instrument(err)]
async fn bing() -> Result<()> {
let cli = reqwest::Client::new();
cli.get("https://www.bing.com/ping")
.query(&[("sitemap", "https://christine.website/sitemap.xml")])
.header("User-Agent", crate::APPLICATION_NAME)
.send()
.await?
.error_for_status()?;
Ok(())
}
#[instrument(err)]
async fn google() -> Result<()> {
let cli = reqwest::Client::new();
cli.get("https://www.google.com/ping")
.query(&[("sitemap", "https://christine.website/sitemap.xml")])
.header("User-Agent", crate::APPLICATION_NAME)
.send()
.await?
.error_for_status()?;
Ok(())
}
#[instrument(err)]
async fn cloudflare() -> Result<()> {
let cli = cfcache::Client::new(env::var("CF_TOKEN")?, env::var("CF_ZONE_ID")?)?;
cli.purge(
vec![
"https://christine.website/sitemap.xml",
"https://christine.website",
"https://christine.website/blog",
"https://christine.website/blog.atom",
"https://christine.website/blog.json",
"https://christine.website/blog.rss",
"https://christine.website/gallery",
"https://christine.website/talks",
"https://christine.website/resume",
"https://christine.website/signalboost",
"https://christine.website/feeds",
]
.into_iter()
.map(|i| i.to_string())
.collect(),
)
.await?;
Ok(())
}
#[instrument(err)]
async fn mi() -> Result<()> {
let cli = mi::Client::new(env::var("MI_TOKEN")?, crate::APPLICATION_NAME.to_string())?;
cli.refresh().await?;
Ok(())
}

View File

@ -4,23 +4,8 @@ use std::process::Command;
fn main() -> Result<()> {
Ructe::from_env()?.compile_templates("templates")?;
let output = Command::new("git")
.args(&["rev-parse", "HEAD"])
.output()
.unwrap();
if std::env::var("out").is_err() {
println!("cargo:rustc-env=out=/yolo");
}
let output = Command::new("git").args(&["rev-parse", "HEAD"]).output().unwrap();
let git_hash = String::from_utf8(output.stdout).unwrap();
println!(
"cargo:rustc-env=GITHUB_SHA={}",
if git_hash.as_str() == "" {
env!("out").into()
} else {
git_hash
}
);
println!("cargo:rustc-env=GITHUB_SHA={}", git_hash);
Ok(())
}
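
The longer version of this build script bakes the current commit into the binary: it shells out to `git rev-parse HEAD` and exports the hash as a compile-time env var, falling back to the Nix `$out` path when git metadata is unavailable (as in a Nix build sandbox). The consuming side is just `env!`:

    // GITHUB_SHA was exported by build.rs via cargo:rustc-env, so it
    // is available at compile time as a &'static str.
    println!("running commit {}", env!("GITHUB_SHA"));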

View File

@ -11,11 +11,10 @@ lazy_static! {
&["kind"]
)
.unwrap();
pub static ref ETAG: String = format!(r#"W/"{}""#, uuid::Uuid::new_v4().to_simple());
}
#[instrument(skip(state))]
pub async fn jsonfeed(state: Arc<State>, since: Option<String>) -> Result<impl Reply, Rejection> {
pub async fn jsonfeed(state: Arc<State>) -> Result<impl Reply, Rejection> {
HIT_COUNTER.with_label_values(&["json"]).inc();
let state = state.clone();
Ok(warp::reply::json(&state.jf))
@ -30,22 +29,7 @@ pub enum RenderError {
impl warp::reject::Reject for RenderError {}
#[instrument(skip(state))]
pub async fn atom(state: Arc<State>, since: Option<String>) -> Result<impl Reply, Rejection> {
if let Some(etag) = since {
if etag == ETAG.clone() {
return Response::builder()
.status(304)
.header("Content-Type", "text/plain")
.body(
"You already have the newest version of this feed."
.to_string()
.into_bytes(),
)
.map_err(RenderError::Build)
.map_err(warp::reject::custom);
}
}
pub async fn atom(state: Arc<State>) -> Result<impl Reply, Rejection> {
HIT_COUNTER.with_label_values(&["atom"]).inc();
let state = state.clone();
let mut buf = Vec::new();
@ -55,29 +39,13 @@ pub async fn atom(state: Arc<State>, since: Option<String>) -> Result<impl Reply
Response::builder()
.status(200)
.header("Content-Type", "application/atom+xml")
.header("ETag", ETAG.clone())
.body(buf)
.map_err(RenderError::Build)
.map_err(warp::reject::custom)
}
#[instrument(skip(state))]
pub async fn rss(state: Arc<State>, since: Option<String>) -> Result<impl Reply, Rejection> {
if let Some(etag) = since {
if etag == ETAG.clone() {
return Response::builder()
.status(304)
.header("Content-Type", "text/plain")
.body(
"You already have the newest version of this feed."
.to_string()
.into_bytes(),
)
.map_err(RenderError::Build)
.map_err(warp::reject::custom);
}
}
pub async fn rss(state: Arc<State>) -> Result<impl Reply, Rejection> {
HIT_COUNTER.with_label_values(&["rss"]).inc();
let state = state.clone();
let mut buf = Vec::new();
@ -87,7 +55,6 @@ pub async fn rss(state: Arc<State>, since: Option<String>) -> Result<impl Reply,
Response::builder()
.status(200)
.header("Content-Type", "application/rss+xml")
.header("ETag", ETAG.clone())
.body(buf)
.map_err(RenderError::Build)
.map_err(warp::reject::custom)
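
One side of this diff adds conditional-GET support: each feed handler receives the request's optional `If-None-Match` value and answers 304 when it matches a process-lifetime ETag, so feed readers stop re-downloading unchanged feeds. Because the ETag is a UUID minted at startup, every redeploy rotates it and safely invalidates cached copies. The core of the pattern, as a sketch:

    use lazy_static::lazy_static;

    lazy_static! {
        // One weak ETag per process lifetime, exactly as above.
        pub static ref ETAG: String =
            format!(r#"W/"{}""#, uuid::Uuid::new_v4().to_simple());
    }

    // True when the request's If-None-Match matches our ETag, i.e.
    // the handler should reply 304 Not Modified with no body.
    fn not_modified(if_none_match: Option<&str>) -> bool {
        if_none_match.map(|v| v == ETAG.as_str()).unwrap_or(false)
    }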

View File

@ -5,11 +5,11 @@ use crate::{
use lazy_static::lazy_static;
use prometheus::{opts, register_int_counter_vec, IntCounterVec};
use std::{convert::Infallible, fmt, sync::Arc};
use tracing::instrument;
use warp::{
http::{Response, StatusCode},
Rejection, Reply,
};
use tracing::instrument;
lazy_static! {
static ref HIT_COUNTER: IntCounterVec =
@ -86,6 +86,12 @@ impl fmt::Display for PostNotFound {
impl warp::reject::Reject for PostNotFound {}
impl From<PostNotFound> for warp::reject::Rejection {
fn from(error: PostNotFound) -> Self {
warp::reject::custom(error)
}
}
#[derive(Debug, thiserror::Error)]
struct SeriesNotFound(String);
@ -97,6 +103,12 @@ impl fmt::Display for SeriesNotFound {
impl warp::reject::Reject for SeriesNotFound {}
impl From<SeriesNotFound> for warp::reject::Rejection {
fn from(error: SeriesNotFound) -> Self {
warp::reject::custom(error)
}
}
lazy_static! {
static ref REJECTION_COUNTER: IntCounterVec = register_int_counter_vec!(
opts!("rejections", "Number of rejections by kind"),
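
The new `From<…> for warp::reject::Rejection` impls let handlers propagate these errors with `?` instead of wrapping each one in `warp::reject::custom` by hand. A sketch, where the lookup helper is hypothetical:

    // find_series stands in for a real lookup; the point is that `?`
    // now converts SeriesNotFound into a Rejection via the From impl.
    fn find_series(name: &str) -> Result<(), SeriesNotFound> {
        Err(SeriesNotFound(name.to_string()))
    }

    async fn series_view(name: String) -> Result<&'static str, warp::Rejection> {
        find_series(&name)?;
        Ok("rendered series page")
    }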

View File

@ -1,6 +1,3 @@
#[macro_use]
extern crate tracing;
use color_eyre::eyre::Result;
use hyper::{header::CONTENT_TYPE, Body, Response};
use prometheus::{Encoder, TextEncoder};
@ -27,7 +24,7 @@ async fn main() -> Result<()> {
color_eyre::install()?;
let _ = kankyo::init();
tracing_subscriber::fmt::init();
info!("starting up commit {}", env!("GITHUB_SHA"));
log::info!("starting up commit {}", env!("GITHUB_SHA"));
let state = Arc::new(
app::init(
@ -98,39 +95,20 @@ async fn main() -> Result<()> {
.and(with_state(state.clone()))
.and_then(handlers::patrons);
let files = warp::path("static")
.and(warp::fs::dir("./static"))
.map(|reply| {
warp::reply::with_header(
reply,
"Cache-Control",
"public, max-age=86400, stale-if-error=60",
)
});
let css = warp::path("css").and(warp::fs::dir("./css")).map(|reply| {
warp::reply::with_header(
reply,
"Cache-Control",
"public, max-age=86400, stale-if-error=60",
)
});
let files = warp::path("static").and(warp::fs::dir("./static"));
let css = warp::path("css").and(warp::fs::dir("./css"));
let sw = warp::path("sw.js").and(warp::fs::file("./static/js/sw.js"));
let robots = warp::path("robots.txt").and(warp::fs::file("./static/robots.txt"));
let favicon = warp::path("favicon.ico").and(warp::fs::file("./static/favicon/favicon.ico"));
let jsonfeed = warp::path("blog.json")
.and(with_state(state.clone()))
.and(warp::header::optional("if-none-match"))
.and_then(handlers::feeds::jsonfeed);
let atom = warp::path("blog.atom")
.and(with_state(state.clone()))
.and(warp::header::optional("if-none-match"))
.and_then(handlers::feeds::atom);
let rss = warp::path("blog.rss")
.and(with_state(state.clone()))
.and(warp::header::optional("if-none-match"))
.and_then(handlers::feeds::rss);
let sitemap = warp::path("sitemap.xml")
.and(with_state(state.clone()))
@ -139,7 +117,6 @@ async fn main() -> Result<()> {
let go_vanity_jsonfeed = warp::path("jsonfeed")
.and(warp::any().map(move || "christine.website/jsonfeed"))
.and(warp::any().map(move || "https://tulpa.dev/Xe/jsonfeed"))
.and(warp::any().map(move || "master"))
.and_then(go_vanity::gitea);
let metrics_endpoint = warp::path("metrics").and(warp::path::end()).map(move || {
@ -154,37 +131,14 @@ async fn main() -> Result<()> {
.unwrap()
});
let static_pages = index
.or(feeds)
.or(resume.or(signalboost))
.or(patrons)
.or(jsonfeed.or(atom.or(sitemap)).or(rss))
.or(favicon.or(robots).or(sw))
.or(contact)
.map(|reply| {
warp::reply::with_header(
reply,
"Cache-Control",
"public, max-age=86400, stale-if-error=60",
)
});
let dynamic_pages = blog_index
.or(series.or(series_view).or(post_view))
let site = index
.or(contact.or(feeds).or(resume.or(signalboost)).or(patrons))
.or(blog_index.or(series.or(series_view).or(post_view)))
.or(gallery_index.or(gallery_post_view))
.or(talk_index.or(talk_post_view))
.map(|reply| {
warp::reply::with_header(
reply,
"Cache-Control",
"public, max-age=600, stale-if-error=60",
)
});
let site = static_pages
.or(dynamic_pages)
.or(jsonfeed.or(atom).or(rss.or(sitemap)))
.or(files.or(css).or(favicon).or(sw.or(robots)))
.or(healthcheck.or(metrics_endpoint).or(go_vanity_jsonfeed))
.or(files.or(css))
.map(|reply| {
warp::reply::with_header(
reply,
@ -207,37 +161,7 @@ async fn main() -> Result<()> {
.with(warp::log(APPLICATION_NAME))
.recover(handlers::rejection);
match sdnotify::SdNotify::from_env() {
Ok(ref mut n) => {
// shitty heuristic for detecting if we're running in prod
tokio::spawn(async {
if let Err(why) = app::poke::the_cloud().await {
error!("Unable to poke the cloud: {}", why);
}
});
n.notify_ready().map_err(|why| {
error!("can't signal readiness to systemd: {}", why);
why
})?;
n.set_status(format!("hosting {} posts", state.clone().everything.len()))
.map_err(|why| {
error!("can't signal status to systemd: {}", why);
why
})?;
}
Err(why) => error!("not running under systemd with Type=notify: {}", why),
}
warp::serve(site)
.run((
[0, 0, 0, 0],
std::env::var("PORT")
.unwrap_or("3030".into())
.parse::<u16>()
.unwrap(),
))
.await;
warp::serve(site).run(([0, 0, 0, 0], 3030)).await;
Ok(())
}
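
The larger side of this hunk splits the site's routes into static and dynamic groups so each can carry its own `Cache-Control`: fully static pages are cached for a day (`max-age=86400`) and dynamic pages for ten minutes (`max-age=600`), each stamped once with a single `.map` over the composed filter. The layering idea, reduced to placeholder routes:

    use warp::Filter;

    // Placeholder routes; the real groups compose many pages each.
    let static_pages = warp::path("about").map(|| "about").map(|reply| {
        warp::reply::with_header(
            reply,
            "Cache-Control",
            "public, max-age=86400, stale-if-error=60",
        )
    });
    let dynamic_pages = warp::path("blog").map(|| "blog").map(|reply| {
        warp::reply::with_header(
            reply,
            "Cache-Control",
            "public, max-age=600, stale-if-error=60",
        )
    });
    let site = static_pages.or(dynamic_pages);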

View File

@ -1,6 +1,7 @@
/// This code was borrowed from @fasterthanlime.
use color_eyre::eyre::Result;
use serde::{Deserialize, Serialize};
use color_eyre::eyre::{Result};
use serde::{Serialize, Deserialize};
#[derive(Eq, PartialEq, Deserialize, Default, Debug, Serialize, Clone)]
pub struct Data {
@ -12,7 +13,6 @@ pub struct Data {
pub image: Option<String>,
pub thumb: Option<String>,
pub show: Option<bool>,
pub redirect_to: Option<String>,
}
enum State {
@ -81,7 +81,7 @@ impl Data {
};
}
}
_ => panic!("Expected newline, got {:?}", ch),
_ => panic!("Expected newline, got {:?}",),
},
State::ReadingFrontMatter { buf, line_start } => match ch {
'-' if *line_start => {

View File

@ -20,6 +20,7 @@ impl Into<jsonfeed::Item> for Post {
let mut result = jsonfeed::Item::builder()
.title(self.front_matter.title)
.content_html(self.body_html)
.content_text(self.body)
.id(format!("https://christine.website/{}", self.link))
.url(format!("https://christine.website/{}", self.link))
.date_published(self.date.to_rfc3339())

View File

@ -1,49 +1,74 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQENBGABktwBCACygH18iP698tm50PdmNeOd5orUVTV3nfB7z5wyPt7ZocUrlA3o
ok4D0Uu0ffJob91BquneCRyXdcbwUal29p/6JApTB5yO6kYJgDodJJ9/EEOhNXho
KEauzm25KGkcyiFVgapymBpvZEnec1gWO0/NGkv59aRGd48I45U+QicxltYbE1Wa
BTGu5B8z02q0IJp+M+Qji7iRISCWc78lRA+G4U6TZ8qckoWWz8GomKtd5y9pxlUQ
6tuYHcTxy8NLBnmSfUkg81tJ6Tym7gBAJdh2VdmJkxKOe2g92a4u3Azo5yUobBkP
rRkkoeCGf4A9A/hicPwpYTTVIrJ9RYX1gtAvABEBAAG0MkNocmlzdGluZSBEb2Ry
aWxsIChZdWJpa2V5KSA8bWVAY2hyaXN0aW5lLndlYnNpdGU+iQFUBBMBCAA+FiEE
N46/xj15tJ2MNkSMgDyTWuEYoiQFAmABktwCGwMFCRLMAwAFCwkIBwIGFQoJCAsC
BBYCAwECHgECF4AACgkQgDyTWuEYoiTNKAf8DWvbJWRlBrUN7CRRr+KBfd9X/UEv
wus7odDiEuAlnODFVnsE63/K+XBOzDtrpr/4Ldr8WQkFGbbFoG8hg6SUhE3rpBrS
h7cpNe8PkBeHA6ekeVcBUGV6XvZ65FjPUan8xsoBDyrIPkIFzsqFOpQ7hjes+lJa
J3s2bgpw7z3/rs8o8mOxMU0A2D2UFVn8OtiHT6WgeXL6UnZqgZwEY+oipVLNP6ZG
lfi4UIexpbSzciS1qZ4/qJfQeiVM2LkIJgpV8fn42XQ10VDkarKmx1XNN+sZI5vn
3jJHtB+D6ZjFzVLFqW//N//MslQOrkXnBfa/KeU1ULdY9hEShpAePyfaQ7kBDQRg
AZLcAQgArCh+XEWsg9OfTrrIuFtCyRxr94yb2EMUCFJMKvsbeklmNQBaZ7N1RyE4
IPFnxyk/0y7XZ3gfo5k1SCr5m03vQuyem8SbUMHppaGUp5ZgZA/RWOh68ygrvHTG
gWAhe3T8/gklUeLcp8reOZa/wSbv1VGewgOwplgUkQxa1v7YJpbhJtnKoiJmWcfc
abie7bt1ok10UVSLNTbPUiSIP1Sb1i9NXtkg0lFQjxPB5zAQbtuqnO/LAVHbt1U+
xzfh5xJ+DKoBQhuKbFftUp4Hjwr/qv53XMz6MMUMJIDp9j3icQm2ifSKx74ic5vn
kaF3oWRJODTS/fR+FEUpdakIozCURwARAQABiQE8BBgBCAAmFiEEN46/xj15tJ2M
NkSMgDyTWuEYoiQFAmABktwCGwwFCRLMAwAACgkQgDyTWuEYoiTSEQgAruSRZBLi
JwHNQz2ZtPhGa8Avbj5mqhD8Zs627gKM4SdgYka+DjoaGImoqdhM1K0zBVGrfDZV
CDD+YILyW6C6+9/0TLHuhD9oo+byo6XXgHmtodiZBFLYHvtNNZMYoN/1eWaJBmxX
39r1BHA2fTSjeg3YChdIqMtFhHps/5ckyPUzTFrzJPOaz4xLC5QPog1aOzKzL8UA
oWseZjWgDJJbWIbiyoz3J7oHfqwRIhZEOJyVn2N46lXk7Xg6dLbqwq3+XCT0tph8
0O/Q+zIvy/1q8dAQJsvomf81GsZdPsR9MJZiGbbM/gyCOjRFX163TiTIyeQPLTbA
Er7mIpM0HYgK1rkBDQRgAZNMAQgAz+3aBcBzaLasO7ksB8o+3xctw2NydTR+VEw+
Pxub7CDY0BEcs7IuqjuPbFJ74MU1TriCCB5zP7bHFrqdwS+51E0WVunkOgxPYm9O
vEtkxyPHJW6PiY0xeSQt9hhqJe5TV/HpscQISfovd9DZkTbEjvCnpVnWjfGih3iR
xy3o51gj5l47oSZFeRDZr9gNkJ+gY4I2GfgGA40UWXyj9jHyjh6jA32YDo19XKud
UqyLgPeUjOuGp8Y4Gu5JNmqb0Wqb2AEqOQTSGRCJaOzNxgxSUeECT7xzBYgn7Ghf
7iJV+U9hqr9Jp3+6b5OJDv3QIfh48jOSIigbnyGs/4g7kUvmFQARAQABiQJyBBgB
CAAmFiEEN46/xj15tJ2MNkSMgDyTWuEYoiQFAmABk0wCGy4FCRLMAwABQAkQgDyT
WuEYoiTAdCAEGQEIAB0WIQR79+Uxq6N/d/0Xj3LOF3gb9V3pRQUCYAGTTAAKCRDO
F3gb9V3pRf/EB/9SuYeFL5bzg8TwbO/bhnAovYiiURW2bDtUnHKhiHHquuMo7iWN
EbaSGFyURiffJJhjSq5+H+I8CeW+rHVJQ6yxoDzQfXHsBaAwP+b9geVhUEHvnQMy
ydTvyvoiT84XrMJ4KuOti2lqpCoHRzBodLRaXLia2kyyTCj3QGyzzlFEChM0sZM5
rStSkexixGSIthFV9xx+wfdcA6Er3RagNYBb9scFNg1vM/v8YC0sI/bzwdjltBeH
F9wWpmOvDEvmY35hnMEpjrrvJkbi12sd33Tzh+pvhFxMa3HZihQ8MsST750kejNq
ZAZ9D+DmJDYAD6aycAJCONtnivtvReQWACKQgkUH/jb5I7osdN8s5ndoUy+iInX5
SU5K04LYK/oo/S8hLQ+lZeqJrEYqTmEJjzdULQS6TXSpriVm4b70Qtgr5X929JSo
lqNa0kWR2LdF4q1wFAxkPEskPrM/fPEqZfjBfaezvSUTOU32KoCoWoeZqqbdBwXp
ONwH73yiX9dc6wP9prW3laqUWAsSwMMBOYdKhOQJAy5J6ym37Q0noe1VuGQAGIlb
OTOquCjjj8k63TfOPuJonKQUU1UoHtuukGJ27yUXljbsy2BmbgLcsm/R9xtz5Jxj
q4D/oYcgejx26NsV3alg1VfmqQiUD7/xUIOnR9bllPmOnUtjqaotwe/wUD+47z8=
=O4RS
-----END PGP PUBLIC KEY BLOCK-----
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQENBF3pGlsBCACaR3eO9ELleaQypUJYRqI8rMHBE6oV6cexCbVL5efTy0wvvI4P
tgA5UKKDq6XRybhEulRUaqSFlwsFPRqMDT9bNK49d56oh0GwbtQwnNW6ZfHEgf5Q
9gPbkwnfUMgVhJofiV/6mRhzrJUKfb+11dW4shV4lqffAeiO+wi6X0XMX9HsN6RE
eO5Y4or/uKgz9ikQjYklNvQ4laXdtqmLbA5DkHRXWAhmKii9FcnRqCW/7Pbztfn8
JrH9TcHqbp1T6nFykEhYtkHS02UfD35Y7qugtDz3okM2vggllitQAXI9+BHpLtce
8Wbr1D4py8AqqTyFrL4AwIYAwmjLGEN0pSRTABEBAAG0KENocmlzdGluZSBEb2Ry
aWxsIDxtZUBjaHJpc3RpbmUud2Vic2l0ZT6JAVQEEwEIAD4WIQSW/59zCcBe6mWf
HDSJeEtIFe6peAUCXekaWwIbAwUJEswDAAULCQgHAgYVCgkICwIEFgIDAQIeAQIX
gAAKCRCJeEtIFe6peOwTB/46R0LAx+ZpiNT8WV1z+/IrFiSXZwN0EHS3LNBMAYlL
Hn2jUa1ySgaBIwQy3mhDyOB9CESdNXo+Hr/sSG1khaCAoruQ7z4lK3UmpEeZmQsv
iWOlK7NQYMtKKamxNK46h5ld8X6/8RmGOupuKuwUrdvZ+L67K6oomqrK/yJ9RUBs
SYAceVXYSd/1QXEPIm7hjdhXGgk8FS8vODNI23ZiniqDCwbMMcM1g9QeDqPilsZY
T6L+YO63FpbhEWhEKmaXrsB31o7si7wfpAlcXJh6WHluaPUrxwr45O2u01NHb+ZG
J8pHcGgS0WBVCqSdGYy9JWbPGn/TvokFxSxfMd5wfwImuQENBF3pGlsBCAC/Qy/X
jjcqhc2BDlWJaCLA6ZlR9pEAX/yuQCAR9tO68/vhj0TCC5DhbWpxAq/B8R/lcp1x
AzE6sxpZXlKlTfyOwAMF2d28jTz35dZN7lERlo/cj4CxCS/t5CPCwNp45AlSuJrQ
ofoqKm+AiJ4rrU1BipmumKawrDjfnDzmANPlduPdYzXKUL9sPmbWXPzqj/aV1jKB
3tQ1wDZCDrACmPKAgYflHqq1lWwrQZf68CGCV/Lqldv9T1iLtmNqERlPKROpoTYD
8OC/KprYiKLOJY0jtNB6G/eXCBN8vjkQjlQ3c7BacaCHD3ddOZtdbHXqEJlLfq/k
kCMm+FDQXGu7S3XpABEBAAGJATwEGAEIACYWIQSW/59zCcBe6mWfHDSJeEtIFe6p
eAUCXekaWwIbDAUJEswDAAAKCRCJeEtIFe6peOX8CACL8RPJoIG/+mrcB32l7LOO
v0F9fuWUXpv05a3QkeBKaZhJVwfmR2LmgbnlQhA+KuDIfeKl5lkXz0WM0659vo9P
1hgHidqV3Wf7axBwxHWkWWE0JXc7o2Z/WSa65baRx8S9HLUHzZz0al8y87WgEoGw
o0bFKuj6xvaMgsrrJY7qrcnfYsDg9nkya+VrLVzZCS6fIDqBfuRge8Jj+XcX4Boi
aGkI30+5D0if1p2Zt7kOpfgXff63lEAWK+8pa1b2MGK5po6C7EGKkGppECm6mOgw
8l3U/jq7yXgiVx8n6WqNms9g1IRHNN2QICIaERGYvBOJn9XwTDfeVhjLvguPKTD3
uQENBF3pGnsBCAC/aCA120kcIWup6XKt4/u92GFYn/fVaF5Jxx00FRr+0vrPwl88
e7lYi8ZJUaanC8Lql90nQ/1jzxCreMSqOTeppxHE+Za+iCNGh0uP0TPitwlzszUU
oO5Z5sKIamSPXFZJB/XB/VK6xPDw54IdkWzYp2otxmhcnJeIuRiNJfmUM8MZY2mV
j3VVflWjzeFnSMgeuHWbWQ+QfMzwJBquqqF3A148lPBH1q8bRWg6EiLJr/UlSBgb
DLQyTwQ8IAihrf6TrEv6mE1s6VusPS5IZ44QKMQ2VyBoGGkfyxK5gu26V74PFlcq
VtGKss78sahJhBnbrlHL2k+f/mnmiQaA7ZXhABEBAAGJATwEGAEIACYWIQSW/59z
CcBe6mWfHDSJeEtIFe6peAUCXekaewIbIAUJEswDAAAKCRCJeEtIFe6peHHHB/9R
BK+l3agYh+40SAY+Lufqlz0vvFM+zRVRXLSHIwlpmXJmD0kPA2Uri9BVZ41rj+Lt
DMf3b3WW3FZMGQH+olABSeVVWHtwP25ccDwdumU4s3bdQst3yZ3E2rjezixj/2nC
qMqThE5WH7AdxdRihNNFvSvddDbNw1vcbeZ5MDlyFH63Qw3gl5fPbiJXNuSNwXN2
Yi0J3GQlh/eCVaL7HHKdkfvImt6vhGWUWK0dPuz5IjGuC76zdUWlHoZ9OKLitQZC
Zss1jjErIyVEfrKS/T8Z70tjHacNexBtJLqGev6KuopWig9LQ13ytE/ZP0XX+svb
+ZaVsDKuVHO7FSncPVzkuQENBF3pGrgBCADau+f5dSQvq1d+DbghQ06V6/ZATln2
pXKQpqHeTc7jBL3qgDYV6w4Gayug6E8rWj275LGinSzGN/road9i2NYZPTDaD79y
CZYSaHITwR1cH+JOeIrD2spoLX8hZfOC/qHMoJNr7x7EaC+iSlXL6C9CLfBP0kTD
qZLFK7nGSJPaUdJTD412iI5HcqgKPqidDbX75SHG5RC2vkARvkPDW8lEuJZvhjwD
aOtR1i1QWFdBadGUOR5cAh6uYDDsum1WqO3H4bUSK16/S8C6wiEkDlJitnFogVtA
2IkooUTWll33+bdTjuxIsGb4us0YaxbFKDy9DL91/ek/e3fyaOUaSBuBABEBAAGJ
AnIEGAEIACYWIQSW/59zCcBe6mWfHDSJeEtIFe6peAUCXekauAIbDgUJEswDAAFA
CRCJeEtIFe6peMB0IAQZAQgAHRYhBBIgz5FIt2/z+IaZ5GRgK4TTvWujBQJd6Rq4
AAoJEGRgK4TTvWujgSQIAJUbPUPEyJe3cFCWIZd5sivMpZpV+Ef0npsZWc6lOPzi
AwFHxU5BCCd1RaCT7u3ZZaov6mzr9MtnPA8ZN+2nO+aIn3T9w5e7ibDZWS5mtlTS
WRebL3l4doPSL59dJzFchPK1ZNOgkIW6syyU+t3xSuM8KPpy03ORCZCf74D/yx0q
yT9N8xv5eovUJ4caDjG6np3LPUdc0wucf9IGi/2K06M+YE6gy8mjQAp5OKDa5wTK
FkVYVjBLhk+RvkU0Xzq5aRzFNnaQPyutCSe3kObrN2bK22eBA7LS3x/3XtV6b7EV
ZCdTWQgAFj4y0CkzyGdb4eDa2YiNQnzF7oCvI+RUA9//rAgAlG2fD6iGF+0OSpKu
y2btgHm5XbJm8en/5n/rswutVkGiGRKpYB6SwJ1PgZvcpn2nHBqYO+E95uSScjzj
3D5Rd2k4GwbXNyma/b0PX1iABSQmavjnoMM4c3boCc4gQoV54znt43DIovr9WmTR
pgAUh6H3hl80PmPUe7uJdoDDWRDLVJ1OPv1Wc2w6lAXrxtKBblOIP3csRn0D1EC4
/+Lr8n1OEV24lwwQoWvOZAWo0CZnR8v5+Qw3YuAxlw7U/8lgaGsaGiP25RWrtoix
3vQDOOv2/K+UytLxJZnAn1C1G1GGtrQyO1ibIPrTq86nexk2nr2djJGXFRp0unGl
Gu3xGrkBDQRd6RwGAQgAycfK7SCprgO8R9T4nijg8ujC42ewdQXO0CPrShKYLqXm
kFnKxGT/2bfJPhp38GMQnYOwYHTlcazmvzmtXlydtCkD2eDiU6NoI344z5u8j0zd
gE1GlG3FLHXPdKcnFchmsKSIMFW0salAqsUo50qJsQAhWuBimtXTW/ev1i+eFCyT
IJ6X8edVEO8Ub4cdHTLcSUgeTi51xT6tO3Ihg9D+nraGi5iT1RCk070ddtLFbhne
KNiG96lbhgNhpE8E3pkSXoGIeFzD9+j7wKoF5Tz+Bra7kiZFGrBWWyMY/rlubJog
zpuZ/kQgJn/sWfsJyLX6ya59PaRM+5aLGAEJiHJYRQARAQABiQE8BBgBCAAmFiEE
lv+fcwnAXuplnxw0iXhLSBXuqXgFAl3pHAYCGwwFCRLMAwAACgkQiXhLSBXuqXgt
xwf9HTyY1J4cRw/NyhKE+MABj/chCfCxePlsUMIL1iKSbxL2NmuQmPZGDKdAYOrH
ocR9NVFV/g77TfSuSEe2O/gz3LAOSn+RLs4rqq3yxJ10M/1zXfPIgbQQILhDyt4d
uR0s7hmmPkDT0CwBn8+jof5fH+pEsPnWmHAFqQ5yuyJDwa0+ICHr8zxqhvZJLJRv
GTSm9gXpXq/IFgsWeFmwC8GTaTyl5rd8qOxmcbV/x9j+0Q+GryqD8ILPyVp0PN39
2gSNBVfol2r5d+WZ5ye0oXbJGgy89vZRyUF5SQSJ83vF5NaXOarV3qJsy3v9lukK
JHDVbdWMkg5jUeusy24SURK5WA==
=zxPx
-----END PGP PUBLIC KEY BLOCK-----

View File

@ -19,7 +19,7 @@
<entry>
<id>https://christine.website/@post.link</id>
<title>@post.front_matter.title</title>
<published>@post.date.to_rfc3339()</published>
<updated>@post.date.to_rfc3339()</updated>
<link href="https://christine.website/@post.link" rel="alternate"/>
</entry>
}

View File

@ -9,7 +9,6 @@
<link>https://christine.website/blog</link>
<description>Tech, philosophy and more</description>
<generator>@APP https://github.com/Xe/site</generator>
<ttl>1440</ttl>
@for post in posts {
<item>
<guid>https://christine.website/@post.link</guid>

View File

@ -9,7 +9,7 @@
<meta name="twitter:card" content="summary" />
<meta name="twitter:site" content="@@theprincessxena" />
<meta name="twitter:title" content="@post.front_matter.title" />
<meta name="twitter:description" content="Posted on @post.date.format("%Y-%m-%d")" />
<meta name="twitter:description" content="Posted on @post.date" />
<!-- Facebook -->
<meta property="og:type" content="website" />
@ -20,9 +20,7 @@
<meta name="description" content="@post.front_matter.title - Christine Dodrill's Blog" />
<meta name="author" content="Christine Dodrill">
@if post.front_matter.redirect_to.is_none() {
<link rel="canonical" href="https://christine.website/@post.link">
}
<link rel="canonical" href="https://christine.website/@post.link">
<script type="application/ld+json">
@{
@ -31,7 +29,7 @@
"headline": "@post.front_matter.title",
"image": "https://christine.website/static/img/avatar.png",
"url": "https://christine.website/@post.link",
"datePublished": "@post.date.format("%Y-%m-%d")",
"datePublished": "@post.date",
"mainEntityOfPage": @{
"@@type": "WebPage",
"@@id": "https://christine.website/@post.link"
@ -47,12 +45,6 @@
@}
</script>
@if let Some(to) = post.front_matter.redirect_to.clone() {
<script>
window.location.replace("@to");
</script>
}
@body
<hr />

View File

@ -10,7 +10,7 @@
<h3>Email</h3>
<p>me@@christine.website</p>
<p>My GPG fingerprint is <code>803C 935A E118 A224</code>. If you get an email that appears to be from me and the signature does not match that fingerprint, it is not from me. You may download a copy of my public key <a href="/static/gpg.pub">here</a>.</p>
<p>My GPG fingerprint is <code>799F 9134 8118 1111</code>. If you get an email that appears to be from me and the signature does not match that fingerprint, it is not from me. You may download a copy of my public key <a href="/static/gpg.pub">here</a>.</p>
<h3>Social Media</h3>
<ul>

View File

@ -1,3 +1,5 @@
@use crate::APPLICATION_NAME as APP;
@()
</div>
<hr />
@ -5,7 +7,7 @@
<blockquote>Copyright 2020 Christine Dodrill. Any and all opinions listed here are my own and not representative of my employers; future, past and present.</blockquote>
<!--<p>Like what you see? Donate on <a href="https://www.patreon.com/cadey">Patreon</a> like <a href="/patrons">these awesome people</a>!</p>-->
<p>Looking for someone for your team? Take a look <a href="/signalboost">here</a>.</p>
<p>Served by @env!("out")/bin/xesite</a>, see <a href="https://github.com/Xe/site">source code here</a>.</p>
<p>Served by @APP running commit <a href="https://github.com/Xe/site/commit/@env!("GITHUB_SHA")">@env!("GITHUB_SHA")</a>, see <a href="https://github.com/Xe/site">source code here</a>.</p>
</footer>
</div>

View File

@ -9,7 +9,7 @@
<meta name="twitter:card" content="summary" />
<meta name="twitter:site" content="@@theprincessxena" />
<meta name="twitter:title" content="@post.front_matter.title" />
<meta name="twitter:description" content="Posted on @post.date.format("%Y-%m-%d")" />
<meta name="twitter:description" content="Posted on @post.date" />
<!-- Facebook -->
<meta property="og:type" content="website" />
@ -29,7 +29,7 @@
"headline": "@post.front_matter.title",
"image": "https://christine.website/static/img/avatar.png",
"url": "https://christine.website/@post.link",
"datePublished": "@post.date.format("%Y-%m-%d")",
"datePublished": "@post.date",
"mainEntityOfPage": @{
"@@type": "WebPage",
"@@id": "https://christine.website/@post.link"

View File

@ -3,62 +3,8 @@
@(title: Option<&str>, styles: Option<&str>)
<!DOCTYPE html>
<!--
MMMMMMMMMMMMMMMMMMNmmNMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMNmmmd.:mmMM
MMMMMMMMMMMMMMMMMNmmmNMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMNmmydmmmmmNMM
MMMMMMMMMMMMMMMMNm/:mNMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMNmms /mmmmmMMM
MMMMMMMMMMMMMMMNmm:-dmMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMNmmmmdsdmmmmNMMM
MMMMMMMMMMMMMMMmmmmmmmNMMMMMMMMMMMNmmdhhddhhmNNMMMMMMMMMMMMMMMMNmy:hmmmmmmmmMMMM
MMMMMMMMMMMMMMNm++mmmmNMMMMMMmdyo/::.........-:/sdNMMMMMMMMMMNmmms`smmmmmmmNMMMM
MMMMMMMMMMMMMMmd.-dmmmmMMmhs/-....................-+dNMMMMMMNmmmmmmmmmmmmmmMMMMM
MMMMMMMMMMMMMNmmmmmmmmho:-...........................:sNMMNmmmmmmmmmmmmmmmNMNmdd
MMMMMMMMMMMMNmd+ydhs/-.................................-sNmmmmmmmmmmmmmmmdhyssss
MMMMMMMMMMMNNh+`........................................:dmmmmmmmmmmmmmmmyssssss
MMMMNNdhy+:-...........................................+dmmmmmmmmmmmmmmmdsssssss
MMMN+-...............................................-smmmmmmmmmmmmmmmmmysyyhdmN
MMMMNho:::-.--::-.......................----------..:hmmmmmmmmmmmmmmmmmmmNMMMMMM
MMMMMMMMNNNmmdo:......................--------------:ymmmmmmmmmmmmmmmmmmmMMMMMMM
MMMMMMMMMMds+........................-----------------+dmmmmmmmmmmmmmmmmmMMMMMMM
MMMMMMMMMh+........................--------------------:smmmmmmmmmmmmmmNMMMMMMMM
MMMMMMMNy/........................-------------::--------/hmmmmmmmmmmmNMMMMMMNmd
MMMMMMMd/........................--------------so----------odmmmmmmmmMMNmdhhysss
MMMMMMm/........................--------------+mh-----------:ymmmmdhhyysssssssss
MMMMMMo.......................---------------:dmmo------------+dmdysssssssssssss
yhdmNh:......................---------------:dmmmm+------------:sssssssssssyhhdm
sssssy.......................--------------:hmmmmmmos++:---------/sssyyhdmNMMMMM
ssssso......................--------------:hmmmNNNMNdddysso:------:yNNMMMMMMMMMM
ysssss.....................--------------/dmNyy/mMMd``d/------------sNMMMMMMMMMM
MNmdhy-...................--------------ommmh`o/NM/. smh+-----------:yNMMMMMMMMM
MMMMMN+...................------------/hmmss: `-//-.smmmmd+----------:hMMMMMMMMM
MMMMMMd:..................----------:smmmmhy+oosyysdmmy+:. `.--------/dMMMMMMMM
MMMMMMMh-................---------:smmmmmmmmmmmmmmmh/` `/s:-------sMMMMMMMM
MMMMMMMms:...............-------/ymmmmmmmmmmmmmmmd/ :dMMNy/-----+mMMMMMMM
MMMMMMmyss/..............------ommmmmmmmmmmmmmmmd. :yMMMMMMNs:---+mMMMMMMM
MMMMNdssssso-............----..odmmmmmmmmmmmmmmh:.` .sNMMMMMMMMMd/--sMMMMMMMM
MMMmysssssssh/................` -odmmmmmmmmmh+. `omMMMMMMMMMMMMh/+mMMMMMMMM
MNdyssssssymMNy-.............. `/sssso+:. `+mMMMMMMMMMMMMMMMdNMMMMMMMMM
NhssssssshNMMMMNo:............/.` `+dMMMMMMMMMMMMMMMMMMMMMMMMMMMM
ysssssssdMMMMMMMMm+-..........+ddy/.` -omMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
ssssssymMMMMMMMMMMMh/.........-oNMMNmy+--` `-+dNMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
ssssydNMMMMMMMMMMMMMNy:........-hMMMMMMMNmdmMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
sssymMMMMMMMMMMMMMMMMMm+....-..:hMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
symNMMMMMMMMMMMMMMMMMMMNo.../-/dMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
dNMMMMMMMMMMMMMMMMMMMMMMh:.:hyNMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
la budza pu cusku lu
<<.i ko do snura .i ko do kanro
.i ko do panpi .i ko do gleki>> li'u
-->
<html lang="en">
<head>
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=G-XLJX94YGBV"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag()@{dataLayer.push(arguments);@}
gtag('js', new Date());
gtag('config', 'G-XLJX94YGBV');
</script>
@if title.is_some() {
<title>@title.unwrap() - Christine Dodrill</title>
} else {
@ -69,7 +15,7 @@ la budza pu cusku lu
<link rel="stylesheet" href="/css/gruvbox-dark.css" />
<link rel="stylesheet" href="/css/shim.css" />
<link rel="stylesheet" href="https://cdn.christine.website/file/christine-static/prism/prism.css" />
@if Utc::now().month() == 12 || Utc::now().month() == 1 || Utc::now().month() == 2 { <link rel="stylesheet" href="/css/snow.css" /> }
@if Utc::now().month() == 12 { <link rel="stylesheet" href="/css/snow.css" /> }
<link rel="manifest" href="/static/manifest.json" />
<link rel="alternate" title="Christine Dodrill's Blog" type="application/rss+xml" href="https://christine.website/blog.rss" />

View File

@ -10,7 +10,7 @@
<p>
<ul>
@for post in posts {
<li>@post.date.format("%Y-%m-%d") - <a href="/@post.link">@post.front_matter.title</a></li>
<li>@post.date - <a href="/@post.link">@post.front_matter.title</a></li>
}
</ul>
</p>
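
Formatting `@post.date` with `%Y-%m-%d` trims the rendered timestamps to a bare date; `format` here is chrono's strftime-style formatter, so the same call works anywhere a `DateTime` is in scope:

    use chrono::{TimeZone, Utc};

    // chrono's format() takes strftime-style specifiers.
    let date = Utc.ymd(2020, 12, 28).and_hms(15, 42, 11);
    assert_eq!(date.format("%Y-%m-%d").to_string(), "2020-12-28");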

View File

@ -9,7 +9,7 @@
<meta name="twitter:card" content="summary" />
<meta name="twitter:site" content="@@theprincessxena" />
<meta name="twitter:title" content="@post.front_matter.title" />
<meta name="twitter:description" content="Posted on @post.date.format("%Y-%m-%d")" />
<meta name="twitter:description" content="Posted on @post.date" />
<!-- Facebook -->
<meta property="og:type" content="website" />
@ -29,7 +29,7 @@
"headline": "@post.front_matter.title",
"image": "https://christine.website/static/img/avatar.png",
"url": "https://christine.website/@post.link",
"datePublished": "@post.date.format("%Y-%m-%d")",
"datePublished": "@post.date",
"mainEntityOfPage": @{
"@@type": "WebPage",
"@@id": "https://christine.website/@post.link"