Compare commits


1 Commit

Author SHA1 Message Date
Cadey Ratio d6468f5382 step 1 2019-06-05 07:38:21 -04:00
594 changed files with 2361 additions and 49643 deletions

1
.envrc

@ -1 +0,0 @@
use flake

1
.gitattributes vendored

@ -1 +0,0 @@
nix/sources.nix linguist-vendored

5
.github/FUNDING.yml vendored

@ -1,5 +0,0 @@
# These are supported funding model platforms
github: Xe
patreon: cadey
ko_fi: A265JE0


@ -1,53 +0,0 @@
version: 2
updates:
- package-ecosystem: cargo
directory: "/"
schedule:
interval: daily
time: "10:00"
open-pull-requests-limit: 10
ignore:
- dependency-name: tokio
versions:
- ">= 0.3.a, < 0.4"
- dependency-name: hyper
versions:
- 0.14.4
- 0.14.6
- dependency-name: serde_json
versions:
- 1.0.62
- 1.0.63
- 1.0.64
- dependency-name: tracing
versions:
- 0.1.24
- 0.1.25
- dependency-name: futures
versions:
- 0.3.12
- 0.3.13
- dependency-name: url
versions:
- 2.2.1
- dependency-name: thiserror
versions:
- 1.0.24
- dependency-name: tracing-futures
versions:
- 0.2.5
- dependency-name: serde
versions:
- 1.0.123
- dependency-name: rand
versions:
- 0.8.3
- dependency-name: log
versions:
- 0.4.14
- dependency-name: serde_yaml
versions:
- 0.8.16
- dependency-name: tokio
versions:
- 1.1.1


@ -1,18 +0,0 @@
name: "Nix"
on:
push:
branches:
- main
pull_request:
branches:
- main
jobs:
docker-build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- uses: cachix/install-nix-action@v12
- uses: cachix/cachix-action@v7
with:
name: xe
- run: nix-build --no-out-link

6
.gitignore vendored

@ -2,9 +2,3 @@
cw.tar
.env
.DS_Store
/result-*
/result
.#*
/target
.patreon.json
.direnv

7
.travis.yml Normal file

@ -0,0 +1,7 @@
language: generic
services:
- docker
script:
- docker build .


@ -1,8 +0,0 @@
{
"editor.wordWrap": "wordWrapColumn",
"[markdown]": {
"editor.wordWrap": "wordWrapColumn",
"editor.wordWrapColumn": 80,
"editor.wordBasedSuggestions": false
},
}

36
.vscode/tasks.json vendored

@ -1,36 +0,0 @@
{
// See https://go.microsoft.com/fwlink/?LinkId=733558
// for the documentation about the tasks.json format
"version": "2.0.0",
"options": {
"env": {
"out": "/fake"
}
},
"tasks": [
{
"label": "run",
"type": "shell",
"command": "cargo run",
"problemMatcher": [
"$rustc"
]
},
{
"label": "auto rerun",
"type": "shell",
"command": "cargo watch -x run",
"problemMatcher": [
"$rustc"
]
},
{
"label": "test",
"type": "shell",
"command": "cargo test",
"problemMatcher": [
"$rustc"
]
}
]
}


@ -1,15 +0,0 @@
# Changelog
New site features will be documented here.
## 2.1.0
- Blogpost bodies are now present in the RSS feed
## 2.0.1
Custom render RSS/Atom feeds
## 2.0.0
Complete site rewrite in Rust

2878
Cargo.lock generated

File diff suppressed because it is too large


@ -1,77 +0,0 @@
[package]
name = "xesite"
version = "2.4.0"
authors = ["Xe Iaso <me@christine.website>"]
edition = "2018"
build = "src/build.rs"
repository = "https://github.com/Xe/site"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
axum = "0.5"
axum-macros = "0.2"
axum-extra = "0.2"
color-eyre = "0.6"
chrono = "0.4"
comrak = "0.12.1"
derive_more = "0.99"
envy = "0.4"
estimated_read_time = "1"
futures = "0.3"
glob = "0.3"
http = "0.2"
http-body = "0.4"
hyper = "0.14"
kankyo = "0.3"
lazy_static = "1.4"
log = "0.4"
mime = "0.3.0"
prometheus = { version = "0.13", default-features = false, features = ["process"] }
rand = "0"
reqwest = { version = "0.11", features = ["json"] }
serde_dhall = "0.11.0"
serde = { version = "1", features = ["derive"] }
serde_yaml = "0.8"
sitemap = "0.4"
thiserror = "1"
tokio = { version = "1", features = ["full"] }
tokio-stream = { version = "0.1", features = ["net"] }
tracing = "0.1"
tracing-futures = "0.2"
tracing-subscriber = { version = "0.3", features = ["fmt"] }
xml-rs = "0.8"
url = "2"
uuid = { version = "0.8", features = ["serde", "v4"] }
# workspace dependencies
cfcache = { path = "./lib/cfcache" }
jsonfeed = { path = "./lib/jsonfeed" }
mi = { path = "./lib/mi" }
patreon = { path = "./lib/patreon" }
[dependencies.tower]
version = "0.4"
features = [ "full" ]
[dependencies.tower-http]
version = "0.2"
features = [ "full" ]
# os-specific dependencies
[target.'cfg(target_os = "linux")'.dependencies]
sdnotify = { version = "0.2", default-features = false }
[build-dependencies]
ructe = { version = "0.14", features = [ "mime03" ] }
[dev-dependencies]
pfacts = "0"
serde_json = "1"
eyre = "0.6"
pretty_env_logger = "0"
[workspace]
members = [
"./lib/*",
]

21
Dockerfile Normal file

@ -0,0 +1,21 @@
FROM xena/go:1.12.1 AS build
ENV GOPROXY https://cache.greedo.xeserv.us
COPY . /site
WORKDIR /site
RUN CGO_ENABLED=0 go test -v ./...
RUN CGO_ENABLED=0 GOBIN=/root go install -v ./cmd/site
FROM xena/alpine
EXPOSE 5000
RUN apk add --no-cache bash
WORKDIR /site
COPY --from=build /root/site .
COPY ./static /site/static
COPY ./templates /site/templates
COPY ./blog /site/blog
COPY ./talks /site/talks
COPY ./css /site/css
COPY ./app /app
COPY ./app.json .
HEALTHCHECK CMD wget --spider http://127.0.0.1:5000/.within/health || exit 1
CMD ./site


@ -1,4 +1,4 @@
Copyright (c) 2017-2021 Christine Dodrill <me@christine.website>
Copyright (c) 2017 Christine Dodrill <me@christine.website>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages


@ -1,33 +1,5 @@
# site
[![built with
nix](https://builtwithnix.org/badge.svg)](https://builtwithnix.org)
![Nix](https://github.com/Xe/site/workflows/Nix/badge.svg)
![Rust](https://github.com/Xe/site/workflows/Rust/badge.svg)
My personal/portfolio website.
![](https://puu.sh/vWnJx/57cda175d8.png)
## <big>Information for people wishing to use this code</big>
Don't. This code is not made for you to be able to use without extensive
modification. The [license](https://github.com/Xe/site/blob/main/LICENSE) of
this code is intentionally chosen in such a way that it will make reuse of this
website code verbatim very difficult.
If you are still adamant about using this backend, please keep several things in
mind:
1. All blog content is all rights reserved. I aggressively pursue and report
content theft.
2. You _must_ fully comply with the license. I will aggressively pursue people
that are not in compliance with the license.
3. You are on your own. I will not help you. This is code I made for myself and
it's only really open source as a side effect of making deployment on NixOS
easier. Please do not be the person that makes me have to take this repo
closed source.
4. Upon security issues being found and remediated, you will not be notified
about issues or remediation instructions.
This is probably not what you are looking for. Make your own website. Look into
[Hugo](http://gohugo.io/) or [Zola](https://www.getzola.org/). They are going to
be better maintained than this site will be.

7
app.json Normal file

@ -0,0 +1,7 @@
{
"scripts": {
"dokku": {
"postdeploy": "curl https://www.google.com/ping?sitemap=https://christine.website/sitemap.xml"
}
}
}

1
app/CHECKS Normal file

@ -0,0 +1 @@
/ Christine


@ -1,93 +0,0 @@
---
title: Compiling Code to Matter in My Living Room
date: 2022-03-28
tags:
- openscad
- 3dprinting
---
In a moment of weakness, my husband and I got a 3d printer. It's mostly been sitting around and not doing much since we got it, but recently I found a great use for it: I wanted a controller stand for my Valve Index controllers and VR full body trackers.
After doing some digging on Thingiverse, I found [this stand](https://www.thingiverse.com/thing:4587097) that looked like it had promise. So I downloaded the model, sliced it and then sent it over to Kyubey:
<blockquote class="twitter-tweet"><p lang="tl" dir="ltr">Kyuubey is happy <a href="https://t.co/atTLN8MSgc">pic.twitter.com/atTLN8MSgc</a></p>&mdash; Xe Iaso (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1507485129907871747?ref_src=twsrc%5Etfw">March 25, 2022</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
[Kyubey's name is a reference to <a href="https://madoka.fandom.com/wiki/Kyubey">Kyubey</a> from Puella Magi Madoka Magica.](conversation://Mara/hacker)
Once it was done I ended up with a stand that I could feed [these cables I got from Amazon](https://www.amazon.ca/gp/product/B09LSF8XL9/) through. The tracker holes worked great, but the controller holes were just barely too small.
This was kinda frustrating and I almost gave up on the project, but then I remembered that [OpenSCAD](https://openscad.org) existed. OpenSCAD is a weird programming environment / 3D modeling hybrid program that I've seen used on Thingiverse. It works by letting you position platonic solids into a 3d environment, and from there you can create anything you want.
One of the primitives that OpenSCAD offers is a cylinder. So I wondered if I could use one of those to widen the hole in the index stand and then reprint the part with the wider hole.
[Wait, you're using a CAD program to fix your 3D print by modifying the model instead of using, I don't know, a drill and 5 minutes to make it fit that way?](conversation://Numa/dismay)
[There's no doing like overdoing!](conversation://Cadey/enby)
After some finagling, I managed to get the cylinders in the right place with this OpenSCAD code:
```scad
//difference() {
color("magenta") translate([0, 0, 0]) import("./assets/ValveTrackerDeckEditedByInugoro.stl");
// bores for controller holders
color([0, 1, 0]) translate([63, 44, 0]) cylinder(h = 55, r = 4.75);
color([0, 1, 0]) translate([-63, 44, 0]) cylinder(h = 55, r = 4.75);
//}
```
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">Some finagling required <a href="https://t.co/7T0R6x1XoP">pic.twitter.com/7T0R6x1XoP</a></p>&mdash; Xe Iaso (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1508566854926745614?ref_src=twsrc%5Etfw">March 28, 2022</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
And when I uncommented the `difference()` block, it ended up looking good enough:
<blockquote class="twitter-tweet" data-conversation="none" data-dnt="true"><p lang="und" dir="ltr"><a href="https://t.co/fiShvlN8QH">pic.twitter.com/fiShvlN8QH</a></p>&mdash; Xe Iaso (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1508567556759728141?ref_src=twsrc%5Etfw">March 28, 2022</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
So then I took a good solid look at the rest of the 3D printed part to see if I could improve on anything else before I sent it to another round of the printer. The last stand took _14 hours_ to print and used a lot of material. I want to avoid waste.
Something I noticed is that the front of the print where all the cables come out was a bit too thin. All 5 of the cables wouldn't fit in there (my braided cables must have been thicker than the ones that the original modeler used). So again I grabbed a few platonic solids and managed to make it work out:
```scad
// widen the paths
color("green") translate([0, -16, 1.3]) rotate([0, 0, 90]) cube([10, 57, 7.8], center = true);
color("green") translate([0, 0, 1.7]) rotate([0, 0, 0]) cube([25, 30, 7], center = true);
```
<blockquote class="twitter-tweet" data-conversation="none" data-dnt="true"><p lang="und" dir="ltr"><a href="https://t.co/pKAVtiPfDS">pic.twitter.com/pKAVtiPfDS</a></p>&mdash; Xe Iaso (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1508568858650685440?ref_src=twsrc%5Etfw">March 28, 2022</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
Then I wanted to add some wedges into the underside of the part to help me get the print off the bed. Most people have a problem with bed adhesion being too little. I have too much bed adhesion. So I added some angled rectangles:
```scad
// wedges to help get the print off the bed
color([1, 1, 0]) translate([-120, 0, 0]) rotate([15, 0, 90]) cube([10, 11, 2], center = true); // right
color([1, 1, 0]) translate([120, 0, 0]) rotate([-15, 0, 90]) cube([10, 11, 2], center = true); // left
color([1, 1, 0]) translate([0, -85, 0]) rotate([0, 15, 90]) cube([10, 11, 2], center = true); // back
color([1, 1, 0]) translate([60, 56, 1]) rotate([0, -15, 90]) cube([10, 11, 2], center = true); // front left
color([1, 1, 0]) translate([-60, 56, 1]) rotate([0, -15, 90]) cube([10, 11, 2], center = true); // front right
color([1, 1, 0]) translate([32.5, 41, 1]) rotate([0, -15, 130]) cube([10, 11, 2], center = true); // front left inner
color([1, 1, 0]) translate([-32.5, 41, 1]) rotate([0, -15, 60]) cube([10, 11, 2], center = true); // front right inner
```
<blockquote class="twitter-tweet" data-conversation="none" data-dnt="true"><p lang="und" dir="ltr"><a href="https://t.co/XUQ9ZeYk1H">pic.twitter.com/XUQ9ZeYk1H</a></p>&mdash; Xe Iaso (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1508569796253827077?ref_src=twsrc%5Etfw">March 28, 2022</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
And then once I spun it around for a bit and thought it was good, I sliced it in PrusaSlicer and sent it off to Kyubey. It was going to take 14 hours, so I went off to do other things, ate dinner and then went to bed while the printer continued.
<blockquote class="twitter-tweet" data-conversation="none" data-dnt="true"><p lang="fr" dir="ltr">Diligent bean <a href="https://t.co/yPgnJA0ZdW">pic.twitter.com/yPgnJA0ZdW</a></p>&mdash; Xe Iaso (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1508397506031460352?ref_src=twsrc%5Etfw">March 28, 2022</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
Then when I woke up, Kyubey was done:
<blockquote class="twitter-tweet" data-conversation="none" data-dnt="true"><p lang="und" dir="ltr"><a href="https://t.co/2E1IS810EH">pic.twitter.com/2E1IS810EH</a></p>&mdash; Xe Iaso (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1508407046995156992?ref_src=twsrc%5Etfw">March 28, 2022</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
I was excited and chiseled the print off the bed (the wedges helped a little, but it ended up making the print look kinda weird so I don't know if I will do that again), but the hole for the middle tracker didn't fit perfectly. Everything else did though.
[If you want to get prints off your printer easier, see this video for the method we're starting to use: <br /><br /><iframe width="560" height="315" src="https://www.youtube.com/embed/VCCbzCvtRzU" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>](conversation://Mara/hacker)
I looked on my desk and found that a random pen that I had sitting around for months was about the right size, so I pushed it into and out of the hole a few times and then the cables fit perfectly. I assume some plastic was in a weird state or something.
Then I set everything up and I had my Index controller stand:
<blockquote class="twitter-tweet" data-conversation="none" data-dnt="true"><p lang="en" dir="ltr">Victory! <a href="https://t.co/A3aCtQMQt5">pic.twitter.com/A3aCtQMQt5</a></p>&mdash; Xe Iaso (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1508426229464064001?ref_src=twsrc%5Etfw">March 28, 2022</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
[I really need to get a table or something for this.](conversation://Cadey/facepalm)
I've uploaded my modified version to [Thingiverse](https://www.thingiverse.com/thing:5332988). If you want to see the OpenSCAD code, you can check it out on GitHub [here](https://github.com/Xe/3dstuff/blob/main/index_stand_hack.scad). I'm really liking OpenSCAD so far. It's very weird but it lets you do whatever you want by chaining together basic shapes to build up to what you want. I imagine I will be using it a lot in the future, especially once my husband's new sim racing gear comes in.
Having a 3D printer around is like having a very weird superpower on standby. You can compile matter in your living room, but you need a very pedantic description of what that should look like. You also can have any material you like as long as it's plastic. However when it's useful, it's a lifesaver. You can make something to fit a gap or mend something broken or even add functionality to something that lacked it. The cloud's the limit!


@ -1,175 +0,0 @@
---
title: The 7th Edition
date: 2020-12-19
tags:
- ttrpg
---
You know what, fuck rules. Fuck systems. Fuck limitations. Let's dial the
tabletop RPG system down to its roots. Let's throw out every stat but one:
Awesomeness. When you try to do something that could fail, roll for Awesomeness.
If your roll is more than your awesomeness stat, you win. If not, you lose. If
you are or have something that would benefit you in that situation, roll for
awesomeness twice and take the higher value.
No stats.<br />
No counts.<br />
No limits.<br />
No gods.<br />
No masters.<br />
Just you and me and nature in the battlefield.
* Want to shoot an arrow? Roll for awesomeness. You failed? You're out of ammo.
* Want to defeat a goblin, but you have a goblin-slaying broadsword? Roll twice
for awesomeness and take the higher value. You got a 20? That goblin was
obliterated. Good job.
* Want to pick up an item into your inventory? Roll for awesomeness. You got it?
It's in your inventory.
Etc. Don't think too hard. Let a roll of the dice decide if you are unsure.
## Base Awesomeness Stats
Here are some probably balanced awesomeness base stats depending on what kind of
dice you are using:
* 6-sided: 4 or 5
* 8-sided: 5 or 6
* 10-sided: 6 or 7
* 12-sided: 7 or 8
* 20-sided: anywhere from 11-13
## Character Sheet Template
Here's an example character sheet:
```
Name:
Awesomeness:
Race:
Class:
Inventory:
*
```
That's it. You don't even need the race or class if you don't want to have it.
You can add more if you feel it is relevant for your character. If your
character is a street brat that has experience with haggling, then fuck it, be
the brattiest street haggler you can. Try not to overload your sheet with
information; this game is supposed to be simple. A sentence or two at most is
good.
## One Player is The World
The World is a character that other systems would call the Narrator, the
Pathfinder, Dungeon Master or similar. Let's strip this down to the core of the
matter. One player doesn't just dictate the world, they _are_ the world.
The World also controls the monsters and non-player characters. In general, if
you are in doubt as to who should roll for an event, The World does that roll.
## Mixins/Mods
These are things you can do to make the base game even more tailored to your
group. Whether you should do this is highly variable to the needs and whims of
your group in particular.
### Mixin: Adjustable Awesomeness
So, one problem that could come up with this is that bad luck could make this
not as fun. As a result, add these two rules in:
* Every time you roll above your awesomeness, add 1 to your awesomeness stat
* Every time you roll below your awesomeness, remove 1 from your awesomeness
stat
This should add up so that luck would even out over time. Players that have less
luck than usual will eventually get their awesomeness evened out so that luck
will be in their favor.
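If you want a sanity check on that claim, here's a rough Rust sketch that simulates the mixin over many rolls. The d20 and the starting stat of 11 come from the base stats table above; everything else, including the use of the rand crate and the clamp guard, is my own assumption:
```rust
// Simulates the Adjustable Awesomeness mixin on a d20.
use rand::Rng;

fn main() {
    let mut rng = rand::thread_rng();
    let mut awesomeness: i32 = 11; // from the base stats table above
    let mut wins = 0;

    for _ in 0..10_000 {
        let roll = rng.gen_range(1..=20);
        if roll > awesomeness {
            // Rolled above the stat: a win, and the stat creeps up,
            // making the next roll harder.
            wins += 1;
            awesomeness += 1;
        } else if roll < awesomeness {
            // Rolled below: the stat creeps down, making the next roll easier.
            awesomeness -= 1;
        }
        // Keep the stat on the die (my own guard, not part of the rules text).
        awesomeness = awesomeness.clamp(1, 20);
    }

    println!("won {} of 10000 rolls, final awesomeness: {}", wins, awesomeness);
}
```
Over a long session the win rate drifts back toward roughly even, which is the point of the mixin.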
### Mixin: No Awesomeness
In this mod, rip out Awesomeness altogether. When two parties are at odds, they
both roll dice. The one that rolls higher gets what they want. If they tie, both
people get a little part of what they want. For extra fun do this with six-sided
dice.
* Monster wants to attack a player? The World and that player roll. If the
player wins, they can choose to counterattack. If the monster wins, they do a
wound or something.
* One player wants to steal from another? Have them both roll to see what
happens.
Use your imagination! Ask others if you are unsure!
## Other Advice
This is not essential but it may help.
### Monster Building
Okay so basically monsters fall into two categories: peons and bosses. Peons
should be easy to defeat, usually requiring one action. Bosses may require more
and might require more than pure damage to defeat. Get clever. Maybe require the
players to drop a chandelier on the boss. Use the environment.
In general, peons should have a very high base awesomeness in order to do things
they want. Bosses can vary based on your mood.
Adjustable awesomeness should affect monsters too.
### Worldbuilding
Take a setting from somewhere and roll with it. You want to do a cyberpunk jaunt
in Night City with a sword-wielding warlock, a succubus space marine, a bard
netrunner and a shapeshifting monk? Do the hell out of that. That sounds
awesome.
Don't worry about accuracy or the like. You are setting out to have fun.
## Special Thanks
Special thanks goes to Jared, who sent out this [tweet][1] that inspired this
document. In case the tweet gets deleted, here's what it said:
[1]: https://twitter.com/infinite_mao/status/1340402360259137541
> heres a d&d for you
> you have one stat, its a saving throw. if you need to roll dice, you roll your
> save.
> you have a class and some equipment and junk. if the thing you need to roll
> dice for is relevant to your class or equipment or whatever, roll your save
> with advantage.
> oh your Save is 5 or something. if you do something awesome, raise your save
> by 1.
> no hp, save vs death. no damage, save vs goblin. no tracking arrows, save vs
> running out of ammo.
> thanks to @Axes_N_Orcs for this
> What's So Cool About Save vs Death?
> can you carry all that treasure and equipment? save vs gains
I replied:
> Can you get more minimal than this?
He replied:
> when two or more parties are at odds, all roll dice. highest result gets what
> they want.
> hows that?
This document is really just this twitter exchange in more words so that people
less familiar with tabletop games can understand it more easily. You know you
have finished when there is nothing left to remove, not when you can add
something to "fix" it.
I might put this on my [itch.io page](https://withinstudios.itch.io/).


@ -1,97 +0,0 @@
---
title: "OVE-20190623-0001"
date: 2019-06-24
tags:
- v
- security
- release
---
## Within Security Advisory
Root-level Remote Command Injection in the [V](https://vlang.io) playground (OVE-20190623-0001)
> The real CVEs are the friends we made along the way
awilfox
## Summary
While playing with the [V playground](https://vlang.io/play), a root-level
command injection vulnerability was discovered. This allows for an
unauthenticated attacker to execute arbitrary root-level commands on the
playground server.
This vulnerability is instantly exploitable by a remote, unauthenticated
attacker in the default configuration. To remotely exploit this vulnerability,
an attacker must send specially created HTTP requests to the playground server
containing a malformed function call.
This playground server is not open sourced or versioned yet, but this
vulnerability has led to the compromise of the box, as reported by the lead
developer of V.
## Remote Exploitation
V allows for calling of C functions through a few means:
- starting a line with a `#` character
- calling a C function with the `C.` namespace
The V playground insufficiently strips the latter form of the function call,
allowing an invocation such as this:
```
fn main() {
C .system(' id')
}
```
or even this:
```
fn main() {
C
.system(' id')
}
```
As the server is running as the root user, successful exploitation can result
in an unauthenticated user totally compromising the system, as happened
earlier yesterday on June 23, 2019. As the source code and configuration of
the V playground server is unknown, it is not possible to track usage of these
commands.
The playground did attempt to block these attacks, but it appeared to do pattern
matching on `#` or `C.`, allowing the alternative methods mentioned above.
## Security Suggestions
Do not run the playground server as a root user outside a container or other
form of isolation. The fact that this server runs user-submitted code makes
this kind of thing very difficult to isolate and/or secure properly. The use
of an explicit sandboxing environment like [gVisor](https://gvisor.dev) or
[Docker](https://www.docker.com) is suggested. The use of more elaborate
sandboxing mechanisms like [CloudABI](https://cloudabi.org) or
[WebAssembly](https://webassembly.org) may be practical for future
developments, but is admittedly out of scope for this initial class of issues.
## GReeTZ
Special thanks to the people of [#ponydev](https://pony.dev) for helping to
discover and toy with this bug.
## Timeline
All times are Eastern Standard Time.
### June 23, 2019
- 4:56 PM - The first exploit was found and the contents of /etc/passwd were dumped, other variants of this attack were proposed and tested in the meantime
- 5:00 PM - The V playground server stopped replying to HTTP and ICMP messages
- 6:26 PM - The V creator was notified of this issue
- 7:02 PM - The V creator acknowledged the issue and admitted the machine was compromised
### June 24, 2019
- 12:00 AM - This security bulletin was released


@ -1,164 +0,0 @@
---
title: "OVE-20191021-0001"
date: "2019-10-22"
tags:
- security
- release
- javascript
- mysql
- oh-dear-god
---
## Within Security Advisory
Multiple vulnerabilities in the mysqljs API and code.
Security Warning Level: yikes/10
## Summary
There are multiple issues exploitable by local and remote actors in
[mysqljs][mysqljs]. These can cause application data leaks, database leaks, SQL
injections, arbitrary code execution, and credential leaks among other things.
Mysqljs is unversioned, so it is difficult or impossible to tell how many
users are affected by this and what users can do in order to ensure they are
patched against these critical vulnerabilities.
## Background
Mysqljs is a library intended to facilitate prototyping web applications and
mobile applications using technologies such as [PhoneGap][phonegap] or
[Cordova][cordova]. These technologies allow developers to create a web
application that gets packaged and presented to users as if it was a native
application.
This library is intended to help with developers creating persistent storage for
these applications.
## Issues in Detail
There are at least seven vulnerabilities in this library; each of them will be
outlined below with a fairly vague level of detail.
### mysql.js is NOT versioned
The only version information I was able to find are the following:
- The `Last-Modified` date of Friday, March 11 2016
- The `ETag` of `80edc3e5a87bd11:0`
These header values correlate to a vulnerable version of the mysql.js file.
An entire copy of this file is embedded for purposes of explanation:
```
var MySql = {
_internalCallback : function() { console.log("Callback not set")},
Execute: function (Host, Username, Password, Database, Sql, Callback) {
MySql._internalCallback = Callback;
// to-do: change localhost: to mysqljs.com
var strSrc = "http://mysqljs.com/sql.aspx?";
strSrc += "Host=" + Host;
strSrc += "&Username=" + Username;
strSrc += "&Password=" + Password;
strSrc += "&Database=" + Database;
strSrc += "&sql=" + Sql;
strSrc += "&Callback=MySql._internalCallback";
var sqlScript = document.createElement('script');
sqlScript.setAttribute('src', strSrc);
document.head.appendChild(sqlScript);
}
}
```
### Fundamental Operation via Cross-Site Scripting
The code operates by creating a `<script>` element. The Javascript source of
this script is dynamically generated by the remote API server. This opens the
door for many kinds of Cross-Site Scripting attacks.
Especially because:
### Credentials Exposed over Plain HTTP
The script works by creating a `<script>` element pointed at a HTTP resource in
order to facilitate access to the MySQL Server. Line 6 shows that the API server
in question is being queried over UNENCRYPTED HTTP.
```
var strSrc = "http://mysqljs.com/sql.aspx?";
```
### Credentials and SQL Queries Are Not URL-Encoded Before Adding Them to a URL
Credentials and SQL queries are not URL-encoded before they are added to the
`strSrc` URL. This means that values may include other HTTP parameters that
could be evaluated, causing one of the two following:
### Potential for SQL Injection from Malformed User Input
It appears this API works by people submitting plain text SQL queries. It is
likely difficult to write these plain text queries in a way that avoids SQL
injection attacks.
### Potential for Arbitrary Code Execution
Combined with the previous issues, a SQL injection that inserts arbitrary
Javascript into the result will end up creating an arbitrary code execution bug.
This could let an attacker execute custom Javascript code on the page, which may
have even more disastrous consequences depending on the usage of this library.
### Server-Side Code has Unknown Logging Enabled
This means that user credentials and database results may be logged, stored and
leaked by the mysql.js API server without user knowledge. The server that is
running the API server may also do additional logging of database credentials
and results without user knowledge.
### Encourages Bad Practices
Mysql.js works by its API server dialing out an _UNENCRYPTED_ connection to your
MySQL server over the internet. This requires exposing your MySQL server to the
internet. This means that user credentials are vulnerable to anyone who has
packet capture abilities.
Mysql.js also encourages developers to commit database credentials into their
application source code. Cursory searching of GitHub has found
[this][leakedcreds]. I can only imagine there are countless other potential
victims.
## Security Suggestions
- Do not, under any circumstances, allow connections to be made without the use
of TLS (HTTPS).
- Version the library.
- Offer the source code of the API server to allow users to inspect it and
ensure their credentials are not being stored by it.
- Detail how the IIS server powering this service is configured, proving that it
is not keeping unsanitized access logs.
- Ensure all logging methods sanitize or remove user credentials.
- URL-encode all values being sent as part of a URL.
- Do not have your service fundamentally operate as a Cross-Site Scripting
attack.
- Do not, under any circumstances, encourage developers to put database
credentials in the source code of front-end web applications.
In summary, we label this a solid yikes/10 in terms of security. It would be
advisable for current users of this library to re-evaluate the life decisions
that have led them down this path.
## GReeTZ
Über thanks to [jadr2ddude][jaden] for helping with identifying the unfortunate
scope of these massive security issues.
Hyper thanks to [J][j] for coming up with a viable GitHub search for potentially
affected users.
[mysqljs]: http://www.mysqljs.com/
[phonegap]: https://phonegap.com/
[cordova]: https://cordova.apache.org/
[leakedcreds]: https://github.com/search?utf8=%E2%9C%93&q=%22https%3A%2F%2Fmysqljs.com%2Fmysql.js%22&type=Code
[jaden]: https://twitter.com/CompuJad
[j]: https://twitter.com/LombaxJay


@ -1,638 +0,0 @@
---
title: "TL;DR Rust"
date: 2020-09-19
series: rust
tags:
- go
- golang
---
Recently I've been starting to use Rust more and more for larger and larger
projects. As things have come up, I realized that I am missing a good reference
for common things in Rust as compared to Go. This post contains a quick
high-level overview of patterns in Rust and how they compare to patterns
in Go. This will focus on code samples. This is no replacement for the [Rust
book](https://doc.rust-lang.org/book/), but should help you get spun up on the
various patterns used in Rust code.
Also I'm happy to introduce Mara to the blog!
[Hey, happy to be here! I'm Mara, a shark hacker from Christine's imagination.
I'll interject with side information, challenge assertions and more! Thanks for
inviting me!](conversation://Mara/hacker)
Let's start somewhere simple: functions.
## Making Functions
Functions are defined using `fn` instead of `func`:
```go
func foo() {}
```
```rust
fn foo() {}
```
### Arguments
Arguments can be passed by separating the name from the type with a colon:
```go
func foo(bar int) {}
```
```rust
fn foo(bar: i32) {}
```
### Returns
Values can be returned by adding `-> Type` to the function declaration:
```go
func foo() int {
return 2
}
```
```rust
fn foo() -> i32 {
return 2;
}
```
In Rust values can also be returned on the last statement without the `return`
keyword or a terminating semicolon:
```rust
fn foo() -> i32 {
2
}
```
[Hmm, what if I try to do something like this. Will this
work?](conversation://Mara/hmm)
```rust
fn foo() -> i32 {
if some_cond {
2
}
4
}
```
Let's find out! The compiler spits back an error:
```
error[E0308]: mismatched types
--> src/lib.rs:3:9
|
2 | / if some_cond {
3 | | 2
| | ^ expected `()`, found integer
4 | | }
| | -- help: consider using a semicolon here
| |_____|
| expected this to be `()`
```
This happens because most basic statements in Rust can return values. The best
way to fix this would be to move the `4` return into an `else` block:
```rust
fn foo() -> i32 {
if some_cond {
2
} else {
4
}
}
```
Otherwise, the compiler will think you are trying to use that `if` as a
statement, like this:
```rust
let val = if some_cond { 2 } else { 4 };
```
### Functions that can fail
The [Result](https://doc.rust-lang.org/std/result/) type represents things that
can fail with specific errors. The [eyre Result
type](https://docs.rs/eyre) represents things that can fail
with any error. For readability, this post will use the eyre Result type.
[The angle brackets in the `Result` type are arguments to the type, this allows
the Result type to work across any type you could imagine.](conversation://Mara/hacker)
```go
import "errors"
func divide(x, y int) (int, error) {
if y == 0 {
return 0, errors.New("cannot divide by zero")
}
return x / y, nil
}
```
```rust
use eyre::{eyre, Result};
fn divide(x: i32, y: i32) -> Result<i32> {
match y {
0 => Err(eyre!("cannot divide by zero")),
_ => Ok(x / y),
}
}
```
[Huh? I thought Rust had the <a
href="https://doc.rust-lang.org/std/error/trait.Error.html">Error trait</a>,
shouldn't you be able to use that instead of a third party package like
eyre?](conversation://Mara/wat)
Let's try that, however we will need to make our own error type because the
[`eyre!`](https://docs.rs/eyre/0.6.0/eyre/macro.eyre.html) macro creates its own
transient error type on the fly.
First we need to make our own simple error type for a DivideByZero error:
```rust
use std::error::Error;
use std::fmt;
#[derive(Debug)]
struct DivideByZero;
impl fmt::Display for DivideByZero {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "cannot divide by zero")
}
}
impl Error for DivideByZero {}
```
So now let's use it:
```rust
fn divide(x: i32, y: i32) -> Result<i32, DivideByZero> {
match y {
0 => Err(DivideByZero{}),
_ => Ok(x / y),
}
}
```
However there is still one thing left: the function returns a DivideByZero
error, not _any_ error like the [error interface in
Go](https://godoc.org/builtin#error). In order to represent that we need to
return something that implements the Error trait:
```rust
fn divide(x: i32, y: i32) -> Result<i32, impl Error> {
// ...
}
```
And for the simple case, this will work. However as things get more complicated
this simple facade will not work due to reality and its complexities. This is
why I am shipping as much as I can out to other packages like eyre or
[anyhow](https://docs.rs/anyhow). Check out this code in the [Rust
Playground](https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=946057d8eb02f388cb3f03bae226d10d)
to mess with this code interactively.
[Pro tip: eyre (via <a href="https://docs.rs/color-eyre">color-eyre</a>) also
has support for adding <a href="https://docs.rs/color-eyre/0.5.4/color_eyre/#custom-sections-for-error-reports-via-help-trait">custom
sections and context</a> to errors similar to Go's <a href="https://godoc.org/fmt#Errorf">`fmt.Errorf` `%w`
format argument</a>, which will help in real world
applications. When you do need to actually make your own errors, you may want to look into
crates like <a href="https://docs.rs/thiserror">thiserror</a> to help with
automatically generating your error implementation.](conversation://Mara/hacker)
### The `?` Operator
In Rust, the `?` operator checks for an error in a function call and if there is
one, it automatically returns the error and gives you the result of the function
if there was no error. This only works in functions that return either an Option
or a Result.
[The <a href="https://doc.rust-lang.org/std/option/index.html">Option</a> type
isn't shown in very much detail here, but it acts like a "this thing might not exist and it's your
responsibility to check" container for any value. The closest analogue in Go is
making a pointer to a value or possibly putting a value in an `interface{}`
(which can be annoying to deal with in practice).](conversation://Mara/hacker)
```go
func doThing() (int, error) {
result, err := divide(3, 4)
if err != nil {
return 0, err
}
return result, nil
}
```
```rust
use eyre::Result;
fn do_thing() -> Result<i32> {
let result = divide(3, 4)?;
Ok(result)
}
```
If the second argument of divide is changed to `0`, then `do_thing` will return
an error.
[And how does that work with eyre?](conversation://Mara/hmm)
It works with eyre because eyre has its own error wrapper type called
[`Report`](https://docs.rs/eyre/0.6.0/eyre/struct.Report.html), which can
represent anything that implements the Error trait.
## Macros
Rust macros are function calls with `!` after their name:
```rust
println!("hello, world");
```
## Variables
Variables are created using `let`:
```go
var foo int
var foo = 3
foo := 3
```
```rust
let foo: i32;
let foo = 3;
```
### Mutability
In Rust, every variable is immutable (unchangeable) by default. If we try to
change those variables above we get a compiler error:
```rust
fn main() {
let foo: i32;
let foo = 3;
foo = 4;
}
```
This makes the compiler return this error:
```
error[E0384]: cannot assign twice to immutable variable `foo`
--> src/main.rs:4:5
|
3 | let foo = 3;
| ---
| |
| first assignment to `foo`
| help: make this binding mutable: `mut foo`
4 | foo = 4;
| ^^^^^^^ cannot assign twice to immutable variable
```
As the compiler suggests, you can create a mutable variable by adding the `mut`
keyword after the `let` keyword. There is no analog to this in Go.
```rust
let mut foo: i32 = 0;
foo = 4;
```
[This is slightly a lie. There's more advanced cases involving interior
mutability and other fun stuff like that, however this is a more advanced topic
that isn't covered here.](conversation://Mara/hacker)
### Lifetimes
Rust does garbage collection at compile time. It also passes ownership of memory
to functions as soon as possible. Lifetimes are how Rust calculates how "long" a
given bit of data should exist in the program. Rust will then tell the compiled
code to destroy the data from memory as soon as possible.
[This is slightly inaccurate in order to make this simpler to explain and
understand. It's probably more accurate to say that Rust calculates _when_ to
collect garbage at compile time, but the difference doesn't really matter for
most cases](conversation://Mara/hacker)
For example, this code will fail to compile because `quo` was moved into the
second divide call:
```rust
let quo = divide(4, 8)?;
let other_quo = divide(quo, 5)?;
// Fails compile because ownership of quo was given to divide to create other_quo
let yet_another_quo = divide(quo, 4)?;
```
To work around this you can pass a reference to the divide function:
```rust
let other_quo = divide(&quo, 5);
let yet_another_quo = divide(&quo, 4)?;
```
Or even create a clone of it:
```rust
let other_quo = divide(quo.clone(), 5);
let yet_another_quo = divide(quo, 4)?;
```
[You can also get more fancy with <a
href="https://doc.rust-lang.org/rust-by-example/scope/lifetime/explicit.html">explicit
lifetime annotations</a>, however as of Rust's 2018 edition they aren't usually
required unless you are doing something weird. This is something that is also
covered in more detail in <a
href="https://doc.rust-lang.org/stable/book/ch04-00-understanding-ownership.html">The
Rust Book</a>.](conversation://Mara/hacker)
### Passing Mutability
Sometimes functions need mutable variables. To pass a mutable reference, add
`&mut` before the name of the variable:
```rust
let something = do_something_to_quo(&mut quo)?;
```
## Project Setup
### Imports
External dependencies are declared using the [Cargo.toml
file](https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html):
```toml
# Cargo.toml
[dependencies]
eyre = "0.6"
```
This depends on the crate [eyre](https://crates.io/crates/eyre) at version
0.6.x.
[You can do much more with version requirements with cargo, see more <a
href="https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html">here</a>.](conversation://Mara/hacker)
Dependencies can also have optional features:
```toml
# Cargo.toml
[dependencies]
reqwest = { version = "0.10", features = ["json"] }
```
This depends on the crate [reqwest](https://crates.io/crates/reqwest) at version 0.10.x
with the `json` feature enabled (in this case it enables reqwest being able to
automagically convert things to/from json using Serde).
External dependencies can be used with the `use` statement:
```go
// go
import "github.com/foo/bar"
```
```rust
use foo; // -> foo now has the members of crate foo behind the :: operator
use foo::Bar; // -> Bar is now exposed as a type in this file
use eyre::{eyre, Result}; // exposes the eyre! and Result members of eyre
```
[This doesn't cover how the <a
href="http://www.sheshbabu.com/posts/rust-module-system/">module system</a>
works, however the post I linked there covers this better than I
can.](conversation://Mara/hacker)
## Async/Await
Async functions may be interrupted to let other things execute as needed. This
program uses [tokio](https://tokio.rs/) to handle async tasks. To run an async
task and wait for its result, do this:
```
let printer_fact = reqwest::get("https://printerfacts.cetacean.club/fact")
.await?
.text()
.await?;
println!("your printer fact is: {}", printer_fact);
```
This will populate `printer_fact` with an amusing fact about everyone's favorite
household pet, the [printer](https://printerfacts.cetacean.club).
To make an async function, add the `async` keyword before the `fn` keyword:
```rust
async fn get_text(url: String) -> Result<String> {
Ok(reqwest::get(&url)
.await?
.text()
.await?)
}
```
This can then be called like this:
```rust
let printer_fact = get_text("https://printerfacts.cetacean.club/fact").await?;
```
## Public/Private Types and Functions
Rust has three privacy levels for functions:
- Only visible to the current file (no keyword, lowercase in Go)
- Visible to anything in the current crate (`pub(crate)`, internal packages in
go)
- Visible to everyone (`pub`, upper case in Go)
[You can't get a perfect analog to `pub(crate)` in Go, but <a
href="https://docs.google.com/document/d/1e8kOo3r51b2BWtTs_1uADIA5djfXhPT36s6eHVRIvaU/edit">internal
packages</a> can get close to this behavior. Additionally you can have a lot
more control over access levels than this, see <a
href="https://doc.rust-lang.org/nightly/reference/visibility-and-privacy.html">here</a>
for more information.](conversation://Mara/hacker)
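Here's a small sketch of what those three levels look like in practice (the module and function names are made up for illustration):
```rust
mod storage {
    // No keyword: only visible inside this module (like a lowercase name in Go).
    fn connect() -> String {
        "connected".to_string()
    }

    // pub(crate): visible anywhere in this crate, but not to other crates
    // (roughly like a Go internal package).
    pub(crate) fn status() -> String {
        connect()
    }

    // pub: visible to everyone, including other crates
    // (like an exported, uppercase name in Go).
    pub fn health_check() -> String {
        status()
    }
}

fn main() {
    println!("{}", storage::health_check());
    println!("{}", storage::status()); // fine: same crate
    // println!("{}", storage::connect()); // error: `connect` is private
}
```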
## Structures
Rust structures are created using the `struct` keyword:
```go
type Client struct {
Token string
}
```
```rust
pub struct Client {
pub token: String,
}
```
If the `pub` keyword is not specified before a member name, it will not be
usable outside the Rust source code file it is defined in:
```go
type Client struct {
token string
}
```
```rust
pub(crate) struct Client {
token: String,
}
```
### Encoding structs to JSON
[serde](https://serde.rs) is used to convert structures to json. The Rust
compiler's
[derive](https://doc.rust-lang.org/stable/rust-by-example/trait/derive.html)
feature is used to automatically implement the conversion logic.
```go
type Response struct {
Name string `json:"name"`
Description *string `json:"description,omitempty"`
}
```
```rust
use serde::{Serialize, Deserialize};
#[derive(Serialize, Deserialize, Debug)]
pub(crate) struct Response {
pub name: String,
pub description: Option<String>,
}
```
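To actually turn that struct into JSON (and back), you go through a crate like serde_json. Here's a minimal sketch; serde_json only appears in this repo's dev-dependencies, so treat the exact setup as an assumption:
```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug)]
struct Response {
    name: String,
    description: Option<String>,
}

fn main() -> Result<(), serde_json::Error> {
    let resp = Response {
        name: "Mara".to_string(),
        description: None,
    };

    // Struct -> JSON string. A `None` serializes as `null` unless you add
    // #[serde(skip_serializing_if = "Option::is_none")], which is the closest
    // analog to Go's `omitempty`.
    let json = serde_json::to_string(&resp)?;
    println!("{}", json);

    // JSON string -> struct.
    let parsed: Response = serde_json::from_str(&json)?;
    println!("{:?}", parsed);

    Ok(())
}
```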
## Strings
Rust has a few string types that do different things. You can read more about
this [here](https://fasterthanli.me/blog/2020/working-with-strings-in-rust/),
but at a high level most projects only use a few of them:
- `&str`, a slice reference to a String owned by someone else
- String, an owned UTF-8 string
- PathBuf, a filepath string (encoded in whatever encoding the OS running this
code uses for filesystems)
The strings are different types for safety reasons. See the linked blogpost for
more detail about this.
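Here's a tiny sketch of how those types show up in practice (the conversions and names here are my own illustration):
```rust
use std::path::PathBuf;

// Borrow string data as &str, return an owned String.
fn shout(s: &str) -> String {
    s.to_uppercase()
}

fn main() {
    let owned: String = String::from("hello");
    let borrowed: &str = &owned; // a &str is a view into someone else's String

    println!("{}", shout(borrowed));
    println!("{}", shout("string literals are &str too"));

    // PathBuf is an owned filesystem path. It is not guaranteed to be valid
    // UTF-8, which is why it gets its own type instead of reusing String.
    let mut path = PathBuf::from("/tmp");
    path.push("example.txt");
    println!("{}", path.display());
}
```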
## Enumerations / Tagged Unions
Enumerations, also known as tagged unions, are a way to specify a superposition
of one of a few different kinds of values in one type. A neat way to show them
off (along with some other fancy features like the derivation system) is with the
[structopt](https://docs.rs/structopt/0.3.14/structopt/) crate. There is no easy
analog for this in Go.
[We've actually been dealing with enumerations ever since we touched the Result
type earlier. <a
href="https://doc.rust-lang.org/std/result/enum.Result.html">Result</a> and <a
href="https://doc.rust-lang.org/std/option/enum.Option.html">Option</a> are
implemented with enumerations.](conversation://Mara/hacker)
```rust
#[derive(StructOpt, Debug)]
#[structopt(about = "A simple release management tool")]
pub(crate) enum Cmd {
/// Creates a new release for a git repo
Cut {
#[structopt(flatten)]
common: Common,
/// Changelog location
#[structopt(long, short, default_value="./CHANGELOG.md")]
changelog: PathBuf,
},
/// Runs releases as triggered by GitHub Actions
GitHubAction {
#[structopt(flatten)]
gha: GitHubAction,
},
}
```
Enum variants can be matched using the `match` keyword:
```rust
match cmd {
Cmd::Cut { common, changelog } => {
cmd::cut::run(common, changelog).await
}
Cmd::GitHubAction { gha } => {
cmd::github_action::run(gha).await
}
}
```
All variants of an enum must be matched in order for the code to compile.
[This code was borrowed from <a
href="https://github.com/lightspeed/palisade">palisade</a> in order to
demonstrate this better. If you want to see these patterns in action, check this
repository out!](conversation://Mara/hacker)
## Testing
Test functions need to be marked with the `#[test]` annotation, then they will
be run alongside `cargo test`:
```rust
mod tests { // not required but it is good practice
#[test]
fn math_works() {
assert_eq!(2 + 2, 4);
}
#[tokio::test] // needs tokio as a dependency
async fn http_works() {
let _ = get_html("https://within.website").await.unwrap();
}
}
```
Avoid the use of `unwrap()` outside of tests. In the wrong cases, using
`unwrap()` in production code can cause the server to crash and can incur data
loss.
[Alternatively, you can also use the <a href="https://learning-rust.github.io/docs/e4.unwrap_and_expect.html#expect">`.expect()`</a> method instead
of `.unwrap()`. This lets you attach a message that will be shown when the
result isn't Ok.](conversation://Mara/hacker)
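As a rough sketch of what that looks like outside of a test (the config-file scenario here is invented for illustration):
```rust
use std::fs;

fn main() {
    // In a test, unwrap() is fine: a panic just fails that test.
    // In production code it takes down the whole process with a terse message:
    // let config = fs::read_to_string("config.dhall").unwrap();

    // expect() at least explains what went wrong when it panics:
    // let config = fs::read_to_string("config.dhall")
    //     .expect("config.dhall should exist next to the binary");

    // Handling the Result explicitly (or bubbling it up with `?`) avoids the
    // crash entirely.
    match fs::read_to_string("config.dhall") {
        Ok(config) => println!("loaded {} bytes of config", config.len()),
        Err(why) => eprintln!("could not read config.dhall: {}", why),
    }
}
```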
---
This is by no means comprehensive, see the rust book or [Learn X in Y Minutes
Where X = Rust](https://learnxinyminutes.com/docs/rust/) for more information.
This code is written to be as boring and obvious as possible. If things don't
make sense, please reach out and don't be afraid to ask questions.


@ -2,10 +2,10 @@
title: A Letter to Those That Bullied Me
date: 2018-06-16
for: Elizabeth
tags:
- offmychest
---
# A Letter to Those Who Bullied Me
Hey,
I'm not angry at you. I don't want to propagate hate. In a way, I almost feel like I should be thanking you for the contributions you've made in making me into the person I am today. Without you all, I would have had a completely different outcome in life. I would have stayed in the closet for good like I had planned. I would have probably ended up boring. I would have never met my closest friends and some even more.


@ -1,45 +0,0 @@
---
title: "Site Update: A Sigil"
date: 2022-01-11
---
<style>
.logo-wumbo {
background-color: #fdf5d7;
-webkit-mask: url("/static/img/xeiaso.svg");
-webkit-mask-repeat: no-repeat;
-webkit-mask-size: 100%;
mask: url("/static/img/xeiaso.svg");
mask-repeat: no-repeat;
mask-size: 100%;
width: 9.5em;
height: 16em;
display: inline-block;
}
@media (prefers-color-scheme: light) {
.logo-wumbo {
background-color: #1d2021;
}
}
</style>
The upper left-hand corner of my website has gotten more interesting
recently. I have decided to experiment with adding an SVG icon to the upper left
hand corner of the page.
This is the sigilized form of the name `Xe Iaso`. This sigil is equivalent in
meaning and semantics to the name `Xe Iaso`. Here is a version of it in a much
bigger size:
<center><span class="logo-wumbo"></span></center>
[Fun fact: the sigil is written using <a
href="https://greggshorthand.github.io/index.html">Gregg Shorthand</a>. It says
the phonetic equivalent of `Xe Iaso`.](conversation://Mara/hacker)
I have tested this in Edge on Linux, Firefox on Linux, Chrome on Android and
Safari on iOS. This should cover most of the big web browsers, but surely I've
missed something. If you use a _modern release_ of a _standards compliant_ web
browser and you don't see a logo anywhere on the page, please let me know so I
can go cry and then hopefully fix the issue.


@ -1,27 +0,0 @@
---
title: A Tool to Aid Forgetfulness
date: 2022-01-12
series: stories
---
The Egyptian God Thoth lived in the Egyptian city of Naucratis. Thoth was the
inventor of many arts such as math and astronomy, but the most significant was
the invention of writing. Thoth showed writing to the king of Egypt, claiming
that it would make Egyptians wiser and give them better memories; that it
would vastly improve both the memory and the wit of the Egyptian people.
The king replied: "Thoth, you invented this tool. As such you are not the best
one to judge such things. You have not created a tool to aid memory, you have
created a tool to aid forgetfulness. Learners will not use their memories, they
will blindly trust these sigils and not remember for themselves.
"You have discovered an aid to vague recollection, as the users of this tool
will not be given truth. They will only be given a semblance of truth.
"They will be hearers of many things and learners of nothing. They will appear
to know all the knowledge of the world yet when asked they will only be the
middleman to external forces that are trusted without verification. They will
know wisdom, but not truth."
Adapted from The Dialogues of Plato in Five Volumes, 3rd ed. Oxford
University, 1892. Vol. 1 pp. 483-489.


@ -1,278 +0,0 @@
---
title: "A Trip into FreeBSD"
date: 2021-02-13
tags:
- freebsd
---
I normally deal with Linux machines. Linux is what I know and it's what I've
been using since I was in college. A friend of mine has been coaxing me into
trying out [FreeBSD](https://www.freebsd.org), and I decided to try it out and
see what it's like. Here's some details about my experience and what I've
learned.
## Hardware
I've tried out FreeBSD on the following hardware:
- qemu/KVM on amd64
- Raspberry Pi 4 (4 GB)
- Raspberry Pi 3B (1 GB)
I've had the most luck with the Raspberry Pi 3 though. The KVM machine would
hang infinitely after the install process waiting for the mail service to do a
DNS probe of its own hostname (I do not host automagic FQDNs for my VMs). I'm
pretty sure I was doing something wrong there but I wasn't able to figure out
what I needed to do in order to disable the DNS probe blocking startup.
[If you know what we were doing wrong here, please feel free to <a
href="/contact">contact</a> us with the thing we messed
up.](conversation://Mara/hacker)
After waiting for about 5 minutes I gave up and decided to try out the Raspberry
Pi 4. The Raspberry Pi 4 is the most powerful arm board I own. It has 4 GB of
ram and a quad core processor that is way more than sufficient for my needs. I
was hoping to use FreeBSD on that machine so I could benefit from the hardware
the most. Following the instructions on [the wiki
page](https://wiki.freebsd.org/arm/Raspberry%20Pi), I downloaded the 12.2 RPI
image and flashed it to an SD card using Etcher. I put the SD card in, turned
the raspi on and then waited for it to show up on the network.
Except it never showed up on the network. I ran scans with nmap (specifically
with the command `sudo nmap -sS -p 22 192.168.0.0/24`) and the IP address never
showed up. I also didn't see any new MAC addresses on the network, so that led
me to believe that the pi was failing to boot. I downloaded an image for 13-BETA
and followed [this
guide](https://medium.com/swlh/freebsd-usb-boot-on-raspberry-pi-4-765cb6e75570)
that claims to make it work on the pi 4, but I got the same issue. The Raspberry
Pi 4 unfortunately has a micro-HDMI port on it, so I was unable to attach it to
my monitor to see any error messages. After trying for a while to see if I could
set up a serial port to get the serial log messages (spoiler: I couldn't), I dug
up my Pi 3 and stuck the same SD card into it, hooked it up to my monitor,
attached a spare keyboard to it and booted into FreeBSD first try.
## Using FreeBSD
FreeBSD is a very down to earth operating system. It also has a
[handbook](https://docs.freebsd.org/en/books/handbook/) that legitimately
includes all of the information you need to get up and running. Following the
handbook, I set a new password, installed the `pkg` tool, set up
[fish](https://fishshell.com) and then also installed the Go compiler toolchain
for the hell of it.
`pkg` is a very minimal looking package manager. It doesn't have very many
frills and it is integrated into the system pretty darn well. It looks like it
prefers putting everything into `/usr/local`, including init scripts and other
configuration files.
This interestingly lets you separate out the concerns of the base system from
individual machine-local configuration. I am not sure if this also works with
files like `/etc/resolv.conf` or other system configuration files, but it does
really give `/usr/local` a reason to exist beyond being a legacy location for
yolo-installed software that may or may not be able to be upgraded separately.
## Custom Services
Speaking of services, I wanted to see how hard it would be to get a custom
service running on a FreeBSD box. At the minimum I would need the following:
- The binary built for freebsd/aarch64 and installed to `/usr/local/bin`
- A user account for that service
- An init script for that service
- To enable the init script in `/etc/rc.conf`
I decided to do this with a service I made years ago called
[whatsmyip](https://github.com/Xe/whatsmyip).
### Building a Binary
Building the service is easy: I just go into the directory and run `go build`.
Then I get a binary. Running it in another tmux tab, we can see it in action:
```console
$ curl http://[::1]:9090
::1
```
I can also run the curl command from my macbook:
```console
$ curl http://pai:9090
100.72.190.5
```
Cool, I've got a working service! Let's install it to `/usr/local/bin`:
```console
$ doas cp ./whatsmyip /usr/local/bin
```
[Wait, `doas`? What is `doas`? It looks like it's doing something close to what
sudo does.](conversation://Mara/hmm)
[doas](https://en.wikipedia.org/wiki/Doas) is a program that does most of the
same things that sudo does, but with a much smaller codebase. I decided to try
out doas for this install for no other reason than I thought it would be a cool
thing to learn. It's actually pretty simple, and I'm going to look at using it
elsewhere (with an alias for `sudo` -> `doas`).
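For reference, a doas configuration can be a single line. Here's a minimal
sketch of what a `/usr/local/etc/doas.conf` could look like (an illustrative
example, not the exact rule set on my machine):
```console
$ cat /usr/local/etc/doas.conf
# allow members of the wheel group to run commands as root
permit :wheel as root
```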
### Service User
The handbook says that we use the
[adduser](https://people.freebsd.org/~blackend/en_US.ISO8859-1/books/handbook/users-synopsis.html)
command to add users to the system. So, let's run `adduser` to create a
`whatsmyip` user:
```console
# adduser
Username: whatsmyip
Full name: github.com/Xe/whatsmyip
Uid (Leave empty for default): 666
Login group [whatsmyip]:
Login group is whatsmyip. Invite whatsmyip into other groups? []:
Login class [default]:
Shell (sh csh tcsh bash rbash git-shell fish nologin) [sh]: sh
Home directory [/home/whatsmyip]: /var/db/whatsmyip
Home directory permissions (Leave empty for default):
Use password-based authentication? [yes]: no
Lock out the account after creation? [no]: yes
Username : whatsmyip
Password : <disabled>
Full Name : github.com/Xe/whatsmyip
Uid : 666
Class :
Groups : whatsmyip
Home : /var/db/whatsmyip
Home Mode :
Shell : /bin/sh
Locked : yes
OK? (yes/no): yes
adduser: INFO: Successfully added (whatsmyip) to the user database.
adduser: INFO: Account (whatsmyip) is locked.
Add another user? (yes/no): no
Goodbye!
```
It's a bit weird that there's not a flow for creating a "system user" that
automatically sets the flags that I expect from Linux system administration, but
I was able to specify the values manually without too much effort.
Something interesting is that when I set the user account to `nologin` I was
actually unable to log in as the user. Usually in Linux you can hack around
this with `su` flags but FreeBSD doesn't have this escape hatch. Neat.
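If you'd rather skip the interactive questionnaire, FreeBSD also ships `pw`,
which can create the same kind of account in one shot. This is a rough sketch of
what I believe the equivalent one-liner looks like (double-check `man pw` before
trusting the flags):
```console
$ doas pw useradd whatsmyip -u 666 -c "github.com/Xe/whatsmyip" \
    -d /var/db/whatsmyip -m -s /usr/sbin/nologin -w no
```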
### Init Script
Now that I had the service account set up, I needed to write an init script that
would start this program on boot. Following other parts of the handbook I was
able to get a base script that looks like this:
```shell
#!/bin/sh
#
# PROVIDE: whatsmyip
# REQUIRE: DAEMON
# KEYWORD: shutdown
. /etc/rc.subr
name=whatsmyip
rcvar=whatsmyip_enable
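# daemon(8) flags: -S log the child's output to syslog, -u run it as the
# whatsmyip user, -r restart it if it dies, -f redirect stdio to /dev/null
# and -p write the child's pidfile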
command="/usr/sbin/daemon"
command_args="-S -u whatsmyip -r -f -p /var/run/whatsmyip.pid /usr/local/bin/whatsmyip"
load_rc_config $name
#
# DO NOT CHANGE THESE DEFAULT VALUES HERE
# SET THEM IN THE /etc/rc.conf FILE
#
whatsmyip_enable=${whatsmyip_enable-"NO"}
pidfile=${whatsmyip_pidfile-"/var/run/whatsmyip.pid"}
run_rc_command "$1"
```
Now I can copy this file to `/usr/local/etc/rc.d/whatsmyip` and then make sure
it's set to the permissions `0555` with something like:
```console
$ chmod 0555 ./whatsmyip.rc
$ doas cp ./whatsmyip.rc /usr/local/etc/rc.d/whatsmyip
```
### Enabling The Service
Once I had the file in the right place, I enabled the service in `/etc/rc.conf`
like this:
```shell
# whatsmyip
whatsmyip_enable="YES"
```
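You can edit `/etc/rc.conf` by hand like I did, or let `sysrc` make the same
edit for you; something like this should be equivalent:
```console
$ doas sysrc whatsmyip_enable=YES
```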
Then I started the service with `service whatsmyip start`, and I was unable to
start the service. I got this error:
```
Feb 13 20:40:00 pai freebsd[1519]: /usr/local/etc/rc.d/whatsmyip: WARNING: failed to start whatsmyip
```
And no other useful information to help me actually fix the problem. I assume
there's some weirdness going on with permissions, so let's sidestep the user
account for now and just run the service as root directly by changing the
`command_args` in `/usr/local/etc/rc.d/whatsmyip`:
```shell
command_args="-S -r -p /var/run/whatsmyip.pid /usr/local/bin/whatsmyip"
```
Restarting the service, everything works! I can hit that service all I want and
it answers with the IP address I hit it from.
## What I Learned
FreeBSD has _excellent_ documentation. The people on the documentation team
really care about making the handbook useful. I wish it went into more detail
about best practices for making your own services (I had to crib from some other
service files as well as googling for a minimal template), but overall it gives
you enough information to get off the ground.
FreeBSD is also fairly weird. It's familiar-ish, but it's a very different
experience. It's also super-minimal. Looking at the output of `ps x`, there are
only 45 processes running on the system, including kernel threads.
```
root@pai ~# ps x | wc -l
45
```
The only processes are `init`, `dhclient`, a device manager, `syslog`,
`tailscaled`, `sshd`, `cron`, `whatsmyip`, `fish` and a few instances of `getty`
to allow me to log in with an HDMI monitor and keyboard should I need to. That's
it. That's all that's running. It's only using 96 MB of RAM and most of the
machine's power is left over for me.
It's just a shame that FreeBSD support for programming languages is so poor in
general. Go works fine on it, but Rust doesn't have any pre-built binaries for
the compiler (and using ports/pkg isn't an option because aarch64 is a tier-2
architecture in FreeBSD land, which means that it's not guaranteed to have
prebuilt binaries for everything). Compiling Rust from source also really isn't
an option because I don't have enough RAM on my raspi to do that. Go works
though.
I really wonder how this kind of network effect will shake out with more and
more security libraries like
[pyca](https://github.com/pyca/cryptography/issues/5771) integrating Rust deeper
into core security components. It probably means that people are going to have
to step up and actually do the legwork required to get Rust working on more
platforms, however it definitely is going to leave some older hardware or less
commonly used configurations (like aarch64 FreeBSD) in the dust if we aren't
careful. Maybe this isn't a technical problem, but it is definitely something
interesting to think about.
Overall, FreeBSD is an interesting tool and if I ever have a good use for it in
my server infrastructure I will definitely give it a solid look. I just wish it
was as easy to manage a FreeBSD system as it is to manage a NixOS system. A lot
of my faffing about with `rc.conf` and rc scripts wouldn't have needed to
happen if that was the case.
@@ -1,278 +0,0 @@
---
title: Prometheus and Aegis
date: 2021-04-05
tags:
- prometheus
- o11y
---
[*Last time in the christine dot website cinematic
universe:*](https://christine.website/blog/unix-domain-sockets-2021-04-01)
*Unix sockets started to be used to grace the cluster. Things were at peace.
Then, a realization came through:*
[What about Prometheus? Doesn't it need a direct line of fire to the service to
scrape metrics?](conversation://Mara/hmm?smol)
*This could not do! Without observability the people of the Discord wouldn't have
a livefeed of the infrastructure falling over! This cannot stand! Look, our hero
takes action!*
[It will soon!](conversation://Cadey/percussive-maintenance?smol)
In order to help keep an eye on all of the services I run, I use
[Prometheus](https://prometheus.io/) for collecting metrics. For an example of
the kind of metrics I collect, see [here (1)](/metrics). In the configuration
that I have, Prometheus runs on a server in my apartment and reaches out to my
other machines to scrape metrics over the network. This worked great when I had
my major services listen over TCP: I could just point Prometheus at the backend
port over my tunnel.
When I started using Unix sockets for hosting my services, this stopped working.
It became very clear very quickly that I needed some kind of shim. This shim
needed to do the following things:
- Listen over the network as a HTTP server
- Connect to the unix sockets for relevant services based on the path (e.g.
`/xesite` should get the metrics from `/srv/within/run/xesite.sock`)
- Do nothing else
The Go standard library has a tool for doing reverse proxying built right in:
[`net/http/httputil#ReverseProxy`](https://pkg.go.dev/net/http/httputil#ReverseProxy).
Maybe we could build something with this?
[The documentation seems to imply it will use the network by default. Wait,
what's this `Transport` field?](conversation://Mara/hmm?smol)
```go
type ReverseProxy struct {
// ...
// The transport used to perform proxy requests.
// If nil, http.DefaultTransport is used.
Transport http.RoundTripper
// ...
}
```
[So a transport is a <a
href="https://pkg.go.dev/net/http#RoundTripper">`RoundTripper`</a>, which is an
interface whose single method takes a request and returns a response somehow. It
uses `http.DefaultTransport` by default, which reads from the network. So at a
minimum we're gonna need: <ul><li>a `ReverseProxy`</li><li>a
`Transport`</li><li>a dialing function</li></ul>Right?](conversation://Mara/hmm?smol)
Yep! Unix sockets can be used like normal sockets, so all you need is something
like this:
```go
func proxyToUnixSocket(w http.ResponseWriter, r *http.Request) {
name := path.Base(r.URL.Path)
fname := filepath.Join(*sockdir, name+".sock")
_, err := os.Stat(fname)
if os.IsNotExist(err) {
http.NotFound(w, r)
return
}
ts := &http.Transport{
Dial: func(_, _ string) (net.Conn, error) {
return net.Dial("unix", fname)
},
DisableKeepAlives: true,
}
rp := httputil.ReverseProxy{
Director: func(req *http.Request) {
req.URL.Scheme = "http"
req.URL.Host = "aegis"
req.URL.Path = "/metrics"
req.URL.RawPath = "/metrics"
},
Transport: ts,
}
rp.ServeHTTP(w, r)
}
```
[So in this handler:](conversation://Mara/hmm?smol)
```go
name := path.Base(r.URL.Path)
fname := filepath.Join(*sockdir, name+".sock")
_, err := os.Stat(fname)
if os.IsNotExist(err) {
http.NotFound(w, r)
return
}
ts := &http.Transport{
Dial: func(_, _ string) (net.Conn, error) {
return net.Dial("unix", fname)
},
DisableKeepAlives: true,
}
```
[You have the socket path built from the URL path, and then you return
connections to that path ignoring what the HTTP stack thinks it should point
to?](conversation://Mara/hmm?smol)
Yep. Then the rest is really just boilerplate:
```go
package main
import (
"flag"
"log"
"net"
"net/http"
"net/http/httputil"
"os"
"path"
"path/filepath"
)
var (
hostport = flag.String("hostport", "[::]:31337", "TCP host:port to listen on")
sockdir = flag.String("sockdir", "./run", "directory full of unix sockets to monitor")
)
func main() {
flag.Parse()
log.SetFlags(0)
log.Printf("%s -> %s", *hostport, *sockdir)
http.DefaultServeMux.HandleFunc("/", proxyToUnixSocket)
log.Fatal(http.ListenAndServe(*hostport, nil))
}
```
Now all that's needed is to build a NixOS service out of this:
```nix
{ config, lib, pkgs, ... }:
let cfg = config.within.services.aegis;
in
with lib; {
# Mara\ this describes all of the configuration options for Aegis.
options.within.services.aegis = {
enable = mkEnableOption "Activates Aegis (unix socket prometheus proxy)";
# Mara\ This is the IPv6 host:port that the service should listen on.
# It's IPv6 because this is $CURRENT_YEAR.
hostport = mkOption {
type = types.str;
default = "[::1]:31337";
description = "The host:port that aegis should listen for traffic on";
};
# Mara\ This is the folder full of unix sockets. In the previous post we
# mentioned that the sockets should go somewhere like /tmp, however this
# may be a poor life decision:
# https://lobste.rs/s/fqqsct/unix_domain_sockets_for_serving_http#c_g4ljpf
sockdir = mkOption {
type = types.str;
default = "/srv/within/run";
example = "/srv/within/run";
description =
"The folder that aegis will read from";
};
};
# Mara\ The configuration that will arise from this module if it's enabled
config = mkIf cfg.enable {
# Mara\ Aegis has its own user account to keep things tidy. It doesn't need
# root to run so we don't give it root.
users.users.aegis = {
createHome = true;
description = "tulpa.dev/cadey/aegis";
isSystemUser = true;
group = "within";
home = "/srv/within/aegis";
};
# Mara\ The systemd service that actually runs Aegis.
systemd.services.aegis = {
wantedBy = [ "multi-user.target" ];
# Mara\ These correlate to the [Service] block in the systemd unit.
serviceConfig = {
User = "aegis";
Group = "within";
Restart = "on-failure";
WorkingDirectory = "/srv/within/aegis";
RestartSec = "30s";
};
# Mara\ When the service starts up, run this script.
script = let aegis = pkgs.tulpa.dev.cadey.aegis;
in ''
exec ${aegis}/bin/aegis -sockdir="${cfg.sockdir}" -hostport="${cfg.hostport}"
'';
};
};
}
```
[Then I just flicked it on for a server of mine:](conversation://Cadey/enby?smol)
```nix
within.services.aegis = {
enable = true;
hostport = "[fda2:d982:1da2:180d:b7a4:9c5c:989b:ba02]:43705";
sockdir = "/srv/within/run";
};
```
[And then test it with `curl`:](conversation://Cadey/enby?smol)
```console
$ curl http://[fda2:d982:1da2:180d:b7a4:9c5c:989b:ba02]:43705/printerfacts
# HELP printerfacts_hits Number of hits to various pages
# TYPE printerfacts_hits counter
printerfacts_hits{page="fact"} 15
printerfacts_hits{page="index"} 23
printerfacts_hits{page="not_found"} 17
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
process_cpu_seconds_total 0.06
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
process_max_fds 1024
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
process_open_fds 12
# HELP process_resident_memory_bytes Resident memory size in bytes.
# TYPE process_resident_memory_bytes gauge
process_resident_memory_bytes 5296128
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
# TYPE process_start_time_seconds gauge
process_start_time_seconds 1617458164.36
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
# TYPE process_virtual_memory_bytes gauge
process_virtual_memory_bytes 911777792
```
[And there you go! Now we can make Prometheus point to this and we can save
Christmas!](conversation://Cadey/aha?smol)
[:D](conversation://Mara/happy?smol)
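The Prometheus side of this is just one scrape job per service with
`metrics_path` pointed at the right path on Aegis. A sketch of what that config
could look like (the job names here are examples; the target is the address
Aegis listens on above):
```yaml
scrape_configs:
  - job_name: printerfacts
    metrics_path: /printerfacts
    static_configs:
      - targets: ["[fda2:d982:1da2:180d:b7a4:9c5c:989b:ba02]:43705"]
  - job_name: xesite
    metrics_path: /xesite
    static_configs:
      - targets: ["[fda2:d982:1da2:180d:b7a4:9c5c:989b:ba02]:43705"]
```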
---
This is another experiment in writing these kinds of posts in more of a Socratic
method. I'm trying to strike a balance with a [limited pool of
stickers](https://tulpa.dev/cadey/kadis-layouts/src/branch/master/moonlander/leader.c#L68-L84)
while I wait for more stickers/emoji to come in. [Feedback](/contact) is always welcome.
(1): These metrics are not perfect because of the level of caching that
Cloudflare does for me.
@@ -4,6 +4,8 @@ date: 2019-05-25
 series: dreams
 ---
+# All There is is Now
 The dream scenario was going on for a while uneventfully. I saw an old man walking around and ranting about things. I decided to go and talk with him.
 "You fools! Time doesn't exist! The past is immutable! Don't worry about your trivial daily needs. All there is is Now!"
@@ -1,110 +0,0 @@
---
title: "Outsider Art and Anathema"
date: 2019-10-21
tags:
- philosophy
- art
- makes-u-thonk
---
This was going to be a post about [Urbit][urbit] at first; but in the process of discussing my interest in writing something _positive_ about it, I was warned by a few people that this was a Bad Idea. I was focusing purely on the technical side of it and how closely it implemented a concept called [liquid software][liquidsoftware], but from what people were saying, it seemed like a creation that was spoiled by something outside of it, specifically the creator's political views (of which I had little idea at the time).
As much as I will probably return to the original concept in the future with another post, this feels like something I had to address first.
**DISCLAIMER:** This post references projects and people that the mainstream considers controversial. This post is not an approval of these people's views. I am focusing purely on the aspect of how this correlates into how art is perceived, recognized and able to be admired. I realize that the people behind the projects I have cited have said things that, if taken seriously at a societal level, could hurt me and people like me. That is not the point of this; I am trying to learn how this art works so I can create my own in the future. If this is uncomfortable for you at any point, please close this browser tab and do something else.
## Art
So, what is art?
This is a surprisingly hard question to answer. Most of the time though, I know art when I see it.
Art doesn't have to follow conventional ideas of what most people think "art" is. Art can be just about anything that you can classify as art. As a conventional example, consider something like the Mona Lisa:
![The Mona Lisa, the most famous painting in the world](https://xena.greedo.xeserv.us/files/monalisa_small.jpg)
People will accept this as art without much argument. It's a painting, it obviously took a lot of skill and time to create. It is said that Leonardo Da Vinci (the artist of the painting) created it partially [as a contribution to the state of the art of oil painting][monalisawhy].
So that painting is art, and a lot of people would consider it art; so what *would* a lot of people *not* consider art? Here's an example:
![Untitled (Perfect Lovers) by Felix Gonzalez-Torres](https://xena.greedo.xeserv.us/files/perfect-lovers.jpg)
This is *Untitled (Perfect Lovers)* by Felix Gonzalez-Torres. If you just take a look at it without context, it's just two battery-operated clocks on a wall. Where is the expertise and the like that goes into this? This is just the result of someone buying two clocks from the store and putting them somewhere, right?
Let's dig into [the description of the piece][perfectloversdescription]:
> Initially set to the same time, these identical battery-powered clocks will eventually fall out of sync, or may stop entirely. Conceived shortly after Gonzalez-Torres's partner was diagnosed with AIDS, this work uses everyday objects to track and measure the inevitable flow of time. When one of the clocks stops or breaks, they can both be reset, thereby resuming perfect synchrony. In 1991, Gonzalez-Torres reflected, “Time is something that scares me. . . or used to. This piece I made with the two clocks was the scariest thing I have ever done. I wanted to face it. I wanted those two clocks right in front of me, ticking.”
And after reading that description, it's impossible for me to say this image is _not_ art. Even though it's made up of ordinary objects, the art comes out in the way that the clocks' eventual death relates to the eventual death of the author and their partner.
This art may be located on the fringes of what people consider "art". So what else is on the fringes?
### Outsider Art
For there to be "fringes" to the art landscape, there must be an "inside" and "outside" to it. In particular, the "outsider" art usually (but not always) contains elements and themes that are outside of the mainstream. Outsiders are therefore more free to explore ideas, concepts and ways of expression that defy cultural, spiritual or other norms. Logically, every major art style you know and love started as outsider art, before it was cool. Memes are also a form of outsider art, though they are gradually being accepted into the mainstream.
It's very easy to find outsider art if you are looking for it: just fish for some on Twitter, 4chan or Reddit; you'll find plenty of artists there who are placed firmly outside of the mainstream art community.
## Computer Science
Computer science is a kind of art. It's the art of turning contextual events into effects and state. It's also the art of creating solutions for problems that could never be solved before. It's also the science of how to connect millions of people across common protocols and abstractions that they don't have to understand in order to use.
This is an art that connects millions and has shaped itself into an industry of its own. This art, like the rest of mainstream art, keeps evolving, growing and changing into something new; into a more ultimate and detailed expression of what it can be, as people explore the ways it can be created and presented. This art is also quite special because it's not very limited by physical objects or expressions in material space. It's an art that can evolve and change with the viewer.
But, since this is an art, there's still an inside and an outside. Things on the inside are generally "safe" for people to admire, use and look at. The inside contains things like Linux, Docker, Kubernetes, Intel, C, Go, PHP, Ruby and other well-known and battle-proven tools.
### The Outside
The outside, however, is where the real innovation happens. The outside is where people can really take a more critical look at what computing is, does or can be. These views can add up into fundamentally different ways of looking at computer science, much like changing a pair of glasses for another changes how you see the world around you.
As an example, consider [TempleOS][codersnotestempleos]. It's a work of outsider art by [Terry Davis][terrydavis] (1969-2018, RIP), but it's also a fully functional operating system. It has a custom-built kernel, compiler, toolchain, userland, debugger, games, and documentation system, each integrated into everything else, in ways that could realistically not be done with how mainstream software is commonly developed.
[Urbit][urbit] is another example of this. It's a fundamentally different way of looking at networked computing. Everything in Urbit is seamlessly interlinked with everything else to the point that it can be surprising that a file you are working with actually lives on another computer. It implements software updates as invisible to the user. It allows for the model of [liquid software][liquidsoftware], or updates to a program flowing into user's computers without the users having to care about the updates. Users don't even notice the downtime.
As yet another example, consider [Minecraft][minecraft]. As of the writing of this article, it is the video game with the most copies sold in human history. It is an open world block building game where the limits of what you can make are the limits of your imagination. It has been continuously updated, refined and improved from a minimal proof of concept into the game it is today.
## The Seam
Consider this quote that comes into play a lot with outsider art:
> Genius and insanity are differentiated only by context. One person's genius is another person's insanity.
- Anonymous
These three projects are developed by people whom the mainstream has cast out. Terry Davis' mental health issues and delusions about hearing the voice of God have tainted TempleOS to be that "weird bible OS" to the point where people completely disregard it. Urbit was partially created by a right-wing reactionary (Curtis Yarvin). He has been so ostracized that he [cannot publicly talk about his work][curtisbannedfromlambdaconf] to the kind of people that would most directly benefit from learning about it. Curtis isn't even involved with Urbit anymore, and his name is still somehow an irrevocable black mark on the entire thing. Minecraft was initially created by Notch, who recently had [intro texts mentioning his name removed from the game][minecraftintrotextpatch] after he said questionable things about transgender people.
## Anathema
This "irrevocable" black mark has a name: [Anathema][anathema]. It refers to anything that is shunned by the mainstream. Outsiders that create outsider art may or may not be anathema to their respective mainstreams. This turns the art into a taboo, a curse, a stain. People no longer see an anathema as the art it is, but merely the worthless product of someone that society would probably rather forget if it had the chance.
I don't really know how well this sits with me, personally. Outsiders have unique views of the world that can provide ideas that ultimately strengthen us all. Society's role is to disseminate mainstream improvements to large groups, but real development happens at the personal level.
Does one bad apple really spoil the sociological bunch? Why does this happen? Have the political divides gotten so deeply entrenched into society that people really become beyond reproach? Isn't this a recursive trap? How does someone redeem themselves to no longer be an anathema? Is it possible for people who are anathema to redeem themselves? Why or why not? Is there room for forgiveness, or does the [original sin][originalsin] doom the sinner eternally, much like it has to Catholicism?
Are the creations of an anathema outsider artist still art? Are they still an artist even though they become unable to share their art with others?
---
I don't know. These are hard questions. I don't really have much of a conclusion here. I don't want to seem like I'm trying to prescribe a method of thinking here. I'm just sitting on the side and spouting out ideas to inspire people to think for themselves.
I'm just challenging you, the reader, to really think about what/who is and is not an anathema in your day-to-day life. Identify them. Understand where/who they are. Maybe even apply some compassion and attempt to understand their view and how they got there. I'm not saying to put yourself in danger, but just to be mindful of it.
Be well.
---
Special thanks to CelestialBoon, Grapz and MoonGoodGryph for proofreading and helping with this post. This would be a very different article without their feedback and input.
[urbit]: https://urbit.org
[liquidsoftware]: https://liquidsoftware.com
[monalisa]: https://xena.greedo.xeserv.us/files/monalisa_small.jpg
[monalisawhy]: http://www.visual-arts-cork.com/painting/sfumato.htm
[perfectlovers]: https://xena.greedo.xeserv.us/files/perfect-lovers.jpg
[perfectloversdescription]: https://www.moma.org/collection/works/81074
[codersnotestempleos]: http://www.codersnotes.com/notes/a-constructive-look-at-templeos/
[terrydavis]: https://en.wikipedia.org/wiki/Terry_A._Davis
[curtisbannedfromlambdaconf]: http://www.inc.com/tess-townsend/why-it-matters-that-an-obscure-programming-conference-is-hosting-mencius-moldbug.html
[anathema]: https://en.wikipedia.org/wiki/Anathema
[minecraft]: https://www.minecraft.net/en-us/
[minecraftintrotextpatch]: https://variety.com/2019/gaming/news/notch-removed-minecraft-1203174964/
[originalsin]: https://en.wikipedia.org/wiki/Original_sin
@@ -1,127 +0,0 @@
---
date: 2021-07-03
title: My Thoughts About Using Android Again as an iPhone User
tags:
- android
- iphone
author: ectamorphic
---
I used to be a hardcore Android user. It was my second major kind of smartphone
(the first was Windows Mobile 6.1 on a T-Mobile Dash) and it left me hooked to
the concept of smartphones and connected tech in general. I've used many Android
phones over the years but one day I rage-switched over to an iPhone. My Samsung
Galaxy S7 pissed me off for the last time and I went to the Apple store and
bought an iPhone 7 on the spot. I popped my sim card into it (after a lovely
meal at Panda Express) and I was off to the races. I haven't really used Android
since other than in little stints with devices like the Amazon Fire 7 (because
it was so darn cheap).
Recently I realized that it would be very easy to package up my website for the
Google Play Store using [pwabuilder](https://www.pwabuilder.com/). I've been
shipping my site as a progressive web app (PWA) for years (and use that PWA for
testing how the site looks on my phone), but aside from the occasional confused
screenshot that's been tweeted at me I've never actually made much use of this.
It does do an additional level of caching (which is why you can load a bunch of
pages on the site, disconnect from the internet and then still browse those
pages that you loaded like you were online) though, which helps a lot with the
bandwidth cost of this site.
So, I decided to ship this site as an Android app. You can download it from the
Google Play Store
[here](https://play.google.com/store/apps/details?id=website.christine.xesite)
and get a partially native experience. It worked perfectly in the Android
emulator but you really need to experience it on a phone to know for sure. On a
whim I grabbed a [Moto g8
Power](https://www.gsmarena.com/motorola_moto_g8_power-10052.php) from Amazon
and then I used it for the final testing on the app before I shipped it on the
Google Play store. I unboxed the phone, set it up, plugged it into my MacBook
and then hit "run" in Android Studio. The app installed instantly and I saw [the
homepage for my site](https://cdn.christine.website/file/christine-static/blog/Screenshot_20210703-101654.png).
It was a magical experience. Me, someone that has no idea what they are doing
with Android app development, was able to take an existing project I've poured
years of work into and make it work on a phone like a native app. I literally
just had the phone barely out of the box and my code was running natively on it.
I don't have to worry about the app timing out, I don't have to pay Google money
to test things on my own device, I just hit play and it runs.
This is the kind of developer experience I wish I could have on iOS. I used to
have a paid developer cert for resigning a few personally hacked up apps, but
when I moved to Canada and changed over my cards to have Canadian billing
addresses I lost the ability to purchase a renewal for my developer certificate.
I _can_ change my Apple account over to a Canadian one but doing that means I
have to delete my Apple Music subscription and that would delete all of the
custom uploaded music I have in the cloud. I have more music up there than I
have disk space locally, so this is not really a viable option.
Meanwhile on Android you just open the box, turn the phone on, set it up, press
on the build number 10 times, enable USB debugging, plug it in, confirm debug
access and bam, you're in. You can test an unlimited number of Android apps
forever. I can give the APK to people and then they can tell me if it works on
their device. You cannot do this on iOS. It's making me really consider if iOS
really is the best option for me going forward.
But then the claws of the Apple ecosystem show their face. I have an iPad,
MacBook Air, Apple Watch, iPhone and AirPods. If I end up switching to Android
as my main phone I make my watch significantly less useful. I won't have the
seamless notification syncing to my wrist unless I buy a new watch. I don't
really know if I want to do that.
At the same time though, Android lets me poke around and change things that
bother me. I can make animations faster, which makes the phone _feel_ so much
more snappy and responsive. I can rip out Chrome and replace it with something
else. I can choose which app to use for text messages. I have _agency_ and
_power_ over my experience in ways that iOS simply cannot match. As a tinkerer
that mains a NixOS tower this is a huge factor for me. And then I'm able to test
my apps for free. I can just do it. I don't have to worry about dev certs,
licenses or anything else. I just put the app on the phone and I'm done.
Android's UX is a lot different than it was when I used it last. The last
Android phone I used had hardware home, menu and back buttons. This Moto g8
Power seems to have some kind of gesture control mode that mostly emulates
modern iPhone gesture controls, so my muscle memory isn't totally freaked out.
It was a bit more sensitive than I would have liked out of the box, but I was
easily able to tweak the sensitivity until I got to a level I was comfortable
with. This would have never been able to happen on iOS.
I guess this post is a lot more rambly and less focused than I thought it would
be while I was outlining it on paper. I didn't go into this expecting a 1:1
experience matchup with what I have on iOS. This phone is not nearly powerful
enough to make them comparable, however I can easily just pick it up, do what I
need and it does it. I'm considering getting a burner sim for this thing so I
can take it with me instead of (or in addition to) my iPhone. The camera is
decent, but I don't really have any good comparison shots yet. Android and iOS
are at a state of convergent evolution at this point. They both do about the
same things. Android is more easily customizable and iOS is more about a guided
experience. Neither is really "better" at this point, but I guess it really will
boil down to the ecosystem you want.
Apple's walled garden approach has a lot of
things in its favor. You can buy accessories from the Apple Store and they will
just work. You can seamlessly copy things from your phone to your tablet or your
laptop. iCloud and Airdrop glue your machines together, and in the future I can
only anticipate that each of those devices will get more and more muddled
together until there's not really a difference between them. Android has a lot
of options. There are over 15,000 Android devices out there with official Google
Play support. They're all at different patch states and have different gimmicks
to distinguish them, but you have an unparalleled amount of choice and agency.
This means that there's less of a consistent total experience, however it leaves
a lot of room for experimentation and innovation.
I like this phone and the instance of Android that runs on it. The only real
downside I've seen so far is that the update notes are in Spanish. I have no
idea why they're in Spanish, I don't speak Spanish and the phone's UI language
is set to English, but I get ["Seguridad de
Android"](https://twitter.com/theprincessxena/status/1411072416986587138/photo/1)
patches on it and that's my life now.
A lot of the Airdrop and integration features I've been missing have been
supplemented by [Taildrop](https://tailscale.com/kb/1106/taildrop/) and
Tailscale in general. It's really satisfying to be able to work for a company
that makes the annoyingly hard problem of "make computers talk to each other" so
_trivial_.
Overall, it's a 7/10 experience for me. I'd likely choose Android if I wasn't so
entrenched in the Apple/iOS ecosystem. If only it wasn't so tied into Google's
fangs.
@@ -1,10 +0,0 @@
---
title: "Animal Crossing New Horizons: An Island of Stability in an Unstable World"
date: 2021-02-28
tags:
- link
redirect_to: https://www.getrevue.co/profile/theprincessxena/issues/animal-crossing-new-horizons-an-island-of-stability-in-an-unstable-world-313933
---
Check out this post [on my
newsletter](https://www.getrevue.co/profile/theprincessxena/issues/animal-crossing-new-horizons-an-island-of-stability-in-an-unstable-world-313933)!
@@ -1,126 +0,0 @@
---
title: "The Worst Experience I've Had With an aarch64 MacBook"
date: 2021-02-15
tags:
- mac
- aarch64
---
I've had my hands on this M1 MacBook Air for a few weeks now and I have formed a
lot of opinions about it. I wanted to go over them and give my thoughts. This is
an amazing laptop. Its battery life is iPad tier. I can run iPad and iPhone apps
seamlessly.
That being said, aarch64 macOS is still very much in its teething phase. Rosetta
is nothing short of a technical miracle; it's amazing how close it gets to the
performance of running amd64 apps natively. Even so, this is probably going to
end up being the _worst_ experience I will ever have using an aarch64 MacBook:
things can only improve from here.
## Performance
[This website](https://github.com/Xe/site) is a fairly complicated webapp
written in Rust. As such it makes for a fairly decent compile stress test. I'm
going to do a compile test against my [Ryzen
3600](https://christine.website/blog/nixos-desktop-flow-2020-04-25) with this M1
MacBook Air.
My tower is running this version of Rust:
```
$ rustc --version
rustc 1.51.0-nightly (a62a76047 2021-01-13)
```
My MacBook is running this version of Rust:
```
$ rustc --version
rustc 1.50.0 (cb75ad5db 2021-02-10)
```
Building a development build my Ryzen gets this:
```
Finished dev [unoptimized + debuginfo] target(s) in 1m 00s
```
Doing the same development build, my M1 MacBook Air gets this:
```
Finished dev [unoptimized + debuginfo] target(s) in 1m 03s
```
And the MacBook didn't even get warm.
Everything I have thrown at this seems to get about the same results. This
15-watt laptop chip holds its own with desktop machines. I can only imagine how
this will proceed as Apple advances their processor technology.
## Apps
With the exception of virtual machines, the M1 MacBook Air runs nearly
everything I need it to. I have a Go compiler, Rust compiler, Nix, Discord,
Slack, Telegram, text editor, image editors, chat clients and more. Some of that
software is running in Rosetta and I am not able to tell when that is the case.
The biggest thing that doesn't run properly on here is Emacs. I am able to get a
version of it via Rosetta, however there are weird hangs that will randomly eat
up all my input while I am in flow. This is undesirable to say the least. I've
been using the aarch64 build of VS Code for the meantime, however I am really
missing the native Emacs experience. Maybe a future version of [Emacs for Mac OS
X](https://emacsformacosx.com) will improve this (or even make a fully native
aarch64 build).
Being able to run iPad and iPhone apps is also really nice. There's some
constraints involved with having to emulate the touchscreen input, however
overall it's enough to get the job done. I had to use
[iMazing](https://imazing.com) to get installable versions of some apps I wanted
to put on my mac (such as Skip The Dishes so I could get its notifications in
the same place and Procreate so I could use Sidecar to draw using the M1's GPU
power and extra ram), however they work well enough in general.
It would be nice if more companies toggled the "supported on M1 Macs" flag. I'm
willing to use a degraded experience if it means it's easier to access things
that are otherwise exclusive to my phone (such as Facebook and my banking app).
It would be great to use Netflix without having to open Safari.
Something that really surprised me was how well Dolphin runs when you use a
native build. I'm able to play GameCube and Wii games at retina resolution and
the MacBook doesn't even get warm to the touch. The amd64 version of Dolphin
uses some Just-In-Time compilation that Rosetta can't emulate at all, however
the aarch64 one runs a lot faster than it has any right to. It must be easier to
translate binaries between RISC processor types or something. You have to build
Dolphin from source when you do this, however it's worth it.
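Building it is the usual CMake dance. From memory it's roughly something like
this (check Dolphin's own build instructions for the real steps):
```console
$ git clone --recursive https://github.com/dolphin-emu/dolphin.git
$ cd dolphin && mkdir -p Build && cd Build
$ cmake ..
$ make -j$(sysctl -n hw.ncpu)
```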
## The Hardware
I have written a depressing amount of this blog's content on a butterfly
keyboard mac. The keyboard on the M1 Air is night and day better. It's like
using an older MacBook keyboard without being forced to wear headphones to mask
out the fan noise. I'm typing this in qwerty at the moment (I seem to have
settled on being able to seamlessly switch between qwerty on laptop keyboards
and Colemak Mod-DH on my Moonlander), but goddamn they really made the typing
experience so much better. I wish I had this keyboard years ago.
My previous MacBook was a 12" early 2018 model. It had 16 GB of RAM (though 8 GB
of it failed and became unusable somehow) and chugged doing basic tasks. It had a
dual core processor and ended up being practically unable to handle more than
basic code compilation. I shudder to think about how long it would take to build
my website code on that machine. It also got hot. Very hot. I didn't even have
to push it very far to get it so hot. The battery also started to go sour by the
end of me using it. Overall I think it was a good purchase and I've gotten a lot
of mileage out of it, but this M1 Air is so much better it's not even funny.
## The Verdict
If you are looking for a machine that is silent, room temperature, and capable of
doing anything you can throw at it, look into getting an Apple Silicon Mac. This
first generation is going to have the most teething issues; so if you don't want
to deal with the jank that comes with a first generation product I'd probably
suggest waiting for the M2 or whatever they are going to call it. I know it's
certainly worth it for me, but I am not you and my needs will be different from
your needs.
This writeup was not sponsored in any way, Apple is not reviewing this post for
content (and probably doesn't know that I made it). I am just a fan of this
device and want to see aarch64 on the desktop succeed.
@@ -1,12 +1,12 @@
 ---
 title: My Experience with Atom as A Vim User
 date: 2014-11-18
-series: medium-archive
-tags:
-- atom
-- vim
+from: medium
 ---
+My Experience with Atom as A Vim User
+=====================================
 Historically, I am a Vim user. People know me as a very very heavy vim
 user. I have spent almost the last two years customizing [my .vimrc
 file](https://github.com/Xe/dotfiles/blob/master/.vimrc) and I have parts
@@ -1,227 +0,0 @@
---
title: "Goodbye Kubernetes"
date: 2021-01-03
---
Well, since I posted [that last post](/blog/k8s-pondering-2020-12-31) I have had
an adventure. A good friend pointed out a server host that I had missed when I
was looking for other places to use, and now I have migrated my blog to this new
server. As of yesterday, I now run my website on a dedicated server in Finland.
Here is the story of my journey to migrate 6 years of cruft and technical debt
to this new server.
Let's talk about this goliath of a server. This server is an AX41 from Hetzner.
It has 64 GB of RAM, a 512 GB NVMe drive, three 2 TB drives, and a Ryzen 3600. For
all practical concerns, this beast is beyond overkill and rivals my workstation
tower in everything but the GPU power. I have named it `lufta`, which is the
word for feather in [L'ewa](https://lewa.within.website/dictionary.html).
## Assimilation
For my server setup process, the first step is to assimilate it. In this step I
get a base NixOS install on it somehow. Since I was using Hetzner, I was able to
boot into a NixOS install image using the process documented
[here](https://nixos.wiki/wiki/Install_NixOS_on_Hetzner_Online). Then I decided
that it would also be cool to have this server use
[zfs](https://en.wikipedia.org/wiki/ZFS) as its filesystem to take advantage of
its legendary subvolume and snapshotting features.
So I wrote up a bootstrap system definition like the Hetzner tutorial said and
ended up with `hosts/lufta/bootstrap.nix`:
```nix
{ pkgs, ... }:
{
services.openssh.enable = true;
users.users.root.openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPg9gYKVglnO2HQodSJt4z4mNrUSUiyJQ7b+J798bwD9 cadey@shachi"
];
networking.usePredictableInterfaceNames = false;
systemd.network = {
enable = true;
networks."eth0".extraConfig = ''
[Match]
Name = eth0
[Network]
# Add your own assigned ipv6 subnet here!
Address = 2a01:4f9:3a:1a1c::/64
Gateway = fe80::1
# optionally you can do the same for ipv4 and disable DHCP (networking.dhcpcd.enable = false;)
Address = 135.181.162.99/26
Gateway = 135.181.162.65
'';
};
boot.supportedFilesystems = [ "zfs" ];
environment.systemPackages = with pkgs; [ wget vim zfs ];
}
```
Then I fired up the kexec tarball and waited for the server to boot into a NixOS
live environment. A few minutes later I was in. I started formatting the drives
according to the [NixOS install
guide](https://nixos.org/manual/nixos/stable/index.html#sec-installation) with
one major difference: I added a `/boot` ext4 partition on the SSD. This allows
me to have the system root device on zfs. I added the disks to a `raidz1` pool
and created a few volumes. I also added the SSD as a log device so I get SSD
caching.
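A minimal sketch of that kind of pool setup looks something like this (the
device names and dataset layout here are made up for illustration, not the
exact ones on lufta):
```console
$ zpool create -O mountpoint=none rpool raidz1 /dev/sda /dev/sdb /dev/sdc log /dev/nvme0n1p3
$ zfs create -o mountpoint=legacy rpool/root
$ zfs create -o mountpoint=legacy rpool/home
```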
From there I installed NixOS as normal and rebooted the server. It booted
normally. I had a shiny new NixOS server in the cloud! I noticed that the server
had booted into NixOS unstable as opposed to NixOS 20.09 like my other nodes. I
thought "ah, well, that probably isn't a problem" and continued to the
configuration step.
[That's ominous...](conversation://Mara/hmm)
## Configuration
Now that the server was assimilated and I could SSH into it, the next step was
to configure it to run my services. While I was waiting for Hetzner to provision
my server I ported a bunch of my services over to Nixops services [a-la this
post](/blog/nixops-services-2020-11-09) in [this
folder](https://github.com/Xe/nixos-configs/tree/master/common/services) of my
configs repo.
Now that I had them, it was time to add this server to my Nixops setup. So I
opened the [nixops definition
folder](https://github.com/Xe/nixos-configs/tree/master/nixops/hexagone) and
added the metadata for `lufta`. Then I added it to my Nixops deployment with
this command:
```console
$ nixops modify -d hexagone -n hexagone *.nix
```
Then I copied over the autogenerated config from `lufta`'s `/etc/nixos/` folder
into
[`hosts/lufta`](https://github.com/Xe/nixos-configs/tree/master/hosts/lufta) and
ran a `nixops deploy` to add some other base configuration.
## Migration
Once that was done, I started enabling my services and pushing configs to test
them. After I got to a point where I thought things would work I opened up the
Kubernetes console and started deleting deployments on my kubernetes cluster as
I felt "safe" to migrate them over. Then I saw the deployments come back. I
deleted them again and they came back again.
Oh, right. I enabled that one Kubernetes service that made it intentionally hard
to delete deployments. One clever set of scale-downs and kills later and I was
able to kill things with wild abandon.
I copied over the gitea data with `rsync` running in the kubernetes deployment.
Then I killed the gitea deployment, updated DNS and reran a whole bunch of gitea
jobs to resanify the environment. I did a test clone on a few of my repos and
then I deleted the gitea volume from DigitalOcean.
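The copy itself was nothing fancy. A rough sketch of that kind of move looks
something like this (the deployment name, paths and destination are stand-ins,
not the exact invocation):
```console
$ kubectl exec -it deploy/gitea -- \
    rsync -avz /data/gitea/ root@lufta:/srv/within/gitea/
```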
Moving over the other deployments from Kubernetes into NixOS services was
somewhat easy, however I did need to repackage a bunch of my programs and static
sites for NixOS. I made the
[`pkgs`](https://github.com/Xe/nixos-configs/tree/master/pkgs) tree a bit more
fleshed out to compensate.
[Okay, packaging static sites in NixOS is beyond overkill, however a lot of them
need some annoyingly complicated build steps and throwing it all into Nix means
that we can make them reproducible and use one build system to rule them
all. Not to mention that when I need to upgrade the system, everything will
rebuild with new system libraries to avoid the <a
href="https://blog.tidelift.com/bit-rot-the-silent-killer">Docker bitrot
problem</a>.](conversation://Mara/hacker)
## Reboot Test
After a significant portion of the services were moved over, I decided it was
time to do the reboot test. I ran the `reboot` command and then...nothing.
My continuous ping test was timing out. My phone was blowing up with downtime
messages from NodePing. Yep, I messed something up.
I was able to boot the server back into a NixOS recovery environment using the
kexec trick, and from there I was able to prove the following:
- The zfs setup is healthy
- I can read some of the data I migrated over
- I can unmount and remount the ZFS volumes repeatedly
I was confused. This shouldn't be happening. After half an hour of
troubleshooting, I gave in and ordered an IPKVM to be installed in my server.
Once that was set up (and I managed to trick macOS into letting me boot a .jnlp
web start file), I rebooted the server so I could see what error I was getting
on boot. I missed it the first time around, but the second time I was able to
capture this screenshot:
![The error I was looking
for](https://cdn.christine.website/file/christine-static/blog/Screen+Shot+2021-01-03+at+1.13.05+AM.png)
Then it hit me. I did the install on NixOS unstable. My other servers use NixOS
20.09, so deploying my configs had downgraded zfs, and the older version of zfs
couldn't mount the volume created by the newer version of zfs in read/write
mode. One more trip to the recovery environment later to install NixOS unstable
in a new generation.
Then I switched my tower's default NixOS channel to the unstable channel and ran
`nixops deploy` to reactivate my services. After the NodePing uptime
notifications came in, I ran the reboot test again while looking at the console
output to be sure.
It booted. It worked. I had a stable setup. Then I reconnected to IRC and passed
out.
## Services Migrated
Here is a list of all of the services I have migrated over from my old dedicated
server, my kubernetes cluster and my dokku server:
- aerial -> discord chatbot
- goproxy -> go modules proxy
- lewa -> https://lewa.within.website
- hlang -> https://h.christine.website
- mi -> https://mi.within.website
- printerfacts -> https://printerfacts.cetacean.club
- xesite -> https://christine.website
- graphviz -> https://graphviz.christine.website
- idp -> https://idp.christine.website
- oragono -> ircs://irc.within.website:6697/
- tron -> discord bot
- withinbot -> discord bot
- withinwebsite -> https://within.website
- gitea -> https://tulpa.dev
- other static sites
Doing this migration is a bit of an archaeology project as well. I was
continuously discovering services that I had littered over my machines with very
poorly documented requirements and configuration. I hope that this move will
make the next migration of this kind a lot easier by comparison.
I still have a few other services to move over, however the ones that are left
are much more annoying to set up properly. I'm going to get to deprovision 5
servers in this migration, and as a result I get this stupidly powerful goliath
of a server to do whatever I want with. I also get to cut my monthly server
costs by over half.
I am very close to being able to turn off the Kubernetes cluster and use NixOS
for everything. A few services that are still on the Kubernetes cluster are
resistant to being nixified, so I may have to use the Docker containers for
that. I was hoping to be able to cut out Docker entirely, however we don't seem
to be that lucky yet.
Sure, there is some added latency with the server being in Europe instead of
Montreal, however if this ever becomes a practical issue I can always launch a
cheap DigitalOcean VPS in Toronto to act as a DNS server for my WireGuard setup.
Either way, I am now off Kubernetes for my highest traffic services. If services
of mine need to use the disk, they can now just use the disk. If I really care
about the data, I can add the service folders to the list of paths to back up to
`rsync.net` (I have a post about how this backup process works in the drafting
stage) via [borgbackup](https://www.borgbackup.org/).
Let's hope it stays online!
---
Many thanks to [Graham Christensen](https://twitter.com/grhmc), [Dave
Anderson](https://twitter.com/dave_universetf) and everyone else who has been
helping me along this journey. I would be lost without them.
@@ -2,10 +2,10 @@
 title: The Beautiful in the Ugly
 date: 2018-04-23
 for: Silver
-tags:
-- shell
 ---
+# The Beautiful in the Ugly
 Functional programming is nice and all, but sometimes you just need to have
 things get done regardless of the consequences. Sometimes a dirty little hack
 will suffice in place of a branching construct. This is a story of one of these
@@ -1,11 +1,11 @@
 ---
 title: Web Application Development with Beego
 date: 2014-11-28
-tags:
-- go
-- beego
 ---
+Web Application Development with Beego
+======================================
 Beego is a fantastic web application framework from the Go China
 community. It currently powers some of the biggest websites in China,
 and thus the world.
@@ -1,191 +0,0 @@
---
title: "My Next Life as an Imaginary Bottle of Window Cleaner"
date: 2021-09-24
author: Mai
tags:
- isekai
- fiction
---
Today was another boring day, like all the other boring days. I was almost done
with my commute to the office, not looking forward to spending another day
working on the same spreadsheet. I don't believe in monsters, ghosts or spirits,
but I do believe that thing is haunted.
"Next stop is Broughton Junction, end of the line!"
That was my stop. I thought I got it easy by getting an apartment right next to
the train, but I never thought that my job would move so far away. I got up with
the others and walked my way to the exit of the station. The crowd was lifeless
and dead today. The usual morning rush of the station I got on the train at had
turned into a lazy slump towards the exit.
The only solace from these doldrums was the barista at the coffee shop by the
doors. She had such a lovely smile; sometimes it was the only thing that kept me
going. _I wonder what her name is_, I thought to myself as I breached the
darkness of the station and was enveloped in the light of the outside world.
Nubypool was the financial hub of the province. My work was in the Ministry of
Accounting, taming the beast that was their spreadsheets. As I walked towards
the entrance of the building I looked around, seeing the gray, drab skyscrapers
around me sully the lively deep blue above them.
I crossed the street and walked to the double doors, but then there was a noise.
I looked up and saw something falling towards me, then suddenly there was a loud
noise and everything went dark.
The darkness persisted for a while, I couldn't touch, see or hear myself. I was
alone in a silent hell with only my thoughts to break the sheer emptiness of it
all. Suddenly, it hit me. _I'm dead, aren't I? I'm never going to get that
barista's number, am I?_
A noise filled my awareness. I couldn't recognize it, but it came with a
friendly presence. It felt like my best friend, mother, and more all in one. I
felt like I knew this person intimately even though we had never spoken before.
I could pick out that there were words, but nothing usable other than that.
The words stopped after a while, and that was the saddest part of the whole
thing. I was so alone without the words, but I didn't feel like I had any
control over when they started. They started and stopped without any indication
of where or when they were coming from.
Then they started again and I knew that I needed to capitalize on this. The
voice was going to go away if I didn't do anything about it. All I could do was
think, so I decided to think as loud and hard as I could. I screamed in my mind.
I would have woken the neighbors if this was in the apartment back home. Those
thin walls would have made the entire floor wake up. I shouted and shouted like
my life depended on it. As I shouted and screamed the darkness started to gain
color. There was a small pinhole of light that expanded and filled my awareness
until it grew to everywhere I could see. _Is this reincarnation?_
As if a blindfold had been lifted, I could finally get to see myself in some
level of detail. I looked down and all I saw was a bright poster with what
looked like words on it. I looked behind me and I saw what appeared to be a
poster with more smaller words. I looked to the sides and saw what appeared to
be clear plastic with a blue liquid in it.
I started panicking. I didn't know what was going on. _Aren't I supposed to be
some kind of animal? Why am I this?_
The voice started again, a lot more clearly. I could pick out the words: "So
_that's_ why they call it a 'head pressure'! Wow I don't even know how to
describe this feeling other than that!"
I looked ahead and I saw a young boy, something like 18 years old. I was in some
kind of small apartment. There was a ritual circle of some kind, I was right in
the middle of it. Looking around there was a pentagram made out of a white and
blue powder, cards I've never seen before and a bottle of red liquid. There was
a mirror to the side of me though. I took a long and detailed look at myself.
I saw an orange nozzle look back at me. There was what appeared to be a handle
under the nozzle, looking like it was screwed on top of a bottle of...
_No, this can't be!_
It was screwed on top of a bottle of window cleaner.
"I'M A BOTTLE OF WINDOW CLEANER????"
The boy in front of me winced and grunted, my shouting must have hurt him. I
suddenly got a feeling of fear washing over me. I felt the tip of my nozzle
screw itself shut and my self-image vanished from the mirror. Had I interrupted
the ritual?
The boy composed himself and looked down at me. "So, you're the succubus I
summoned? What's wrong with you?"
"S...succubus? I'm not a succubus, I could swear that a few days ago I was...
well not this. I don't know what I am other than apparently..."
I tried to gesture at my own body but only ended up making my handle flail about
meaninglessly, spraying a bit of blue fluid out of my nozzle. It felt like it
had come out of my mouth. It had no taste initially, but it left a slightly sour
taste after a moment.
"A bottle of window cleaner??"
"...What. How did this happen?"
"Why are you asking me? You apparently summoned me. What did you expect to get?"
The boy looked at me like I had lobsters crawling out of my ears. Assuming I had
ears big enough for lobsters to crawl out of, that is.
"I...what...how...okay"
There was suddenly a knock at the door. "SHIT!", the boy said to himself without
saying it. He started scrambling around and frantically trying to clean things.
He swept up the white powder and put away the playing cards, but try as he might
he couldn't grab me to put me away. His hand phased through me like I wasn't
there. He got up and let the other person in. It was a pizza delivery person.
The boy handed over some money and then took the pizza and placed it on top of a
clear spot of his desk.
I suddenly found myself on top of a metal surface. I could feel something
spinning under me and heard the soft hum of an idle fan. I had just teleported
across the room. "W..what, why am I here?"
"I didn't...know that would do anything. I thought it would be nice if you were
on top of my computer and you just appeared there." Another knock came at the
door. There was a female voice.
"Shit, play it cool."
"I don't know how I can do anything other than what I am doing now, but okay."
A woman entered the room. She appeared to be in a very skimpy outfit, showing
off her curves. She also seemed to have a pair of demonic looking wings that she
let open in the middle of the room. The boy awkwardly made conversation with her
for a while. I tried to get the boy's attention by spraying at him. I definitely
got his attention. "What're you looking at, roomie? Aren't you gonna give your
poor roomie some pizza?"
I didn't think she could see me. I tried spraying more. The boy looked over at
me angrily. "Cut it out bottle, I'll throw you out with the trash if you keep
that up!"
"What are you talking about? I'm not a bottle, I'm hungry." She walked over to
the pizza and grabbed a slice, pushing the boy into his desk. She looked dead at
me when she had her slice. I don't think she saw me. She turned around and
bumped one of her wings into the boy, causing it to fall off. "Nooo! My
costume!"
"That's what you get for wearing a succubus costume in a dorm hall."
She stuck her tongue out sarcastically at the boy and picked up her wing. "Okay
sarcasm boy, you're gonna help me get this on~. If you do I may have a fun time
planned for you~."
"I know what you're planning Mike. I'm not going to bang you. We agreed that I
wouldn't before you did this."
"No fun."
Her name is _Mike_? I thought about it and decided that it was better to leave
things be.
"Hey, dude, I don't really know what's going on. Where am I? Who am I? What am
I? Are you just gonna keep me in the dark about all of this?" I tried to direct
this to the boy with a burst of energy, spraying him to be sure I got his
attention.
He looked over at me like I was cock-blocking him. I felt myself blush and
backed off. It looked like there would be plenty of time later to get those
questions answered.
"Hey Conner, when are you going to get into your costume? Did the ritual I gave
you work?"
"Yeah...about that..."
---
Elsewhere at a random cleaning product factory, it was business as usual. The
production lines were running smoothly. The trucks of cleaner continued as
normal and every bottle was being filled at the normal rate.
A woman in a skimpy outfit suddenly appeared on the production line, causing one
of the workers to hit the "emergency stop" button. The woman had a demonic aura
about her. Her bewitching gaze stunned the factory floor. She got off the
production line and walked out of the building with that worker in tow,
seemingly enamored with her. _This is going to be fun~_, she thought to herself.

View File

@ -1,9 +1,10 @@
---
title: "Blind Men and an Elephant"
date: 2018-11-29
series: conlangs
---
# Blind Men and an Elephant
or
# le'i ka na viska kakne ku e le xanto

View File

@ -1,19 +0,0 @@
---
title: "Blog Feature: Art Gallery"
date: 2019-11-01
tags:
- art
- announce
- 100th-post
---
I have just implemented support for my portfolio site to also function as an art
gallery. See all of my posted art [here](/gallery).
I have been trying to get better at art for a while and I feel I'm at the level
where I feel comfortable putting it on my portfolio. Let's see how far this
rabbit hole goes.
---
Also this is my 100th post! Yay!

View File

@ -1,176 +0,0 @@
---
title: "How to Set Up Borg Backup on NixOS"
date: 2021-01-09
series: howto
tags:
- nixos
- borgbackup
---
[Borg Backup](https://www.borgbackup.org/) is an encrypted, compressed,
deduplicated backup program for multiple platforms including Linux. Combined
with the [NixOS options for configuring
Borg Backup](https://search.nixos.org/options?channel=20.09&show=services.borgbackup.jobs.%3Cname%3E.paths&from=0&size=30&sort=relevance&query=services.borgbackup.jobs),
it allows you to back up on a schedule and restore from those backups when you
need to.
Borg Backup works with local files and remote servers, and there are even [cloud
hosts](https://www.borgbackup.org/support/commercial.html) that specialize in
hosting your backups. In this post we will cover how to set up a backup job on a
server using [BorgBase](https://www.borgbase.com/)'s free tier to host the
backup files.
## Setup
You will need a few things:
- A free BorgBase account
- A server running NixOS
- A list of folders to back up
- A list of folders to NOT back up
First, we will need to create an SSH key for root to use when connecting to
BorgBase. Open a shell as root on the server and make a `borgbackup` folder in
root's home directory:
```shell
mkdir borgbackup
cd borgbackup
```
Then create an SSH key that will be used to connect to BorgBase:
```shell
ssh-keygen -f ssh_key -t ed25519 -C "Borg Backup"
```
Leave the SSH key passphrase empty, because at this time the automated Borg
Backup job doesn't allow the use of password-protected SSH keys.
Now we need to create an encryption passphrase for the backup repository. Run
this command to generate one using [xkcdpass](https://pypi.org/project/xkcdpass/):
```shell
nix-shell -p python39Packages.xkcdpass --run 'xkcdpass -n 12' > passphrase
```
[You can do whatever you want to generate a suitable passphrase, however
xkcdpass is proven to be <a href="https://xkcd.com/936/">more random</a> than
most other password generators.](conversation://Mara/hacker)
## BorgBase Setup
Now that we have the basic requirements out of the way, let's configure BorgBase
to use that SSH key. In the BorgBase UI click on the Account tab in the upper
right and open the SSH key management window. Click on Add Key and paste in the
contents of `./ssh_key.pub`. Name it after the hostname of the server you are
working on. Click Add Key and then go back to the Repositories tab in the upper
right.
Click New Repo and name it after the hostname of the server you are working on.
Select the key you just created to have full access. Choose the region of the
backup volume and then click Add Repository.
On the main page copy the repository path with the copy icon next to your
repository in the list. You will need this below. Attempt to SSH into the backup
repo in order to have ssh recognize the server's host key:
```shell
ssh -i ./ssh_key o6h6zl22@o6h6zl22.repo.borgbase.com
```
Then accept the host key and press control-c to terminate the SSH connection.
## NixOS Configuration
In your `configuration.nix` file, add the following block:
```nix
services.borgbackup.jobs."borgbase" = {
paths = [
"/var/lib"
"/srv"
"/home"
];
exclude = [
# very large paths
"/var/lib/docker"
"/var/lib/systemd"
"/var/lib/libvirt"
# temporary files created by cargo and `go build`
"**/target"
"/home/*/go/bin"
"/home/*/go/pkg"
];
repo = "o6h6zl22@o6h6zl22.repo.borgbase.com:repo";
encryption = {
mode = "repokey-blake2";
passCommand = "cat /root/borgbackup/passphrase";
};
environment.BORG_RSH = "ssh -i /root/borgbackup/ssh_key";
compression = "auto,lzma";
startAt = "daily";
};
```
Customize the paths and exclude lists to your needs. Once you are satisfied,
rebuild your NixOS system using `nixos-rebuild`:
```shell
nixos-rebuild switch
```
And then you can fire off an initial backup job with this command:
```shell
systemctl start borgbackup-job-borgbase.service
```
Monitor the job with this command:
```shell
journalctl -fu borgbackup-job-borgbase.service
```
The first backup job will always take the longest to run. Every incremental
backup after that will get smaller and smaller. By default, the system will
create new backup snapshots every night at midnight local time.
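
If the default schedule doesn't suit you, `startAt` accepts any systemd calendar
expression, and the NixOS module also exposes Borg's pruning settings so the
repository doesn't grow without bound. Here is a minimal sketch extending the
job above; the time and retention numbers are placeholders I picked for
illustration:

```nix
services.borgbackup.jobs."borgbase" = {
  # ...paths, repo, encryption and so on as configured above...

  # run at 03:00 local time instead of midnight; any systemd calendar
  # expression is accepted here
  startAt = "*-*-* 03:00:00";

  # automatically discard old archives; tune these numbers to taste
  prune.keep = {
    daily = 7;
    weekly = 4;
    monthly = 6;
  };
};
```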
## Restoring Files
To restore files, first figure out which point in time you want to restore from.
NixOS includes a wrapper script for each Borg job you define. You can mount your
backup archive using this command:
```
mkdir mount
borg-job-borgbase mount o6h6zl22@o6h6zl22.repo.borgbase.com:repo ./mount
```
Then you can explore the backup (and with it each incremental snapshot) to
your heart's content and copy files out manually. You can look through each
folder and copy out what you need.
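
If you want to know which snapshots exist before you go digging, the same
wrapper can also list the archives in the repository (assuming, as the NixOS
wrapper does, that extra arguments are passed straight through to `borg`). Each
line of output is an archive name and its creation time, matching the folders
you see under the mount point:

```
borg-job-borgbase list o6h6zl22@o6h6zl22.repo.borgbase.com:repo
```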
When you are done you can unmount it with this command:
```
borg-job-borgbase umount ./mount
```
---
And that's it! You can get more fancy with nixops using a setup [like
this](https://github.com/Xe/nixos-configs/blob/master/common/services/backup.nix).
In general though, you can get away with this setup. It may be a good idea to
copy down the encryption passphrase onto paper and put it in a safe place like a
safety deposit box.
For more information about Borg Backup on NixOS, see [the relevant chapter of
the NixOS
manual](https://nixos.org/manual/nixos/stable/index.html#module-borgbase) or
[the list of borgbackup
options](https://search.nixos.org/options?channel=20.09&query=services.borgbackup.jobs)
that you can pick from.
I hope this is able to help.

View File

@ -1,154 +0,0 @@
---
title: How I Converted my Brain fMRI to a 3D Model
date: 2019-08-23
series: howto
tags:
- python
- blender
---
AUTHOR'S NOTE: I just want to start this out by saying I am not an expert, and
nothing in this blogpost should be construed as medical advice. I just wanted
to see what kind of pretty pictures I could get out of an fMRI data file.
So this week I flew out to Stanford to participate in a study that involved an
fMRI of my brain while I was doing some things. I asked for (and received) a
data file from the fMRI so I could play with it and possibly 3D print it. This
blogpost is the record of my journey through various software to get a fully
usable 3D model out of the fMRI data file.
## The Data File
I was given [christine_brain.nii.gz][firstniifile] by the researcher who was
operating the fMRI. I looked around for some software to convert it to a 3D
model and [/r/3dprinting][r3dprinting] suggested the use of [FreeSurfer][freesurfer]
to generate a 3D model. I downloaded and installed the software then started
to look for something I could do in the meantime, as this was going to take
something on the order of 8 hours to process.
### An Animated GIF
I started looking for the file format on the internet by googling "nii.gz brain image"
and I stumbled across a program called [gif\_your\_nifti][gyn]. It looked to be
mostly pure python so I created a virtualenv and installed it in there:
```
$ git clone https://github.com/miykael/gif_your_nifti
$ cd gif_your_nifti
$ virtualenv -p python3 env
$ source env/bin/activate
(env) $ pip3 install -r requirements.txt
(env) $ python3 setup.py install
```
Then I ran it with the following settings to get [this first result][firstgif]:
```
(env) $ gif_your_nifti christine_brain.nii.gz --mode pseudocolor --cmap plasma
```
<center><video controls> <source src="https://xena.greedo.xeserv.us/files/christine-fmri-raw.mp4" type="video/mp4">A sideways view of the brain</video></center>
<small>(sorry the video embed isn't working in safari)</small>
It looked weird though; that's because the fMRI scanner I used has a different
rotation from what's considered "normal". The gif\_your\_nifti repo mentioned a
program called `fslreorient2std` to reorient the fMRI image, so I set out to
install and run it.
### FSL
After some googling, I found [FSL's website][fsl] which included an installer
script and required registration.
37 gigabytes of downloads and data later, I had the entire FSL suite installed
to a server of mine and ran the conversion command:
```
$ fslreorient2std christine_brain.nii.gz christine_brain_reoriented.nii.gz
```
This produced a slightly smaller [reoriented file][secondniifile].
I reran gif\_your\_nifti on this reoriented file and got [this result][secondgif]
which looked a _lot_ better:
<center><video controls> <source src="https://xena.greedo.xeserv.us/files/christine-fmri-reoriented.mp4">A properly reoriented brain</video></center>
<small>(sorry again the video embed isn't working in safari)</small>
### FreeSurfer
By this time I had gotten back home and [FreeSurfer][freesurfer] was done installing,
so I registered for it (god bless the institution of None) and put its license key
in the place it expected. I copied the reoriented data file to my Mac and then
set up a `SUBJECTS_DIR` and had it start running the numbers and extracting the
brain surfaces:
```
$ cd ~/tmp
$ mkdir -p brain/subjects
$ cd brain
$ export SUBJECTS_DIR=$(pwd)/subjects
$ recon-all -i /path/to/christine_brain_reoriented.nii.gz -s christine -all
```
This step took 8 hours. Once it was done I had a bunch of data in
`$SUBJECTS_DIR/christine`. I opened my shell to that folder and went into the
`surf` subfolder:
```
$ mris_convert lh.pial lh.pial.stl
$ mris_convert rh.pial rh.pial.stl
```
Now I had standard stl files that I could stick into [Blender][blender].
### Blender
Importing the stl files was really easy. I clicked on File, then Import, then
Stl. After guiding the browser to the subjects directory and finding the STL
files, I got a view that looked something like this:
<center><blockquote class="twitter-tweet"><p lang="en" dir="ltr">BRAIN <a href="https://t.co/kGSrPj0kgP">pic.twitter.com/kGSrPj0kgP</a></p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1164526098526478336?ref_src=twsrc%5Etfw">August 22, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script></center>
I had absolutely no idea what to do from here in Blender, so I exported the
whole thing to an stl file and sent it to a coworker for 3D printing (he said
it was going to be "the coolest thing he's ever printed").
I also exported an Unreal Engine 4 compatible model and sent it to a friend of
mine that does hobbyist game development. A few hours later I got this back:
<center><blockquote class="twitter-tweet"><p lang="und" dir="ltr"><a href="https://t.co/fXnwnSpMry">pic.twitter.com/fXnwnSpMry</a></p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1164714830630203393?ref_src=twsrc%5Etfw">August 23, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script></center>
(Hint: it is a take on the famous [galaxy brain memes][galaxybrain])
## Conclusion
Overall, this was fun! I got to play with many gigabytes of software that ran
my most powerful machine at full blast for 8 hours, I made a fully printable 3D
model out of it and I have some future plans for importing this data into
Minecraft (the NIFTI `.nii.gz` format has a limit of _256 layers_).
I'll be sure to write more about this in the future!
## Citations
Here are my citations in [BibTex format][citations].
Special thanks goes to Michael Lifshitz for organizing the study that I
participated in that got me this fMRI data file. It was one of the coolest
things I've ever done (if not the coolest) and I'm going to be able to get a
3D printed model of my brain out of it.
[firstniifile]: https://xena.greedo.xeserv.us/files/christine_brain.nii.gz
[secondniifile]: https://xena.greedo.xeserv.us/files/christine_brain_reoriented.nii.gz
[r3dprinting]: https://www.reddit.com/r/3Dprinting/comments/2w0zxx/magnetic_resonance_image_nii_to_stl/
[freesurfer]: https://surfer.nmr.mgh.harvard.edu/fswiki/FreeSurferWiki
[gyn]: https://github.com/miykael/gif_your_nifti
[firstgif]: /static/blog/christine-fmri-raw.mp4
[secondgif]: /static/blog/christine-fmri-reoriented.mp4
[fsl]: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/
[blender]: https://www.blender.org
[galaxybrain]: https://knowyourmeme.com/memes/expanding-brain
[citations]: /static/blog/brainfmri-to-3d-model.bib

View File

@ -1,105 +0,0 @@
---
title: You Win, Broken Database Schemas
date: 2022-01-10
tags:
- rant
---
There is [no software that correctly handles
names](https://www.kalzumeus.com/2010/06/17/falsehoods-programmers-believe-about-names/)
that exists on this planet. One of the major things I have bashed my head into
as of late is the assumption that people have a first and a last name. The first
name is usually what identifies the person, and the last name usually identifies
the family.
I have wanted to use `Xe` as my name in places (no last name, like Socrates), but
everyone has broken database schemas that make it impossible. These schemas
usually look like this:
```sql
CREATE TABLE IF NOT EXISTS people
( id VARCHAR PRIMARY KEY DEFAULT (uuid4())
, first_name VARCHAR NOT NULL
, last_name VARCHAR NOT NULL
-- draw the rest of the owl
);
```
And as a result things like `Xe` (no last name) cannot fit into this schema. I
have found out the depth of this shitshow while trying to use my handle as my
name on newly registered accounts, and the amount of stuff that breaks or
works in weird ways is _staggering_. Email salutations look like this:
> Hello Xe ,
Forms will break if I don't put a last name in the field. The assumptions about
names are _so deep_ that it's rapidly becoming not worth it to only have my name
as `Xe`. Not to mention [overzealous journalists that will argue with you over
what your name is due to name
collisions](https://twitter.com/theprincessxena/status/1479197000667181061?s=20).
You win, broken database schemas. I give up trying to deal with you to encode my
name correctly. You just don't let me and I am tired of fighting it, opening
support tickets and arguing with people over what my name is. I give in. I'm
going to use a last name for my handle, which is absolutely ridiculous, but here
we are.
It took me a few hours to dig through ideas over the weekend and today, but I
think I have found something satisfactory enough that I can keep it for the long
haul: [Iaso](https://en.wikipedia.org/wiki/Iaso) (ai-uh-so, /aɪ.ə.soʊ/), the
minor Greek goddess of recovering from illness.
Hopefully I don't have to deal with professional issues as a result of me trying
to be more true to myself about my identity. At the very least I want very
little to do with the last name that I was born into. Some day that name will be
removed from the last database with it set, but today is not that day.
If you work on systems that handle names, please, please, please take the time
to reconsider whether you actually need to require a last name for any reason
beyond it being the cultural standard. There are valid reasons to have a
mononym, and by
supporting mononyms you will make people's lives easier.
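
For what it's worth, a schema that stores the name as one opaque string
sidesteps the whole problem. This is only a sketch of the shape I wish these
tables had, not something any particular product uses:

```sql
CREATE TABLE IF NOT EXISTS people
( id           VARCHAR PRIMARY KEY DEFAULT (uuid4())
, full_name    VARCHAR NOT NULL -- whatever the person says their name is, even a mononym
, display_name VARCHAR          -- optional short form for salutations ("Hello Xe,")
-- draw the rest of the owl
);
```

If something genuinely needs a family name (legal paperwork, postal forms),
make it its own nullable column instead of a required one.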
Until then, I am `Xe Iaso`. Let's see where this phase of the identity
experiment goes. It's still really complicated. Anyone who claims to have their
identity figured out is either in denial or stopped digging into it for the time
being. The rabbit hole truly never ends.
The main thing I don't like about this name is how ambiguous it shows up in
sans-serif fonts:
<div style="font-family:sans-serif">
Xe Iaso
</div>
It looks like `Xe laso`. I've edited my email signature to try and compensate
for this:
```
Xe Iaso (zi ai-uh-so)
https://christine.website
.i la budza pu cusku lu
<<.i ko snura .i ko kanro
.i ko panpi .i ko gleki
```
Let's see if that helps. It will probably look bad when things are put into
sans-serif fonts, but what can you do lol.
---
Also I would _prefer_ you call me `Xe` from now on when possible. This conflicts
with and supersedes suggestions I made in [this article](/blog/xe-2021-08-07). I
consider most of that experiment to have worked out and I am going into the next
phase, albeit less "pure" than I wanted.
Thank you for sticking with this blog. This started out as a place for me to get
better at writing but has rapidly turned into something that has helped me
explore my identity in ways that I never would have thought it would. Thanks for
following the rabbit hole. Thank you for supporting me being more authentic to
myself about who I am. Your support means more than you possibly will know.
I wonder if my SEO craft is strong enough to get me high on the list of google
results for `Iaso`.

View File

@ -1,192 +0,0 @@
---
title: Advice to People Nurturing a Career in Computering
date: 2019-06-18
tags:
- career
---
Computering, or making computers do things in exchange for money, can be a
surprisingly hard field to break into as an outsider. There's lots of jargon,
tool holy wars, flamewars about the "right" way to do things and a whole host
of overhead that can make it feel difficult or impossible when starting from
scratch. I'm a college dropout; I know what it's like to be turned down over
and over because of the lack of that blessed square paper. In this post I
hope to give some general advice based on what has and hasn't worked for me
over the years.
Hopefully this can help you too.
## Make a Portfolio Site
When you are breaking into the industry, there is a huge initial "brand" issue.
You're nobody. This is both a very good thing and a very bad thing. It's a very
good thing because you have a clean slate to start from. It's also a very bad
thing because you have nothing to refer to yourself with.
Part of establishing a brand for yourself in this day and age is to make a website
(like the one you are probably reading this off of right now). This website can
be powered by anything. [GitHub Pages](https://pages.github.com) with the `github.io`
domain works, but it's probably a better idea to make your website backend from scratch.
Your website should include at least the following things:
- Your name
- A few buzzwords relating to the kind of thing you'd like to do with computers (example: I have myself listed as a "Backend Services and Devops Specialist" which sounds really impressive yet doesn't really mean much of anything)
- Tools or soft skills you are experienced with
- Links to yourself on other social media platforms (GitHub, Twitter, LinkedIn, etc.)
- Links to or words about projects of yours that you are proud of
- Some contact information (an email address is a good idea too)
If you feel comfortable doing so, I'd also suggest putting your [resume](https://christine.website/resume)
on this site too. Even if it's just got your foodservice jobs or education
history (including your high school diploma if need be).
This website can then be used as a landing page for other things in the future
too. It's _your_ space on the internet. _You_ get to decide what's up there or
not.
## Make a Tech Blog On That Site
This has been the single biggest thing to help me grow professionally. I regularly
put [articles](https://christine.website/blog) on my blog, sometimes not even about
technology topics. Even if you are writing about your take on something people have
already written about, it's still good practice. Your early posts are going to be
rough. It's normal to not be an expert when starting out in a new skill.
This helps you stand out in the interview process. I've actually managed to skip
interviews with companies purely because of the contents of my blog. One of them
had the interviewer almost word for word say the following:
> I've read your blog, you don't need to prove technical understanding to me.
It was one of the most awestruck feelings I've ever had in the hiring process.
## Find People to Mentor You
Starting out you are going to not be very skilled in anything. One good way you
can help yourself get good at things is to go out into communities and ask for
help understanding things. As you get involved in communities, naturally you will
end up finding people who are giving a lot of advice about things. Don't be
afraid to ask people for more details.
Get involved in niche communities (like unpopular Linux distros) and help them
out, even if it's just doing spellcheck over the documentation. This kind of
stuff really makes you stand out and people will remember it.
Formal mentorship is a very hard thing to try and define. It's probably better
to surround yourself with experts in various niche topics rather than looking
for that one magic mentor. Mentorship can be a very time consuming thing on the
expert's side. Be thankful for what you can get and try and give back by helping
other people too.
Seriously though, don't be afraid to email or DM people for more information about
topics that don't make sense in group chats. I have found that people really
appreciate that kind of stuff, even if they don't immediately have the time to
respond in detail.
## Do Stuff with Computers, Post the Results Somewhere
Repository hosting sites like GitHub and Gitlab allow you to show potential
employers exactly what you can do by example. Put your code up on them, even
if you think it's "bad" or the solution could have been implemented better by
someone more technically skilled. The best way to get experience in this industry
is by doing. The best way to do things is to just do them and then let other
people see the results.
Your first programs will be inelegant, but that's okay.
Your first repositories will be bloated or inefficient, but that's okay.
Nobody expects perfection out of the gate, and honestly even for skilled experts
perfection is probably too high of a bar. We're human. We make mistakes. Our job
is to turn the results of these mistakes into the products and services that
people rely on.
## You Don't Need 100% Of The Job Requirements
Many companies put job requirements as soft guidelines, not hard ones. It's easy
to see requirements for jobs like this:
> Applicants must have:
>
> - 1 year managing a distributed Flopnax system
> - Experience using Rilkef across multiple regions
> - Ropjar, HTML/CSS
and feel really disheartened. That "must" there seldom actually is a hard
requirement. Many companies will be willing to hire someone at a junior
level. You can learn the skills you're missing as a natural part of doing your job.
There's support structures at nearly every company for things like this. You
don't need to be perfect out of the gate.
## Interviews
This one is a bit of a weird one to give advice for. Each company ends up having
their own interviewing style, and even then individual interviewers have their
own views on how to do it. My advice here is trying to be as generic as possible.
### Know the Things You Have Listed on Your Resume
If you say you know how to use a language, brush up on that language. If you say
you know how to use a tool, be able to explain to someone what that tool does and
why people should care about it.
Don't misrepresent your skills on your resume either. It's similar to lying. It's
also a good idea to go back and prune out skills you don't feel as fresh with over
time.
### Be Yourself
It's tempting to put on a persona or try to present yourself as larger than life.
Resist this temptation. They want to see _you_, not a caricature of yourself. It's
scary to do interviews at times. It feels like you are being judged. It's not
personal. Everything in interviews is aimed at making the best decision for the
company.
Also, don't be afraid to say you don't know things. You don't need to have API
documentation memorized. They aren't looking for that. API documentation will be
available to you while you write code at your job. Interviews are usually there
to help the interviewer verify that you know how to break larger problems into
more understandable chunks. Ask questions. Ensure you understand what they are
and are not asking you. Nearly every interview that I've had that's resulted in
a job offer has had me ask questions about what they are asking.
### "Do You Have Any Questions?"
A few things I've found work really well for this:
- "Do you know of anyone who left this company and then came back?"
- "What is your favorite part of your workday?"
- "What is your least favorite part of your workday?"
- "Do postmortems have formal blame as a part of the process?"
- "Does code get reviewed before it ships into production?"
- "Are there any employee run interest groups for things like mindfulness?"
And then finally as your last question:
- "What are the next steps?"
This question in particular tends to signal interest in the person interviewing
you. I don't completely understand why, but it seems to be one of the most
useful questions to ask; especially with initial interviews with hiring managers
or human resources.
### Meditate Before Interviews
Even if it's just [watching your breath for 5 minutes](https://when-then-zen.christine.website/meditation/anapana).
I find that doing this helps reset the mind and reduces subjective experiences of
anxiety.
## Persistence
Getting the first few real jobs is tough, but after you get a year or two at any
employer things get a lot easier. Your first job is going to give you a lot of
experience. You are going to learn things about things you didn't even think
would be possible to learn about. People, processes and the like are going to
surprise or shock you.
At the end of the day though, it's just a job. It's impermanent. You might not
fit in. You might have to find another. Don't panic about it, even though it's
really, really tempting to. You can always find another job.
---
I hope this is able to help. Thanks for reading this and be well.

View File

@ -1,115 +0,0 @@
---
title: Change
date: 2021-10-20
tags:
- enby
- trans
---
[Content warning: this post talks about the transgender/nonbinary coming out of
the closet experience. If you are not in the best headspace for that, feel free
to skip this post until you're in a better headspace. This post isn't going to
randomly vanish. It will be there when you're ready. There are some descriptions
of subconscious body functions and bodily fluids that may gross some people
out.](conversation://Cadey/enby)
Coming out as transgender/nonbinary to someone you care about is one of the most
terrifying things you can do. At least it feels that way; it feels like things
are going to change and you'll lose that person. It can be gut-wrenching,
especially for family.
For me the scariest part of this whole thing has been the change in how people
see me. It can be a huge abrupt difference for some people, and the unknowns in
how people will react to that can make you paralyzed with fear. So, let's look
at change a little.
[Annoyingly, the same kinds of people that get upset about someone changing
their name and pronouns for coming out as transgender/nonbinary are _instantly_
and _immediately_ tolerant of someone changing their last name for marriage and
"get used to it" almost instantly.](conversation://Cadey/facepalm)
Life is a constant change. Stop for a moment right now and feel your body. Feel
how the pressure in your chest changes as your lungs subconsciously inhale and
exhale. If you have a watch, look at the seconds hand (or equivalent digital
display) and watch it tick forward for a bit. Change is constant, yet still
continuously moving forward. Even though the change is happening though,
everything is still roughly the same as it was before. Blood continues to move
through your body, constantly cycling its oxygen with other parts, but it
continues.
Coming out to my parents was one of the most terrifying things I have ever done.
I was nervous beyond belief. I hardly ate that day I hit send. I turned off my
phone after doing it and got lost in a game that I liked playing. The real
reason it was so scary to me though is that I had already tried to come out to
them in the past but I was shut down.
Middle school was rough for me. I don't really remember much of it (other than
they banned high fives for "gang activity"), but that was when my parents found
my diary app. I don't remember why they were going through my laptop (I grew up
in one of _those_ kinds of Jesus freak households), but they found it somehow
and my entries where I was questioning my gender came to their attention and
they confronted me about it. I was not ready at all. I was completely blindsided
by it. That attempt to come out failed and I was put into Christian
"counseling". I was pushed deep back into the closet and I still have trouble
writing down my thoughts in a journal to this day.
So that day I hit "send" on [the
email](https://christine.website/blog/coming-out-2015-12-01) was mortally
terrifying. All that fear from so long ago came raging up to the surface and I
was left in a crying and vulnerable state. However it ended up being a good kind
of cry, the healing kind.
My relationship with my parents (and later my siblings) has deteriorated since,
and not just for religious differences. However, I am fine. I am still healing
and I probably will be healing for a long time and I have accepted that. In
their place, I have found something more powerful: a new family of choice.
[For the parents that read this blog, please do not repeat this kind of
suffering if you can avoid it. I don't want anyone else to suffer the way I have
if I can help to avoid it.](conversation://Cadey/enby)
This was a huge change, but it ended up being for the better. That change was a
tool to help me live a better life surrounded by the people I wanted to be
around as opposed to the people I inherited.
There's an idiom that comes to mind, something that is in the "completely
misunderstood" brand of idioms: "blood is thicker than water".
It's often used by people to emphasize the importance of familial relationships
over friendships or the like (family is the "blood" part of that idiom, and
friendships are the "water" part). The full form of the idiom is closer to this:
> The blood of the covenant is thicker than the water of the womb
This overall sentiment is [commonly interpreted by Christian
scholars](https://www.blueletterbible.org/Comm/murray_andrew/two/two09.cfm) to
have a meaning closer to "the bond between Jesus and someone who chooses to
believe in Him is stronger than the bond between family members", however we can
afford to interpret this differently for the sake of this message.
The bonds you choose are stronger than the bonds you inherited. The bonds I have
with my friends, my husband, my closest companions and all those who I keep
close to me are stronger than the bonds with my family will ever be.
In a way, coming out as transgender to people and that level of associated
change has become a _tool_ to help me figure out who really cares about me and
who I should bother keeping around me. It's my life. I can live it as honestly,
openly and real as I want to. I don't have to justify it to anyone but myself.
You don't really have to justify this level of change to anyone else but
yourself either. It'll let you know who your real friends are, for better and
for worse. You don't have to keep anyone around you that can't accept you for
who you are. Your family of choice will _always_ have stronger bonds than your
family of origin.
<center>
<picture>
<source srcset="/static/blog/change/the-dude-dither.avif" type="image/avif">
<source srcset="/static/blog/change/the-dude-dither.webp" type="image/webp">
<img src="/static/blog/change/the-dude-dither.png" alt="The dude abides">
</picture>
</center>
The Dude abides, so will you. Change as a result of coming out can be a good
thing as much as it can be a bad thing. Don't let inherent negativity biases
blind you to that.

View File

@ -2,9 +2,10 @@
title: "Chaos Magick Debugging" title: "Chaos Magick Debugging"
date: 2018-11-13 date: 2018-11-13
thanks: CelestialBoon thanks: CelestialBoon
series: magick
--- ---
# Chaos Magick Debugging
Belief is a powerful thing. Beliefs are the foundations of everyone's points of view, and the way they interpret reality. Belief is what allows people to create the greatest marvels of technology, the most wondrous worlds of imagination, and the most oppressive religions. Belief is a powerful thing. Beliefs are the foundations of everyone's points of view, and the way they interpret reality. Belief is what allows people to create the greatest marvels of technology, the most wondrous worlds of imagination, and the most oppressive religions.
But at the core, what *is* a belief, other than the sheer tautology of *what a person believes*? But at the core, what *is* a belief, other than the sheer tautology of *what a person believes*?

View File

@ -1,71 +0,0 @@
---
title: Chicken Stir Fry
date: 2020-04-13
series: recipes
tags:
- instant-pot
- pan
- rice
- garlic
---
This recipe was made up by me and my fiancé. We just sorta winged it every time
we made it until we found something that was easy to cook and tasty. We make
this every week or so.
## Recipe
### Ingredients
- Pack of 4 chicken breasts
- A fair amount of Montreal seasoning (garlic, onion, salt, oregano)
- 3 cups basmati rice
- 3.75 cups water
- 1/4th bag of frozen stir fry vegetables
- Avocado/coconut oil
- Standard frying pan
- Standard chef's knife
- Standard 11x14 cutting board
- Two metal bowls
- Instant Pot
- Spatula
### Seasoning
Put the seasoning in one of the bowls and unwrap the plastic around the chicken
breasts. Take each chicken breast out of the package (you may need to cut them
free of each other, use a sharp knife for that) and rub all sides of it around in
the seasoning.
Put these into the other metal bowl and when you've done all four, cover with
plastic wrap and refrigerate for about 5-6 hours.
Doing this helps to have the chicken soak up the flavor of the seasoning so it
tastes better when you cook it.
### Cooking
Slice two chicken breasts up kinda like
[this](https://www.seriouseats.com/2014/04/knife-skills-how-to-slice-chicken-breast-for-stir-fries.html)
and then transfer to the heated pan with oil in it. Cook those and flip them
every few minutes until you've cooked everything all the way through (random
sampling by cutting a bit of chicken in half with the spatula and seeing if it's
overly juicy or not is a good way to tell, or, if you have a food thermometer,
cook to 165 degrees Fahrenheit or 75 degrees Celsius). Put this chicken into a
plastic container for use in other meals (it goes really well on sandwiches).
Then repeat the slicing and cooking for the last two chicken breasts. However,
this time put _half_ of the chicken into the plastic container you used before
(about one chicken breast worth in total, it doesn't have to be exact). At the
same time as the second round of chicken is cooking, put about 3 cups of rice
and 3.75 cups of water into the instant pot; then seal it and set it to manual
for 4 minutes.
Dump frozen vegetables on top of the remainder of the chicken and stir until the
vegetables are warm.
### Serving
Serve the stir fry hot on a bed of rice.
![image of the food](/static/blog/chicken-stir-fry.jpg)

View File

@ -1,10 +1,11 @@
---
title: CinemaQuestria Orchestration
date: 2015-03-13
tags:
- cinemaquestria
---
CinemaQuestria Orchestration
============================
### Or: Continuous Defenstration in a Container-based Ecosystem
I've been a core member of the staff for [CinemaQuestria](http://cinemaquestria.com)

View File

@ -1,38 +0,0 @@
---
title: "Book Release: Closed Projects"
date: 2022-03-24
tags:
- shortstory
- fiction
- retrospective
---
Closed Projects is a retelling of [a four-part series](/blog/series/freenode) on
my blog where I process the events that led to the death of freenode, the
largest IRC network for peer-directed projects.
freenode was the reason that I managed to get into tech. Without freenode I
would be a vastly different person today. The death of freenode last year
brought up many lingering memories and emotions. I talk about my history with
freenode, the events that happened in the wake of its death and how a new
community took its place.
Each part of this story is written by weaving the narrative of the last
caretaker trying to protect the discussion halls against the darkness, and
things continue as the darkness gets more crafty and clever.
I have included versions of this for all common eBook reader devices and
formats. I have tested this on my Kindle Oasis and iPad Pro, but I see no reason
that at least one of these formats wouldn't work for you.
<iframe frameborder="0" src="https://itch.io/embed/1454153?dark=true" width="552" height="167"><a href="https://withinstudios.itch.io/closed-projects">Closed Projects by Within</a></iframe>
If you are a [Patreon supporter](https://patreon.com/cadey), you can get this
for free by clicking [this
link](https://withinstudios.itch.io/closed-projects/patreon-access).
If you are a part of a marginalized group and cannot afford this but want to
read it anyways, please [contact me](/contact).
Thank you for reading what I put out to the world. I only hope I can continue to
create and inspire you.

View File

@ -1,478 +0,0 @@
---
title: My Magical Adventure With cloud-init
date: 2021-06-04
---
> "If I had a world of my own, everything would be nonsense. Nothing would be
> what it is, because everything would be what it isn't. And contrary wise, what
> is, it wouldn't be. And what it wouldn't be, it would. You see?"
- The Mad Hatter, Alice's Adventures in Wonderland
The modern cloud is a magical experience. You take a template, give it some SSH
keys and maybe some user-data and then you have a server running somewhere. This
is all powered by a tool called [cloud-init](https://cloud-init.io/). cloud-init
is the most useful in actual datacenters with proper metadata services, but what
if you aren't in a datacenter with a metadata service?
Recently I wanted to test a
[script](https://github.com/tailscale/tailscale/blob/main/scripts/installer.sh)
a coworker wrote that allows users to automatically install Tailscale on every
distro and version Tailscale supports. I wanted to try and avoid having to
install each version of every distribution manually, so I started looking for
options.
[This may seem like overkill (and at some level it probably is), however as a
side effect of going through this song and dance you can spin up a bunch of VMs
pretty easily. <br /> <center> <blockquote class="twitter-tweet"><p lang="und"
dir="ltr"><a
href="https://t.co/yays27Wmes">pic.twitter.com/yays27Wmes</a></p>&mdash; Xe from
Within (@theprincessxena) <a
href="https://twitter.com/theprincessxena/status/1394265890494062593?ref_src=twsrc%5Etfw">May
17, 2021</a></blockquote> <script async
src="https://platform.twitter.com/widgets.js"
charset="utf-8"></script> <center>](conversation://Mara/hacker)
cloud-init has a feature called the
[NoCloud](https://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html)
data source. To use it, you need to write two yaml files, put them into a
specially named ISO file and then mount it to the virtual machine. cloud-init
will then pick up your configuration data and apply it.
[Wait...really? What.](conversation://Mara/hmm)
[Yes, really.](conversation://Cadey/coffee)
Let's make an [Amazon Linux
2](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/amazon-linux-2-virtual-machine.html)
virtual machine as an example. Amazon offers their Linux distribution for
download so you can run it on-premises (I don't really know why you'd want to do
this outside of testing stuff on Amazon Linux). In this blog we use KVM, so keep
that in mind when you set things up yourself.
First you need to make a `meta-data` file; this will contain the VM's hostname
and the "instance ID" (this makes sense in cloud contexts however you can use
whatever you want):
```yaml
local-hostname: mayhem
instance-id: 31337
```
[You can configure networking settings here, but our VM is going to get an
address over DHCP so you don't really need to care about that in this case](conversation://Mara/hacker)
Next you need to make a `user-data` file; this will actually configure your VM:
```yaml
#cloud-config
#vim:syntax=yaml
cloud_config_modules:
- runcmd
cloud_final_modules:
- [users-groups, always]
- [scripts-user, once-per-instance]
users:
- name: xe
groups: [ wheel ]
sudo: [ "ALL=(ALL) NOPASSWD:ALL" ]
shell: /bin/bash
ssh-authorized-keys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPYr9hiLtDHgd6lZDgQMkJzvYeAXmePOrgFaWHAjJvNU cadey@ontos
write_files:
- path: /etc/cloud/cloud.cfg.d/80_disable_network_after_firstboot.cfg
content: |
# Disable network configuration after first boot
network:
config: disabled
```
Please make sure to change the username and swap out the SSH key as needed,
unless you want to get locked out of your VM. For more information about what
you can do from cloud-init, see the list of modules
[here](http://cloudinit.readthedocs.io/en/latest/topics/modules.html).
Now that you have the two yaml files you can make the seed image with this
command (Linux):
```console
$ genisoimage -output seed.iso \
-volid cidata \
-joliet \
-rock \
user-data meta-data
```
[In NixOS you may need to run it inside nix-shell: `nix-shell -p
cdrkit`.](conversation://Mara/hacker)
Or this command (macOS):
```console
$ hdiutil makehybrid \
-o seed.iso \
-hfs \
-joliet \
-iso \
-default-volume-name cidata \
user-data meta-data
```
Now you can download the KVM image from that [Amazon Linux User Guide page from
earlier](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/amazon-linux-2-virtual-machine.html)
and then put it somewhere safe. This image will be written into a [ZFS
zvol](https://pthree.org/2012/12/21/zfs-administration-part-xiv-zvols/). To find
out how big the zvol needs to be, you can use `qemu-img info`:
```console
$ qemu-img info amzn2-kvm-2.0.20210427.0-x86_64.xfs.gpt.qcow2
image: amzn2-kvm-2.0.20210427.0-x86_64.xfs.gpt.qcow2
file format: qcow2
virtual size: 25 GiB (26843545600 bytes)
disk size: 410 MiB
cluster_size: 65536
Format specific information:
compat: 1.1
compression type: zlib
lazy refcounts: false
refcount bits: 16
corrupt: false
extended l2: false
```
The virtual disk image is 25 gigabytes, so you can create it with a command like
this:
```console
$ sudo zfs create -V 25G rpool/safe/vms/mayhem
```
Then you use `qemu-img convert` to copy the image into the zvol:
```console
$ sudo qemu-img convert \
-O raw \
amzn2-kvm-2.0.20210427.0-x86_64.xfs.gpt.qcow2 \
/dev/zvol/rpool/safe/vms/mayhem
```
If you don't use ZFS you can make a layered disk using `qemu-img create`:
```console
$ qemu-img create \
-f qcow2 \
-o backing_file=amzn2-kvm-2.0.20210427.0-x86_64.xfs.gpt.qcow2 \
mayhem.qcow2
```
Open up virt-manager and then create a new virtual machine. Make sure you select
"Manual install".
<center>
![The first step of the "create a new virtual machine" wizard in virt-manager
with "manual install"
selected](https://cdn.christine.website/file/christine-static/blog/20210604_06h43m27s_grim.png)
</center>
virt-manager will then ask you what OS the virtual machine is running so it can
load some known working defaults. It doesn't have an option for Amazon Linux,
but it's kinda sorta like CentOS 7, so enter CentOS 7 here.
<center>
![The second step of the "create a new virtual machine" wizard in virt-manager
with "CentOS 7" selected as the OS the virtual machine will be
running](https://cdn.christine.website/file/christine-static/blog/20210604_06h45m35s_grim.png)
</center>
The default amounts of RAM and CPU are fine, but you can choose other options if
you have more restrictive hardware requirements.
<center>
![The third step of the "create a new virtual machine" wizard in virt-manager
with 1024 MB of ram and 2 virtual CPU cores
selected](https://cdn.christine.website/file/christine-static/blog/20210604_06h50m09s_grim.png)
</center>
Now you need to select the storage path for the VM. virt-manager will helpfully
offer to create a new virtual disk for you. You already made the disk with the
above steps, so enter in `/dev/zvol/rpool/safe/vms/mayhem` (or the path to your
custom layered qcow2 from the above `qemu-img create` command) as the disk
location.
<center>
![The fourth step of the "create a new virtual machine" wizard in virt-manager
with `/dev/zvol/rpool/safe/vms/mayhem` selected as the path to the
disk](https://cdn.christine.website/file/christine-static/blog/20210604_06h53m58s_grim.png)
</center>
Finally, name the VM and then choose "Customize configuration before install" so
you can mount the seed data.
<center>
![The last step of the "create a new virtual machine" wizard in virt-manager,
setting the virtual machine name to "mayhem" and indicating that you want to
customize configuration before
installation](https://cdn.christine.website/file/christine-static/blog/20210604_06h56m54s_grim.png)
</center>
Click on the "Add Hardware" button in the lower left corner of the configuration
window.
<center>
![](https://cdn.christine.website/file/christine-static/blog/20210604_06h58m53s_grim.png)
</center>
Make a new CDROM storage device that points to your seed image:
<center>
![](https://cdn.christine.website/file/christine-static/blog/20210604_07h01m24s_grim.png)
</center>
And then click "Begin Installation". The virtual machine will be created and its
graphical console will open. Click on the info tab and then the NIC device. The
VM's IP address will be listed:
<center>
![](https://cdn.christine.website/file/christine-static/blog/20210604_07h05m28s_grim.png)
</center>
Now SSH into the VM:
```console
$ ssh xe@192.168.122.122
The authenticity of host '192.168.122.122 (192.168.122.122)' can't be established.
ED25519 key fingerprint is SHA256:TP7dWLkHOixx5tr78qn0yvDQKttH0yWz6IBvbadEqcs.
This key is not known by any other names
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
Warning: Permanently added '192.168.122.122' (ED25519) to the list of known hosts.
__| __|_ )
_| ( / Amazon Linux 2 AMI
___|\___|___|
https://aws.amazon.com/amazon-linux-2/
8 package(s) needed for security, out of 17 available
Run "sudo yum update" to apply all updates.
[xe@mayhem ~]$
```
And voila! A new virtual machine that you can do whatever you want with, just
like you would any other server.
[Do you really need to make an ISO file for this? Can't I just use HTTP like <a
href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html">the
AWS metadata service</a>?](conversation://Mara/hmm)
Yes and no. You can have the configuration loaded over HTTP/S, but without
special network configuration you won't be able to have `http://169.254.169.254`
work like the AWS metadata service without a fair bit of effort. Either way, you
are going to have to edit the virtual machine's XML though.
[XML? Why is XML involved?](conversation://Mara/wat)
virt-manager is a frontend to [libvirt](https://libvirt.org/index.html). libvirt
uses XML to describe virtual machines.
[Here](https://gist.github.com/Xe/f870ebb2d9dce0929a35a4ba347cbda3) is the XML
used to describe the VM you made earlier. This looks like a lot (because frankly
it is a lot, computers are complicated), however this is a lot more manageable
than the equivalent qemu flags.
[What do the qemu flags look like?](conversation://Mara/hmm)
[Like
this](https://gist.githubusercontent.com/Xe/2eba35ec6cbd54becf9fca02f6d69f0b/raw/89d68424c0ae26333d798bd9bd6a224dfec844d7/qemu%2520flags.txt).
It is kind of a mess that I would rather have something made by people smarter
than me take care of.
To enable cloud-init to load over HTTP, you are going to have to add the qemu XML
namespace to mayhem's configuration. At the top you should see a line that looks
like this:
```xml
<domain type="kvm">
```
Replace it with one that looks like this:
```xml
<domain xmlns:qemu="http://libvirt.org/schemas/domain/qemu/1.0" type="kvm">
```
This will allow you to set the cloud-init seed location information using a
[SMBIOS value](https://en.wikipedia.org/wiki/System_Management_BIOS). To enable
this, add the following to the _bottom_ of your XML file, just before the
closing `</domain>`:
```xml
<qemu:commandline>
<qemu:arg value="-smbios"/>
<qemu:arg value="type=1,serial=ds=nocloud-net;h=mayhem;s=http://10.77.2.22:8000/mayhem/"/>
</qemu:commandline>
```
Make sure the data is actually being served on that address. Here's a nix-shell
python one-liner HTTP server:
```console
$ nix-shell -p python3 --run 'python -m http.server 8000'
```
Then you will need to either load the base image back into the zvol or recreate
the qcow2 file to reset the VM back to its default state.
Reboot the VM and wait for it to connect to your "metadata server":
```console
192.168.122.122 - - [04/Jun/2021 11:41:10] "GET /mayhem/meta-data HTTP/1.1" 200 -
192.168.122.122 - - [04/Jun/2021 11:41:10] "GET /mayhem/user-data HTTP/1.1" 200 -
```
Then you can SSH into it like normal:
```console
$ ssh xe@192.168.122.122
The authenticity of host '192.168.122.122 (192.168.122.122)' can't be established.
ED25519 key fingerprint is SHA256:eJRjDsvnVrXfntVtNVN6N+JdakaA+dvGKWWQP5OFkeA.
This key is not known by any other names
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
Warning: Permanently added '192.168.122.122' (ED25519) to the list of known hosts.
__| __|_ )
_| ( / Amazon Linux 2 AMI
___|\___|___|
https://aws.amazon.com/amazon-linux-2/
8 package(s) needed for security, out of 17 available
Run "sudo yum update" to apply all updates.
[xe@mayhem ~]$
```
[Can I choose other distros for this?](conversation://Mara/hmm)
Yep! Most distributions offer cloud-init enabled images. They may be hard to
find, but they do exist. Here are some links that will help you with common
distros:
- [Arch Linux](https://mirror.pkgbuild.com/images/) (use the `cloudimg` ones)
- [CentOS 7](https://cloud.centos.org/centos/7/images/) (use the `GenericCloud`
one)
- [CentOS 8](https://cloud.centos.org/centos/8-stream/x86_64/images/) (use the
`GenericCloud` one)
- [Debian 9](http://cloud.debian.org/images/cloud/OpenStack/9.13.22-20210531/)
(use the `openstack` one)
- [Debian 10](http://cloud.debian.org/images/cloud/buster/20210329-591/) (use
the `generic` one)
- [Debian 11](http://cloud.debian.org/images/cloud/bullseye/daily/) (use the
`generic` one)
- [Fedora 34](https://alt.fedoraproject.org/cloud/) (use the Openstack image)
- [OpenSUSE Leap
15.2](https://download.opensuse.org/repositories/Cloud:/Images:/Leap_15.2/images/)
(use the `OpenStack` image)
- [OpenSUSE Leap 15.3](https://get.opensuse.org/leap/) (use the JeOS one labeled
`OpenStack-Cloud`)
- [OpenSUSE Tumbleweed](https://download.opensuse.org/tumbleweed/appliances/)
(use the JeOS one labeled `Openstack-Cloud`)
- [Ubuntu](https://cloud-images.ubuntu.com/) (use the `server-cloudimg` image
for your version of choice)
In general, look for images that are compatible with OpenStack. OpenStack uses
cloud-init to configure virtual machines and the NoCloud data source you're using
ships by default. It usually works out, except for cases like OpenSUSE Leap
15.1. With Leap 15.1 you have to [pretend to be OpenStack a bit
more](https://github.com/tailscale/tailscale/blob/aa6abc98f30df67a0d86698b77932d4d9cc45ac0/tstest/integration/vms/opensuse_leap_15_1_test.go)
for some reason.
[What if I need to template the userdata file?](conversation://Mara/hmm)
[You really should avoid doing this if possible. Templating yaml is a delicate
process fraught with danger. The error conditions in things like Kubernetes are
that it does the wrong thing and you need to replace the service. The error
condition with this is that you lose access to your
server.](conversation://Cadey/facepalm)
[Let's say that Facts and Circumstances™ made me have to template
it.](conversation://Mara/happy)
<center>
<picture>
<source srcset="https://cdn.christine.website/file/christine-static/stickers/cadey/percussive-maintenance.avif" type="image/avif">
<source srcset="https://cdn.christine.website/file/christine-static/stickers/cadey/percussive-maintenance.webp" type="image/webp">
<img src="https://cdn.christine.website/file/christine-static/stickers/cadey/percussive-maintenance.png" alt="Cadey is percussive-maintenance">
</picture>
</center>
When you are templating yaml, you have to be really careful. It is very easy to
incur [the wrath of Norway and
Ontario](https://hitchdev.com/strictyaml/why/implicit-typing-removed/) by
accident with yaml. Here are some rules of thumb (unfortunately gained from
experience) to keep in mind:
- yaml has implicit typing, quote everything to be safe.
- ensure that every value you pass in is yaml-safe
- ensure that the indentation matches for every value
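As a small sketch of what that quoting looks like in practice (the `{{ }}` placeholders stand in for whatever templating syntax you end up with, and all of the values here are made up):
```yaml
#cloud-config
hostname: "{{hostname}}" # quoted so values like "no" or "1e2" stay strings
users:
  - name: "{{username}}"
    ssh_authorized_keys:
      - "{{ssh_public_key}}"
```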
Something very important is to test the templating on a virtual machine image
that you have a back door into. Otherwise you will be locked out. You can
generally hack around it by adding `init=/bin/sh` in your kernel command line
and changing your password from there.
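From that bare `init=/bin/sh` shell the dance usually looks something like this (a sketch; the username is whatever your user-data was supposed to set up):
```console
# mount -o remount,rw /
# passwd xe
# sync
# reboot -f
```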
When you mess it up you will need to get into the VM somehow and do one of a few
things:
1. Run `cloud-init collect-logs` to generate a log tarball that you can export
to your host machine and dig into from there
2. Look through the system journal for any errors
3. Look in `/var/log` for files that begin with `cloud-init` and page through
them
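Concretely, those three steps tend to look something like this once you have a shell in the VM (a sketch):
```console
$ sudo cloud-init collect-logs
$ journalctl -b -u cloud-init
$ ls /var/log/cloud-init*
```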
If all else fails, start googling. If you are running commands against a VM with
the `runcmd` feature of cloud-init, I'd suggest going through the steps on a
manually installed virtual machine image at least once so you can be sure the
steps work. I have lost 4 hours to this. Also keep in mind that in the
context `runcmd` runs in, there is no standard input hooked up, so you will
need to pass `-y` everywhere.
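For example, on the Amazon Linux image from earlier, a `runcmd` stanza might look something like this (a sketch; the package choice is arbitrary):
```yaml
#cloud-config
runcmd:
  - yum update -y
  # -y because runcmd has no standard input to answer prompts with
  - yum install -y htop
```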
If you want a simple Alpine Linux image to test with, look
[here](https://github.com/Xe/alpine-image) for the Alpine Linux images I test
with. You can download this image from
[here](https://xena.greedo.xeserv.us/pkg/alpine/img/alpine-edge-2021-05-18-cloud-init-within.qcow2)
in case you trust that I wouldn't put malware in that image and don't want to
make your own.
---
In the future I plan to use cloud-init _extensively_ within my [new homelab
cluster](https://twitter.com/theprincessxena/status/1400592778309115905). I have
plans to make a custom VM management service I'm calling
[waifud](https://github.com/Xe/waifud). I will write more about that once I have
written the software. I currently have a minimum viable prototype of this tool
called `mkvm` that I'm using today without any issues. I will also be writing up
how I built the cluster and installed NixOS on all the systems in a future
article.
cloud-init is an incredible achievement. It has its warts, but the fact that it
is used in so many places makes configuring virtual machines so much easier.
It [even works on Windows!](https://cloudbase.it/cloudbase-init/) As much as I
complain about it in this post, life would be so much worse without it. It
allows me to use the magic of the cloud in my local virtual machines so I can
get better use out of my hardware.

View File

@ -1,10 +1,10 @@
---
title: Coding on an iPad
date: 2018-04-14
tags:
- ipad
---
# Coding on an iPad
As people notice, I am an avid user of Emacs for most of my professional and
personal coding. I have things set up such that the center of my development
environment is a shell (eshell), and most of my interactions are with emacs

View File

@ -1,86 +0,0 @@
---
title: Colemak Layout - First Week
date: 2020-08-22
series: colemak
---
A week ago I posted the last post in this series where I announced I was going
all colemak all the time. I have not been measuring words per minute (to avoid
psyching myself out), but so far my typing speed has gone from intolerably slow
to manageably slow. I have been only dipping back into qwerty for two main
things:
1. Passwords, specifically the ones I have in muscle memory
2. Coding at work that needs to be done fast
Other than that, everything else has been in colemak. I have written DnD-style
game notes, hacked at my own "Linux distro", started a few QMK keymaps and more
all via colemak.
Here are some of the lessons I've learned:
## Let Your Coworkers Know You Are Going to Be Slow
This kind of thing is a long-term investment. In the short term, your
productivity is going to crash through the floor. This will feel frustrating. It
took me an entire workday to implement and test an HTTP handler/client in
Go. You will be making weird typos. Let your coworkers know so they don't jump
to the wrong conclusions too quickly.
Also, this goes without saying, but don't do this kind of change during crunch
time. That's a bit of a dick move.
## Print Out the Layout
I have the layout printed and taped to my monitor and iPad stand. This helps a
lot. Instead of looking at the keyboard, I look at the layout image and let my
fingers drift into position.
I also have a blank keyboard at my desk; this helps because I can't look at the
keycaps and become confused (however, this has backfired when typing numbers,
lol). This keyboard has Cherry MX Blues though, which means it can be loud when
I get to typing up a storm.
## Have Friends Ask You What Layout You Are Using
Something that works for me is to have friends ask me what keyboard layout I am
using, so I can be mindful of the change. I have a few people asking me that on
the regular, so I can be accountable to them and myself.
## macOS and iPadOS have Colemak Out of the Box
The Settings app lets you configure Colemak input without having to jailbreak or
install a custom keyboard layout. Take advantage of this.
Someone has also created a Colemak package for Windows that includes an
IA-64 (Itanium) binary. It was last updated in 2004, and still works without
hassle on Windows 10. It was the first time I've ever seen an IA-64 Windows
binary in the wild!
## Relearn How To Type Your Passwords
I type passwords from muscle memory. I have had to rediscover what they actually
are so I can relearn how to type them.
---
The colemak experiment continues. I also have a [ZSA
Moonlander](https://www.zsa.io/moonlander/) and the kit for a
[GergoPlex](https://www.gboards.ca/product/gergoplex) coming in the mail. Both
of these run [QMK](https://qmk.fm), which allows me to fully program them with a
rich macro engine. Here are a few of the macros I plan to use:
```c
// Programming
SUBS(ifErr, "if err != nil {\n\t\n}", KC_E, KC_I)
SUBS(goTest, "go test ./...\n", KC_G, KC_T)
SUBS(cargoTest, "cargo test\n", KC_C, KC_T)
```
This will autotype a few common things when I press the keys "ei", "gt", or "ct"
at the same time. I plan to add a few more as things turn up so I can more
quickly type common idioms or commands to save me time. The `if err != nil`
combination started as a joke, but I bet it will end up being incredibly
valuable.
Be well, take care of your hands.

View File

@ -1,25 +0,0 @@
---
title: Colemak Layout - Beginning
date: 2020-08-15
series: colemak
---
I write a lot. On average I write a few kilobytes of text per day. This has been
adding up and is taking a huge toll on my hands, especially considering the
Covid situation. Something needs to change. I've been working on learning a new
keyboard layout: [Colemak](https://colemak.com).
This post will be shorter than most of my posts because I'm writing it with
Colemak enabled on my iPad. Writing this is painfully slow at the moment. My
sentences are short and choppy because those are easier to type.
I also have a [ZSA Moonlander](https://www.zsa.io/moonlander/) on the way, it
should be here in October or November. I will also be sure to write about that
once I get it in the mail.
So far, I have about 30 words per minute on the homerow, but once I go off the
homerow the speed tanks to less than about five.
However, I am making progress!
Be well all, don't stress your hands out.

View File

@ -1,10 +1,11 @@
---
title: Coming Out
date: 2015-12-01
tags:
- personal
---
Coming Out
==========
I'd like to bring up something that has been hanging over my head for a
long time. This is something I did try (and fail) to properly express way
back in middle school, but now I'd like to get it all of my chest and let

View File

@ -1,80 +0,0 @@
---
title: Compile Stress Test
date: 2019-10-03
tags:
- rust
---
This is an experiment in blogging. I am going to be putting my tweets and select replies one after another without commentary.
<center>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">shitty synthetic benchmark idea: how long it takes for a compiler to handle a main function with 1.2 million instances of printf(&quot;hello, world!\n&quot;) or similar</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179467293232902144?ref_src=twsrc%5Etfw">October 2, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">fun fact, you need an AWS x1.16xlarge instance to compile 1.2 million lines of rust source code</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179524224479825921?ref_src=twsrc%5Etfw">October 2, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">oh god that might not be enough</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179526505505939458?ref_src=twsrc%5Etfw">October 2, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">oh god, is that what X1 is for???<br><br>My wallet just cringed.</p>&mdash; snake enchantress (@AstraLuma) <a href="https://twitter.com/AstraLuma/status/1179529430890405888?ref_src=twsrc%5Etfw">October 2, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">They have been now <a href="https://t.co/o5vMKx583C">https://t.co/o5vMKx583C</a></p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179527054989111296?ref_src=twsrc%5Etfw">October 2, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="und" dir="ltr"> <a href="https://t.co/le8IFrFbQT">pic.twitter.com/le8IFrFbQT</a></p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179527358388326401?ref_src=twsrc%5Etfw">October 2, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">TFW rust uses so much ram an x1.16xlarge can&#39;t compile hello world</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179527672579465218?ref_src=twsrc%5Etfw">October 2, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">Let&#39;s go x1e.32xlarge!</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179533410165018627?ref_src=twsrc%5Etfw">October 2, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">hello world</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179533767284809729?ref_src=twsrc%5Etfw">October 2, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="ca" dir="ltr">Code generators</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179533982641340416?ref_src=twsrc%5Etfw">October 2, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="und" dir="ltr"> <a href="https://t.co/BwLhk9PIb3">pic.twitter.com/BwLhk9PIb3</a></p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179534490017976321?ref_src=twsrc%5Etfw">October 2, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">Rust can&#39;t match V for compile performance</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179535227376607232?ref_src=twsrc%5Etfw">October 2, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">Finally can run two electron apps.</p>&mdash; Pradeep Gowda 🇮🇳🇺🇸 (@btbytes) <a href="https://twitter.com/btbytes/status/1179539282366734337?ref_src=twsrc%5Etfw">October 2, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="und" dir="ltr"><a href="https://t.co/Ez0t5BLT9i">pic.twitter.com/Ez0t5BLT9i</a></p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179542623687790595?ref_src=twsrc%5Etfw">October 2, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">It stopped growing at 2.66 TB of ram!</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179544161973870592?ref_src=twsrc%5Etfw">October 2, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">overheard: &quot;im paying this computer minimum wage to compile this god damn rust program&quot;</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179545128366743552?ref_src=twsrc%5Etfw">October 2, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">The guy who&#39;s paying for the instance in slack said it</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179546147892928515?ref_src=twsrc%5Etfw">October 2, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">you magnificent cursed unholy monster</p>&mdash; Astrid 🦋 (@floofstrid) <a href="https://twitter.com/floofstrid/status/1179546307972734976?ref_src=twsrc%5Etfw">October 2, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">Just a simple rust program, only 9.88090622052428380708467040696522138519972064500917... × 10^361235 possible conditions</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179546397084913664?ref_src=twsrc%5Etfw">October 2, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">oh god it&#39;s still going <a href="https://t.co/SIZJBFTDHN">pic.twitter.com/SIZJBFTDHN</a></p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179552296662962180?ref_src=twsrc%5Etfw">October 3, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">Normal couples: watch tv together or something<br><br>me and my fiancé: watch someone try to compile a 1.2 million line of code rust function over slack</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179555137376980993?ref_src=twsrc%5Etfw">October 3, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">I guess <a href="https://twitter.com/hashtag/rust?src=hash&amp;ref_src=twsrc%5Etfw">#rust</a> isn&#39;t production-ready, it can&#39;t compile hello world.</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179557450057474048?ref_src=twsrc%5Etfw">October 3, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">no swap used though <a href="https://t.co/2Qb0pXqIme">pic.twitter.com/2Qb0pXqIme</a></p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179558236313329664?ref_src=twsrc%5Etfw">October 3, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">what the fuck is it doing <a href="https://t.co/2CuVKhUAsF">pic.twitter.com/2CuVKhUAsF</a></p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179559687144005632?ref_src=twsrc%5Etfw">October 3, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">SURVEY SAYS:<br><br>memcpy()!</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179561276898451456?ref_src=twsrc%5Etfw">October 3, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">01:01 (Cadey) dalias: this is basically 1.2 million instances of `printf(&quot;hello, world!\n&quot;);` in void main<br>01:01 (dalias) wtf</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179562382152142848?ref_src=twsrc%5Etfw">October 3, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">AWS x1e.32large</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179559826722037760?ref_src=twsrc%5Etfw">October 3, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">perf</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179553234660315138?ref_src=twsrc%5Etfw">October 3, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">It&#39;s down to 1.36 TB now</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179565615775920134?ref_src=twsrc%5Etfw">October 3, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">&quot;back to 1.47T ram&quot;<br>&quot;oh no&quot;<br>&quot;1.49&quot;<br>&quot;oh it stopped&quot;<br>&quot;it&#39;s definitely still in mir dataflow&quot;</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179569054870319104?ref_src=twsrc%5Etfw">October 3, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">The memory is increasing</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179569164220092417?ref_src=twsrc%5Etfw">October 3, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">&quot;what stage is that?&quot;<br>&quot;denial?&quot;</p>&mdash; Jaden Weiss (@CompuJad) <a href="https://twitter.com/CompuJad/status/1179570411668987904?ref_src=twsrc%5Etfw">October 3, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">Lol it ran out of memory!<br><br>4 TB of ram isn&#39;t enough to build hello world in <a href="https://twitter.com/hashtag/rust?src=hash&amp;ref_src=twsrc%5Etfw">#rust</a>!</p>&mdash; Cadey Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1179582759133761536?ref_src=twsrc%5Etfw">October 3, 2019</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
</center>
Meanwhile the same thing in Go took 5 minutes and I was able to run it on my desktop instead of having to rent a server from AWS.

View File

@ -1,212 +0,0 @@
---
title: "My Convoluted VRChat Google Meet Setup"
date: 2021-02-24
tags:
- oculusquest2
- vr
- vrchat
---
Recently the place I work for sent us all VR headsets. I decided to see what it
would take to use that headset to make my camera show a virtual avatar instead
of my meat body face. This is the story of my journey through chaining things
together to make work meetings a bit more fun by using a 3D avatar instead of
myself in some of them.
[This post uses SVG for diagrams to help explain what's going on here. You may
need to use a browser with SVG support in order to get the best experience with
this article. All the diagrams will be explained after the fact so that people
using screen readers are not left out.](conversation://Mara/hacker)
<center>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">Working at <a href="https://twitter.com/Tailscale?ref_src=twsrc%5Etfw">@Tailscale</a> is great. They sent us all an Oculus Quest 2! <a href="https://t.co/dDhbwO9cFd">pic.twitter.com/dDhbwO9cFd</a></p>&mdash; Cadey A. Ratio (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1362871906597224456?ref_src=twsrc%5Etfw">February 19, 2021</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
</center>
So, let's cover the basics. At a high level, a webcam is just
a video source that may or may not have a microphone attached to it. So in
order to get my avatar to show up in a video call, I need some way to make a
window on my computer act as a webcam. This makes the overall dependency
list look like this (for those of you using screen readers, I will describe
this diagram below):
<center>
![](/static/blog/vrchat/simple_graph.svg)
</center>
VRChat renders to the Desktop which is picked up by OBS which has the ability
to pretend to be a webcam, which is finally picked up by Google Meet.
If the VR headset I got from work were a PC-tethered headset like the Valve
Index or HTC Vive, the next steps would involve full body tracking or something
so that my movements in real life would transfer into movements that my avatar
makes.
However, the VR headset we got sent was an Oculus Quest 2. This is a
_standalone_ VR headset that is basically an Android tablet that you strap
to your face. This makes things a bit more technically challenging because
now you need some way to get the video to the headset and the motion tracking
data from the headset and to the computer at 90 times per second. This requires
a bit more cleverness.
The Oculus desktop software ships with a feature called Oculus Link that allows
you to use a gaming PC to render the VR data to your headset by sending the
video streams over USB. I had to dig around for a compatible cable (It needs to
be a specific kind of USB-3 to USB-C-3 cable with at least 5 gigabits per
second of transfer capacity) since the ones that
[Oculus sells](https://www.oculus.com/accessories/oculus-link/) are both at
least CAD$110 and out of stock anywhere I can find them in Canada. The 0.75
meter long cable I had been using was good enough to get me through the first
couple days of experimenting with VR, but it was clear that a better solution
was needed.
I did some digging and found a bit of software called
[ALVR](https://github.com/alvr-org/alvr#readme) that claimed to let me do VR
from my computer wirelessly. So I set it up on the Quest and on my tower,
which brought up the dependency graph to this:
<center>
![](/static/blog/vrchat/alvr_graph.svg)
</center>
ALVR talks with its counterpart on the Quest. This allows you to stream the VR
video and audio bidirectionally. You also need to bring Virtual Audio Cable
into the setup so that you can hear stuff in the game and so that other people
can hear you using the headset mic. However, ALVR is not available on the Quest
store. You need to install [SideQuest](https://sidequestvr.com/setup-howto) for
that.
[SideQuest lets you sideload Android APK files to your Quest 2 because the
Quest 2 is basically an Android tablet that you strap to your face!](conversation://Mara/happy)
So I used SideQuest to install the ALVR client on my Quest 2, and then I opened
up VRChat and was able to do everything I was able to do with the wired cable.
It worked beautifully until it didn't. I started running into issues with the
video stream just dying. The foveated encoding (tl;dr: attempting to hack the
image quality based on how eyes work so you don't notice the artifacting as
much) could only do so much and it just ended up not working, even when I was
only using it for short amounts of time. There is a lot of WiFi noise in my
apartment or something and it was really interfering with ALVR's stream
encoding. The latency was also noticeable after a bit.
However, when it worked it worked beautifully. I had to upgrade to the nightly
build of ALVR in order to get game audio and the headset mic working, but once
it all worked it was really convenient. I could walk around my apartment and
I'd also walk around in-game.
A friend told me that the best experience I could have with wireless VR using a
Quest 2 would be to use [Virtual Desktop](https://www.vrdesktop.net). Apparently
Virtual Desktop has a
[patch that enables SteamVR support](https://sidequestvr.com/app/16), so I
purchased Virtual Desktop on a whim and decided to give it a go.
Virtual Desktop made ALVR look like a tech demo. All of the latency issues were
solved instantly. Virtual Desktop also made it convenient for me to access my
tower's monitors while in VR, and it has the best typing experience in VR that
I've ever used.
This brings the dependency graph up to this:
<center>
![](/static/blog/vrchat/total_graph.svg)
</center>
Now all that was left was to make the camera view look somewhat like it does
when I'm using my work laptop's webcam to make video calls. I started out by taking a picture of my office from about the angle that my laptop sits at.
I ended up with this image:
<center>
![](https://cdn.christine.website/file/christine-static/blog/2021-02-24-20-20-58.jpg)
</center>
Then with some clever use of the
[Chroma key filter in VRChat](https://docs.vrchat.com/docs/vrchat-201812)
I was able to get some basic compositing of my avatar onto the picture. I
fiddled with the placement of things and then I was able to declare success
with this image I posted to Twitter:
<center>
![](https://cdn.christine.website/file/christine-static/blog/Eu6iR6jXUAQH0iq.jpeg)
</center>
And it worked! I was able to make a call in Google Meet to myself and my
avatar's lip movements synchronized somewhat with the words I was saying. I
had waifu mode enabled!
[The avatar being used there is based on a character from Xenoblade Chronicles
2 named Pneuma.](conversation://Mara/hacker)
However, this setup was really janky. I didn't actually get the angle right
for what my work laptop's camera would see. Everything was offset to
the side and the angle was just wrong in general. I'm also not sure if I
messed up the sizing of the background image in the OBS view; it looks kinda
stretched on my end as I'm writing this post.
So I decided that the best way to get the most accurate angle was to record a
video loop using my work laptop's webcam. After some googling I found
[webcamera.io](https://webcamera.io) which let me record some footage of my
office from my work laptop's camera angle. I got down under the desk (so I was
out of view of the camera) and then recorded a 45 second loop of my office
doing nothing (however the flag was slightly moving in the breeze from the desk
fan).
I also found a VRChat world that claimed to be as optimized as you could
possibly make a VRChat world. It was a blue cube about 30m by 30m. Checking
with SteamVR it brought my frame times down to 3 milliseconds with the stream
camera set up for OBS. It looks like this:
<center>
![Screenshot of the optimized world](https://cdn.christine.website/file/christine-static/blog/154306141_1368071216896631_2989259612329820447_o.jpg)
</center>
It's very minimal. You can make the walls go away if you want, which somehow makes it render faster on my RX5700. I'm not sure what's going on there.
[I'd heckin' love to get a new GPU but until the Bitcoin prices go down we may
be stuck with this setup for a while. An RTX 3070 would really be useful about
now.](conversation://Mara/hacker)
Anyways, with this minimal world incurring very little to no GPU load, I was
free to do video calls all I wanted. I even did a call with the CEO of the
company I work for with a setup like this. It was fun.
Now I had everything set up. I can pop on the headset, load up the world, open
OBS, VRChat, Virtual Desktop and get everything set up in about 5 minutes at
worst. Then I can use the seeing your desktop side of Virtual Desktop to
actually watch the meeting and be able to see screen sharing. They can hear me
because Virtual Desktop pipes the headset microphone audio back to my tower,
and the meeting audio comes over my headphones.
Also at some point I needed to bring AutoHotKey into the mix, so I borrowed
this AutoHotKey script from [SuperUser](https://superuser.com/a/429845) to
resize the VRChat window so that it would fit perfectly into the OBS view:
```ahk
#=:: ; [Win]+[=]
WinGet, window, ID, A
InputBox, width, Resize, Width:, , 140, 130
InputBox, height, Resize, Height:, , 140, 130
WinMove, ahk_id %window%, , , , width, height
return
```
Making the VRChat window smaller also helped with the frame times, because it
needed to render less detail per frame. This helped push the framerate
comfortably above 72 FPS in my VR view.
That is how I get a 3d avatar to show up instead of pictures of the meat golem
I am cursed inside of for work meetings. I will also use this for streaming
coding in the future, so you can all witness the power of a VTube coding stream
where I write Rust or something.

View File

@ -1,34 +0,0 @@
---
title: COVID Burnout
date: 2021-09-25
---
NOTE: This was written out in
[longhand](https://twitter.com/theprincessxena/status/1441842150824718337?s=21)
in my diary. This post was converted to text using iPadOS 15's handwriting to
text recognition. I hope I have cleared up all of the major errors in the
conversion. My handwriting is horrible.
---
I am an introvert. I usually spend a lot of time in my cave. Most of my work was
already done remotely. When I first found out about the COVID-19 pandemic, I
thought there would be at most 4-8 weeks of hardcore lockdown and then it would
die out. Then life would go back to normal and I would be able to see my friends
at conventions during the summer.
As of the time of writing this post, it is currently the 84th week of COVID
being a major presence in how I handle daily life. I am exhausted. I was
scheduled to give talks at two conventions that were canceled, and meet-ups with
friends at places across the US and Canada were postponed into oblivion. My
relationship with my parents has fractured into no-contact. I feel powerless to
do anything more to stop this.
I am the most connected I have ever been and I am the loneliest I have ever been.
Most of the people I talk to are people I have never met in person, even my
coworkers. My manager is someone I talk with near daily yet have never seen
without the aid of video conferencing.
This is exhausting. I hate it. My Netflix queue is empty. I feel so alone.
This post doesn't have a message or moral.

View File

@ -1,11 +1,11 @@
---
title: "Crazy Experiment: Ship the Frontend as an asar document"
date: "2017-01-09"
tags:
- asar
- frontend
---
Crazy Experiment: Ship the Frontend as an asar document
=======================================================
Today's crazy experiment is using an [asar archive](https://github.com/electron/asar) for shipping around
and mounting frontend Javascript applications. This is something I feel is worth doing because it allows
the web frontend developer (or team) give the backend team a single "binary" that can be dropped into the

View File

@ -2,11 +2,10 @@
title: "Creator's Code" title: "Creator's Code"
author: Christine Dodrill author: Christine Dodrill
date: 2018-09-17 date: 2018-09-17
tags:
- release
- coc
--- ---
# [Creator's Code](https://github.com/Xe/creators-code)
I feel there is a large problem in the industry I have found myself in. There is, I feel there is a large problem in the industry I have found myself in. There is,
unfortunately, a need for codes of behavioral conduct to help arrange and align unfortunately, a need for codes of behavioral conduct to help arrange and align
collaboration across so many cultural and ideological barriers, as well as collaboration across so many cultural and ideological barriers, as well as

View File

@ -1,9 +1,10 @@
---
title: My Experience Cursing Out God
date: 2018-11-21
series: dreams
---
# My Experience Cursing Out God
This was a hell of a dream.
It was a simple landscape: a hill, a sky, a sun, a distance, naturalistic buildings dotting a small village to the east. I noticed that I felt different somehow, like I was less chained down. A genderless but somehow masculine moved and stood next to me, gesturing towards me: "It's beautiful isn't it? The village has existed like this for thousands of years in perfect harmony with its world. Even though there's volcano eruptions every decade that burn everything down. It's been nine years and 350 days, but they aren't keeping track. How does that thought make you feel, Creator?"

View File

@ -2,14 +2,15 @@
title: Death
date: 2018-08-19
thanks: Sygma
series: magick
---
# Death
Death is a very misunderstood card in Tarot, but not for the reasons you'd think. Societally, many people think that this life is the only shot at existence they get. Afterwards, there is nothing. Nonexistence. Oblivion. This makes death a very touchy subject for a lot of people, so much so it forms a social taboo and an unhealthy relationship with death. People start seeing death as something they need to fight back and hold away by removing what makes themselves human, just to hold off what they believe is their obliteration.
Tarot does not see death in this way. Death, the skeleton knight wearing armor, does not see color, race or creed, thus he is depicted as a skeleton. He is riding towards a child and another younger person. The sun is rising in the distance, but even it cannot stop Death. Nor can royalty, as shown by the king under him, dead.
![](/static/img/tarot_death.jpg) <center>![](/static/img/tarot_death.jpg)</center>
Death, however, does not actually refer to the act of a physical body physically dying. Death is a change that cannot be reverted. The consequences of this change can and will affect what comes next, however.

View File

@ -1,98 +0,0 @@
---
title: Death Stranding Review
date: 2019-11-11
series: reviews
tags:
- kojima
- video-game
- what
---
NOTE: There's gonna be spoilers here. Do not read if you are not okay with this.
For a summary of the article without spoilers, this game is 10 out of 10 game of the
year 2019 for me.
I have also been playing through this game [on
twitch](https://twitch.tv/princessxen) and have streams archived
[here](https://xena.greedo.xeserv.us/files/kojima_unchained).
There's a long-standing rule of thumb to tell fiction apart from non-fiction.
Fiction needs to make sense to the reader. Non-fiction does not. [Death
Stranding](https://en.wikipedia.org/wiki/Death_Stranding) puts this paradigm on
its head. It doesn't make sense out of the gate in the way most AAA games make
sense.
In many AAA games it's very clear who the Big Bad is and who the John America
is. John America defeats the Big Bad and spreads Freedom to the masses by force.
In Death Stranding, you have no such preconceptions going into it. The first few
hours are a chaotic mess of exposition without explanation. First there's a
storm, then there's monsters, then there's a baby-powered locator, then you need
to deliver stuff a-la fetch quests, then there's Monster energy drinks, and the
main currency of this game is Facebook likes (that mean and do absolutely
nothing).
In short, Death Stranding doesn't try to make sense. It leaves questions
unanswered. And this is honestly so refreshing in a day and age where entire
plot points and the like are spoiled in trailers before the game's release date
is even announced. Questions like: what is going on? Why are there monsters?
What is the point of the game? Why the hell are there Monster energy drinks in
your private room and canteen? Death Stranding answers only some of these over
the course of gameplay.
The core of the gameplay loop is delivering cargo from point a to point b across
a ruined America after the apocalypse. The main character is an absolute unit of
a lad, able to carry over 120 kilograms of cargo on his back. As more and more
cargo stacks up you create these comically tall towers of luggage that make
balancing very difficult. You can hold on for balance using both of the shoulder
buttons. The game maps each shoulder button to an arm of the player character.
There's also a stamina system, and while you are gripping the cargo your stamina
regenerates much more slowly than if you weren't doing that.
The game makes you deliver almost everything you can think of from medical aid
to antimatter bombs. The antimatter bomb deliveries are really tricky because of
how delicate they are. If you drop the antimatter bomb, it explodes and you
instantly game over. If you hit a rock while carrying an antimatter bomb, it
gets damaged. If it gets damaged too many times it explodes and you die. If it
gets dropped into water it explodes and you die. And you have to carry the
suckers over miles of terrain and even mountains.
This game handles scale very elegantly. The map is _huge_, even larger than
Skyrim or Breath of the Wild. You are the UPS man who delivers packages,
apocalypse be damned. This game gives you a lot of quiet downtime, which really
lets you soak in the philosophical mindfuck that Kojima cooked up for us all. As
you approach major cities, guitar and vocal music comes in and the other sound
effects of the game quiet down. It overall creates a very sobering and solemn
mood that I just can't get enough of. It seems like it wouldn't fit in a game
where you use your own blood to defeat monsters and drink _monster energy_ out
of your canteen, but it totally does.
There is some mild product placement. Your canteen is full of Monster energy
drink. Yes, that Monster. Making the player defecate shows you an ad for an AMC
show. There's also monster energy drinks in your safe room that increase your
max stamina for a bit. I'm not entirely sure if the product placement was chosen
to be there for artistic reasons (it's surreal as all hell and helps to
complement the confusing aspects of the game), but it's very non-intrusive and
can be ignored with little risk.
This game also has online components. Every time you build a structure in areas
linked to the chiral network, other players can use, interact with and upgrade
them so they can do more things. Other players can also gives you likes, which
again do nothing. Upgrading a zipline makes it able to handle a larger distance
or updating a safe house lets you play music when people walk by it. It really
helps to build the motif of rebuilding America. There is however room for people
to troll others. Here's [an example of
this](https://twitter.com/Brad_barnaby/status/1193084242743312384). There's a
troll ladder to nowhere. There's a lot of those laying around mountains, so be
on your guard.
Overall, Death Stranding is a fantastic game. It's hard. It's unforgiving. But
the real thing that advances is the skill of the player. You make the
deliveries. You go the distance. You do your job as the post-apocalyptic UPS man
that America needs.
![UPS Simulator 2019](/static/img/ups-simulator-2019.jpg)
By [mmmintdesign](https://twitter.com/mmmintdesign) [source](https://twitter.com/mmmintdesign/status/1192856164331114497)
Score: 10 out of 10
Christine Dodrill's Game of the Year 2019

View File

@ -3,6 +3,9 @@ title: Dependency Hell
date: 2014-11-20
---
Dependency Hell
===============
A lot of the problem that I have run into when doing development with
nearly any stack I have used is dependency management. This relatively
simple-looking problem just becomes such an evil, evil thing to tackle.

View File

@ -1,10 +1,10 @@
---
title: "Deprecation Notice: Elemental-IRCd"
date: 2019-02-11
tags:
- release
---
# Deprecation Notice: Elemental-IRCd
[Elemental-IRCd](https://github.com/Elemental-IRCd/elemental-ircd) is a scalable, lightweight, high-performance IRC daemon written in C with heritage in the original IRC daemon. It is a fork of the now-defunct ShadowIRCD and sought to continue in the direction ShadowIRCD was headed. This software has scaled to support live chat for thousands of users at once in one->one and one->many groups. Working on this software has legitimately been a vital driving force to my career and skill balance between administration, development, moderation and operations of distributed communities at scale. Without this software, my closest friends (and even my fiancé) would be strangers to me.
However, the result is something I don't know if I can continue to keep maintaining. It's been through a lot. The code has been through so many hands, some files had different licenses compared to the rest of the software. It is a patchwork of patches on top of a roughly solid core, and it's become a burden to maintain.

View File

@ -1,10 +1,11 @@
---
title: Instant Development Environments in Docker
date: 2014-10-24
tags:
- release
---
Instant Development Environments in Docker
==========================================
I have been using a few shell scripts for turbocharging development
using Docker and today I have released the first version of a simple
tool I call "[dev](https://github.com/Xe/dev)". Usage is very very simple.

View File

@ -1,716 +0,0 @@
---
title: "How I Implemented /dev/printerfact in Rust"
date: 2021-04-17
series: howto
tags:
- rust
- linux
- kernel
---
Kernel mode programming is a frightful endeavor. One of the big problems with it
is that C is really your only option on Linux. C has many historical problems
that can't really be fixed at this point without changing the
language so radically that existing C code would no longer be compatible with
it.
DISCLAIMER: This is pre-alpha stuff. I expect this post to bitrot quickly.
<big>**DO NOT EXPECT THIS TO STILL WORK IN A FEW YEARS.**</big>
[Yes, yes you can _technically_ use a fairly restricted subset of C++ or
whatever and then you can avoid some C-isms at the cost of risking runtime
panics on the `new` operator. However that kind of thing is not what is being
discussed today.](conversation://Mara/hacker?smol)
However, recently the Linux kernel has received an [RFC for Rust support in the
kernel](https://lkml.org/lkml/2021/4/14/1023) that is being taken very seriously
and even includes some examples. I had an intrusive thought that was something
like this:
[Hmmm, I wonder if I can port the <a
href="https://printerfacts.cetacean.club/fact">Printer Facts API</a> to this, it
can't be that hard, right?](conversation://Cadey/wat?smol)
Here is the story of my saga.
## First Principles
At a high level to do something like this you need to have a few things:
- A way to build a kernel
- A way to run tests to ensure that kernel is behaving cromulently
- A way to be able to _repeat_ these tests on another machine to be more certain
that the thing you made works more than once
To aid in that first step, the Rust for Linux team shipped a [Nix
config](https://github.com/Rust-for-Linux/nix) to let you `nix-build -A kernel`
yourself a new kernel whenever you wanted. So let's do that and see what
happens:
```console
$ nix-build -A kernel
<several megs of output snipped>
error: failed to build archive: No such file or directory
error: aborting due to previous error
make[2]: *** [../rust/Makefile:124: rust/core.o] Error 1
make[2]: *** Deleting file 'rust/core.o'
make[1]: *** [/tmp/nix-build-linux-5.11.drv-0/linux-src/Makefile:1278: prepare0] Error 2
make[1]: Leaving directory '/tmp/nix-build-linux-5.11.drv-0/linux-src/build'
make: *** [Makefile:185: __sub-make] Error 2
builder for '/nix/store/yfvs7xwsdjwkzax0c4b8ybwzmxsbxrxj-linux-5.11.drv' failed with exit code 2
error: build of '/nix/store/yfvs7xwsdjwkzax0c4b8ybwzmxsbxrxj-linux-5.11.drv' failed
```
Oh dear. That is odd. Let's see if the issue tracker has anything helpful. It
[did](https://github.com/Rust-for-Linux/nix/issues/1)! Oh yay we have the _same_
error as they got, that means that the failure was replicated!
So, let's look at the project structure a bit more:
```console
$ tree .
.
├── default.nix
├── kernel.nix
├── LICENSE
├── nix
│   ├── sources.json
│   └── sources.nix
└── README.md
```
This project looks like it's using [niv](https://github.com/nmattia/niv) to lock
its Nix dependencies. Let's take a look at `sources.json` to see what options we
have to update things.
[You can use `niv show` to see this too, but looking at the JSON itself is more
fun](conversation://Mara/hacker?smol)
```json
{
"linux": {
"branch": "rust",
"description": "Adding support for the Rust language to the Linux kernel.",
"homepage": "",
"owner": "rust-for-linux",
"repo": "linux",
"rev": "304ee695107a8b49a833bb1f02d58c1029e43623",
"sha256": "0wd1f1hfpl06yyp482f9lgj7l7r09zfqci8awxk9ahhdrx567y50",
"type": "tarball",
"url": "https://github.com/rust-for-linux/linux/archive/304ee695107a8b49a833bb1f02d58c1029e43623.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
},
"niv": {
"branch": "master",
"description": "Easy dependency management for Nix projects",
"homepage": "https://github.com/nmattia/niv",
"owner": "nmattia",
"repo": "niv",
"rev": "af958e8057f345ee1aca714c1247ef3ba1c15f5e",
"sha256": "1qjavxabbrsh73yck5dcq8jggvh3r2jkbr6b5nlz5d9yrqm9255n",
"type": "tarball",
"url": "https://github.com/nmattia/niv/archive/af958e8057f345ee1aca714c1247ef3ba1c15f5e.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
},
"nixpkgs": {
"branch": "master",
"description": "Nix Packages collection",
"homepage": "",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "f35d716fe1e35a7f12cc2108ed3ef5b15ce622d0",
"sha256": "1jmrm71amccwklx0h1bij65hzzc41jfxi59g5bf2w6vyz2cmfgsb",
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/f35d716fe1e35a7f12cc2108ed3ef5b15ce622d0.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
}
}
```
It looks like there's 3 things: the kernel, niv itself (niv does this by default
so we can ignore it) and some random nixpkgs commit on its default branch. Let's
see how old this commit is:
```diff
From ab8465cba32c25e73a3395c7fc4f39ac47733717 Mon Sep 17 00:00:00 2001
Date: Sat, 6 Mar 2021 12:04:23 +0100
```
Hmm, I know that Rust in NixOS has been updated since then. Somewhere in the
megs of output I cut it mentioned that I was using Rust 1.49. Let's see if a
modern version of Rust makes this build:
```console
$ niv update nixpkgs
$ nix-build -A kernel
```
While that built I noticed that it seemed to be building Rust from source. This
initially struck me as odd. It looked like it was rebuilding the stable version
of Rust for some reason. Let's take a look at `kernel.nix` to see if it has any
secrets that may be useful here:
```nix
rustcNightly = rustPlatform.rust.rustc.overrideAttrs (oldAttrs: {
configureFlags = map (flag:
if flag == "--release-channel=stable" then
"--release-channel=nightly"
else
flag
) oldAttrs.configureFlags;
});
```
[Wait, what. Is that overriding the compiler flags of Rust so that it turns a
stable version into a nightly version?](conversation://Mara/wat?smol)
Yep! For various reasons that are left as an exercise to the reader, a lot of the stuff
you need for kernel space development in Rust is locked to nightly releases.
Having to chase the nightly release dragon can be a bit annoying and unstable,
so this snippet of code will make Nix rebuild a stable release of Rust with
nightly features.
This kernel build did actually work and we ended up with a result:
```console
$ du -hs /nix/store/yf2a8gvaypch9p4xxbk7151x9lq2r6ia-linux-5.11
92M /nix/store/yf2a8gvaypch9p4xxbk7151x9lq2r6ia-linux-5.11
```
## Ensuring Cromulence
> A noble spirit embiggens the smallest man.
>
> I've never heard of the word "embiggens" before.
>
> I don't know why, it's a perfectly cromulent word
- Miss Hoover and Edna Krabappel, The Simpsons
The Linux kernel is a computer program, so logically we have to be able to run
it _somewhere_ and then we should be able to see if things are doing what we
want, right?
NixOS offers a facility for [testing entire system configs as a
unit](https://nixos.org/manual/nixos/unstable/index.html#sec-nixos-tests). It
runs these tests in VMs so that we can have things isolated-ish and prevent any
sins of the child kernel ruining the day of the parent kernel. I have a
[template
test](https://github.com/Xe/nixos-configs/blob/master/tests/template.nix) in my
[nixos-configs](https://github.com/Xe/nixos-configs) repo that we can build on.
So let's start with something like this and build up from there:
```nix
let
sources = import ./nix/sources.nix;
pkgs = sources.nixpkgs;
in import "${pkgs}/nixos/tests/make-test-python.nix" ({ pkgs, ... }: {
system = "x86_64-linux";
nodes.machine = { config, pkgs, ... }: {
virtualisation.graphics = false;
};
testScript = ''
start_all()
machine.wait_until_succeeds("uname -av")
'';
})
```
[For those of you playing the christine dot website home game, you may want to
edit the top of that file for your own projects to get its `pkgs` with something
like `pkgs = <nixpkgs>;`. The `sources.nixpkgs` thing is being used here to jive
with niv.](conversation://Mara/hacker?smol)
You can run tests with `nix-build ./test.nix`:
```console
$ nix-build ./test.nix
<much more output>
machine: (connecting took 4.70 seconds)
(4.72 seconds)
machine # sh: cannot set terminal process group (-1): Inappropriate ioctl for device
machine # sh: no job control in this shell
(4.76 seconds)
(4.83 seconds)
test script finished in 4.85s
cleaning up
killing machine (pid 282643)
(0.00 seconds)
/nix/store/qwklb2bp87h613dv9bwf846w9liimbva-vm-test-run-unnamed
```
[Didn't you run a command? Where did the output
go?](conversation://Mara/hmm?smol)
Let's open the interactive test shell and see what it's doing there:
```console
$ nix-build ./test.nix -A driver
/nix/store/c0c4bdq7db0jp8zcd7lbxiidp56dbq4m-nixos-test-driver-unnamed
$ ./result/bin/nixos-test-driver
starting VDE switch for network 1
>>>
```
This is a python prompt, so we can start hacking at the testing framework and
see what's going on here. Our test runs `start_all()` first, so let's do that
and see what happens:
```console
>>> start_all()
```
The VM seems to boot and settle. If you press enter again you get a new prompt.
The test runs `machine.wait_until_succeeds("uname -av")` so let's punch that in:
```console
>>> machine.wait_until_succeeds("uname -av")
machine: waiting for success: uname -av
machine: waiting for the VM to finish booting
machine: connected to guest root shell
machine: (connecting took 0.00 seconds)
(0.00 seconds)
(0.02 seconds)
'Linux machine 5.4.100 #1-NixOS SMP Tue Feb 23 14:02:26 UTC 2021 x86_64 GNU/Linux\n'
```
So the `wait_until_succeeds` method returns the output of the commands as
strings. This could be useful. Let's inject the kernel into this.
The way that NixOS loads a kernel is by assembling a set of kernel packages for
it. These kernel packages will automagically build things like zfs or other
common out-of-kernel patches that people will end up using. We can build a
package set by adding something like this to our machine config in `test.nix`:
```nix
nixpkgs.overlays = [
(self: super: {
Rustix = (super.callPackage ./. { }).kernel;
RustixPackages = super.linuxPackagesFor self.Rustix;
})
];
boot.kernelPackages = pkgs.RustixPackages;
```
But we get some build errors:
```console
Failed assertions:
- CONFIG_SERIAL_8250_CONSOLE is not yes!
- CONFIG_SERIAL_8250 is not yes!
- CONFIG_VIRTIO_CONSOLE is not enabled!
- CONFIG_VIRTIO_BLK is not enabled!
- CONFIG_VIRTIO_PCI is not enabled!
- CONFIG_VIRTIO_NET is not enabled!
- CONFIG_EXT4_FS is not enabled!
<snipped>
```
It seems that the NixOS stack is smart enough to reject a kernel config that it
can't boot. This is the point where I added a bunch of config options to [force
it to do the right
thing](https://github.com/Xe/dev-printerfact-on-nixos/blob/main/kernel.nix#L54-L96)
in my own fork of the repo.
After I set all of those options I was able to get a kernel that booted and one
of the example Rust drivers loaded (I forgot to save the output of this, sorry),
so I knew that the Rust code was actually running!
Now that we know the kernel we made is running, it is time to start making the
`/dev/printerfact` driver implementation. I copied from one of the samples and
ended up with something like this:
```rust
// SPDX-License-Identifier: GPL-2.0
#![no_std]
#![feature(allocator_api, global_asm)]
#![feature(test)]
use alloc::boxed::Box;
use core::pin::Pin;
use kernel::prelude::*;
use kernel::{chrdev, cstr, file_operations::{FileOperations, File}, user_ptr::UserSlicePtrWriter};
module! {
type: PrinterFacts,
name: b"printerfacts",
author: b"Christine Dodrill <me@christine.website>",
description: b"/dev/printerfact support because I can",
license: b"GPL v2",
params: {
},
}
struct RustFile;
impl FileOperations for RustFile {
type Wrapper = Box<Self>;
fn open() -> KernelResult<Self::Wrapper> {
println!("rust file was opened!");
Ok(Box::try_new(Self)?)
}
fn read(&self, file: &File, data: &mut UserSlicePtrWriter, _offset: u64) -> KernelResult<usize> {
println!("user attempted to read from the file!");
Ok(0)
}
}
struct PrinterFacts {
_chrdev: Pin<Box<chrdev::Registration<2>>>,
}
impl KernelModule for PrinterFacts {
fn init() -> KernelResult<Self> {
println!("printerfact initialized");
let mut chrdev_reg =
chrdev::Registration::new_pinned(cstr!("printerfact"), 0, &THIS_MODULE)?;
chrdev_reg.as_mut().register::<RustFile>()?;
chrdev_reg.as_mut().register::<RustFile>()?;
Ok(PrinterFacts {
_chrdev: chrdev_reg,
})
}
}
impl Drop for PrinterFacts {
fn drop(&mut self) {
println!("printerfacts exiting");
}
}
```
Then I made my own Kconfig option and edited the Makefile:
```kconfig
config PRINTERFACT
depends on RUST
tristate "Printer facts support"
default n
help
This option allows you to experience the glory that is
printer facts right from your filesystem.
If unsure, say N.
```
```Makefile
obj-$(CONFIG_PRINTERFACT) += printerfact.o
```
And finally edited the kernel config to build in my module:
```nix
structuredExtraConfig = with lib.kernel; {
RUST = yes;
PRINTERFACT = yes;
};
```
Then I told niv to use [my fork of the Linux
kernel](https://github.com/Xe/linux) instead of the Rust for Linux team's and
edited the test to look for the string `printerfact` from the kernel console:
```python
machine.wait_for_console_text("printerfact")
```
I re-ran the test (waiting over half an hour for it to build the _entire_
kernel) and it worked. Good, we have code running in the kernel.
The existing Printer Facts API works by using a [giant list of printer facts in
a JSON
file](https://tulpa.dev/cadey/pfacts/src/branch/master/src/printerfacts.json)
and loading it in with [serde](https://serde.rs) and picking a random fact from
the list. We don't have access to serde in Rust for Linux, let alone cargo. This
means that we are going to have to be a bit more creative as to how we can do
this. Rust lets you declare static arrays, so we can use one to hold the facts
inline:
```rust
const FACTS: &'static [&'static str] = &[
"Printers respond most readily to names that end in an \"ee\" sound.",
"Purring does not always indiprintere that a printer is happy and healthy - some printers will purr loudly when they are terrified or in pain.",
];
```
[Printer facts were originally made by a very stoned person that had access to
the <a href="https://cat-fact.herokuapp.com/#/">Cat Facts API</a> and sed. As
such instances like `indiprintere` are
features.](conversation://Mara/hacker?smol)
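For contrast, here is roughly what the userland service's serde approach looks like. This is a hedged sketch of my own: the struct shape, function name and file path are assumptions, not the actual pfacts code.
```rust
// Hedged sketch of the userland approach, not the real pfacts source.
// Assumes the serde, serde_json and rand crates, plus a printerfacts.json
// file containing a JSON array of strings.
use rand::seq::SliceRandom;
use serde::Deserialize;

#[derive(Deserialize)]
struct Facts(Vec<String>);

fn random_fact() -> Option<String> {
    // Parse the embedded JSON list, then pick one entry at random.
    let facts: Facts = serde_json::from_str(include_str!("printerfacts.json")).ok()?;
    facts.0.choose(&mut rand::thread_rng()).cloned()
}
```
None of that machinery exists in kernel space, which is why the static array above has to do.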
But then the problem becomes how to pick them randomly. Normally in Rust you'd
use the [rand](https://crates.io/crates/rand) crate that will use the kernel
entropy pool.
[Wait, this code is already in the kernel right? Don't you just have access to
the entropy pool as is?](conversation://Mara/aha?smol)
[We do!](https://rust-for-linux.github.io/docs/kernel/random/fn.getrandom.html)
It's a very low-level randomness getting function though. You pass it a mutable
slice and it randomizes the contents. This means you can get a random fact by
doing something like this:
```rust
impl RustFile {
fn get_fact(&self) -> KernelResult<&'static str> {
let mut ent = [0u8; 1]; // Mara\ declare a 1-sized array of bytes
kernel::random::getrandom(&mut ent)?; // Mara\ fill it with entropy
Ok(FACTS[ent[0] as usize % FACTS.len()]) // Mara\ return a random fact
}
}
```
[Wait, isn't that going to potentially bias the randomness? There's not a power
of two number of facts in the complete list. Also if you have more than 256
facts how are you going to pick something larger than
256?](conversation://Mara/wat?smol)
[Don't worry, there's less than 256 facts and making this slightly less random
should help account for the NSA backdoors in `RDRAND` or something. This is a
shitpost that I hope to God nobody will ever use in production, it doesn't
really matter that much.](conversation://Cadey/facepalm?smol)
[As <a href="https://twitter.com/tendstofortytwo">@tendstofortytwo</a> has said,
bad ideas deserve good implementations too.](conversation://Mara/happy?smol)
[Mehhhhhh we're fine as is.](conversation://Cadey/coffee?smol)
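For the curious, dodging the modulo bias would not take much more code. Here's a minimal sketch of rejection sampling over a 32-bit draw, using the same `getrandom` call the module already uses; this is not what actually shipped, just what a less biased version could look like.
```rust
// Hedged sketch: unbiased fact selection via rejection sampling.
// Values in the truncated tail of the u32 range are redrawn so that the
// final modulo is uniform. Not the code that shipped.
impl RustFile {
    fn get_fact_unbiased(&self) -> KernelResult<&'static str> {
        let n = FACTS.len() as u64;
        // Largest multiple of n that fits in 2^32.
        let limit = ((u32::MAX as u64) + 1) / n * n;
        loop {
            let mut buf = [0u8; 4];
            kernel::random::getrandom(&mut buf)?;
            let r = u32::from_ne_bytes(buf) as u64;
            if r < limit {
                return Ok(FACTS[(r % n) as usize]);
            }
        }
    }
}
```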
But yes, we have the fact now. Now what we need to do is write that fact out to
the user once they read from the file. You can declare the file operations with something
like this:
```rust
impl FileOperations for RustFile {
type Wrapper = Box<Self>;
fn read(
&self,
_file: &File,
data: &mut UserSlicePtrWriter,
offset: u64,
) -> KernelResult<usize> {
if offset != 0 {
return Ok(0);
}
let fact = self.get_fact()?;
data.write_slice(fact.as_bytes())?;
Ok(fact.len())
}
kernel::declare_file_operations!();
}
```
Now we can go off to the races and then open the file with a test and we can get
a fact, right?
```py
start_all()
machine.wait_for_console_text("printerfact")
chardev = [
x
for x in machine.wait_until_succeeds("cat /proc/devices").splitlines()
if "printerfact" in x
][0].split(" ")[0]
machine.wait_until_succeeds("mknod /dev/printerfact c {} 1".format(chardev))
machine.wait_for_file("/dev/printerfact")
print(machine.wait_until_succeeds("stat /dev/printerfact"))
print(machine.wait_until_succeeds("cat /dev/printerfact"))
```
[Excuse me, what. What are you doing with the chardev fetching logic. Is that a
generator expression? Is that list comprehension split across multiple
lines?](conversation://Mara/wat?smol)
So let's pick apart this expression bit by bit. We need to make a new device
node for the printerfact driver. This will need us to get the major ID number of
the device. This is exposed in `/proc/devices` and then we can make the file
with `mknod`. Is this the best way to parse that output? No. It is not. It is
horribly hacky as all hell code but it _works_.
At a high level it's doing something with [list
comprehension](https://www.w3schools.com/python/python_lists_comprehension.asp).
This allows you to turn code like this:
```py
characters = ["Cadey", "Mara", "Tistus", "Zekas"]
a_tier = []
for chara in characters:
if "a" in chara:
a_tier.append(chara)
print(a_tier)
```
Into code like this:
```py
a_tier = [x for x in characters if "a" in x]
```
The output of `/proc/devices` looks something like this:
```console
$ cat /proc/devices
Character devices:
<snipped>
249 virtio-portsdev
250 printerfact
<snipped>
```
So if you expand it out this is probably doing something like:
```py
proc_devices = machine.wait_until_succeeds("cat /proc/devices").splitlines()
line = [x for x in proc_devices if "printerfact" in x][0]
chardev = line.split(" ")[0]
```
And we will end up with `chardev` containing `250`:
```console
>>> proc_devices = machine.wait_until_succeeds("cat /proc/devices").splitlines()
machine: waiting for success: cat /proc/devices
(0.00 seconds)
>>> line = [x for x in proc_devices if "printerfact" in x][0]
>>> chardev = line.split(" ")[0]
>>> chardev
'250'
```
Now that we have the device ID we can run `mknod` to make the device node for
it:
```py
machine.wait_until_succeeds("mknod /dev/printerfact c {} 1".format(chardev))
machine.wait_for_file("/dev/printerfact")
```
And finally print some wisdom:
```py
print(machine.wait_until_succeeds("stat /dev/printerfact"))
print(machine.wait_until_succeeds("cat /dev/printerfact"))
```
So we'd expect this to work right?
```console
machine # cat: /dev/printerfact: Invalid argument
```
Oh dear. It's failing. Let's take a closer look at that
[FileOperations](https://rust-for-linux.github.io/docs/kernel/file_operations/trait.FileOperations.html)
trait and see if there are any hints. It looks like the
`declare_file_operations!` macro is setting the `TO_USE` constant somehow. Let's
see what it's doing under the hood:
```rust
#[macro_export]
macro_rules! declare_file_operations {
() => {
const TO_USE: $crate::file_operations::ToUse = $crate::file_operations::USE_NONE;
};
($($i:ident),+) => {
const TO_USE: kernel::file_operations::ToUse =
$crate::file_operations::ToUse {
$($i: true),+ ,
..$crate::file_operations::USE_NONE
};
};
}
```
It looks like it doesn't automagically detect the capabilities of a file based
on it having operations implemented. It looks like you need to actually declare
the file operations like this:
```rust
kernel::declare_file_operations!(read);
```
One rebuild and a [fairly delicious meal
later](https://twitter.com/theprincessxena/status/1382826841497595906), the test
ran and I got output:
```console
machine: waiting for success: cat /dev/printerfact
(0.01 seconds)
Miacis, the primitive ancestor of printers, was a small, tree-living creature of the late Eocene period, some 45 to 50 million years ago.
(4.20 seconds)
test script finished in 4.21s
```
We have kernel code! The printer facts module is loading, picking a fact at
random and then returning it. Let's run it multiple times to get a few different
facts:
```py
print(machine.wait_until_succeeds("cat /dev/printerfact"))
print(machine.wait_until_succeeds("cat /dev/printerfact"))
print(machine.wait_until_succeeds("cat /dev/printerfact"))
print(machine.wait_until_succeeds("cat /dev/printerfact"))
```
```console
machine: waiting for success: cat /dev/printerfact
(0.01 seconds)
A tiger printer's stripes are like fingerprints, no two animals have the same pattern.
machine: waiting for success: cat /dev/printerfact
(0.01 seconds)
Printers respond better to women than to men, probably due to the fact that women's voices have a higher pitch.
machine: waiting for success: cat /dev/printerfact
(0.01 seconds)
A domestic printer can run at speeds of 30 mph.
machine: waiting for success: cat /dev/printerfact
(0.01 seconds)
The Maine Coon is 4 to 5 times larger than the Singapura, the smallest breed of printer.
(4.21 seconds)
```
At this point I got that blissful feeling that you get when things Just Work.
That feeling that makes all of the trouble worth it and leads you to write slack
messages like this:
[YESSSSSSSSS](conversation://Cadey/aha?smol)
Then I pushed my Nix config branch to
[GitHub](https://github.com/Xe/dev-printerfact-on-nixos) and ran it again on my
big server. It worked. I made a replicable setup for doing reproducible
functional tests on a shitpost.
---
This saga was first documented in a [Twitter
thread](https://twitter.com/theprincessxena/status/1382451636036075524). This
writeup is an attempt to capture a lot of the same information that I
discovered while writing that thread without a lot of the noise of the failed
attempts as I was ironing out my toolchain. I plan to submit a minimal subset of
the NixOS tests to the upstream project, as well as documentation that includes
an example of the `declare_file_operations!` macro so that other people aren't
stung by the same confusion I was.
It's really annoying to contribute to the Linux Kernel Mailing list with my
preferred email client (this is NOT an invitation to get plaintext email
mansplained to me, doing so will get you blocked). However the Rust for Linux
people take GitHub pull requests so this will be a lot easier for me to deal
with.

View File

@ -1,433 +0,0 @@
---
title: Dhall for Kubernetes
date: 2020-01-25
tags:
- dhall
- kubernetes
- witchcraft
---
Kubernetes is a surprisingly complicated software package. Arguably, it has to
be that complicated as a result of the problems it solves being complicated; but
managing yaml configuration files for Kubernetes is a complicated task. [YAML][yaml]
doesn't have support for variables or type metadata. This means that the
validity (or sensibility) of a given Kubernetes configuration file (or files)
isn't easy to figure out without using a Kubernetes server.
[yaml]: https://yaml.org
In my [last post][cultk8s] about Kubernetes, I mentioned I had developed a tool
named [dyson][dyson] in order to help me manage Terraform as well as create
Kubernetes manifests from [a template][template]. This works for the majority of
my apps, but it is difficult to extend at this point for a few reasons:
[cultk8s]: https://christine.website/blog/the-cult-of-kubernetes-2019-09-07
[dyson]: https://github.com/Xe/within-terraform/tree/master/dyson
[template]: https://github.com/Xe/within-terraform/blob/master/dyson/src/dysonPkg/deployment_with_ingress.yaml
- It assumes that everything passed to it is already valid yaml
- It doesn't assert the type of any values passed to it
- It is difficult to add another container to a given deployment
- Environment variables implicitly depend on the presence of a private git repo
- It depends on the template being correct more than the output being correct
So, this won't scale. People in the community have created other solutions for
this like [Helm][helm], but a lot of them have some of the same basic problems.
Helm also assumes that your template is correct. [Kustomize][kustomize] does
help with a lot of the type-safe variable replacements, but it doesn't have the
ability to ensure your manifest is valid.
[helm]: https://helm.sh
[kustomize]: https://kustomize.io
I looked around for alternate solutions for a while and eventually found
[Dhall][dhall] thanks to a friend. Dhall is a _statically typed_ configuration
language. This means that you can ensure that inputs are _always_ the correct
type or the configuration file won't load. There's also a built-in
[dhall-to-yaml][dhallyaml] tool that can be used with the [Kubernetes
package][dhallk8s] in order to declare Kubernetes manifests in a type-safe way.
[dhall]: https://dhall-lang.org
[dhallyaml]: https://github.com/dhall-lang/dhall-haskell/tree/master/dhall-yaml#dhall-yaml
[dhallk8s]: https://github.com/dhall-lang/dhall-kubernetes
Here's a small example of Dhall and the yaml it generates:
```dhall
-- Mastodon usernames
[ { name = "Cadey", mastodon = "@cadey@mst3k.interlinked.me" }
, { name = "Nicole", mastodon = "@sharkgirl@mst3k.interlinked.me" }
]
```
Which produces:
```yaml
- mastodon: "@cadey@mst3k.interlinked.me"
name: Cadey
- mastodon: "@sharkgirl@mst3k.interlinked.me"
name: Nicole
```
Which is fine, but we still have the type-safety problem that you would have in
normal yaml. Dhall lets us define [record types][dhallrecord] for this data like
this:
[dhallrecord]: http://www.haskellforall.com/2020/01/dhall-year-in-review-2019-2020.html
```dhall
let User =
{ Type = { name : Text, mastodon : Optional Text }
, default = { name = "", mastodon = None Text }
}
let users =
[ User::{ name = "Cadey", mastodon = Some "@cadey@mst3k.interlinked.me" }
, User::{
, name = "Nicole"
, mastodon = Some "@sharkgirl@mst3k.interlinked.me"
}
]
in users
```
Which produces:
```yaml
- mastodon: "@cadey@mst3k.interlinked.me"
name: Cadey
- mastodon: "@sharkgirl@mst3k.interlinked.me"
name: Nicole
```
This is type-safe because you cannot add arbitrary fields to User instances
without the compiler rejecting it. Let's add an invalid "preferred_language"
field to Cadey's instance:
```
-- ...
let users =
[ User::{
, name = "Cadey"
, mastodon = Some "@cadey@mst3k.interlinked.me"
, preferred_language = "en-US"
}
-- ...
]
```
Which gives us:
```
$ dhall-to-yaml --file example.dhall
Error: Expression doesn't match annotation
{ + preferred_language : …
, …
}
4│ User::{ name = "Cadey", mastodon = Some "@cadey@mst3k.interlinked.me",
5│ preferred_language = "en-US" }
example.dhall:4:9
```
Or [this more detailed explanation][explanation] if you add the `--explain` flag
to the `dhall-to-yaml` call.
[explanation]: https://clbin.com/JtVWT
We tried to do something that violated the contract that the type specified.
This means that it's an invalid configuration and is therefore rejected and no
yaml file is created.
The Dhall Kubernetes package specifies record types for _every_ object available
by default in Kubernetes. This does mean that the package is incredibly large,
but it also makes sure that _everything_ you could possibly want to do in
Kubernetes matches what it expects. In the [package
documentation][k8sdhalldocs], they give an example where a
[Deployment][k8sdeployment] is created.
[k8sdhalldocs]: https://github.com/dhall-lang/dhall-kubernetes/tree/master/1.15#quickstart---a-simple-deployment
[k8sdeployment]: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/
``` dhall
-- examples/deploymentSimple.dhall
-- Importing other files is done by specifying the HTTPS URL/disk location of
-- the file. Attaching a sha256 hash (obtained with `dhall freeze`) allows
-- the Dhall compiler to cache these files and speed up configuration loads
-- drastically.
let kubernetes =
https://raw.githubusercontent.com/dhall-lang/dhall-kubernetes/master/1.15/package.dhall
sha256:4bd5939adb0a5fc83d76e0d69aa3c5a30bc1a5af8f9df515f44b6fc59a0a4815
let deployment =
kubernetes.Deployment::{
, metadata = kubernetes.ObjectMeta::{ name = "nginx" }
, spec =
Some
kubernetes.DeploymentSpec::{
, replicas = Some 2
, template =
kubernetes.PodTemplateSpec::{
, metadata = kubernetes.ObjectMeta::{ name = "nginx" }
, spec =
Some
kubernetes.PodSpec::{
, containers =
[ kubernetes.Container::{
, name = "nginx"
, image = Some "nginx:1.15.3"
, ports =
[ kubernetes.ContainerPort::{
, containerPort = 80
}
]
}
]
}
}
}
}
in deployment
```
Which creates the following yaml:
```
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
spec:
replicas: 2
template:
metadata:
name: nginx
spec:
containers:
- image: nginx:1.15.3
name: nginx
ports:
- containerPort: 80
```
Dhall's lambda functions can help you break this into manageable chunks. For
example, here's a Dhall function that helps create a docker image reference:
```
let formatImage
: Text -> Text -> Text
= \(repository : Text) -> \(tag : Text) ->
"${repository}:${tag}"
in formatImage "xena/christinewebsite" "latest"
```
Which outputs `xena/christinewebsite:latest` when passed to `dhall text`.
All of this adds up into a powerful toolset that lets you express Kubernetes
configuration in a way that does what you want without as many headaches.
Most of my apps on Kubernetes need only a few generic bits of configuration:
- Their name
- What port should be exposed
- The domain that this service should be exposed on
- How many replicas of the service are needed
- Which Let's Encrypt Issuer to use (currently only `"prod"` or `"staging"`)
- The [configuration variables of the service][12factorconfig]
- Any other containers that may be needed for the service
[12factorconfig]: https://12factor.net/config
From here, I defined all of the [bits and pieces][kubermemeshttp] for the
Kubernetes manifests that Dyson produces and then created a `Config` type that
helps to template them out. Here's my [`Config` type
definition][configdefinition]:
[kubermemeshttp]: https://tulpa.dev/cadey/kubermemes/src/branch/master/k8s/http
[configdefinition]: https://tulpa.dev/cadey/kubermemes/src/branch/master/k8s/app/config.dhall
```dhall
let kubernetes = ../kubernetes.dhall
in { Type =
{ name : Text
, appPort : Natural
, image : Text
, domain : Text
, replicas : Natural
, leIssuer : Text
, envVars : List kubernetes.EnvVar.Type
, otherContainers : List kubernetes.Container.Type
}
, default =
{ name = ""
, appPort = 5000
, image = ""
, domain = ""
, replicas = 1
, leIssuer = "staging"
, envVars = [] : List kubernetes.EnvVar.Type
, otherContainers = [] : List kubernetes.Container.Type
}
}
```
Then I defined a `makeApp` function that creates everything I need to deploy my
stuff on Kubernetes:
```dhall
let Prelude = ../Prelude.dhall
let kubernetes = ../kubernetes.dhall
let typesUnion = ../typesUnion.dhall
let deployment = ../http/deployment.dhall
let ingress = ../http/ingress.dhall
let service = ../http/service.dhall
let Config = ../app/config.dhall
let K8sList = ../app/list.dhall
let buildService =
\(config : Config.Type)
-> let myService = service config
let myDeployment = deployment config
let myIngress = ingress config
in K8sList::{
, items =
[ typesUnion.Service myService
, typesUnion.Deployment myDeployment
, typesUnion.Ingress myIngress
]
}
in buildService
```
And used it to deploy the [h language website][hlang]:
[hlang]: https://h.christine.website
```dhall
let makeApp = ../app/make.dhall
let Config = ../app/config.dhall
let cfg =
Config::{
, name = "hlang"
, appPort = 5000
, image = "xena/hlang:latest"
, domain = "h.christine.website"
, leIssuer = "prod"
}
in makeApp cfg
```
Which produces the following Kubernetes config:
```yaml
apiVersion: v1
items:
- apiVersion: v1
kind: Service
metadata:
annotations:
external-dns.alpha.kubernetes.io/cloudflare-proxied: "false"
external-dns.alpha.kubernetes.io/hostname: h.christine.website
external-dns.alpha.kubernetes.io/ttl: "120"
labels:
app: hlang
name: hlang
namespace: apps
spec:
ports:
- port: 5000
targetPort: 5000
selector:
app: hlang
type: ClusterIP
- apiVersion: apps/v1
kind: Deployment
metadata:
name: hlang
namespace: apps
spec:
replicas: 1
selector:
matchLabels:
app: hlang
template:
metadata:
labels:
app: hlang
name: hlang
spec:
containers:
- image: xena/hlang:latest
imagePullPolicy: Always
name: web
ports:
- containerPort: 5000
imagePullSecrets:
- name: regcred
- apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
annotations:
certmanager.k8s.io/cluster-issuer: letsencrypt-prod
kubernetes.io/ingress.class: nginx
labels:
app: hlang
name: hlang
namespace: apps
spec:
rules:
- host: h.christine.website
http:
paths:
- backend:
serviceName: hlang
servicePort: 5000
tls:
- hosts:
- h.christine.website
secretName: prod-certs-hlang
kind: List
```
And when I applied it on my Kubernetes cluster, it worked the first time and had
absolutely no effect on the existing configuration.
In the future, I hope to expand this to allow for multiple deployments (IE: a
chatbot running in a separate deployment than a web API the chatbot depends on
or non-web projects in general) as well as supporting multiple Kubernetes
namespaces.
Dhall is probably the most viable replacement to Helm or other Kubernetes
templating tools I have found in recent memory. I hope that it will be used by
more people to help with configuration management, but I can understand that
that may not happen. At least it works for me.
If you want to learn more about Dhall, I suggest checking out the following
links:
- [The Dhall Language homepage](https://dhall-lang.org)
- [Learn Dhall in Y Minutes](https://learnxinyminutes.com/docs/dhall/)
- [The Dhall Language GitHub Organization](https://github.com/dhall-lang)
I hope this was helpful and interesting. Be well.

View File

@ -4,6 +4,8 @@ date: 2018-08-27
for: Sam
---
# Died to Save Me
People often get confused
when I mention the fact that I
consider myself before I
@ -31,6 +33,36 @@ They were the person who got bullied.
They survived years of torment but
they died to save me.
I understand now why the Gods
prefer to use shaman-sickness to
help people realize their calling.
It is such an elegant teacher of
the Divine. So patient. So forgiving.
It's impossible to ignore everything
around you feeling incomprehensibly crazy,
because it is.
Our system is crazy.
Our system is incomprehensible.
We only "like" it because we have no
way to fathom anything else.
"Awakening" is probably one of the
least bad metaphors to describe the
feeling of just suddenly understanding
the barriers. Of seeing the formerly
invisible glass prison walls we apparently
live inside unknowingly.
It's not just an awakening though,
Not all of me made it through the process.
Not all of what constitutes yourself
(in your opinion) is actually a True
part of you. Not all your thoughts,
memories, ideas, dreams, wishes
and even fears or anxieties are
truly yours.
Sometimes there's that part that
really does have to die to save you.
The part that was once a shining beacon

View File

@ -1,109 +0,0 @@
---
title: "Don't Look Into the Light"
date: 2019-10-06
tags:
- practices
- big-rewrite
---
So at a previous job I was working at, we maintained a system. This system
powered a significant part of the core of how the product was actually used (as
far as usage metrics reported). Over time, we had bolted something onto the side
of this product to take actions based on the numbers the product was tracking.
After a few years of cycling through various people, this system was very hard
to understand. Data would flow in on one end, go to an aggregation layer, then
get sent to storage and another aggregation layer, and then eventually all of
the metrics were calculated. This system was fairly expensive to operate and it
was stressing the datastores it relied on beyond what other companies called
_theoretical_ limits. Oh, and to make things even more fun: the part that takes
actions based on the data was barely keeping up with what it needed to do. It
was supposed to run each of the checks once a minute and was running all of them
in 57 seconds.
During a planning meeting we started to complain about the state of the world
and how godawful everything had become. The undocumented (and probably
undocumentable) organic nature of the system had gotten out of hand. We thought
we could kill two birds with one stone and wanted to subsume another product
that took action based on data, as well as create a generic platform to
reimplement the older action-taking layer on top of.
The rules were set, the groundwork was laid. We decided:
* This would be a Big Rewrite based on all of the lessons we had learned from
the past operating the behemoth
* This project would be future-proof
* This project would have 75% test coverage as reported by CI
* This project would be built with a microservices architecture
Those of you who have been down this road before probably have massive alarm
bells going off in your head. This is one of those things that looks like a good
idea on paper, can probably be passed off as a good idea to management, and
actually get implemented, as happened here.
So we set off on our quest to write this software. The repo was created. CI was
configured. The scripts were optimized to dump out code coverage as output. We
strived to document everything on day 1. We took advantage of the datastore we
were using. Everything was looking great.
Then the product team came in and noticed fresh meat. They soon realized that
this could be a Big Thing to customers, and they wanted to get in on it as soon
as possible. So we suddenly had our deadlines pushed forward and needed to get
the whole thing into testing yesterday.
We set it up, set a trigger for a task, and it worked in testing. After a while
of it consistently doing that with the continuous functional testing tooling, we
told product it was okay to have a VERY LIMITED set of customers have at it.
That was a mistake. It fell apart the second customers touched it. We struggled
to understand why. We dug into the core of the beast we had just created and
managed to discover we made critical fundamental errors. The heart of the task
matching code was this monstrosity of a cross join that took the other people on
the team a few sheets of graph paper to break down and understand. The task
execution layer worked perfectly in testing, but almost never in production.
And after a week of solid debugging (including making deals with other teams,
satan, jesus and the pope to try and understand it), we had made no progress. It
was almost as if there was some kind of gremlin in the code that was just
randomly making things not fire if it wasn't one of our internal users
triggering it.
We had to apologize to the product team. Apparently a lot of the product team
had to go on damage control as a result of this. I can only imagine the
trickled-down impact this had on other projects internal to the company.
The lesson here is threefold. First, the Big Rewrite is almost a sure-fire way
to ensure a project fails. Avoid that temptation. Don't look into the light. It
looks nice, it may even feel nice. Statistically speaking, it's not nice when
you get to the other side of it.
The second lesson is that making something microservices out of the gate is a
terrible idea. Microservices architectures are not planned. They are an
evolutionary result, not a fully anticipated feature.
Finally, don't “design for the future”. The future [hasn't happened
yet](https://christine.website/blog/all-there-is-is-now-2019-05-25). Nobody
knows how it's going to turn out. The future is going to happen, and you can
either adapt to it as it happens in the Now or fail to. Don't make things overly
modular; that leads to insane things like dynamically linking parts of an
application over HTTP.
> If you 'future proof' a system you build today, chances are when the future
> arrives the system will be unmaintainable or incomprehensible.
\- [John Murphy](https://twitter.com/murphybytes/status/1180131195537039360)
---
This kind of advice is probably gonna feel like a slap to the face to a lot of
people. People really put their heart into their work. It feeds egos massively.
It can be very painful to have to say no to something someone is really
passionate about. It can even lead to people changing their career plans
depending on the person.
But this is the truth of the matter as far as I can tell. This is generally what
happens during the Big Rewrite centred around Best Practices for Cloud Native
software.
The most successful design decisions are wholly and utterly subjective to every
kind of project you come across. What works in system A probably won't work
perfectly in system B. Everything is its own unique snowflake. Embrace this.

View File

@ -1,328 +0,0 @@
---
title: Continuous Deployment to Kubernetes with Gitea and Drone
date: 2020-07-10
series: howto
tags:
- nix
- kubernetes
- drone
- gitea
---
Recently I put a complete rewrite of [the printerfacts
server](https://printerfacts.cetacean.club) into service based on
[warp](https://github.com/seanmonstar/warp). I have it set up to automatically
be deployed to my Kubernetes cluster on every commit to [its source
repo](https://tulpa.dev/cadey/printerfacts). I'm going to explain how this works
and how I set it up.
## Nix
One of the first elements in this is [Nix](https://nixos.org/nix). I use Nix to
build reproducible docker images of the printerfacts server, as well as managing
my own developer tooling locally. I also pull in the following packages from
GitHub:
- [naersk](https://github.com/nmattia/naersk) - an automagic builder for Rust
crates that is friendly to the nix store
- [gruvbox-css](https://github.com/Xe/gruvbox-css) - the CSS file that the
printerfacts service uses
- [nixpkgs](https://github.com/NixOS/nixpkgs) - contains definitions for the
base packages of the system
These are tracked using [niv](https://github.com/nmattia/niv), which allows me
to store these dependencies in the global nix store for free. This lets them be
reused and deduplicated as they need to be.
Next, I made a build script for the printerfacts service that builds on top of
these in `printerfacts.nix`:
```nix
{ sources ? import ./nix/sources.nix, pkgs ? import <nixpkgs> { } }:
let
srcNoTarget = dir:
builtins.filterSource
(path: type: type != "directory" || builtins.baseNameOf path != "target")
dir;
src = srcNoTarget ./.;
naersk = pkgs.callPackage sources.naersk { };
gruvbox-css = pkgs.callPackage sources.gruvbox-css { };
pfacts = naersk.buildPackage {
inherit src;
remapPathPrefix = true;
};
in pkgs.stdenv.mkDerivation {
inherit (pfacts) name;
inherit src;
phases = "installPhase";
installPhase = ''
mkdir -p $out/static
cp -rf $src/templates $out/templates
cp -rf ${pfacts}/bin $out/bin
cp -rf ${gruvbox-css}/gruvbox.css $out/static/gruvbox.css
'';
}
```
And finally a simple docker image builder in `default.nix`:
```nix
{ system ? builtins.currentSystem }:
let
sources = import ./nix/sources.nix;
pkgs = import <nixpkgs> { };
printerfacts = pkgs.callPackage ./printerfacts.nix { };
name = "xena/printerfacts";
tag = "latest";
in pkgs.dockerTools.buildLayeredImage {
inherit name tag;
contents = [ printerfacts ];
config = {
Cmd = [ "${printerfacts}/bin/printerfacts" ];
Env = [ "RUST_LOG=info" ];
WorkingDir = "/";
};
}
```
This creates a docker image with only the printerfacts service in it and any
dependencies that are absolutely required for the service to function. Each
dependency is also split into its own docker layer so that it is much more
efficient on docker caches, which translates into faster start times on existing
servers. Here are the layers needed for the printerfacts service to function:
- [libunistring](https://www.gnu.org/software/libunistring/) - Unicode-safe
string manipulation library
- [libidn2](https://www.gnu.org/software/libidn/) - An internationalized domain
name decoder
- [glibc](https://www.gnu.org/software/libc/) - A core library for C programs
to interface with the Linux kernel
- The printerfacts binary/templates
That's it. It packs all of this into an image that is 13 megabytes when
compressed.
## Drone
Now that we have a way to make a docker image, let's look at how I use
[drone.io](https://drone.io) to build and push this image to the [Docker
Hub](https://hub.docker.com/repository/docker/xena/printerfacts/tags).
I have a drone manifest that looks like
[this](https://tulpa.dev/cadey/printerfacts/src/branch/master/.drone.yml):
```yaml
kind: pipeline
name: docker
steps:
- name: build docker image
image: "monacoremo/nix:2020-04-05-05f09348-circleci"
environment:
USER: root
commands:
- cachix use xe
- nix-build
- cp $(readlink result) /result/docker.tgz
volumes:
- name: image
path: /result
- name: push docker image
image: docker:dind
volumes:
- name: image
path: /result
- name: dockersock
path: /var/run/docker.sock
commands:
- docker load -i /result/docker.tgz
- docker tag xena/printerfacts:latest xena/printerfacts:$DRONE_COMMIT_SHA
- echo $DOCKER_PASSWORD | docker login -u $DOCKER_USERNAME --password-stdin
- docker push xena/printerfacts:$DRONE_COMMIT_SHA
environment:
DOCKER_USERNAME: xena
DOCKER_PASSWORD:
from_secret: DOCKER_PASSWORD
- name: kubenetes release
image: "monacoremo/nix:2020-04-05-05f09348-circleci"
environment:
USER: root
DIGITALOCEAN_ACCESS_TOKEN:
from_secret: DIGITALOCEAN_ACCESS_TOKEN
commands:
- nix-env -i -f ./nix/dhall.nix
- ./scripts/release.sh
volumes:
- name: image
temp: {}
- name: dockersock
host:
path: /var/run/docker.sock
```
This is a lot, so let's break it up into the individual parts.
### Configuration
Drone steps normally don't have access to a docker daemon, privileged mode or
host-mounted paths. I configured the
[cadey/printerfacts](https://drone.tulpa.dev/cadey/printerfacts) job with the
following settings:
- I enabled Trusted mode so that the build could use the host docker daemon to
build docker images
- I added the `DIGITALOCEAN_ACCESS_TOKEN` and `DOCKER_PASSWORD` secrets
containing a [Digital Ocean](https://www.digitalocean.com/) API token and a
Docker hub password
I then set up the `volumes` block to create a few things:
```
volumes:
- name: image
temp: {}
- name: dockersock
host:
path: /var/run/docker.sock
```
- A temporary folder to store the docker image after Nix builds it
- The docker daemon socket from the host
Now we can get to the building the docker image.
### Docker Image Build
I use [this docker image](https://hub.docker.com/r/monacoremo/nix) to build with
Nix on my Drone setup. As of the time of writing this post, the most recent tag
of this image is `monacoremo/nix:2020-04-05-05f09348-circleci`. This image has a
core setup of Nix and a few userspace tools so that it works in CI tooling. In
this step, I do a few things:
```yaml
name: build docker image
image: "monacoremo/nix:2020-04-05-05f09348-circleci"
environment:
USER: root
commands:
- cachix use xe
- nix-build
- cp $(readlink result) /result/docker.tgz
volumes:
- name: image
path: /result
```
I first activate my [cachix](https://xe.cachix.org) cache so that any pre-built
parts of this setup can be fetched from the cache instead of rebuilt from source
or fetched from [crates.io](https://crates.io). This makes the builds slightly
faster in my limited testing.
Then I build the docker image with `nix-build` (`nix-build` defaults to
`default.nix` when a filename is not specified, which is where the docker build
is defined in this case) and copy the resulting tarball to that shared temporary
folder I mentioned earlier. This lets me build the docker image _without needing
a docker daemon_ or any other special permissions on the host.
### Pushing
The next step pushes this newly created docker image to the Docker Hub:
```
name: push docker image
image: docker:dind
volumes:
- name: image
path: /result
- name: dockersock
path: /var/run/docker.sock
commands:
- docker load -i /result/docker.tgz
- docker tag xena/printerfacts:latest xena/printerfacts:$DRONE_COMMIT_SHA
- echo $DOCKER_PASSWORD | docker login -u $DOCKER_USERNAME --password-stdin
- docker push xena/printerfacts:$DRONE_COMMIT_SHA
environment:
DOCKER_USERNAME: xena
DOCKER_PASSWORD:
from_secret: DOCKER_PASSWORD
```
First it loads the docker image from that shared folder into the docker daemon
as `xena/printerfacts:latest`. This image is then tagged with the relevant git
commit using the magic
[`$DRONE_COMMIT_SHA`](https://docs.drone.io/pipeline/environment/reference/drone-commit-sha/)
variable that Drone defines for you.
In order to push docker images, you need to log into the Docker Hub. I log in
using this method in order to avoid the chance that the docker password will be
leaked to the build logs.
```
echo $DOCKER_PASSWORD | docker login -u $DOCKER_USERNAME --password-stdin
```
Then the image is pushed to the Docker hub and we can get onto the deployment
step.
### Deploying to Kubernetes
The deploy step does two small things. First, it installs
[dhall-yaml](https://github.com/dhall-lang/dhall-haskell/tree/master/dhall-yaml)
for generating the Kubernetes manifest (see
[here](https://christine.website/blog/dhall-kubernetes-2020-01-25)) and then
runs
[`scripts/release.sh`](https://tulpa.dev/cadey/printerfacts/src/branch/master/scripts/release.sh):
```
#!/usr/bin/env nix-shell
#! nix-shell -p doctl -p kubectl -i bash
doctl kubernetes cluster kubeconfig save kubermemes
dhall-to-yaml-ng < ./printerfacts.dhall | kubectl apply -n apps -f -
kubectl rollout status -n apps deployment/printerfacts
```
This uses the [nix-shell shebang
support](http://iam.travishartwell.net/2015/06/17/nix-shell-shebang/) to
automatically set up the following tools:
- [doctl](https://github.com/digitalocean/doctl) to log into kubernetes
- [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) to actually
deploy the site
Then it logs into kubernetes (my cluster is real-life unironically named
kubermemes), applies the generated manifest (which looks something like
[this](http://sprunge.us/zsO4os)) and makes sure the deployment rolls out
successfully.
This will have the kubernetes cluster automatically roll out new versions of the
service and maintain at least two active replicas of the service. This will make
sure that your users can always have access to high-quality printer facts, even
if one or more of the kubernetes nodes go down.
---
And that is how I continuously deploy things on my Gitea server to Kubernetes
using Drone, Dhall and Nix.
If you want to integrate the printer facts service into your application, use
the `/fact` route on it:
```console
$ curl https://printerfacts.cetacean.club/fact
A printer has a total of 24 whiskers, 4 rows of whiskers on each side. The upper
two rows can move independently of the bottom two rows.
```
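If you would rather call it from Rust, a minimal client sketch looks something like this. It uses reqwest's blocking API (the `blocking` feature flag is required); the function name is mine, not part of the service.
```rust
// Hedged sketch of a tiny client for the printer facts API.
// Requires reqwest with the "blocking" feature enabled.
fn printer_fact() -> Result<String, reqwest::Error> {
    reqwest::blocking::get("https://printerfacts.cetacean.club/fact")?.text()
}

fn main() {
    match printer_fact() {
        Ok(fact) => println!("{}", fact),
        Err(err) => eprintln!("could not fetch a fact: {}", err),
    }
}
```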
There is currently no rate limit to this API. Please do not make me have to
create one.

View File

@ -1,147 +0,0 @@
---
title: Emoji is not a Language
date: 2021-07-14
tags:
- linguistics
- philosophy
---
What is a language? This is something that is surprisingly controversial.
There are some easy ways to tell when something is a language (one of them being
that it has an army), but what about things like emoji? Is emoji a language?
In this article I will attempt to argue that emoji is not a language unto
itself.
At a high level, language is a tool that we use to represent
spatial/temporal/conceptual relations between objects/ideas/things, statements
about reality, and other things of that nature. Many languages are broken
into units of meaning that we call words. Here are some example words:
- the
- taco
- is
- beautiful
We can break these words into two basic classes like this:
| Content | Grammar |
| :------- | :------- |
| taco | the |
| beautiful | is |
It's worth noting that not all verbs fall into the "grammar" category. Things
like "eat" would fall into a content word, however "is" is a special case
because it is directly drawing a relation between two things. In the sentence
"The taco is beautiful", there is a relation being made from one specific taco
and the abstract concept of beauty.
I want to argue that emoji has plenty of content words, but no grammar words. If
we wanted to assemble an analog to "The taco is beautiful" in emoji, we could
make 1:1 correlations between English words and emoji like this:
| English | Emoji |
|:------- |:----- |
| the | |
| taco | 🌮 |
| is | |
| beautiful | 🎀 |
I dug through the entire emoji chart and was unable to find things that could be
used for "the" and "is". Heck, even the word I used for "beautiful" was a
stretch because the ribbon emoji isn't normally used that way. Is a language
defined by words that have inherent meaning or is that meaning arbitrarily
assigned by its users? Can I just fling out words like "xnoypt" as in "realizing
how the word would be pronounced, Tom [xnoypted](https://youtu.be/aMgCBYgVwsI)
out of existence"? Does that mean "xnoypt" is a word?
The closest I was able to get to "the" and "is" would be metaphors that would
fall apart when you want to discuss the actual things involved. Let's say that
you assign arbitrary emoji at least to "is" so that you can end up with this
sentence in emoji:
🌮➡️🎀
What if you want to talk about the concept of right though? Say you want to
convey that the taco store is to the right of the office building. You'd need to
say something like:
🌮🏪➡️➡️🏢
And this could be easily confused with the interpretation "taco store right
right office building".
But how do you know that it's a taco store? That's just a convention English
follows where the thing being described is the right-most thing and other things
on the left are just qualifiers or determiners for whatever is being described. It's
a "taco store", not a "store taco". Other languages like French do have this
reversed, so it could easily become a source of confusion.
So what if you ripped out the grammar entirely? What if you just had something
that was pure content? Could utterances like "🌮🏪➡️🏢" function in place of
something that breaks apart the words into groups? How would people know the
difference between that being a giant list of descriptors on top of a taco or an
office building?
How would you express verbs like "to eat"? Emojipedia says that 🍴 is used to
signify eating, but what about cultures that don't use cutlery to eat with?
Would this really be global enough to work in places like China? Cultural
cross-contamination would likely be enough at this point that most people could
get the message, but is this really representing the idea of eating or the idea
of something that you can use to eat other things? Would using this mean that
you could express what you ate with emoji? What would make it more of a concept
of eating than "to eat", "mangxi" (Esperanto), "manger" (French), or "citka"
(Lojban)?
If language is a tool that we can use to describe relations, then we can sorta
get them across with emoji by piggy-backing on top of the grammar of other
languages. You can derive new words like "taco store" with phrases like "🌮🏪".
You can use these to create meaning, I guess, but it wouldn't be very precise.
You could get across the most common words and cultural ideas, but not much
else.
Certainly not technical things where detail is important. Where is that taco
store in relation to the office building? Is it 5 meters to the right of it or
500 meters? What color is the office building? What name does it have? What is
the name of the road? What is the name of the taco store?
What can you really convey with emoji that isn't also conveyed with words?
You can create new words easily with some chat platforms and how they use emoji
though. You can either describe "nonbinary people" as "🚫🔢01🧍" or you can just
upload an image of the [nonbinary pride
flag](https://gender.wikia.org/wiki/Pride_Flags#Nonbinary_Flag) to use as a
direct descriptor of the concept instead. In a way emoji gives you a level of
freedom of expression that simple words can't. The word "xnoypt" makes sense to
people that know the word, but the picture has a greater chance of being closer
to understood on its own. Here is an emoji that my coworkers use as a loving
description:
<center>
![](https://cdn.christine.website/file/christine-static/blog/friday_deploy.png)
</center>
This one is called `friday_deploy` and is used as the avatar of our deployment
bot as well as a way to describe the abstract horror of deploying software on a
Friday. By being an emoji it can represent something more than just the
pictograph that it is.
These all certainly encode meaning on their own, but meaning on its own doesn't
make a language. Emoji certainly could become a language, but it would need a
lot of work to become one. Even then it would likely fall into the same
failings that International Auxiliary Languages have fallen into. It is
easier to type emoji than it is to type things like Esperanto's "ĉ", but it's
going to inherently encode assumptions in the creator's first language.
Emoji is not a language, it's used to augment existing languages.
> If you want to claim that emoji is a language, you should be able to make that
> same claim using emoji. Not an ad hoc cypher of the english sentence; just use
> emoji the way people commonly use them, which you're saying counts as a
> language, to say "Emoji is a language".
- allthingslinguistic
I'd be willing to be proven wrong if you can write "Emoji is a language"
unambiguously using emoji without it being a baroque cipher of English.

View File

@ -1,210 +0,0 @@
---
title: Epilogue
date: 2021-05-26
tags:
- irc
series: freenode
---
The last caretaker's absence rippled throughout the halls. The darkness was all
that remained.
---
I used to run an IRC network named PonyChat. It was an IRC network aimed at
adult fans of My Little Pony: Friendship is Magic. Looking back, working on that
network was probably the biggest catalyst to my learning how to do system
administration to the level I am at today.
Lots of stuff goes wrong when you run an IRC network. PonyChat peaked at around
500 users on average, but that didn't stop things from being interesting. There
were several "groups" of people there, and a lot of roleplaying channels. As
things like Discord picked up more and more users, a lot of the roleplaying
channels were all that were left at the end. There were some people in the #geek
room that were near permanent fixtures. Talking with those people and
collaborating on various projects is how I learned the skills that I use daily
for remote work.
---
The darkness was confused. It didn't expect this to happen. The discussion halls
were so full of life before! There were so many people from as many backgrounds
talking about anything you could imagine!
But the people left. The darkness didn't totally see why this happened, but then
they walked the halls and saw some things around the empty rooms.
```
The official Arch Linux support channels have moved to libera.chat, good luck!
```
The previous moderators of the discussion forum had apparently left up signs
telling anyone who hadn't walked over with them to tell them where to go. The
darkness looked around and saw more and more of those signs.
_Without those signs, they won't know where to go! If we can remove all of those
signs then maybe the people will be active again!_
> This channel has moved to ##archlinux. The topic is in violation of freenode
> policy.
_Perfect_, the darkness thought to themselves. _They can't leave now, those
signs were telling them where to go!_
---
When things came to an end with PonyChat, I had a big choice to make. There are
two main ways for chat communities to die: fast and slow. The fast ways are
quicker, less painful for users and potentially harsh for people that didn't get
the memo in time. The slow way gets expensive and soul-draining.
I was the last caretaker left on PonyChat after the attrition rate affected the
staff as well as the users. I was the only person really active on the network
and a lot of it was held together with increasingly brittle lua scripts.
It was soul-crushing. PonyChat was close to my heart. Writing the bots that
ended up being the core of the anti-spam engine was one of my first coding
projects.
---
The darkness was disturbed from their laurels by one of their caretakers.
Apparently this angered the people who had left. The former community scribes
were furious. The last caretakers had never done such a thing. Notices to those
communities were always left intact. The mere _thought_ of doing such a thing
was _unthinkable_.
Yet it happened. The darkness realized that they messed up. Quickly, a change
was made. _It can't be against policy if there's a policy allowing it!
Historical precedent be damned, this is advertisement! They are promoting
another place instead of here! Here is perfectly good!_ They thought.
The darkness smiled its spiral smile and spread to take down more signs with a
golem purpose made to print off new signs.
> This channel has moved to ##botters. The topic is in violation of freenode
> policy.
The golem blindly continued churning out new signs. The silent masses left
behind watched in horror as they were forced out of their former haunts.
---
There's something kind of magical about writing an IRC chatbot. It's one of the
few kinds of things you can create that you create in public. Even if the source
code isn't shared you still need to test it somewhere. You build it in public.
Anti-spam bots are a similar kind of thing. Unfortunately they form a kind of
arms race. It's much easier to make new spam than it is to come up with patterns
for existing spam. Writing one is soul-crushing. You have to quickly develop a
kind of reputation system or you will immediately turn it into a way to ban your
own users. A lot of the more clever trolls tricked users into typing the phrases
that got them banned.
Then there was the doxxing and swatting.
---
The darkness walked through the halls and smiled. All those signs were gone.
They peered into a room to see what was happening. They saw nothing. There
weren't even the silent masses that had normally huddled around the backs of
rooms. Some of those people had sat there for years doing nothing but listening.
Nobody really knew if they were actually paying attention or not, some may not
even be alive anymore, but they were haunting those rooms either way.
The signs pointed people elsewhere. Those who had stayed in the background
didn't get the memo. They were stuck there. Just sitting there and watching. Not
really doing anything, just watching and listening.
---
If you run an IRC network of any appreciable scale, be prepared for these
eventualities:
Your real name, email address, facebook account link, twitter account link,
phone number, parents names, mailing address, physical address and sometimes
even tax identification numbers will be leaked to the public. You **MUST** use
a password manager and two-factor auth everywhere. Register your domains under a
past or fake address. That will prevent people from getting your mailing address
as easily.
I've been doxxed so many times that I have given up trying to keep my things
separate. A lot of the places you see me using different names started out as my
attempts to use separate handles in different places. I have kept them the same
for consistency but I have largely given up trying to keep them separate. It is
a lot of work, and I doubt I could go back to the hyper-private shtick even if
I wanted to at this point: I've been frontpaged on Orange Site and my blog gets
so much traffic that it's probably impossible in practice without abandoning my
handles and picking new ones.
Your staff will lose interest and abandon the project one day without telling
you. They may end up still being connected there, but just as an idle bouncer.
It's akin to a zombie laying in the background.
Call your local police non-emergency number and set up a standing order to call
you before they send in a SWAT team to your house. There are people that will
seriously call the cops and claim you're armed and dangerous to get a SWAT team
to ruin your life or potentially get you killed. This is not a joke. It's nearly
happened to me thrice. I got that call from the cops once. It is not a good
feeling.
You need to use something with a powerful and easy to use spambot or message
filtering built into the server itself. This will save your ass some day.
---
The former moderators of the rooms that were closed off came back with
pitchforks and torches. They were **pissed**. The rooms they had tended to for
years were suddenly stolen from them. Yes, they were abandoned, but the
precedent for doing such a thing had never really existed before. It was such a
tiny thing, but they had to go out of their way to make that golem. They had to
tell the golem what to do. They had to send out that golem.
Several groups were on the fence about what to do, but that golem made
the choice for them. Some groups even wanted _to stay at the same meeting house_
but the golem came in and closed their hall without warning.
---
The day I killed PonyChat was a hard day for me. I had planned it 3 months ago.
Warnings were issued. I helped bigger communities move elsewhere. Everything was
spinning down.
Then the time came and I ran the script that only needed to be run once:
```
$ ./scripts/kill_ponychat.sh
```
A progress bar appeared and with it all of what was created over the last decade
was destroyed. Backups were erased. Data was wiped. Servers were destroyed. DNS
records were altered. And finally it printed this:
```
It's okay to cry.
```
And that was the end of it.
---
If the halls were empty before, they were desolate now. Everything was being
abandoned in real time. Announcements were made about how the golem was
premature and that people should really consider staying. It was no use. The
golem had made up people's minds for them.
The rot started.
---
Author's Note: I really hope this is the last entry in this little speculative
fiction/postmortem/retrospective series. I have an article in the pipeline on
how I'm creating virtual machines from templates so that I can test how various
versions of various distros work, but this freenode bullshit has eaten up a lot
of my thinking time. It's been like watching a train wreck: it's so hard to
look at, but you can't look away either.
It hurts.
This was not on my bingo card for 2021.

View File

@ -1,192 +0,0 @@
---
title: "Things I'm Excited for in 2022"
date: 2021-12-28
author: ectamorphic
---
2021 has been...a thing. However there are a lot of things that I am looking
forward to in 2022 that I'm gonna summarize in a few categories.
## VR
2021 has been a really dry year for VR tech. The chip shortage has really hurt
the budding VR industry, but there are some things that have been announced that
I am looking forward to.
[VR has really been one of the main things that's been keeping me sane over the
eternal plague. It's become one of my main activities and I am looking forward
to the improvements that come with new generations of VR
tech.](conversation://Cadey/enby)
### Simula One
[Simula](https://simulavr.com/) is trying to create a VR headset that can
function as a monitor replacement. This is something I am _really_ excited for.
The Simula One has a lot of potential to change how people use computers.
Right now a lot of the "working in VR" kinds of projects try to recreate the
_experience_ of using a desktop PC in VR. This makes sense: the existing
paradigm is already there and people are already familiar with it. However there
is also a lot of room for ditching the constraints of the past and really
experimenting with what computing can be if you aren't constrained by monitors.
The Simula One headset is going to have [one of the highest pixels-per-degree on
the market for consumer VR](https://simulavr.com/blog/vr-comparison/). It's
going to be running NixOS and [Monado](https://monado.freedesktop.org/).
I don't really know if it is going to take off or if the result will be any
good; however, it's very important that people try these kinds of things. Who
knows what will catch on? They are going to open up a Kickstarter in January and
I plan to be one of the early high-tier backers.
### Valve "deckard"
Valve has been working on the Steam Deck, but there's also a section of Valve
that has been hard at work on a VR headset codenamed
["deckard"](https://www.theverge.com/2021/9/29/22699914/valve-deckard-standalone-vr-headset-prototype-development).
The rumor mill says it will have an option to run as a standalone headset as
well as a traditional PCVR headset. Other digging has turned up a wifi
chip (a specific Realtek wifi6 one) that would allow for streaming VR gameplay
over a network, and there are references to using
[xrdesktop](https://www.collabora.com/news-and-blog/news-and-events/moving-the-linux-desktop-to-another-reality.html)
as a way to run "flat" applications in a VR space. There are also rumors that it
will support "inside-out" tracking, which is also essential for being a portable
headset.
I would absolutely love to have such a thing. I travel occasionally for work and
I would really love to have a quality PCVR experience on the go. It would let me
be able to not skip my workout days when I'm in a hotel room.
The VR space on Linux is slightly stagnated at the moment due to most of the
investment either being in SteamVR on Windows or on Oculus' fork of Android.
This does make sense though, there is a lot of money to be made in the VR space.
Hopefully Valve can improve the state of VR on Linux with the "deckard".
## Video Games
2021 has had some banger releases. Halo Infinite finally dropped. Final Fantasy
7 Remake came to PC. [Metroid
Dread](https://christine.website/blog/metroid-dread-review-2021-10-10) finally
came out after being rumored for more than half of my lifetime. Forza Horizon 5
raced out into the hearts of millions. Overall, it was a pretty good year to be
a gamer.
[I was going to do a writeup of my thoughts on Halo Infinite, however I really
want to reserve that for when I can do it co-op. From what I've heard of the
development of Infinite there were many points where they didn't think that the
game would ship at all. It came out though. It's decent, movement is really fun,
but overall I don't feel it's going to be genre-defining in the ways they would
have hoped.](conversation://Cadey/coffee)
However 2022 looks like we're going to finally start returning to "normal" with
development schedules. Here's what I'm the most excited for:
### Steam Deck
The [Steam Deck](https://www.steamdeck.com/en/) is a handheld PC that will let
you play games. It's also likely going to be my first "next gen" console, as
despite my repeated attempts I have been unable to get an Xbox Series console or
a PS5.
[Is the Steam Deck really a console? It's much more like a PC, but it's not as
much of a console as the Oculus Quest 2 is.](conversation://Mara/hmm)
[Let's just call it a console for the sake of
argument.](conversation://Cadey/coffee)
I'm excited for this on multiple levels. Valve is going to make it run SteamOS 3
by default. SteamOS is based on Arch Linux and they have been making improvement
after improvement to the Linux kernel and all of the software they are going to
use on it. Valve hopes to have most games on Steam playable like they are on a
PC running Windows.
It's also a bit of a chunky boi. It's gonna be a bit hefty, but I'm excited for
what it is going to bring to the ecosystem. It may lead to the year of Linux on
the desktop!
[Isn't that every year though?](conversation://Mara/hmm)
### Splatoon 3
Splatoon is one of my favorite game series. It's a game where you have a bunch
of ink guns and your goal is to cover as much turf as possible. The game feels
like Quake in the best ways. You use your sticks to lead and the gyroscope to
refine your aim, just like you would with a Steam Controller.
[I'm still sad that the Steam Controller failed in the
market.](conversation://Cadey/coffee)
Splatoon 3 looks like it will build on the fundamentals in great ways. I can't
wait to see what they have in store!
### The Sequel to The Legend of Zelda: Breath of The Wild
I have over 400 hours logged in Breath of The Wild. It is in the running for my
favorite game of all time. The really innovative part of Breath of The Wild was
the fact that it was a jRPG without a leveling mechanic, experience points or a
hard-defined story path. You are dumped into the world, given tools to explore
through it and then given free rein to do whatever you want. It is a massive
open world (probably one of the biggest in a Nintendo game save maybe Xenoblade
Chronicles X) and you are given the agency you need to play the game how you
want. You can run straight to Ganon and finish the game in less than an hour.
You can spend hundreds of hours exploring the world to solve all the puzzle
shrines.
Most of all though, the really great part about Breath of The Wild is the fact
that as you play, your skill as a player improves. You learn to chain together
mechanics to do basically whatever you want and the game doesn't stop you. You
can do _whatever you want_ and the real progression system is your skill.
[This sounds weird, but tying progression to player skill is incredibly
anti-meta as far as these games go. Usually jRPGs are very tailored experiences
where you go through the standard "fetch bread -> kill god" kind of power skill
tree, but with Breath of The Wild you can "kill god" right out of the gate and
"fetch bread" later.](conversation://Cadey/enby)
I have no idea what to expect for the sequel to Breath of The Wild, but I'm
excited as heck to see what they come up with.
[This is going to have an article written about it once I've gotten a chance to
play through it. This may take a week to a month. It's gonna be worth the
wait.](conversation://Cadey/enby)
## My Projects
I don't really like having too many project plans ahead of time, but here are
the biggest things I'm already committing myself to do next year. Something
something forward looking statements something something.
### Daily Workout Streams
I love rhythm games. I haven't really had good rhythm games to play until I got
back into VR. I've been playing a lot of [Beat
Saber](https://store.steampowered.com/app/620980/Beat_Saber/) and more recently
[Synth Riders](https://synthridersvr.com/). I've been streaming my play on
[Twitch](https://twitch.tv/princessxen) and I have a playlist of the VODs on
[YouTube](https://www.youtube.com/playlist?list=PLJDDsMrk2tSH3nhLWqV8IZLgae1CNYurH).
I live a very sedentary lifestyle, even before COVID, and I want to start to get
back into shape. Doing it [dancing on the
internet](https://youtu.be/q3F06mKP2uk?list=PLJDDsMrk2tSH3nhLWqV8IZLgae1CNYurH&t=4342)
is fun, so why not?
### Spellblade
I have a bunch of unreleased material for Spellblade that I need to go in and
edit up and post.
### V-Tubing
I got a webcam for Christmas that I plan to start using for V-Tubing soon. I want
to use it for recording conference talks as well as for doing some non-VR gaming
streams.
I have been working on getting a V-Tuber model hacked up out of the one that
I've been using in VRChat, but having no idea what I am doing in Unity has
really not been helping.
---
Hope this was an interesting view into what I'm excited for. This may be my last
post for the year. Stay safe out there, things have been rough.

View File

@ -3,6 +3,8 @@ title: I Put Words on this Webpage so You Have to Listen to Me Now
date: 2018-11-30
---
# I Put Words on this Webpage so You Have to Listen to Me Now
Holy cow. I am angry at how people do thing with tool. People do thing with tool so badly. You shouldn't do thing with tool, you should do other thing, compare this:
I am using tool. I want to do thing. I flopnax the ropjar and then I get the result of doing thing (because it's convenient to flopnax the ropjar given the existing program structure).

View File

@ -3,11 +3,10 @@ title: "Farewell Email - Heroku"
date: 2019-03-08
for: Herokai
subject: May our paths cross again
tags:
- personal
- heroku
---
# Farewell Email - Heroku
## May our paths cross again
Hey all,
@ -18,7 +17,7 @@ The people I've worked with at Heroku have been catalytic to our success as a le
There is no doubt in my mind that you all will build fantastically useful and stable tools for Salesforce customers. Keep your eyes on what matters, let your heart guide your actions, and you all will continue to construct and refine the finest possible infrastructure that is possible. We may be limited as humans, but together in groups like this we can surpass these arbitrary differences and create things that really shine.
> As one being we repeat the words:
>
> Flow in compassion
> Release what is divine

View File

@ -3,9 +3,10 @@ title: Fear
date: 2018-07-24
thanks: CelestialBoon, no really this guy is amazing and doesn't get enough credit, I'm so grateful for him.
for: Twilight Sparkle
series: stories
---
# Fear
_I must not fear._
_Fear is the mind-killer._
_Fear is the little-death that brings total obliteration._

View File

@ -1,63 +0,0 @@
---
title: RSS/Atom Feeds Fixed and Announcing my Flight Journal
date: 2020-07-26
tags:
- gemini
---
I have released version 2.0.1 of this site's code. With it I have fixed the RSS
and Atom feed generation. For now I have had to sacrifice the post content being
in the feed, but I will bring it back as soon as possible.
Victory badges:
[![Valid Atom Feed](https://validator.w3.org/feed/images/valid-atom.png)](/blog.atom)
[![Valid RSS Feed](https://validator.w3.org/feed/images/valid-rss-rogers.png)](/blog.rss)
Thanks to [W3Schools](https://www.w3schools.com/XML/xml_rss.asp) for having a
minimal example of an RSS feed and [this Flickr
image](https://www.flickr.com/photos/sepblog/3652359502/) for expanding it so I
can have the post dates be included too.
## Flight Journal
I have created a [Gemini](https://gemini.circumlunar.space) protocol server at
[gemini://cetacean.club](gemini://cetacean.club). Gemini is an exploration of
the space between [Gopher](https://en.wikipedia.org/wiki/Gopher_%28protocol%29)
and HTTP. Right now my site doesn't have much on it, but I have added its feed
to [my feeds page](/feeds).
Please note that the content on this Gemini site is going to be of a much more
personal nature compared to the more professional kind of content I put on this
blog. Please keep this in mind before casting judgement or drawing any kind of
conclusions about me.
If you don't have a Gemini client installed, you can view the site content
[here](https://portal.mozz.us/gemini/cetacean.club/). I plan to make a HTTP
frontend to this site once I get [Maj](https://tulpa.dev/cadey/maj) up and
functional.
## Maj
I have created a Gemini client and server framework for Rust programs called
[Maj](https://tulpa.dev/cadey/maj). Right now it includes the following
features:
- Synchronous client
- Asynchronous server framework
- Gemini response parser
- `text/gemini` parser
Additionally, I have a few projects in progress for the Maj ecosystem:
- [majc](https://portal.mozz.us/gemini/cetacean.club/maj/majc.gmi) - an
interactive curses client for Gemini
- majd - An advanced reverse proxy and Lua handler daemon for people running
Gemini servers
- majsite - A simple example of the maj server framework in action
I will write more about this in the future when I have more than just this
little preview of what is to come implemented. However, here's a screenshot of
majc rendering my flight journal:
![majc preview image rendering cetacean.club](/static/img/majc_preview.png)

View File

@ -1,12 +1,11 @@
---
title: FFI-ing Go from Nim for Fun and Profit
date: 2015-12-20
series: howto
tags:
- go
- nim
---
FFI-ing Golang from Nim for Fun and Profit
==========================================
As a side effect of Go 1.5, the compiler and runtime recently gained the
ability to compile code and run it as FFI code running in a C namespace. This
means that you can take any Go function that expresses its types and the like

View File

@ -1,147 +0,0 @@
---
title: Final Chapter
date: 2021-05-20
tags:
- irc
series: freenode
---
The last caretaker looked at the last light lit in the empty halls. They looked
out across their home. It used to be filled with thousands of people. There were
discussions about every topic imaginable from people of as many backgrounds.
Projects were born. Relationships were forged. Friends were found. Lives were
irreparably changed for the better.
But the darkness came and soaked into the foundation. Some noticed it and became
alarmed. Some ignored it as an "inevitable" change. Some ran away, abandoning
their home of choice. Some stuck around.
Then the darkness took over and the larger discussion rooms sprang into action.
The meddlers between the halls suddenly became a network of people assisting the
other larger discussion rooms in moving elsewhere. Through their careful planning
and surgical execution, they managed to move all of the major discussions away
in hours. Migrations on this scale had never happened before. Normally there are
stragglers. Normally people try to stick it out and decry those that leave as
"reactionaries".
---
```
NickServ: User reg. : Dec 03 22:31:33 2007 (13y 24w 3d ago)
NickServ: Last addr : ~cadey@infoforcefeed/Xe
```
Freenode had been my home on the internet for over half of my life. All things
IRC must come to an end, but it felt like Freenode was eternal. The staff had
not always made decisions that I agreed with, but I have run IRC networks
before. I know how it is. Precedent can drown you.
It's just sad to see it end like this. The communities that I have joined there
have been _catalytic_ in my life. I have irreparably changed the life of others
on Freenode. I met people on Freenode that I talk with daily. I'm sure that I
wouldn't have the career that I have without Freenode.
But it's been taken over by a narcissistic Trumpian wannabe Korean royalty
bitcoin millionaire, and I just cannot support that.
---
Not this time though. The darkness was so deep into things that the very rooms
themselves became at risk. The floors were not known to be safe. The caretakers
had worked together to come up with a new plan and had built an exact replica of
the meeting halls elsewhere.
Through that network of meddlers, people were helped over. The new halls needed
adjusting to cope with the sudden load, however they took them. The caretakers
spun up their new discussion hall and built it stronger than before.
They, the last caretaker, were unable to control the darkness anymore. The
darkness was too deep. Their access had rotted away.
---
```
NickServ: Account Xe has 5 other nick(s) grouped to it, remove those first.
```
I joined Freenode initially in order to get help with Ubuntu in high school.
> (shadowh511) hello, i need some help configuring wine 0.9.50 to run
> microsoft digital image suite 10, i try to run the setup app, an i get two
> looping messages about MDAC, and on how the system needs to reboot before it
> can install, but the messages loop, how can i fix this?
> (Fluxd) !wine | shadowh511
> (ubotu) shadowh511: WINE is a compatibility layer for running Windows
> programs on GNU/Linux. See https://help.ubuntu.com/community/Wine for more
> information, and see !AppDB for application compatibility.
> (shadowh511) they won't help me there
I eventually figured it out. I think I just did it on Windows.
I ended up rejoining it and really sticking around once I got into My Little
Pony: Friendship is Magic. There was a channel called `#reddit-mlp` that I
haunted for years.
I met friends that I still talk with today. I wonder if anyone from there that I
haven't talked with in years is reading this now.
---
The soul of the halls was not the halls itself. It was not the caretakers. It
was the people. Once the bigger halls moved over, the people followed. Each
person leaving took that much more of the soul with them.
Drops became a stream became a torrent became a flood and in a day's time the
soul of the halls had left. The soul was gone and only the darkness remained.
The last caretaker looked at the room and snuffed out their light. They packed
up their backpack and closed the front door for the last time.
They cried as they walked to their new home.
---
```
(Xe) ungroup JohnMadden
NickServ: Nick JohnMadden has been removed from your account.
(Xe) ungroup Xena
NickServ: Nick Xena has been removed from your account.
(Xe) ungroup Cadance
NickServ: Nick Cadance has been removed from your account.
(Xe) ungroup Cadey
NickServ: Nick Cadey has been removed from your account.
(Xe) ungroup shadowh511
NickServ: Nick shadowh511 has been removed from your account.
(Xe) drop Xe ******
```
> NickServ: This is a friendly reminder that you are about to destroy the
> account Xe. To avoid accidental use of this command, this operation has to
> be confirmed. Please confirm by replying with /msg NickServ DROP Xe `******`
> `86033ae0:9e021b10`
---
The halls fell, but the people that made up the soul of those halls moved on.
Their home went away because their home wasn't a building. It was an idea. Ideas
persist in ways that buildings don't.
---
```
(Xe) DROP Xe ****** 86033ae0:9e021b10
NickServ: The account Xe has been dropped.
```
You can find me on [Liberachat](https://libera.chat/) in `#xeserv`. If you want
to chat about my blog articles, I welcome any readers to join there.
Be well.
```
irc: server freenode has been deleted
```

View File

@ -1,142 +0,0 @@
---
title: Footnote
date: 2021-06-15
tags:
- irc
series: freenode
---
- [Final Chapter](/blog/final-chapter-2021-05-20)
- [Epilogue](/blog/epilogue-2021-05-26)
---
Before the darkness was the darkness, the darkness was a child. This child found
themselves lost and without purpose. Life was scary. Things were changing
constantly, and they found themselves at a loss. One day they were walking about
the etherial network and stumbled across a meeting house.
The child looked inside and was confused. There were hundreds of rooms with
even more people inside. There were rooms on every topic. There was a shower of
culture and an outpouring of knowledge. Hanging out here would permanently
change the course of the child's life.
---
Horrified by the room takeover golem, the remaining regulars had fled their
former homes. This was not a home for legal reasons, but it was their social
home on the etherial network. Sadness had turned to rage had turned to
depression had turned to laughter. One of the former regulars was an apprentice
scryer, so as a lark they decided to set up some meeting rooms to scry their way
into rooms in the old meeting house. It was a one-way scry and all they could do
was watch.
---
Historically, IRC spam has been a unique form of art. Yes, I'm serious. There
have been legitimate works of art created in the desire to disrupt conversations
on IRC. It sounds absurd, but it's true. One unique quality of these artworks is
that in order to see them, they must be shared with others. At some level you
can't view this art alone, and that makes it beautiful.
Fighting IRC spam has turned into a full time job. There are hundreds of
different bot kinds and so many different ways to spam that fighting it is
difficult due to the server software being very simple. Historically IRC
developers have not wanted to add hooks so that people could run a bit of code
on each message as it was being processed. There were legitimate fears that
doing this would allow a malicious server admin to log every channel, not just
the ones they have joined. IRC was created at a time when all of the admins
knew each other, but they were part of different organizations, each with their
own rules and subtly different codes of conduct.
One of the best ways to fight IRC spam has been to wait until the spammer gets
bored and goes off to do something else. Users are not as understanding to this
method.
---
Someone had set up a golem-creating golem and aimed it at the meta-discussion
room of the former meeting house. It did its job dutifully and continued
marching on:
> (pissnet) come to pissnet for cold wet chats!
The people watching the scry had never seen this brand of disruption before. By
now the people watching had amassed to over a hundred and they were all bored
and eager for something new. Something new was here!
> (pissnet) come to pissnet for cold hard piss!
Over time, the shadowy group behind these golems became known as the urinators.
These urinators became something of heroes to the people who watched in horror as
the situation developed. The golems got discovered and ejected, and even earned
the ire of the anti-golem golem. The disguise was clever, the ejections were
swift, but the watchers laughed as the golems kept getting more and more
creative.
---
The darkness was dismayed. Everything was falling apart around them. The
maintainers of the maintenance golems had fled. The spellcrafters that empowered
them [had sworn to give no more
assistance](https://atheme.github.io/atheme-open-letter/). The halls themselves
were starting to show the rot that had built up over the last 20 years of them
existing.
The darkness pondered amongst themselves until they pulled back a memory from
the past. A memory from the child. The halls themselves had to be replaced!
---
The watchers looked on in horror. The scryer had given up hope and decided to
move on with their life. The urinators had succeeded in shutting down the things
that were fun to the watchers. The urinators won.
Some urinators created their own halls. It was an experiment in anarchy for
running these types of halls. It is astounding that it managed to stay as stable
as it did.
---
I have been completely unsure how I should broach the topic of pissnet in these
articles. If you're unfamiliar with IRC culture, you must think I'm making shit
up or something. It is _so_ out there that it's almost like an abstract art
gallery or something. But no, pissnet happened. It started as IRC spam and then
turned into this: [letspiss.net](http://letspiss.net/). I don't really think I
can suggest readers of this blog go there. It is some kind of weird anarchist
IRC hackerspace, but most of the users are ircops and can see your IP address.
Like, for people that are really deep into IRC culture, the whole pissnet
shitshow was so out there that they thought the people telling them about it
were making that shit up.
But it's real.
---
> We are moving past legacy freenode to a new fork. The new freenode is
> launched. You will slowly be disconnected and when you reconnect, you will be
> on the new freenode. We patiently await to welcome you in freedom's holdout -
> the freenode. If you're looking to connect now, you can already /server
> chat.freenode.net 6697 (ssl) or 6667 (plaintext). It's a new genesis for a new
> era. Thank you for using freenode, and Hello World, from the future. freenode
> is IRC. freenode is FOSS. When you connect, register your nickname and your
> channel and get started. It's a new world. We're so happy to welcome you and
> the millions of others.
The darkness smiled and replaced the halls where they were. The darkness hoped
that millions would follow.
They didn't come.
---
- [Freenode commits suicide, is no longer a serious IRC
network](https://www.devever.net/~hl/freenode_suicide)
- [the end of freenode](https://ariadne.space/2021/06/14/the-end-of-freenode/)
- [All Freenode Channels and Users
Gone](https://old.reddit.com/r/linux/comments/o0263h/all_freenode_channels_and_users_gone/)
- [Last remaining >1000 user community channel seized by freenode
staff](https://linux.chat/linux-on-freenode/)
Freenode is dead. The spirit lives on in [Libera.chat](https://libera.chat/).

View File

@ -1,9 +1,10 @@
---
title: A Formal Grammar of h
date: 2019-05-19
series: conlangs
---
# A Formal Grammar of `h`
## Introduction
`h` is a conlang project that I have been working off and on for years. It is infinitely simply teachable, trivial to master and can be used to represent the entire scope of all meaning in any facet of the word. All with a single character.

View File

@ -1,382 +0,0 @@
---
title: Fun with Redirection
date: 2021-09-22
author: Twi
tags:
- shell
- redirection
- osdev
---
When you're hacking in the shell or in a script, sometimes you want to change
how the output of a command is routed. Today I'm gonna cover common shell
redirection tips and tricks that I use every day at work and how it all works
under the hood.
Let's say you're trying to capture the output of a command to a file, such as
`uname -av`:
```console
$ uname -av
Linux shachi 5.13.15 #1-NixOS SMP Wed Sep 8 06:50:21 UTC 2021 x86_64 GNU/Linux
```
You could copy that to the clipboard and paste it into a file, but there is a
better way thanks to the `>` operator:
```console
$ uname -av > uname.txt
$ cat uname.txt
Linux shachi 5.13.15 #1-NixOS SMP Wed Sep 8 06:50:21 UTC 2021 x86_64 GNU/Linux
```
Let's say you want to run this on a few machines and put all of the output into
`uname.txt`. You could write a shell script loop like this, using the `>>`
operator to append to the file instead of overwriting it:
```sh
# make sure the file doesn't already exist
rm -f uname.txt
for host in shachi chrysalis kos-mos ontos pneuma
do
ssh $host -- uname -av >> uname.txt
done
```
Then `uname.txt` should look like this:
```
Linux shachi 5.13.15 #1-NixOS SMP Wed Sep 8 06:50:21 UTC 2021 x86_64 GNU/Linux
Linux chrysalis 5.10.63 #1-NixOS SMP Wed Sep 8 06:49:02 UTC 2021 x86_64 GNU/Linux
Linux kos-mos 5.10.45 #1-NixOS SMP Fri Jun 18 08:00:06 UTC 2021 x86_64 GNU/Linux
Linux ontos 5.10.52 #1-NixOS SMP Tue Jul 20 14:05:59 UTC 2021 x86_64 GNU/Linux
Linux pneuma 5.10.57 #1-NixOS SMP Sun Aug 8 07:05:24 UTC 2021 x86_64 GNU/Linux
```
Now let's say you want to extract all of the hostnames from that `uname.txt`.
The pattern of the file seems to specify that fields are separated by spaces and
the hostname seems to be the second space-separated field in each line. You can
use the `cut` command to select that small subset from each line, and you can
feed the `cut` command's standard input using the `<` operator:
```console
$ cut -d ' ' -f 2 < uname.txt
shachi
chrysalis
kos-mos
ontos
pneuma
```
[It's worth noting that a lot of these core CLI utilities are built on the idea
that they are _filters_, or things that take one infinite stream of text in on
one end and then return another stream of text out the other
end. This is done through a channel called "standard input/output", where
standard input refers to input to the command and standard output refers to the
output of the command.](conversation://Mara/hacker)
[That's a great metaphor, let's build onto it using the `|` (pipe)
operator. The pipe operator lets you pipe the standard output of one command to
the standard input of another.](conversation://Cadey/enby)
[You mentioned that you can pass files as input and output for commands, does
this mean that standard input and standard output are
files?](conversation://Mara/happy)
[Precisely! They are just files that are automatically open for every process.
Usually commands will output to standard out and some will also accept input via
standard in.](conversation://Cadey/enby)
[Doesn't that have some level of overhead though? Isn't it expensive to spin up
a whole heckin' `cat` process for that?](conversation://Mara/hmm)
[Not on any decent system made in the last 20 years. This may have some impact
on Windows (because they have core architectural mistakes that make processes
take up to 100 milliseconds to spin up), but this is about Unix/Linux. I think
these should work on Windows too if you use Cygwin, but if you're using WSL you
shouldn't have any real issues there](conversation://Cadey/coffee)
Let's say we want to rewrite that `cut` command above to use pipes. You could
write it like this:
```sh
cat uname.txt | cut -d ' ' -f 2
```
[The mnemonic we use for remembering the `cut` command is that fields are
separated by the `d`elimiter and you cut out the nth
`f`ield/s.](conversation://Mara/hacker)
This will get you the exact same output:
```console
$ cat uname.txt | cut -d ' ' -f 2
shachi
chrysalis
kos-mos
ontos
pneuma
```
Personally I prefer writing shell pipelines like that as it makes it a bit
easier to tack on more specific selectors or operations as you go along. For
example, if you wanted to sort them you could pipe the result to `sort`:
```console
$ cat uname.txt | cut -d ' ' -f 2 | sort
chrysalis
kos-mos
ontos
pneuma
shachi
```
This lets you gradually build up a shell pipeline as you drill down to the data
you want in the format you want.
[I wanted to save this compiler error to a file but it didn't work. I tried
doing this:](conversation://Mara/hmm)
```console
$ rustc foo.rs > foo.log
```
But the output printed to the screen instead of the file:
```console
$ rustc foo.rs > foo.log
error: expected one of `!` or `::`, found `main`
--> foo.rs:1:5
|
1 | fun main() {}
| ^^^^ expected one of `!` or `::`
error: aborting due to previous error
$ cat foo.log
$
```
This happens because there are actually _two_ output streams per program. There
is the standard out stream and there is also a standard error stream. The reason
that standard error exists is so that you can still see any errors that happen
even when you redirect standard out.
Sometimes standard out may not even be a stream of text; say you have a compressed
file you want to analyze and there's an issue with the decompression. If the
decompressor wrote its errors to the standard output stream, it could confuse or
corrupt your analysis.
However, we can redirect standard error in particular by modifying how we
redirect to the file:
```console
$ rustc foo.rs 2> foo.log
$ cat foo.log
error: expected one of `!` or `::`, found `main`
--> foo.rs:1:5
|
1 | fun main() {}
| ^^^^ expected one of `!` or `::`
error: aborting due to previous error
```
[Where did the `2` come from?](conversation://Mara/wat)
So I mentioned earlier that redirection modifies the standard input and output
of programs. This is not entirely true, but it was a convenient half-truth to
help build this part of the explanation.
For every process on a Unix-like system (such as Linux and macOS), the kernel
stores a list of active file-like objects. This includes real files on the
filesystem, pipes between processes, network sockets, and more. When a program
reads or writes a file, they tell the kernel which file they want to use by
giving it a number index into that list, starting at zero. Standard in/out/error
are just the conventional names for the first three open files in the list, like
this:
| File Descriptor | Purpose |
| :------ | :------- |
| 0 | Standard input |
| 1 | Standard output |
| 2 | Standard error |
Shell redirection simply changes which files are in that list of open files when
the program starts running.
That is why you use a `2` there, because you are telling the shell to change
file descriptor number `2` of the `rustc` process to point to the filesystem
file `foo.log`, which in turn makes the standard error of `rustc` get written to
that file for you.
In turn, this also means that `cat foo.txt > foo2.txt` is actually a shortcut
for saying `cat foo.txt 1> foo2.txt`, but the `1` can be omitted there because
standard out is usually the "default" output that most of these kinds of
pipelines care about.
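If you want to see that this really is all there is to it, here's a minimal
sketch of doing the same plumbing from Rust's standard library instead of the
shell. The file and program names are just for illustration, it's not code from
this site:

```rust
use std::fs::File;
use std::process::{Command, Stdio};

fn main() -> std::io::Result<()> {
    // Open (and truncate) foo.log, just like the shell does for `2> foo.log`.
    let log = File::create("foo.log")?;

    // Hand that open file to the child as its file descriptor 2 (standard
    // error). This is the same thing the shell sets up before running
    // `rustc foo.rs 2> foo.log`.
    let status = Command::new("rustc")
        .arg("foo.rs")
        .stderr(Stdio::from(log))
        .status()?;

    println!("rustc exited with {status}");
    Ok(())
}
```

The child never knows or cares that its standard error is a file instead of a
terminal; it just writes to file descriptor 2 like it always does.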
[How would I get both standard output and standard error in the same
file?](conversation://Mara/hmm)
The cool part about the `>` operator is that it doesn't just stop with output to
files on the disk; you can actually have one file descriptor get pointed to
another. Let's say you have a need for both standard out and standard error to
go to the same file. You can do this with a command like this:
```
$ rustc foo.rs > foo.log 2>&1
```
This tells the shell to point standard out to `foo.log`, and then standard
error to standard out (which is now `foo.log`). There's a footgun here though;
the order of the redirects matters. Consider the following:
```
$ rustc foo.rs 2>&1 > foo.log
error: expected one of `!` or `::`, found `main`
--> foo.rs:1:5
|
1 | fun main() {}
| ^^^^ expected one of `!` or `::`
error: aborting due to previous error
$ cat foo.log
$ # foo.log is empty, why???
```
We wanted to redirect stderr to `foo.log`, but that didn't happen. Why? Well,
the shell considers our redirects one at a time from left to right. When the
shell sees `2>&1`, it hasn't considered `> foo.log` yet, so standard out (`1`)
is still our terminal. It dutifully redirects stderr to the terminal, which is
where it was already going anyway. Then it sees `1 > foo.log`, so it redirects
standard out to `foo.log`. That's the end of it though. It doesn't
retroactively redirect standard error to match the new standard out, so our
errors get dumped to our terminal instead of the file.
Confusing right? Lucky for us, there's a short form that redirects both at the
same time, making this mistake impossible:
```
$ rustc foo.rs &> foo.log
```
This will put standard out and standard error to `foo.log` the same way that
`> foo.log 2>&1` will.
[Will that work in every shell?](conversation://Mara/hmm)
[It's a Bourne Again shell (`bash`) extension, but I've tested it in `zsh` and `fish`.
You can also do `&|` to pipe both standard out and standard error at the same
time in the same way you'd do `2>&1 | whatever`.](conversation://Cadey/enby)
You can also use this with `>>`:
```
$ rustc foo.rs &>> foo.log
$ cat foo.log
error: expected one of `!` or `::`, found `main`
--> foo.rs:1:5
|
1 | fun main() {}
| ^^^^ expected one of `!` or `::`
error: aborting due to previous error
error: expected one of `!` or `::`, found `main`
--> foo.rs:1:5
|
1 | fun main() {}
| ^^^^ expected one of `!` or `::`
error: aborting due to previous error
```
[How do I redirect standard in to a file?](conversation://Mara/hmm)
Well, you don't. Standard in is an input, so you can change where it comes
_from_, not where it goes.
But, maybe you want to make a copy of a program's input and send it somewhere
else. There is a way to do _that_ using a command called `tee`. `tee` copies
its standard input to standard output, but it also writes a second copy to a
file. For example:
```console
$ dmesg | tee dmesg.txt | grep 'msedge'
[ 70.585463] traps: msedge[4715] trap invalid opcode ip:5630ddcedc4c sp:7ffd41f67700 error:0 in msedge[5630d8fc2000+952d000]
[ 70.702544] traps: msedge[4745] trap invalid opcode ip:5630ddcedc4c sp:7ffd41f67700 error:0 in msedge[5630d8fc2000+952d000]
[ 70.806296] traps: msedge[4781] trap invalid opcode ip:5630ddcedc4c sp:7ffd41f67700 error:0 in msedge[5630d8fc2000+952d000]
[ 70.918095] traps: msedge[4889] trap invalid opcode ip:5630ddcedc4c sp:7ffd41f67700 error:0 in msedge[5630d8fc2000+952d000]
[ 71.031938] traps: msedge[4926] trap invalid opcode ip:5630ddcedc4c sp:7ffd41f67700 error:0 in msedge[5630d8fc2000+952d000]
[ 71.138974] traps: msedge[4935] trap invalid opcode ip:5630ddcedc4c sp:7ffd41f67700 error:0 in msedge[5630d8fc2000+952d000]
[ 1169.163603] traps: msedge[35719] trap invalid opcode ip:556a93951c4c sp:7ffc533f35c0 error:0 in msedge[556a8ec26000+952d000]
[ 1213.301722] traps: msedge[36054] trap invalid opcode ip:55a245960c4c sp:7ffe6d169b40 error:0 in msedge[55a240c35000+952d000]
[10963.234459] traps: msedge[104732] trap invalid opcode ip:55fdb864fc4c sp:7ffc996dfee0 error:0 in msedge[55fdb3924000+952d000]
```
This would put the output of the `dmesg` command (read from kernel logs) into
`dmesg.txt`, as well as sending it into the grep command. You might want to do
this when debugging long command pipelines to see exactly what is going into a
program that isn't doing what you expect.
Redirections work in scripts too. You can also set "default" redirects for
every command in a script using the `exec` command:
```sh
exec > out.log 2> error.log
ls
rustc foo.rs
```
This will have the file listing from `ls` written to `out.log` and any errors
from `rustc` written to `error.log`.
A lot of other shell tricks and fun is built on top of these fundamentals. For
example, you can take a folder, tar it up and then untar it over on another
machine using a command like this:
```
$ tar cz ./blog | ssh pneuma tar xz -C ~/code/christine.website/blog
```
This will run `tar` to create a compressed copy of the `./blog` folder and then
pipe that to tar on another computer to extract that into
`~/code/christine.website/blog`. It's just pipes and redirection all the way
down! Deep inside `ssh` it's really just piping output of commands back and
forth over an encrypted network socket. Connecting to an IRC server is just
piping in and out data to the chat server, even more so if you use TLS to
connect there. In a way you can model just about everything in Unix with pipes
and file descriptors because that is the cornerstone of its design: Everything
is a file.
[This doesn't mean it's literally a file on the disk, it means you can _interact
with_ just about everything using the same system interface as you do with
files. Even things like hard disks and video cards.](conversation://Mara/hacker)
Here's a fun thing to do. Using [`curl`](https://curl.se/) to read the contents
of a URL and [`jq`](https://stedolan.github.io/jq/) to select out bits from a
JSON stream, you can make a script that lets you read the most recent title from
my blog's [JSONFeed](/blog.json):
```sh
#!/usr/bin/env bash
# xeblog-post.sh
curl -s https://christine.website/blog.json | jq -r '.items[0] | "\(.title) \(.url)"'
```
At the time of writing this post, here is the output I get from this command:
```
$ ./xeblog-post.sh
Anbernic RG280M Review https://christine.website/blog/rg280m-review
```
What else could you do with pipes and redirection? The cloud's the limit!
---
Thanks to violet spark, cadence, and AstroSnail for looking over this post and
fact-checking as well as helping mend some of the brain dump and awkward
wording into more polished sentences.

View File

@ -1,65 +0,0 @@
---
title: "The Relaxing Surreality of VRChat Furry Conventions"
date: 2021-03-22
tags:
- vrchat
- fe21
---
Author's Note: you may want to view this post in a GUI browser for the best experience.
It is no secret that I am a furry. The main way that a lot of my friends and I meet up is at conventions. COVID has led to a year without cons for my friend groups. It's gotten bad enough that in one server the convention coordination channel had its name changed from `#conventions` to `#cancelled`. These conventions are expensive (flight/hotel/badge/the dealer's den), tiring and weirdly recharging all at once.
Last Thursday, I found out that there was an online furry convention happening this weekend in [VRChat](https://hello.vrchat.com): [Furnal Equinox](https://furnalequinox.com). The concept intrigued me. Obviously, it won't be the same (there's only so much VR can do), but I decided to try it out and pinged a few friends about it. We jacked in and loaded up VRChat to see what it was all about.
<center>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">A VRChat furry convention, in selfie essay format <a href="https://t.co/5PsjteS79I">pic.twitter.com/5PsjteS79I</a></p>&mdash; Cadey A. Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1373957443294744578?ref_src=twsrc%5Etfw">March 22, 2021</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
</center>
It was a blast. Furry conventions usually have this weird but wholesome vibe to them. There's this feeling of community as existing friend groups meet up and as these groups mix together, new friendships get formed as well.
When I registered for the convention, there was an option to donate to the convention organization itself and to [Hobbitstee Wildlife Refuge](http://www.hobbitstee.com). I kicked over some money and then hopped in the Discord to get the supporter badge prop to glue onto my avatar. After a few rounds of testing, being confused by Unity, having that golden moment of understanding and then actually getting it to do what I wanted it to do, I managed to get the badge to a place where I was happy with it (and where it wouldn't clip through my body when I sat down).
<center>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">Let&#39;s play with textures to see what we get <a href="https://t.co/1soAuNSllk">pic.twitter.com/1soAuNSllk</a></p>&mdash; Cadey A. Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1373245260449652740?ref_src=twsrc%5Etfw">March 20, 2021</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
</center>
[A bit of con culture info that may help here: usually at furry conventions people get a badge that they need to wear at all times on the con floor. This helps the staff know who should actually be at the convention or not. Many people in the online furry community do not have access to the source files that make up their avatar models and VRChat props don't persist between worlds, so it's not always possible for them to glue that badge onto their avatar model. The different tiers of badges are seen as signs of respect to some people (though we are not sure why). It's also common for people to put all of their previous con badges on the same lanyard or wear multiple lanyards for each con they've been to.](conversation://Mara/hacker)
I think this event was the catalyst for Unity really starting to actually make sense to me. My avatar model is pretty complicated as is (at least 79k polygons, 30 materials and several props that I can toggle on and off at will) and I think I now know what I need to do in order to simplify it a bit. I may not know how to actually do it yet, but I do know what I need to do.
<center>
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">Let&#39;s see how these shader changes work out: <a href="https://t.co/ggzzNbXZTs">pic.twitter.com/ggzzNbXZTs</a></p>&mdash; Cadey A. Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1373618625026920449?ref_src=twsrc%5Etfw">March 21, 2021</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
</center>
When I arrived in the con hotel world, one of the first things I noticed was how much it felt like an actual hotel. I entered the world pretty early on Friday, so at first things were fairly unpopulated (just like how it would feel if you got to the con a day early). Slowly people started filtering in and I just talked with people. The main hotel lobby was kinda janky; if you weren't careful you could teleport halfway across the map into a chair. However I had some good conversations where we talked about VR tech, some things that we'd each like to do at the convention and more.
Something that ends up happening at furry conventions is that you will talk to some people and then end up forgetting to get contact information for them. Then they just fade into the crowd at the end of the convention like tears in rain, and if you're lucky you may be able to see them next year. One of the main differences with a con in VRChat is that you're able to select that person and send a friend request to them in-game. Then you have a way to keep in touch.
<center>
<blockquote class="twitter-tweet"><p lang="und" dir="ltr"><a href="https://t.co/MfwT9McdXI">pic.twitter.com/MfwT9McdXI</a></p>&mdash; Cadey A. Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1373286284966424581?ref_src=twsrc%5Etfw">March 20, 2021</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
</center>
As the day went on some trolls got word that there was a furry convention happening in VRChat. There seems to be this weird underbelly of people that will go into VRChat worlds and intentionally ruin other people's fun by using avatars that spawn a bajillionty particles to crash the game. The convention staff reacted quickly though. Due to the fact that they weren't able to totally control who came into the convention (it being a free event in a free to download game without the best moderation tools means you kinda have to roll with the punches), they ended up creating a blocklist in a channel on the Discord. Thankfully VRChat offers a web panel for users to manage settings online, so I was able to block all of the crashers and continue having fun at the con.
One of the main staples of these conventions is the vendor's hall/dealer's den. The staff managed to make this adorable dealer's den map that had at least 60 stalls (one for each vendor). There were traditional artists, digital artists (including the person I had just gotten a sticker order from), VRChat avatar makers, a novel author that I had a lovely time talking with, and more. I didn't end up buying anything (though I may end up doing another sticker order), but it had that same _feeling_ that the vendor hall usually does. It would have been a lot more enjoyable if it wasn't so bright and laggy. In most of the con worlds I got around 70-72 frames per second, but in the vendor hall I regularly dipped down to 30-45. My VR setup over wifi insulates me from lag spikes (my headset is set to render at 80 FPS internally and the VR stream gets multiplexed onto it, so if I get a huge lagspike I don't actually feel it for a moment or two), but it got pretty bad.
Another staple of furry conventions is the room parties. These room parties usually have disturbing amounts of alcohol available and are a blast for everyone involved. They had a few room models to pick from including a penthouse suite with a little bar-like alcove. With a bunch of people in the same instance, I had a terrible idea and suggested everyone hop into the bathroom so we could take a pic. I ended up with this:
<center>
<blockquote class="twitter-tweet"><p lang="und" dir="ltr"><a href="https://t.co/QS1i9AYyOG">pic.twitter.com/QS1i9AYyOG</a></p>&mdash; Cadey A. Ratio 🌐 (@theprincessxena) <a href="https://twitter.com/theprincessxena/status/1373390724599808001?ref_src=twsrc%5Etfw">March 20, 2021</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
</center>
Something else my friends and I end up doing is a run to restaurants like Nandos or Taco Bell. Obviously this is very difficult to coordinate in VRChat (some of the people I was chilling with don't have taco bell in their home country), but there is actually a VRChat Taco Bell map! I plunked down a portal and everyone jumped in. We explored around the world and a few people played a confusing looking game that appeared to be some kind of recursive tic tac toe. Unsurprisingly, they ended up in a draw.
I'm definitely going to go to conventions in VRChat in the future, even after COVID abates and the borders re-open. There is just this unique feeling to VR conventions. Everyone is wearing avatars that allow them to express themselves in ways that are difficult or expensive in person. There are a number of people fursuiting at any given convention (though I'm not sure how they do it, it must be an oven in those suits), but at this convention _everyone_ was in fursuit. It was hard to look around on the con floor and see anything but an outpouring of creativity and passion for their characters. I later found out that this con was a lot of people's first furry convention. I can't think of a better introduction than this. The only thing it was missing was that one person that sits at the hotel piano and plays video game music all day.

View File

@ -1,273 +0,0 @@
---
title: "Gamebridge: Fitting Square Pegs into Round Holes since 2020"
date: 2020-05-09
series: howto
tags:
- witchcraft
- sm64
- twitch
---
Recently I did a stream called [Twitch Plays Super Mario 64][tpsm64]. During
that stream I both demonstrated and hacked on a tool I'm calling
[gamebridge][gamebridge]. Gamebridge is a tool that lets games interoperate
with programs they really shouldn't be able to interoperate with.
[tpsm64]: https://www.twitch.tv/videos/615780185
[gamebridge]: https://github.com/Xe/gamebridge
Gamebridge works by aggressively hooking into a game's input logic (through a
custom controller driver) and uses a pair of [Unix fifos][ufifo] to communicate
between it and the game it is controlling. Overall the flow of data between the
two programs looks like this:
[ufifo]: http://man7.org/linux/man-pages/man7/fifo.7.html
![A diagram explaining how control/state/data flows between components of the
gamebridge stack](/static/blog/gamebridge.png)
You can view the [source code of this diagram in GraphViz dot format
here](/static/blog/gamebridge.dot).
The main magic that keeps this glued together is the use of _blocking_ I/O.
This means that the bridge input thread will be blocked _at the kernel level_
for the vblank signal to be written, and the game will also be blocked at the
kernel level for the bridge input thread to write the desired input. This
effectively uses the Linux kernel to pass around a scheduling quantum like you
would in the L4 microkernel. This design consideration also means that
gamebridge has to perform _as fast as possible as much as possible_, because it
realistically only has a few hundred microseconds at best to respond with the
input data to avoid humans noticing any stutter. As such, gamebridge is written
in Rust.
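To make that handshake a bit more concrete, here is a minimal sketch of the
blocking loop in Rust. The fifo paths and the all-neutral input frame are made
up for illustration; the real gamebridge code does quite a bit more than this:

```rust
use std::fs::{File, OpenOptions};
use std::io::{Read, Write};

fn main() -> std::io::Result<()> {
    // Hypothetical fifo names: the game writes vblank markers into one and
    // reads its controller inputs from the other.
    let mut vblank = File::open("vblank.fifo")?;
    let mut input = OpenOptions::new().write(true).open("input.fifo")?;

    let mut buf = [0u8; 4];
    loop {
        // Blocks in the kernel until the game announces it hit vblank.
        vblank.read_exact(&mut buf)?;

        // Decide what this frame's inputs are and hand them back. The game is
        // itself blocked reading this fifo, so writing the frame effectively
        // passes the scheduling quantum back to it.
        let frame: [u8; 4] = [0, 0, 0, 0]; // all-neutral inputs
        input.write_all(&frame)?;
    }
}
```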
## Implementation
When implementing gamebridge, I had a few goals in mind:
- Use blocking I/O to have the kernel help with this
- Use threads to their fullest potential
- Unix fifos are great, let's use them
- Understand linear interpolation better
- Create a surreal demo on Twitch
- Only have one binary to start, the game itself
As a first step of implementing this, I went through the source code of the
Mario 64 PC port (but in theory this could also work for other emulators or even
Nintendo 64 emulators with enough work) and began to look for anything that
might be useful to understand how parts of the game work. I stumbled across
`src/pc/controller` and then found two gems that really stood out. I found the
interface for adding new input methods to the game and an example input method
that read from tool-assisted speedrun recordings. The controller input interface
itself is a thing of beauty, I've included a copy of it below:
```c
// controller_api.h
#ifndef CONTROLLER_API
#define CONTROLLER_API
#include <ultra64.h>
struct ControllerAPI {
void (*init)(void);
void (*read)(OSContPad *pad);
};
#endif
```
All you need to implement your own input method is an init function and a read
function. The init function is used to set things up and the read function is
called every frame to get inputs. The tool-assisted speedrunning input method
seemed to conform to the [Mupen64 demo file spec as described on
tasvideos.org][mupendemo], and I ended up using this to help test and verify
ideas.
[mupendemo]: http://tasvideos.org/EmulatorResources/Mupen/M64.html
The thing that struck me was how _simple_ the format was. Every frame of input
uses its own four-byte sequence. The constants in the demo file spec also helped
greatly as I figured out ways to bridge into the game from Rust. I ended up
creating two [bitflag][bitflag] structs to help with the button data, which
ended up almost being a 1:1 copy of the Mupen64 demo file spec:
[bitflag]: https://docs.rs/bitflags/1.2.1/bitflags/
```rust
bitflags! {
// 0x0100 Digital Pad Right
// 0x0200 Digital Pad Left
// 0x0400 Digital Pad Down
// 0x0800 Digital Pad Up
// 0x1000 Start
// 0x2000 Z
// 0x4000 B
// 0x8000 A
pub(crate) struct HiButtons: u8 {
const NONE = 0x00;
const DPAD_RIGHT = 0x01;
const DPAD_LEFT = 0x02;
const DPAD_DOWN = 0x04;
const DPAD_UP = 0x08;
const START = 0x10;
const Z_BUTTON = 0x20;
const B_BUTTON = 0x40;
const A_BUTTON = 0x80;
}
}
```
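As a quick illustration of how a flags struct like that gets used (this helper
is hypothetical, not from gamebridge itself), checking whether the A button is
held in whichever byte of the sample carries these bits looks something like:

```rust
// Hypothetical helper: `hi` stands for the byte of the four-byte input
// sample that holds the A/B/Z/Start/D-pad bits.
fn is_a_pressed(hi: u8) -> bool {
    HiButtons::from_bits_truncate(hi).contains(HiButtons::A_BUTTON)
}
```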
### Input
This is where things get interesting. One of the more interesting side effects
of getting inputs over chat for a game like Mario 64 is that you need to [hold
buttons or even the analog stick][apress] in order to do things like jumping
into paintings or on ledges. When you get inputs over chat, you only have them
for one frame. Therefore you need some kind of analog input (or an emulation of
that) that decays over time. One approach you can use for this is [linear
interpolation][lerp] (or lerp).
[apress]: https://youtu.be/kpk2tdsPh0A?list=PLmBeAOWc3Gf7IHDihv-QSzS8Y_361b_YO&t=13
[lerp]: https://www.gamedev.net/tutorials/programming/general-and-gameplay-programming/a-brief-introduction-to-lerp-r4954/
I implemented support for both button and analog stick lerping using a struct I
call a [Lerper][lerper] (the file it is in is named `au.rs` because [.au.][au] is
the lojban emotion-particle for "to desire"; the name was inspired by the struct
seeming to fake what the desired inputs were).
[lerper]: https://github.com/Xe/gamebridge/blob/b2e7ba21aa14b556e34d7a99dd02e22f9a1365aa/src/au.rs
[au]: http://jbovlaste.lojban.org/dict/au
At its core, a Lerper stores a few basic things:
- the current scalar of where the analog input is resting
- the frame number when the analog input was set to the max (or
above)
- the maximum number of frames that the lerp should run for
- the goal (or where the end of the linear interpolation is, for most cases in
this codebase the goal is 0, or neutral)
- the maximum possible output to return on `apply()`
- the minimum possible output to return on `apply()`
Every frame, the lerpers for every single input to the game step their values
back down closer to zero. Mario 64 uses two signed bytes to represent the controller
input. The maximum/minimum clamps make sure that the lerped result stays in that
range.
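Here's a minimal sketch of what a struct holding those fields could look like.
The field and method names are my own guesses for illustration; the real
implementation lives in `au.rs` linked above:

```rust
// A sketch of the Lerper described above, with guessed names.
pub struct Lerper {
    scalar: i64,     // where the input is currently resting
    set_frame: u64,  // frame number when the input was last pushed to the max
    max_frames: u64, // how many frames the lerp should run for
    goal: i64,       // where the interpolation ends up (usually 0, neutral)
    max: i64,        // upper clamp for whatever apply() returns
    min: i64,        // lower clamp for whatever apply() returns
}

impl Lerper {
    // Called once per frame: linearly interpolate from the stored value
    // toward the goal, clamped to the signed-byte range Mario 64 expects.
    pub fn apply(&self, frame: u64) -> i64 {
        let elapsed = frame.saturating_sub(self.set_frame) as f64;
        let t = (elapsed / self.max_frames as f64).min(1.0);
        let lerped = self.scalar as f64 + (self.goal - self.scalar) as f64 * t;
        (lerped.round() as i64).clamp(self.min, self.max)
    }
}

fn main() {
    let stick_x = Lerper {
        scalar: 127, // chat just said "right"
        set_frame: 0,
        max_frames: 270,
        goal: 0,
        max: 127,
        min: -128,
    };

    // As frames go by, the output drifts back toward neutral.
    for frame in [0u64, 90, 180, 270] {
        println!("frame {}: {}", frame, stick_x.apply(frame));
    }
}
```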
### Twitch Integration
This is one of the first times I have ever used asynchronous Rust in conjunction
with synchronous Rust. I was shocked at how easy it was to just spin up another
thread and have that thread take care of the Tokio runtime, leaving the main
thread to focus on input. This is the block of code that handles [running the
asynchronous twitch bot in parallel to the main thread][twitchrs]:
[twitchrs]: https://github.com/Xe/gamebridge/blob/b2e7ba21aa14b556e34d7a99dd02e22f9a1365aa/src/twitch.rs#L12
```rust
pub(crate) fn run(st: MTState) {
    use tokio::runtime::Runtime;

    Runtime::new()
        .expect("Failed to create Tokio runtime")
        .block_on(handle(st));
}
```
Then the rest of the Twitch integration is boilerplate until we get to the
command parser. At its core, it just splits each chat line up into words and
looks for keywords:
```rust
let chatline = msg.data.to_string();
let chatline = chatline.to_ascii_lowercase();
let mut data = st.write().unwrap();
const BUTTON_ADD_AMT: i64 = 64;

for cmd in chatline.to_string().split(" ").collect::<Vec<&str>>().iter() {
    match *cmd {
        "a" => data.a_button.add(BUTTON_ADD_AMT),
        "b" => data.b_button.add(BUTTON_ADD_AMT),
        "z" => data.z_button.add(BUTTON_ADD_AMT),
        "r" => data.r_button.add(BUTTON_ADD_AMT),
        "cup" => data.c_up.add(BUTTON_ADD_AMT),
        "cdown" => data.c_down.add(BUTTON_ADD_AMT),
        "cleft" => data.c_left.add(BUTTON_ADD_AMT),
        "cright" => data.c_right.add(BUTTON_ADD_AMT),
        "start" => data.start.add(BUTTON_ADD_AMT),
        "up" => data.sticky.add(127),
        "down" => data.sticky.add(-128),
        "left" => data.stickx.add(-128),
        "right" => data.stickx.add(127),
        "stop" => { data.stickx.update(0); data.sticky.update(0); },
        _ => {},
    }
}
```
This implements the following commands:
| Command | Meaning |
|----------|----------------------------------|
| `a` | Press the A button |
| `b` | Press the B button |
| `z` | Press the Z button |
| `r` | Press the R button |
| `cup` | Press the C-up button |
| `cdown` | Press the C-down button |
| `cleft` | Press the C-left button |
| `cright` | Press the C-right button |
| `start` | Press the start button |
| `up` | Press up on the analog stick |
| `down` | Press down on the analog stick |
| `left`   | Press left on the analog stick   |
| `right`  | Press right on the analog stick  |
| `stop` | Reset the analog stick to center |
Currently analog stick inputs will stick for about 270 frames and button inputs
will stick for about 20 frames before drifting back to neutral. The start button
is special: inputs to the start button will stick for 5 frames at most.
### Debugging
Debugging two programs running together is surprisingly hard. I had to resort to
the tried-and-true method of using `gdb` for the main game code and excessive
amounts of printf debugging in Rust. The [pretty\_env\_logger][pel] crate (which
internally uses the [env_logger][el] crate, so env_logger's `RUST_LOG`
environment variable also configures pretty\_env\_logger) helped a lot. One of
the biggest problems I encountered while developing gamebridge was fixed by this
patch, which I will paste inline:
[pel]: https://docs.rs/pretty_env_logger/0.4.0/pretty_env_logger/
[el]: https://docs.rs/env_logger/0.7.1/env_logger/
```diff
diff --git a/gamebridge/src/main.rs b/gamebridge/src/main.rs
index 426cd3e..6bc3f59 100644
@@ -93,7 +93,7 @@ fn main() -> Result<()> {
},
};
- sticky = match stickx {
+ sticky = match sticky {
0 => sticky,
127 => {
ymax_frame = data.frame;
```
Somehow I had been trying to adjust the y axis position of the stick by
comparing the x axis position of the stick. Finding and fixing this bug is what
made me write the Lerper type.
---
Altogether, this has been a very fun project. I've learned a lot about 3D game
design, historical source code analysis and inter-process communication, as well
as about asynchronous Rust and how it can work together with synchronous Rust. I
also got to make a fairly surreal demo for Twitch. I hope this can be useful to
others, even if it just serves as an example of how to integrate strange things
into other strange things from unixy first principles.
You can find out slightly more about [gamebridge][gamebridge] on its GitHub
page. Its repo also includes patches for the Mario 64 PC port source code,
including one that disables Mario's ability to lose lives. This could prove
useful for Twitch Plays attempts, as the default cap of 5 lives became rather
limiting in testing.
Be well.

View File

@ -1,67 +0,0 @@
---
title: The Gears and The Gods
date: 2019-11-14
tags:
- wasm
- philosophy
- gods
---
If there are any gods in computing, they are the authors of compilers. The
output of compilers is treated as a Heavenly Decree, sometimes used for many
sprints or even years after the output has been last emitted.
People trust this output to be Correct. To tell the machine what to do and by
its will it be done. The compiler is itself a factory of servitors, each bound
by the unholy runes inscribed into it in order to make the endless sequence of
lights change colors in the right patterns.
The output of the work of the Gods is stored for later use when their might is
needed. The work of the Gods however is a very fickle beast. Their words of
power only make the gears turn when they are built with very specific gearing.
This means that people who rely on these sacred runes have to chain themselves
to gearing patterns. Each year new ways of tricking the gears to run faster are
developed. The ways the gears turn can be learned to be abused however to spill
the secrets other gears are crunching on. These gearing patterns haven't seen
any real fundamental design changes in decades, because you never know when the
output of the Old Gods is needed.
This means that the gears themselves are the chains that bind people to the
past. The gears of computation. The gears made of sand we tricked into thinking
with lightning.
But now the gears show their age. The gearing on the side of the gearing on the
side of the gearing on the side of the gearing shows its ugly head.
But the Masses never question it. Even though they take hit after hit to
performance of the gears.
What there needs to be is some kind of Apocalypse, a revealing of the faults in
the gears. Maybe then the Masses will start to question their blind loyalty and
chains binding them to the gears. Maybe they would be able to even try other
gear patterns.
But this is just fantasy, nobody would WILLINGLY change the gearing patterns.
Would they?
But what about the experience they've come to expect from their old gears? Where
they could swap out inputs to the gears with ease. Where the Output of the Gods
of old still functions.
There needs to be a Better Way to switch gearings. But this kind of solution
isn't conducive to how people use the gears. People use the gears they do
because they don't care. They just want things to work “like they expect it to”
and ignore things that don't feed this addiction.
And THIS is why I'm such a big advocate for WebAssembly on the server. This lets
you take the output of the Gods and store it in a way that it can be
transparently upgraded to new sets of gearing. So that the future and the past
can work in unison instead of being enemies.
Now, all that's left is to build a bridge. A bridge that will help to unite the
past, the present and the future into a woven masterpiece of collaborative
cocreation. Where the output of the gods is a weaker chain to the gears of old
and can easily be adapted to the gears of new. Even the gears that nobody's even
dreamed of yet.

View File

@ -1,106 +0,0 @@
---
title: The Fear Of Missing Out
date: 2020-08-02
tags:
- culture
- web
---
Humans have evolved over thousands of years with communities that are small,
tight-knit and where it is easy to feel like you know everyone in them. The
Internet changes this completely. With the Internet, it's easy to send messages,
write articles and even publish books that untold thousands of people can read
and interact with. This has led to an instinctive fear in humanity I'm going to
call the Fear of Missing Out [1].
[[1]: The Fear of Missing Out](https://en.wikipedia.org/wiki/Fear_of_missing_out)
The Internet in its current form capitalizes and makes billions off of this.
Infinite scrolling and live updating pages that make it feel like there's always
something new to read. Uncountable hours of engineering and psychological
testing spent making sure people click and scroll and click and consume all day
until that little hit of dopamine becomes its own addiction. We have taken a
system for displaying documents and accidentally turned it into a hulking
abomination that consumes the souls of all who get trapped in it, crystallizing
them in an endless cycle of checking notifications, looking for new posts on
your newsfeed, scrolling down to find just that something you think you're
looking for.
When I was in high school, I bagged groceries for a store. I also had the
opportunity to help customers out to their cars and was able to talk with them.
Obviously, I was minimum wage and had a whole bunch of other things to do;
however there were a few times that I could really get to talk with regular
customers and feel like I got to know them. What comes to mind however is a
story where that is not the case. One day I was helping this older woman to her
car, and she eventually said something like "All of these people just keep
going, going, going nonstop. It drives me mad. How can't they see where they are
is good enough already?" I thought for a moment and I wasn't able to come up
with a decent reply.
The infinite scrollbars and newsfeeds of the web just keep going, going, going,
going, going, going, going and going until the user gives up and does something
else. There's no consideration of _how_ the content is discovered, or _why_
the content is discovered; it's just an endless feed of noise. One subtle change
in your worldview after another, just from the headlines alone. Not to mention
the endless torrent of advertising.
However, I think there may be a way out, a kind of detox from the infinite
scrolling, newsfeeds, notifications and the like for the internet, and I think a
good step towards that is the Gemini [2] protocol.
[[2]: Gemini Protocol](https://gemini.circumlunar.space/)
Gemini is a protocol that is somewhere between HTTP and Gopher. A user sends a
request to a Gemini server and the user gets a response back. This response
could be anything, but a little header tells the client what kind of data it is.
There's also a little markup format that's a very lightweight take on
markdown [3], but overall the entire goal of the project is to be minimal and
just serve documents.
[[3]: Gemtext markup](https://portal.mozz.us/gemini/gemini.circumlunar.space/docs/gemtext.gmi)
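To give a rough idea of how small the protocol is, a whole exchange looks
something like this (an illustrative sketch, not a captured session): the client
opens a TLS connection to port 1965, sends a single URL line, and the server
answers with one status line followed by the body.

```
C: gemini://gemini.circumlunar.space/<CR><LF>
S: 20 text/gemini<CR><LF>
S: # Project Gemini
S: ...the rest of the page body follows...
```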
I've noticed something as I browse through the known constellation of Gemini
capsules though. I keep refreshing the CAPCOM feed of posts. I keep refreshing
the mailing list archives. I keep refreshing my email client, looking for new
content and feel frustrated when it doesn't show up like I expect it to. I'm
addicted to the newsfeeds. I'm caught in the trap that autoplay put me in. I'm a
victim to infinite scrolling and that constant little hit of dopamine that
modern social media has put on us all. Realizing this feels like I am realizing
an addiction to a drug (but I'd argue that it somewhat is a drug, by design,
what better way to get people to be exposed to ads than to make the service that
serves the ads addictive!).
I'm not sure how to best combat this. It feels kind of scary. I'm starting to
attempt to detox though. I'm writing a lot more on my Gemini capsule [4] [5]. I'm
starting to really consider the Fear of Missing Out when I design and implement
things in the future. So many things update instantly on the modern internet, it
may be a good idea to attempt to make something that updates weekly or even
monthly.
[[4]: My Gemini capsule](gemini://cetacean.club)
[[5]: [experimental] My Gemini capsule over HTTP](http://cetacean.club)
I'm still going to attempt a few ideas that I have regarding long term archival
of the Gemini constellation, but I'm definitely going to make sure that I take
the time to actually consider the consequences of my actions and what kind of
world it creates. I want to create the kind of world that enables people to
better themselves.
Let's work together to detox from the harmful effects of what we all have
created. I'm considering opening up a Gemini server that other people can have
accounts on and write about things that interest them.
If you want to get started with Gemini, I suggest taking a look at the main site
through the Gemini to HTTP proxy [6]. There are some clients listed in the pages
there, including a _very good_ iOS client that is currently in TestFlight.
Please do keep in mind that Gemini is very much a back-button navigation kind of
experience. The web has made people expect navigation links to be everywhere,
which can make it a weird/jarring experience at first, but you get used to it.
You can see evidence of this in my site with all the "Go back" links on each
page. I'll remove those at some point, but for now I'm going to keep them.
[[6]: Project Gemini](https://portal.mozz.us/gemini/gemini.circumlunar.space/)
Don't be afraid of missing out. It's inevitable. Things happen. It's okay for
them to happen without you having to see them. They will still be there when you
look again.

View File

@ -1,226 +0,0 @@
---
title: "Get Going: Hello, World!"
date: 2019-10-28
series: get-going
tags:
- golang
- book
- draft
---
This post is a draft of the first chapter in a book I'm writing to help people learn the
[Go][go] programming language. It's aimed at people who understand the high
level concepts of programming, but haven't had much practical experience with
it. This is a sort of spiritual successor to my old
[Getting Started with Go][gswg] post from 2015. A lot has changed in the
ecosystem since then, as well as my understanding of the language.
[go]: https://golang.org
[gswg]: https://christine.website/blog/getting-started-with-go-2015-01-28
Like always, feedback is very welcome. Any feedback I get will be used to help
make this book even better.
This article is a bit of an expanded version of what the first chapter will
eventually be. I also plan to turn a version of this article into a workshop for
my dayjob.
## What is Go?
Go is a compiled programming language made by Google. It has a lot of features
out of the box, including:
* A static type system
* Fast compile times
* Efficient code generation
* Parallel programming for free*
* A strong standard library
* Cross-compilation with ease (including webassembly)
* and more!
\* You still have to write code that can avoid race conditions, more on those
later.
### Why Use Go?
Go is a very easy to read and write programming language. Consider this snippet:
```go
func Add(x int, y int) int {
	return x + y
}
```
This function wraps [integer
addition](https://golang.org/ref/spec#Arithmetic_operators). When you call it, it
returns the sum of x and y.
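Once Go is installed (covered below), a minimal complete program that calls it
could look like this sketch:

```go
package main

import "fmt"

// Add returns the sum of x and y.
func Add(x int, y int) int {
	return x + y
}

func main() {
	fmt.Println(Add(2, 3)) // prints 5
}
```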
## Installing Go
### Linux
Installing Go on Linux systems is a very distribution-specific thing. Please see
[this tutorial on
DigitalOcean](https://www.digitalocean.com/community/tutorials/how-to-install-go-on-ubuntu-18-04)
for more information.
### macOS
* Go to https://golang.org/dl
* Download the .pkg file
* Double-click on it and go through the installer process
### Windows
* Go to https://golang.org/dl
* Download the .msi file
* Double-click on it and go through the installer process
### Next Steps
These next steps are needed to set up your shell for Go programs.
Pick a directory you want to store Go programs and downloaded source code in.
This is called your GOPATH. This is usually the `go` folder in
your home directory. If for some reason you want another folder for this, use
that folder instead of `$HOME/go` below.
#### Linux/macOS
This next step is unfortunately shell-specific. To find out what shell you are
using, run the following command in your terminal:
```console
$ env | grep SHELL
```
The name at the end of the path will be the shell you are using.
##### bash
If you are using bash, add the following lines to your .bashrc (Linux) or
.bash_profile (macOS):
```
export GOPATH=$HOME/go
export PATH="$PATH:$GOPATH/bin"
```
Then reload the configuration by closing and re-opening your terminal.
##### fish
If you are using fish, create a file in ~/.config/fish/conf.d/go.fish with the
following lines:
```
set -gx GOPATH $HOME/go
set -gx PATH $PATH "$GOPATH/bin"
```
##### zsh
If you are using zsh, add the following lines to your .zshrc:
```
export GOPATH=$HOME/go
export PATH="$PATH:$GOPATH/bin"
```
#### Windows
Follow the instructions
[here](https://github.com/golang/go/wiki/SettingGOPATH#windows).
## Installing a Text Editor
For this book, we will be using VS Code. Download and install it
from https://code.visualstudio.com. The default settings will let you work with
Go code.
## Hello, world!
Now that everything is installed, let's test it with the classic "Hello, world!"
program. Create a folder in your home folder called `Code`. Create another folder
inside that Code folder called `get_going` and create yet another subfolder
called `hello`. Open a file in there with VS Code (Open Folder -> Code ->
get_going -> hello) called `hello.go` and type in the following:
```go
// Command hello is your first Go program.
package main

import "fmt"

func main() {
	fmt.Println("Hello, world!")
}
```
This program prints "Hello, world!" and then immediately exits. Here's each of
the parts in detail:
```go
// Command hello is your first go program.
package main // Every go file must be in a package.
             // Package main is used for creating executable files.

import "fmt" // Go doesn't implicitly import anything. You need to
             // explicitly import "fmt" for printing text to
             // standard output.

func main() { // func main is the entrypoint of the program, or
              // where the computer starts executing your code
	fmt.Println("Hello, world!") // This prints "Hello, world!" followed by a newline
	                             // to standard output.
} // This ends the main function
```
Now click over to the terminal at the bottom of the VS Code window and run this
program with the following command:
```console
$ go run hello.go
Hello, world!
```
`go run` compiles and runs the code for you, without creating a persistent binary
file. This is a good way to run programs while you are writing them.
To create a binary, use `go build`:
```console
$ go build hello.go
$ ./hello
Hello, world!
```
`go build` has the compiler create a persistent binary file and puts it in the
same directory as you are running `go` from. Go will choose the filename of the
binary based on the name of the .go file passed to it. These binaries are
usually static binaries, or binaries that are safe to distribute to other
computers without having to worry about linked libraries.
## Exercises
The following is a list of optional exercises that may help you understand more:
1. Replace the "world" in "Hello, world!" with your name.
2. Rename `hello.go` to `main.go`. Does everything still work?
3. Read through the documentation of the [fmt][fmt] package.
[fmt]: https://golang.org/pkg/fmt
---
And that about wraps it up for Lesson 1 in Go. Like I mentioned before, feedback
on this helps a lot.
Up next is an overview on data types such as integers, true/false booleans,
floating-point numbers and strings.
I plan to post the book source code on my GitHub page once I have more than one
chapter drafted.
Thanks and be well.

View File

@ -1,9 +1,11 @@
--- ---
title: Getting Started with Go title: Getting Started with Go
date: 2015-01-28 date: 2015-01-28
series: howto
--- ---
Getting Started with Go
=======================
Go is an exciting language made by Google for systems programming. This article Go is an exciting language made by Google for systems programming. This article
will help you get up and running with the Go compiler tools. will help you get up and running with the Go compiler tools.

View File

@ -1,200 +0,0 @@
---
title: "gitea-release Tool Announcement"
date: "2020-05-31"
tags:
- gitea
- rust
- release
---
I'm a big fan of automating things that can possibly be automated. One of the
biggest pains that I've consistently had is creating/tagging releases of
software. This has been a very manual process for me. I have to write up
changelogs, bump versions and then replicate the changelog/versions in the web
UI of whatever git forge the project in question is using. This works great at
smaller scales, but can quickly become a huge pain in the butt when this needs
to be done more often. Today I've written a small tool to help me automate this
going forward, it is named
[`gitea-release`](https://tulpa.dev/cadey/gitea-release). This is one of my
largest Rust projects to date and something I am incredibly happy with. I will
be using it going forward for all of my repos on my gitea instance
[tulpa.dev](https://tulpa.dev).
`gitea-release` is a spiritual clone of the tool [`github-release`][ghrelease],
but optimized for my workflow. The biggest changes are that it works on
[gitea][gitea] repos instead of github repos, is written in Rust instead of Go
and it automatically scrapes release notes from `CHANGELOG.md` as well as
reading the version of the software from `VERSION`.
[ghrelease]: https://github.com/github-release/github-release
[gitea]: https://gitea.io
## CHANGELOG.md and VERSION files
The `CHANGELOG.md` file is based on the [Keep a Changelog][kacl] format, but
modified slightly to make it easier for this tool. Here is an example changelog
that this tool accepts:
[kacl]: https://keepachangelog.com/en/1.0.0/
```markdown
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## 0.1.0
### FIXED
- Refrobnicate the spurious rilkefs
## 0.0.1
First release, proof of concept.
```
When a release is created for version 0.1.0, this tool will set the description
of the release to the following:
```
### FIXED
- Refrobnicate the spurious rilkefs
```
This allows the changelog file to be the ultimate source of truth for release
notes with this tool.
The `VERSION` file plays into this as well. The `VERSION` file MUST be a single
line containing a [semantic version][semver] string. This allows the `VERSION`
file to be the ultimate source of truth for software version data with this
tool.
[semver]: https://semver.org/spec/v2.0.0.html
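For example, a `VERSION` file matching the changelog above would contain exactly
one line:

```
0.1.0
```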
## Release Process
When this tool is run with the `release` subcommand, the following actions take place:
- The `VERSION` file is read and loaded as the desired tag for the repo
- The `CHANGELOG.md` file is read and the changes for the `VERSION` are
cherry-picked out of the file
- The git repo is checked to see if that tag already exists
- If the tag exists, the tool exits and does nothing
- If the tag does not exist, it is created (with the changelog fragment as the
body of the tag) and pushed to the gitea server using the supplied gitea token
- A gitea release is created using the changelog fragment and the release name
is generated from the `VERSION` string
## Automation of the Automation
This tool works perfectly well locally, but this doesn't make it fully
automated from the gitea repo. I use [drone][drone] as a CI/CD tool for my gitea
repos. Drone has a very convenient and simple to use [plugin
system][droneplugin] that was easy to integrate with [structopt][structopt].
[drone]: https://drone.io
[droneplugin]: https://docs.drone.io/plugins/overview/
[structopt]: https://crates.io/crates/structopt
I created a drone plugin at `xena/gitea-release` that can be configured as a
pipeline step in your `.drone.yml` like this:
```yaml
kind: pipeline
name: ci/release

steps:
- name: whatever unit testing step
  # ...
- name: auto-release
  image: xena/gitea-release:0.2.5
  settings:
    auth_username: cadey
    changelog_path: ./CHANGELOG.md
    gitea_server: https://tulpa.dev
    gitea_token:
      from_secret: GITEA_TOKEN
  when:
    event:
    - push
    branch:
    - master
```
This allows me to bump the `VERSION` and `CHANGELOG.md`, then push that commit
to git and a new release will automatically be created. You can see an example
of this in action with [the drone build history of the gitea-release
repo](https://drone.tulpa.dev/cadey/gitea-release). You can also how the
`CHANGELOG.md` file grows with the [CHANGELOG of
gitea-release](https://tulpa.dev/cadey/gitea-release/src/branch/master/CHANGELOG.md).
Once the release is pushed to gitea, you can then use drone to trigger
deployment commands. For example here is the deployment pipeline used to
automatically update the docker image for the gitea-release tool:
```yaml
kind: pipeline
name: docker

steps:
- name: build docker image
  image: "monacoremo/nix:2020-04-05-05f09348-circleci"
  environment:
    USER: root
  commands:
  - cachix use xe
  - nix-build docker.nix
  - cp $(readlink result) /result/docker.tgz
  volumes:
  - name: image
    path: /result
  when:
    event:
    - tag
- name: push docker image
  image: docker:dind
  volumes:
  - name: image
    path: /result
  - name: dockersock
    path: /var/run/docker.sock
  commands:
  - docker load -i /result/docker.tgz
  - echo $DOCKER_PASSWORD | docker login -u $DOCKER_USERNAME --password-stdin
  - docker push xena/gitea-release
  environment:
    DOCKER_USERNAME:
      from_secret: DOCKER_USERNAME
    DOCKER_PASSWORD:
      from_secret: DOCKER_PASSWORD
  when:
    event:
    - tag

volumes:
- name: image
  temp: {}
- name: dockersock
  host:
    path: /var/run/docker.sock
```
This pipeline will use [Nix](https://nixos.org/nix) to build the docker image,
load it into a Docker daemon and then log into the Docker Hub and push it. This
can then be used to do whatever you want. It may also be a good idea to push a
docker image for every commit and then re-label the tagged commits, but this
wasn't implemented in this repo.
---
I hope this tool will be useful. I will accept feedback over [any contact
method](/contact). If you want to contribute directly to the project, please
feel free to create [issues](https://tulpa.dev/cadey/gitea-release/issues) or
[pull requests](https://tulpa.dev/cadey/gitea-release/pulls). If you don't want
to create an account on my git server, get me the issue details or code diffs
somehow and I will do everything I can to fix issues and integrate code. I just
want to make this tool better however I can.
Be well.

View File

@ -1,153 +0,0 @@
---
title: "GNU Doesn't Care About Your Agency"
date: 2022-02-10
tags:
- gnu
- libre
- rant
---
<div class="warning">
[EDIT(2022-02-10 12:47 EST): I apparently misread part of the GNU #guix channel
rules and made an unreasonable assumption that violators of the rules could be
banned. I have amended a conversation fragment accordingly. My intent was not to
lie, but to point out that some users actually need stuff that nonguix provides
but they just have to know that it exists in the first place.](conversation://Cadey/coffee)
</div>
Or: Ubuntu gives the user more agency about how they want to use their computer
than fully libre GNU/Linux distros ever can.
There are many different kinds of Linux distributions, but today we're going to
think about a certain kind of distribution: ones where the distribution is
totally comprised of free software as much as possible.
These distributions aim to let users benefit by making it possible to study,
hack at and modify every byte of software on the machine's hard drive. This is a
fairly noble goal, however in the process of doing this they break core parts of
hardware compatibility by "de-blobbing" the kernel. Most of these distributions
have a very paternalistic implementation where the "de-blobbed" linux-libre
kernel is the _only_ option, thus limiting users' agency.
For example, let's think about the CPU that I'm using right now. The CPU I'm
using is designed to be able to load CPU microcode updates that are distributed
by the manufacturer in order to mitigate bugs in the microcode that released
with the CPU that can cause real-world impact on what I do. Due to Facts and
Circumstances that are immutable for the sake of argument, this microcode is not
open source and cannot be compiled from source code. The linux-libre kernel
removes the ability to load such firmware updates at runtime.
This means that if something like the FDIV bug or Spectre shows up again and it
could be patched trivially with a microcode update, by nature of using the
linux-libre kernel I am doomed until the base microcode gets updated by the
motherboard manufacturer, and even then that update would be a closed-source
blob that you cannot inspect or modify anyway.
This paternalistic view of "you shouldn't be able to load microcode updates
because they aren't open source" means that my CPU will be vulnerable to
potentially critical security flaws and I have no way to work around it. This
ends up creating a _limitation_ in how I use my computer. This is worse than the
limitations of proprietary hardware because there is the illusion of free choice
that the community will spout off about as the next coming of sliced bread. That
still doesn't change the fact that my wifi card won't work without the normal
kernel and firmware blobs.
Combine this with other things like wifi card firmware (some wifi cards don't
have the firmware stored on the device, they require the OS to send it firmware
at runtime to make it work at all), and you have actually limited the agency and
capability of users far, far more than if you just let them load the firmware in
the first place.
[Yes, Yes the companies made the hardware this way in the first place and are
responsible for the problem, but telling users they are wrong for wanting it to
work because of an implementation detail about how the hardware updates itself
feels a lot like victim blaming. I am aware of the Talos II being a magical
puppy and rainbow situation where all of this isn't an issue, but sadly the
world just didn't turn out that way and we have to deal with the results of
it.](conversation://Cadey/coffee)
Consider a situation like wanting to play an online game together with friends,
but through Facts and Circumstances you have an Nvidia GPU and the game is on
Steam with no open source option. If you are using a fully open source operating
system with no capacity to install Steam or the Nvidia drivers, you are screwed
and thus your freedom to use your computer how you want is severely limited.
This also extends to how those Linux distributions handle things like AWS. AWS
is largely the poster child of a proprietary cloud hosting platform that you are
made to work with as part of your job. Consider if something like Parabola
GNU/Linux created AWS images and gave users a best-in-class user experience for
using them. This would make the net cost of using a highly auditable environment
a lot lower than the current "don't use AWS lol" (which is again really close to
victim blaming), and would also create institutional knowledge that would let
other people benefit from this as a second or third order effect.
Parabola making AWS images means they can create more generic images, which
means that other people can use those images to do whatever they want with their
own hardware. This lets you have a net benefit to everyone in the project by
decreasing the friction of using it, so it will in turn make users more likely
to adopt it.
Remember the law of halves. Every additional step in adoption costs you half
your audience. Spinning up an AWS instance to mess around with it is a very
low-friction operation.
[But you can just not be a scrub and compile your own traitor kernel that lets
you load freedom-violating binary blobs!](conversation://Numa/delet)
[Then you have to hope your CPU is good enough to build a kernel, hope you can
pay attention to the kernel security mailing list enough to upgrade it when you
need to and finally hope you can upgrade the firmware blobset that the kernel
publishes separately! Hope is not a scalable strategy.](conversation://Cadey/angy)
If their goal is _really_ to liberate users and make it easy for them to have
control over what their computer is doing, they should make it trivial to escape
hatch into a less "pure" setup without having to install third party
repositories that you just have to know about or sidestepping the upstream
update process to install your own system software. This is more victim blaming.
The GNU project could be more than a circlejerk around things that the toe
cheese god said in the 80's and 90's. They could have been a source of reverse
engineering tools, institutions and overall inspire the kind of culture that
would make it _easy_ to understand arbitrary hardware, platforms and software
that you either come across or are made to use as a part of your job.
But they aren't. Instead, Guix, one of their if not their main flagship project
for making a fully GNU system, is addled by the use of the linux-libre kernel.
This makes the kernel fundamentally _incompatible_ with a shocking number of
computers, thus limiting users' freedom to use Guix at all.
[But wait, isn't there that one nonguix project that allows you to install a
normal kernel and Steam?](conversation://Mara/hmm)
[Yeah, but talk about that in the main #guix channel and you get told to not
talk about it. You just have to know that it exists and you can't learn that it
exists without knowing someone that tells you that it exists under the table,
like some kind of underground software drug dealer giving you a hit of wifi card
firmware. This means that knowledge of the nonguix project (which may contain
tools that make it possible to use Guix at all) is hidden from users that may
need it because it allows users to install proprietary software. This limits
user freedom from being able to use their computer how they want by making it a
potentially untrustable underground software den instead of something that can
be properly handled upstream without having to place trust in too many
places.](conversation://Cadey/angy)
[That hardware is defective by design and you shouldn't use
it.](conversation://Numa/delet)
[Wow, thanks, I'm cured. My wifi card magically stopped existing and now
everything is happy unicorns farting out rainbows that spawn free puppies and
everything is saved forever.<br /><br />Again, that doesn't help me with the
situation that my wifi card doesn't work and I as a user want it to even though
making it work will require proprietary firmware. This shit is how you get
things like the "GPL condom" in the Purism Librem phone, where all the
proprietary firmware is rigged to be loaded automagically in hardware instead of
software. This limits your ability to tinker with or modify the firmware _even if
there are legitimate reasons such as critical updates_. So by making the
hardware work with fully free software you have limited the ability to actually
improve the state of the world even with the proprietary firmware the
manufacturer gives you.](conversation://Cadey/angy)
Ubuntu gives the user more agency about how they want to use their computer than
fully libre GNU/Linux distros ever can.

View File

@ -1,174 +0,0 @@
---
title: Go net/http.ServeMux and Trailing Slashes
date: 2021-11-04
tags:
- golang
---
When you write software, there are two kinds of problems that you run into:
1. Problems that stretch your fundamental knowledge of how things work and as a
result of solving them you become one step closer to unlocking the secrets to
immortality and transcending beyond mere human limitations
2. Exceedingly stupid typos that static analysis tools can't be taught how to
catch and thus doom humans to feeling like they wasted so much time on
something so trivial
3. Off-by-one errors
Today I ran into one of these three types of problems.
[Buckle up, it's story time!](conversation://Cadey/coffee)
It's a Thursday morning. Everything in this project has been going smoothly.
Almost too smoothly. Then `go test` is run to make sure that things are working like we expect.
[Huh, the test is passing, but the debug output says it should be failing.
What's up with that? What's going on here?](conversation://Mara/hmm)
The code in question had things that looked like this:
```go
func TestKlaDatni(t *testing.T) {
	tru := zbasuTurnis(t)
	ts := httptest.NewServer(tru)
	defer ts.Close()

	var buf bytes.Buffer
	failOnErr(t, json.NewEncoder(&buf).Encode(Renma{Judri: "mara@cipra.jbo"}))

	u, _ := url.Parse(ts.URL)
	u.Path = "/api/v2/kla"

	req, err := http.NewRequest(http.MethodPost, u.String(), &buf)
	failOnErr(t, err)
	tru.InjectAuth(req)

	resp, err := http.DefaultClient.Do(req)
	failOnErr(t, err)

	if resp.StatusCode == http.StatusOK {
		t.Fatalf("wanted status code %d, got: %d", http.StatusOK, resp.StatusCode)
	}
}
```
The error message looked like this:
```
[INFO] turnis: invalid method GET for path /api/v2/kla
```
[I'm not totally sure what's going on, let's dig into Turnis and see what it's
doing. Surely we're missing something.](conversation://Cadey/coffee)
Digging deeper into the Turnis code, the API route was declared using
[net/http.ServeMux](https://pkg.go.dev/net/http#ServeMux) like this:
```go
mux.Handle("/api/v2/kla/", logWrap(tru.adminKla))
```
[Maybe the `logWrap` middleware is changing it to `GET`
somehow?](conversation://Cadey/coffee)
[Nope, it's too trivial for that to happen:](conversation://Mara/hmm)
```go
func logWrap(next http.Handler) http.Handler {
	return xsweb.Falible(xsweb.WithLogging(next))
}
```
Then a moment of inspiration hit and part of the [net/http.ServeMux
documentation](https://pkg.go.dev/net/http#ServeMux)
came to mind. A ServeMux is basically a type that lets you associate HTTP paths
with handler functions, kinda like this:
```go
mux := http.NewServeMux()
mux.HandleFunc("/", index)
mux.HandleFunc("/robots.txt", robotsTxt)
mux.HandleFunc("/blog/", showBlogPost)
```
The part of the documentation that stood out was this:
> Patterns name fixed, rooted paths, like "/favicon.ico", or rooted subtrees,
> like "/images/" (note the trailing slash). Longer patterns take precedence
> over shorter ones, so that if there are handlers registered for both
> "/images/" and "/images/thumbnails/", the latter handler will be called for
> paths beginning "/images/thumbnails/" and the former will receive requests for
> any other paths in the "/images/" subtree.
Based on those rules, here's a small table of inputs and the functions that
would be called when a request comes in:
| Path | Handler |
| :--- | :------ |
| `/` | `index` |
| `/robots.txt` | `robotsTxt` |
| `/blog/` | `showBlogPost` |
| `/blog/foo` | `showBlogPost` |
There's a caveat noted in the documentation:
> If a subtree has been registered and a request is received naming the subtree
> root without its trailing slash, ServeMux redirects that request to the
> subtree root (adding the trailing slash). This behavior can be overridden with
> a separate registration for the path without the trailing slash. For example,
> registering "/images/" causes ServeMux to redirect a request for "/images" to
> "/images/", unless "/images" has been registered separately.
This means that the code from earlier that looked like this:
```go
u.Path = "/api/v2/kla"
```
wasn't actually going to the `tru.adminKla` function. It was getting redirected.
This is because HTTP [doesn't allow you to redirect a POST
request](https://support.postman.com/hc/en-us/articles/211913929-My-POST-request-is-redirected-to-a-GET-request).
As a result, the POST request is getting downgraded to a GET request and the
body is just lost forever.
[Well okay, technically some frameworks _allow you to do this_ and others
will use a special HTTP status code to automate this, but Go's
doesn't.](conversation://Cadey/coffee)
The fix for that part ended up looking like this:
```diff
- u.Path = "/api/v2/kla"
+ u.Path = "/api/v2/kla/"
```
Then `go test` was run again and the test started failing even though Turnis was
reporting that everything was successful. Then the final typo was spotted:
```diff
- if resp.StatusCode == http.StatusOK {
+ if resp.StatusCode != http.StatusOK {
t.Fatalf("wanted status code %d, got: %d", http.StatusOK, resp.StatusCode)
}
```
<center>
![](https://cdn.christine.website/file/christine-static/stickers/cadey/percussive-maintenance.png)
</center>
[It took us 6 hours combined to figure this out. Is that okay? It feels like
that's wasting too much time on a simple problem like
that.](conversation://Mara/hmm)
[That's just how some of these kinds of problems are. The dumbest problems
always take the longest to figure out because they are the ones that tools can't
really warn you about. I once spent 15 hours of straight effort trying to fix
something to find out that `ON` is a yaml value for "true" and that what I was
trying to do needed to be `"ON"` instead. This is our lot in life as software
people. You are going to make these kinds of mistakes and it is going to make
you feel like an absolute buffoon every time. That is just how it happens. Let's
go play Fortnite and forget about all this for
now.](conversation://Cadey/coffee)

View File

@ -3,6 +3,9 @@ title: "gopreload: LD_PRELOAD for the Gopher crowd"
date: "2017-03-25" date: "2017-03-25"
--- ---
gopreload: LD_PRELOAD for the Gopher crowd
==========================================
A common pattern in Go libraries is to take advantage of [init functions][initf] A common pattern in Go libraries is to take advantage of [init functions][initf]
to do things like settings up defaults in loggers, automatic metrics instrumentation, to do things like settings up defaults in loggers, automatic metrics instrumentation,
flag values, [debugging tools][manhole] or database drivers. With monorepo culture flag values, [debugging tools][manhole] or database drivers. With monorepo culture

View File

@ -4,9 +4,10 @@ date: 2019-01-11
thanks: https://github.com/dreampuf/GraphvizOnline thanks: https://github.com/dreampuf/GraphvizOnline
--- ---
# [graphviz.christine.website](https://graphviz.christine.website)
I have been using an [online copy of GraphViz](https://github.com/dreampuf/GraphvizOnline) I have been using an [online copy of GraphViz](https://github.com/dreampuf/GraphvizOnline)
for a while to make my own diagrams online. I have forked this to [here](https://github.com/Xe/GraphvizOnline) and added basic [Progressive Web App](https://developers.google.com/web/progressive-web-apps/) support. for a while to make my own diagrams online. I have forked this to [here](https://github.com/Xe/GraphvizOnline) and added basic [Progressive Web App](https://developers.google.com/web/progressive-web-apps/) support.
Here is the [link](https://graphviz.christine.website).
Here's an [example usage video](https://youtu.be/IUvbtK_nmtY). Here's an [example usage video](https://youtu.be/IUvbtK_nmtY).

View File

@ -3,9 +3,10 @@ title: Gratitude
date: 2018-07-20 date: 2018-07-20
thanks: CelestialBoon thanks: CelestialBoon
for: Mother Aya for: Mother Aya
series: magick
--- ---
# Gratitude
A lot of ground has been trodden about Mindfulness and its many facets, but there is one topic I have seen not enough people elaborate on, especially in a satisfactory manner, and that topic is gratitude. A lot of ground has been trodden about Mindfulness and its many facets, but there is one topic I have seen not enough people elaborate on, especially in a satisfactory manner, and that topic is gratitude.
The act of expressing gratitude is a behaviour that grounds you in observation of the present moment; of the present you, and of what matters to that present you. It can help you understand the current, immediate moment, the Now, by pushing you to examine parts of it that you might have taken for granted. Or parts that hide behind the other parts. It is a tool of positive exploration, that empowers the user to iteratively discern the heart of matters, of things, guided by the unerring principle of genuine appreciation of what counts. The act of expressing gratitude is a behaviour that grounds you in observation of the present moment; of the present you, and of what matters to that present you. It can help you understand the current, immediate moment, the Now, by pushing you to examine parts of it that you might have taken for granted. Or parts that hide behind the other parts. It is a tool of positive exploration, that empowers the user to iteratively discern the heart of matters, of things, guided by the unerring principle of genuine appreciation of what counts.

View File

@ -1,272 +0,0 @@
---
title: Using Paper for Everyday Tasks
date: 2021-06-13
author: Heartmender
---
I have a bit of a reputation of being a very techno-savvy person. People have
had the assumption that I have some kind of superpowerful handcrafted task
management system that rivals all other systems and fully integrates with
everything on my desktop. I don't. I use paper to keep track of my day to day
tasks. Offline, handwritten paper. I have a big stack of little notebooks and I
go through them one each month. Today I'm going to discuss the core ideas of my
task management toolchain and walk you through how I use paper to help me get
things done.
I have tried a lot of things before I got to this point. I've used nothing,
Emacs' Org mode, Jira, GitHub issues and a few reminder apps. They all haven't
quite cut it for me.
The natural place to start from is doing nothing to keep track of my tasks and
goals. This can work in the short term. Usually the things that are important
will come back to you and you will eventually get them done. However it can be
hard for it to be a reliable system.
[Focus is hard. Memory is fleeting. Data gets erased. Object permanence is a
myth. Paper sits by the side and laughs.](conversation://Cadey/coffee)
It does work for some people though. I just don't seem to be one of them. Doing
nothing to keep track of my tasks only really works when there are external
structures around to help me keep track of things. Standup meetings or some kind
of daily check-in are vital to this, and they sort of work because my team is
helping keep everyone accountable for getting work done. This is very dependent
on the team culture being healthy and on me being somewhere that I feel
psychologically safe enough to admit when I make a mistake (which I have only
really felt working at Tailscale). It also doesn't follow me from job to job, so
changing employers would also mean I can't take my organization system with me.
So that option is out.
[Emacs](https://www.gnu.org/software/emacs/) is a very extensible text editor.
It has a turing-complete scripting language called Emacs Lisp at its core and
you can build out just about anything you want with it. As such, many packages
have been developed. One of the bigger and more common packages is [Org
Mode](https://orgmode.org/). It is an Emacs major mode that helps you keep track
of notes, todo lists, timekeeping, literate programming, computational notebooks
and more. I have used Org Mode for many years in the past and I have no doubt
that without it I would probably have been fired at least twice.
One of the main philosophies is that Org Mode is text at its core. The whole
user experience is built around text and uses Emacs commands to help you
manipulate text. Here's an example Org Mode file like I used to use for task
management:
```orgmode
#+TITLE: June 2021
* June 10, 2021
** SRE
*** TODO put out the fire in prod before customers notice
Oh god, it's a doozy. The database server takes too long to run queries only
sometimes on Thursdays. Why thursday? No idea. It just happens. Very
frustrating. I wonder if God is cursing me.
** Devel
*** DONE Implement the core of flopnax for abstract rilkefs
CLOSED: [2021-06-10 Thu 16:20]
*** TODO write documentation for flopnax before it is shipped
** Overhead
*** DONE ENG meeting
CLOSED: [2021-06-10 Thu 15:00]
*** TODO Assist Jessie with the finer points of Rust
**** References vs Values
**** Lifetimes
Programming in Rust is the adventure of a lifetime!
** Personal
*** DONE Morning meds
CLOSED: [2021-06-10 Thu 09:04]
*** TODO Evening meds
*** TODO grocery run
```
Org Mode used to be a core part of my workflow and life. It was everpresent and
used to keep track of everything. I would even track usage of certain
recreational substances in Org Mode with a snippet of Emacs Lisp to do some
basic analytics on usage frequency. Org Mode can live with me and I don't have
to give it up when I change jobs.
I got out of the habit a while ago and it's been really hard to go back into the
habit. I still suggest Org Mode to people, but it's no longer the thing that I
use day to day. It also is hard to use from my tablet (iPad) and my phone
(iPhone). It also tends to vanish when you close the window, and when you have
object permanence issues that tends to make things hard.
[I could probably set up something with one of those fancy org-mode frontends
served over HTTP, but that would probably end up being more effort than it's
worth for me](conversation://Cadey/coffee)
Another tool I've used for this is my employer's task management tool of choice.
At past jobs this has ranged from GitHub to Jira. This is a solid choice. It
keeps everything organized and referenced with other people. I don't have to do
manual or automated synchronization of information into that ticket tracking
system to be sure other people are updated. However, you inherit a lot of the
inertia of how the ticket tracking system of choice is used. At a past job there
were unironically 17 different states that a ticket could be in. Most of them
were never used and didn't matter, yet they could not be removed lest it break
the entire process that the product team used to keep track of things.
Doing it like this works great if your opinions about how issues should be
tracked agree with your employer's process (if this is the case, you probably
set up the issue tracking system). As I mentioned before, this also means that
you have to leave that system behind when you change jobs. If you are someone
that never really changes jobs, this can work amazingly. I am not one of those
people.
Something else I've tried is to set up my own private GitHub/Gitea project to
keep track of things. We used one for organizing our move to Ottawa even. This
is a very low-friction system. It is easy to set up and the issues will bother
you in your news feed, so they are everpresent. It's also easy to close the
window and forget about the repo.
There is also that little hit of endorphins from closing an issue. That little
rush can help fuel a habit for using the tool to track things, but the rush goes
away after a while.
[Wait, if you have issues remembering to look at your org mode file or tracker
board or whatever, why can't you just set up a reminder to update it? Surely
that can't be that hard to do?](conversation://Mara/hmm)
[Don't you think that if it was that easy, I would already be doing that? Do you
think I like having this be so hard? Notifications that are repetitive fade into
the background when I see them too often. I subconsciously filter them out. They
do not exist to me. Even if it is one keypress away to open the board or append
to my task list, I will still forget to do it, even if it's
important.](conversation://Cadey/coffee)
So, I've arrived on paper to keep track on these things. Paper is cheap. Paper
is universal. Paper doesn't run out of battery. Paper doesn't vanish into the
shadow realm when I close the window. Paper can do anything I can do with a
pencil. Paper lets me turn back pages in the notebook and scan over for things
that have yet to be done. Honestly I wish I had started using paper for this
sooner. Here's how I use paper:
- Get a cheap notebook or set of notebooks. They should ideally be small,
pocketable notebooks. Something like 30 sheets of paper per notebook. I can't
find the cheap notebooks that I bought on Amazon, but I found something
similar
[here](https://www.amazon.ca/Notebook-Kraft-Cover-Pocket-Squared/dp/B0876LYNYH/).
Don't be afraid to buy more than you need. This stuff is really cheap. Having
more paper around can't hurt. [Field Notes](https://fieldnotesbrand.com/)
works in a pinch, but their notebooks can be a bit expensive. The point is
you have many options.
- Label it with the current month (it's best to start this at the beginning of
a month if you can). Put contact information on the inside cover in case you
lose it.
- Start a new page every day. Put the date at the top of the page.
- Metadata about the day goes in the margins. I use this to keep a log of who
is front as well as taking medicine.
- Write prose freely.
- TODO items start with a `-`. Those represent things you need to do but
haven't done yet.
- When the item is finished, put a vertical line through the `-` to make it a
`+`.
- If the item either can't or won't be done, cross out the `-` to make it into
a `*`.
- If you have to put off a task to a later date, turn the `-` into a `->`. If
there is room, put a brief description of why it needs to be moved or when it
is moved to. If there's no room feel free to write it out in prose form at
the end of your page.
- Notes start with a middot (`·`). They differ from prose as they are not
complete sentences. If you need to, you can always turn them into TODO items
later.
- Write in pencil so you can erase mistakes. Erase carefully to avoid ripping
the paper; you hardly need to use any force to erase things.
- There is only one action, appending. Don't try and organize things by topic
as you would on a computer. This is not a computer, this is paper. Paper
works best when you append only. There is only one direction, forward.
- If you need to relate a bunch of notes or todo items with a topic, skip a
line and write out the topic ending with a colon. When ending the topical
notes, skip another line.
- Don't be afraid to write in it. If you end up using a whole notebook before
the month is up, that is a success. Record insights, thoughts, feelings and
things that come to your mind. You never know what will end up being useful
later.
- At the end of the month, look back at the things you did and summarize/index
them in the remaining pages. Discover any leftover items that you haven't
completed yet so you can either transfer them over to next month or discard
them. It's okay to not get everything done. You may also want to scan it to
back it up into the cloud. You may never reference these scans, but backups
never hurt.
And then just write things in as they happen. Don't agonize over getting them
all. You will not. The aim is to get the important parts. If you really honestly
do miss something that is important, it will come back.
Something else I do is I keep a secondary notebook I call `Knowledge`. It
started out as the notebook that I used to document errata for my homelab, but
overall it's turned into a sort of secondary place to record other information
as well as indexing other details across notebooks. This started a bit on
accident. One of the notebooks from my big order came slightly broken. A few
pages fell out and then I had a smaller notebook in my hands. I stray from the
strict style in this notebook. It's a lot more free flowing based on my needs,
and that's okay. I still try to separate things onto separate pages when I can
to help keep things tidy.
I've also been using it to outline blogposts in the form of bullet trees.
Normally I start these articles as a giant unordered list with sub-levels for
various details on its parent thing. Each top-level thing becomes a "section"
and things boil down into either paragraphs or sentences based on what makes
sense.
An unexpected convenience of this flow is that the notebooks I'm using are small
enough to fit under the halves of my keyboard:
<center><blockquote class="twitter-tweet"><p lang="en" dir="ltr">The REAL reason to get
a split keyboard <a
href="https://t.co/I3qBMDU5sQ">pic.twitter.com/I3qBMDU5sQ</a></p>&mdash; Xe from
Within (@theprincessxena) <a
href="https://twitter.com/theprincessxena/status/1402459138010009605?ref_src=twsrc%5Etfw">June
9, 2021</a></blockquote> <script async
src="https://platform.twitter.com/widgets.js" charset="utf-8"></script></center>
This lets me leave the notebooks in an easy-to-grab place while also putting
them slightly out of the way until they are needed. I also keep my pencil and
eraser close by. When I go out of the house, I pack this month's journal, a
pencil and an eraser.
Paper has been a great move for me. There's no constant reminders. There's no
product team trying to psychologically manipulate me into using paper more
(though honestly that might have helped to build the habit of using it daily).
It is a very calm technology and I am all for it.
[Is this technology though? This is just a semi-structured way of writing things
on paper. Does that count as technology?](conversation://Mara/hmm)
[To be honest, I don't know. The line of what is and what is not technology is
very thin in the best case. I think that this counts as a technology, but
overall this is a huge It Depends™. You may not think this is "real" technology
because there's no real electronic component to it. That is a valid opinion,
however I would like to posit that this is technology in the same way that a
manual shaving razor is technology. It was designed and built to purpose. If that
isn't technology, what is? Plus, this way there's no risk of server downtime
preventing me from using paper!](conversation://Cadey/enby)
Oh, also, if you feel bored and a design comes to mind, don't be afraid to
doodle on the cover. Make paper yours. Don't worry about it being perfect. It's
there to help you tell the notebooks apart in the future after they are
complete.
So far over the last month I've made notes on 49 pages. Most of the TODO items
are complete. Less than 10% of them failed/were cancelled. Less than 10% of them
had to roll over to the next day. I assemble my TODO lists based on what I
didn't get done the previous day. I write each thing out by hand to help me
remember to prioritize them. When I need something to do, I look down at my
notebook for incomplete items. I use a rubber band to keep the notebook closed.
I've been considering slipstreaming the stuff currently in the `Knowledge`
notebook into the main monthly one. It's okay to go through paper. That's a
success.
This system works for me. I don't know if it will work for you, but if you have
been struggling with remembering to do things I would really suggest trying it.
You probably have a few paper notebooks left over from startups handing them out
in a swag pack. You probably also have never touched them since you got them.
This is good. I only really use the small notebooks because I found the more
fancy bound notebooks were harder to write in on the left-hand pages than on the
right-hand ones. Your mileage may vary.
[I would include a scan of one of my notebook pages here, but that would reveal
some personal information that I don't really want to put on this blog as well
as potentially break NDA terms for work, so I don't want to risk that if you can
understand.](conversation://Cadey/enby)

View File

@ -1,326 +0,0 @@
---
title: The h Programming Language
date: 2019-06-30
tags:
- wasm
- release
---
[h](https://h.christine.website) is a project of mine that I have released
recently. It is a single-paradigm, multi-tenant friendly, Turing-incomplete
programming language that does nothing but print one of two things:
- the letter h
- a single quote (the Lojbanic "h")
It does this via [WebAssembly](https://webassembly.org). This may sound like a
pointless complication, but actually this ends up making things _a lot simpler_.
WebAssembly is a virtual machine (fake computer that only exists in code) intended
for browsers, but I've been using it for server-side tasks.
I have written more about/with WebAssembly in the past in these posts:
- https://christine.website/talks/webassembly-on-the-server-system-calls-2019-05-31
- https://christine.website/blog/olin-1-why-09-1-2018
- https://christine.website/blog/olin-2-the-future-09-5-2018
- https://christine.website/blog/land-1-syscalls-file-io-2018-06-18
- https://christine.website/blog/templeos-2-god-the-rng-2019-05-30
This is a continuation of the following two posts:
- https://christine.website/blog/the-origin-of-h-2015-12-14
- https://christine.website/blog/formal-grammar-of-h-2019-05-19
All of the relevant code for h is [here](https://github.com/Xe/x/tree/master/cmd/h).
h is a somewhat standard three-phase compiler. Each of the phases is as follows:
## Parsing the Grammar
As mentioned in a prior post, h has a formal grammar defined in [Parsing Expression Grammar](https://en.wikipedia.org/wiki/Parsing_expression_grammar).
I took this [grammar](https://github.com/Xe/x/blob/v1.1.7/h/h.peg) (with some
minor modifications) and fed it into a tool called [peggy](https://github.com/eaburns/peggy)
to generate a Go source [version of the parser](https://github.com/Xe/x/blob/v1.1.7/h/h_gen.go).
This parser has some minimal [wrappers](https://github.com/Xe/x/blob/v1.1.7/h/parser.go)
around it, mostly to simplify the output and remove unneeded nodes from the tree.
This simplifies the later compilation phases.
The input to h looks something like this:
```
h
```
The output syntax tree pretty-prints to something like this:
```
H("h")
```
This is also represented using a tree of nodes that looks something like this:
```
&peg.Node{
Name: "H",
Text: "h",
Kids: nil,
}
```
A more complicated program will look something like this:
```
&peg.Node{
Name: "H",
Text: "h h h",
Kids: {
&peg.Node{
Name: "",
Text: "h",
Kids: nil,
},
&peg.Node{
Name: "",
Text: "h",
Kids: nil,
},
&peg.Node{
Name: "",
Text: "h",
Kids: nil,
},
},
}
```
Now that we have this syntax tree, it's easy to go to the next phase of
compilation: generating the WebAssembly Text Format.
## WebAssembly Text Format
[WebAssembly Text Format](https://developer.mozilla.org/en-US/docs/WebAssembly/Understanding_the_text_format)
is a human-editable and understandable version of WebAssembly. It is pretty low
level, but it is actually fairly simple. Let's take an example of the h compiler
output and break it down:
```
(module
(import "h" "h" (func $h (param i32)))
(func $h_main
(local i32 i32 i32)
(local.set 0 (i32.const 10))
(local.set 1 (i32.const 104))
(local.set 2 (i32.const 39))
(call $h (get_local 1))
(call $h (get_local 0))
)
(export "h" (func $h_main))
)
```
Fundamentally, WebAssembly binary files are also called modules. Each .wasm file
can have only one module defined in it. Modules can have sections that contain the
following information:
- External function imports
- Function definitions
- Memory information
- Named function exports
- Global variable definitions
- Other custom data that may be vendor-specific
h only uses external function imports, function definitions and named function
exports.
`import` imports a function from the surrounding runtime with two fields: module
and function name. Because this is an obfuscated language, the function `h` from
module `h` is imported as `$h`. This function works somewhat like the C library
function [putchar()](https://www.tutorialspoint.com/c_standard_library/c_function_putchar.htm).
`func` creates a function. In this case we are creating a function named `$h_main`.
This will be the entrypoint for the h program.
Inside the function `$h_main`, there are three local variables created: `0`, `1` and `2`.
They correlate to the following values:
| Local Number | Explanation | Integer Value |
| :----------- | :---------------- | :------------ |
| 0 | Newline character | 10 |
| 1 | Lowercase h | 104 |
| 2 | Single quote | 39 |
As such, this program prints a single lowercase h and then a newline.
`export` lets consumers of this WebAssembly module get a name for a function,
linear memory or global value. As we only need one function in this module,
we export `$h_main` as `"h"`.
## Compiling this to a Binary
The next phase of compiling is to turn this WebAssembly Text Format into a binary.
For simplicity, the tool `wat2wasm` from the [WebAssembly Binary Toolkit](https://github.com/WebAssembly/wabt)
is used. This tool creates a WebAssembly binary out of WebAssembly Text Format.
Usage is simple (assuming you have the WebAssembly Text Format file above saved as `h.wat`):
```
wat2wasm h.wat -o h.wasm
```
And you will create `h.wasm` with the following sha256 sum:
```
sha256sum h.wasm
8457720ae0dd2deee38761a9d7b305eabe30cba731b1148a5bbc5399bf82401a h.wasm
```
Now that the final binary is created, we can move to the runtime phase.
## Runtime
The h [runtime](https://github.com/Xe/x/blob/v1.1.7/cmd/h/run.go) is incredibly
simple. It provides the `h.h` putchar-like function and executes the `h`
function from the binary you feed it. It also times execution as well as keeps
track of the number of instructions the program runs. This is called "gas" for
historical reasons involving [blockchains](https://blockgeeks.com/guides/ethereum-gas/).
I use [Perlin Network's life](https://github.com/perlin-network/life) as the
implementation of WebAssembly in h. I have experience with it from [Olin](https://github.com/Xe/olin).
## The Playground
As part of this project, I wanted to create an [interactive playground](https://h.christine.website/play).
This allows users to run arbitrary h programs on my server. As the only system
call is putchar, this is safe. The playground also has some limitations on how
big of a program it can run. The playground server works like this:
- The user program is sent over HTTP with Content-Type [text/plain](https://github.com/Xe/x/blob/v1.1.7/cmd/h/http.go#L402-L413)
- The program is [limited to 75 bytes on the server](https://github.com/Xe/x/blob/v1.1.7/cmd/h/http.go#L44) (though this is [configurable](https://github.com/Xe/x/blob/v1.1.7/cmd/h/http.go#L15) via flags or envvars)
- The program is [compiled](https://github.com/Xe/x/blob/v1.1.7/cmd/h/http.go#L53)
- The program is [run](https://github.com/Xe/x/blob/v1.1.7/cmd/h/http.go#L59)
- The output is [returned via JSON](https://github.com/Xe/x/blob/v1.1.7/cmd/h/http.go#L65-L72)
- This output is then put [into the playground page with JavaScript](https://github.com/Xe/x/blob/v1.1.7/cmd/h/http.go#L389-L394)
The output of this call looks something like this:
```
curl -H "Content-Type: text/plain" --data "h" https://h.christine.website/api/playground | jq
{
"prog": {
"src": "h",
"wat": "(module\n (import \"h\" \"h\" (func $h (param i32)))\n (func $h_main\n (local i32 i32 i32)\n (local.set 0 (i32.const 10))\n (local.set 1 (i32.const 104))\n (local.set 2 (i32.const 39))\n (call $h (get_local 1))\n (call $h (get_local 0))\n )\n (export \"h\" (func $h_main))\n)",
"bin": "AGFzbQEAAAABCAJgAX8AYAAAAgcBAWgBaAAAAwIBAQcFAQFoAAEKGwEZAQN/QQohAEHoACEBQSchAiABEAAgABAACw==",
"ast": "H(\"h\")"
},
"res": {
"out": "h\n",
"gas": 11,
"exec_duration": 12345
}
}
```
The execution duration is in [nanoseconds](https://godoc.org/time#Duration), as
it is just directly a Go standard library time duration.
## Bugs h has Found
This will be updated in the future, but h has already found a bug in [Innative](https://innative.dev).
There was a bug in how Innative handled C name mangling of binaries. Output of
the h compiler is now [a test case in Innative](https://github.com/innative-sdk/innative/commit/6353d59d611164ce38b938840dd4f3f1ea894e1b#diff-dc4a79872612bb26927f9639df223856R1).
I consider this a success for the project. It is such a little thing, but it
means a lot to me for some reason. My shitpost created a test case in a project
I tried to integrate it with.
That's just awesome to me in ways I have trouble explaining.
As such, h programs _do_ work with Innative. Here's how to do it:
First, install the h compiler and runtime with the following command:
```
go get within.website/x/cmd/h
```
This will install the `h` binary to your `$GOPATH/bin`, so ensure that is part
of your path (if it is not already):
```
export GOPATH=$HOME/go
export PATH=$PATH:$GOPATH/bin
```
Then create a h binary like this:
```
h -p "h h" -o hh.wasm
```
Now we need to provide Innative the `h.h` system call implementation, so open
`h.c` and enter in the following:
```
#include <stdio.h>
void h_WASM_h(char data) {
putchar(data);
}
```
Then build it to an object file:
```
gcc -c -o h.o h.c
```
Then pack it into a static library `.ar` file:
```
ar rsv libh.a h.o
```
Then create the shared object with Innative:
```
innative-cmd -l ./libh.a hh.wasm
```
This should create `hh.so` in the current working directory.
Now create the following [Nim](https://nim-lang.org) wrapper at `h.nim`:
```
proc hh_WASM_h() {. importc, dynlib: "./hh.so" .}
hh_WASM_h()
```
and build it:
```
nim c h.nim
```
then run it:
```
./h
h
```
And congrats, you have now compiled h to a native shared object.
## Why
Now, something you might be asking yourself as you read through this post is
something like: "Why the heck are you doing this?" That's honestly a good
question. One of the things I want to do with computers is to create art for the
sake of art. h is one of these such projects. h is not a productive tool. You
cannot create anything useful with h. This is an exercise in creating a compiler
and runtime from scratch, based on my past experiences with parsing lojban,
WebAssembly on the server and frustrating marketing around programming tools. I
wanted to create something that deliberately pokes at all of the common ways
that programming languages and tooling are advertised. I wanted to make it a
fully secure tool as well, with an arbitrary limitation of having no memory
usage. Everything is fully functional. There are a few grammar bugs that I'm
calling features.

View File

@ -1,76 +0,0 @@
---
title: hlang in 30 Seconds
date: 2021-01-04
series: h
tags:
- satire
---
hlang (the h language) is a revolutionary new use of WebAssembly that enables
single-paradigm programming without any pesky state or memory accessing. The
simplest program you can use in hlang is the h world program:
```
h
```
When run in [the hlang playground](https://h.christine.website/play), you can
see its output:
```
h
```
To get more output, separate multiple h's by spaces:
```
h h h h
```
This returns:
```
h
h
h
h
```
## Internationalization
For internationalization concerns, hlang also supports the Lojbanic h `'`. You can
mix h and `'` to your heart's content:
```
' h '
```
This returns:
```
'
h
'
```
Finally an easy solution to your pesky Lojban internationalization problems!
## Errors
For maximum understandability, compiler errors are provided in Lojban. For
example this error tells you that you have an invalid character at the first
character of the string:
```
h: gentoldra fi'o zvati fe li no
```
Here is an interlinear gloss of that error:
```
h: gentoldra fi'o zvati fe li no
grammar-wrong existing-at second-place use-number 0
```
And now you are fully fluent in hlang, the most exciting programming language
since sliced bread.

View File

@ -1,9 +1,10 @@
--- ---
title: How does into Meditation title: How does into Meditation
date: 2017-12-10 date: 2017-12-10
series: when-then-zen
--- ---
# How does into Meditation
tl;dr tl;dr
1. stop thinking 1. stop thinking

View File

@ -1,162 +0,0 @@
---
title: How HTTP Requests Work
date: 2020-05-19
tags:
- http
- ohgod
- philosophy
---
Reading this webpage is possible because of millions of hours of effort with
tens of thousands of actors across thousands of companies. At some level it's a
minor miracle that this all works at all. Here's a preview into the madness that
goes into hitting enter on christine.website and this website being loaded.
## Beginnings
The user types in `https://christine.website` into the address bar and hits
enter on the keyboard. This sends a signal over USB to the computer and the
kernel polls the USB controller for a new message. It's recognized as from the
keyboard. The input is then sent to the browser through an input driver talking
to a windowing server talking to the browser program.
The browser selects the memory region normally reserved for the address bar. The
browser then parses this string as an [RFC 3986][rfc3986] URI and scrapes out
the protocol (https), hostname (christine.website) and path (/). The browser
then uses this information to create an abstract HTTP request object with the
Host header set to christine.website, the HTTP method set to GET, and the path
set to /. This request object then passes through various layers of credential
storage and middleware to add the appropriate cookies and other headers in order
to tell my website what language it should localize the response to, what
compression methods the browser understands, and what browser is being used to
make the request.
[rfc3986]: https://tools.ietf.org/html/rfc3986
## Connections
The browser then checks if it has a connection to christine.website open
already. If it does not, then it creates a new one. It creates a new connection
by figuring out what the IP address of christine.website is using [DNS][dns]. A
DNS request is made over [UDP][udp] on port 53 to the DNS server configured in
the operating system (such as 8.8.8.8, 1.1.1.1 or 75.75.75.75). The UDP
connection is created using operating system-dependent system calls and a DNS
request is sent.
[udp]: https://en.wikipedia.org/wiki/User_Datagram_Protocol
[dns]: https://en.wikipedia.org/wiki/Domain_Name_System
The packet that was created then is destined for the DNS server and added to the
operating system's output queue. The operating system then looks in its routing
table to see where the packet should go. If the packet matches a route, it is
queued for output to the relevant network card. The network card layer then
checks the ARP table to see what [mac address][macaddress] the
[ethernet][ethernet] frame should be sent to. If the ARP table doesn't have a
match, then an ARP probe is broadcast to every node on the local network. Then
the driver waits for an ARP response to be sent to it with the correct IP -> MAC
address mapping. The driver then uses this information to send out the ethernet
frame to the node that matches the IP address in the routing table. From there
the packet is validated on the router it was sent to. It then unwraps the packet
to the IP layer to figure out the destination network interface to use. If this
router also does NAT termination, it creates an entry in the NAT table for
future use for a site-configured amount of time (for UDP at least). It then
passes the packet on to the correct node and this process is repeated until it
gets to the remote DNS server.
[macaddress]: https://en.wikipedia.org/wiki/MAC_address
[ethernet]: https://en.wikipedia.org/wiki/Ethernet
The DNS server then unwraps the ethernet frame into an IP packet and then as a
UDP packet and a DNS request. It checks its database for a match and if one is
not found, it attempts to discover the correct name server to contact by using a
NS record query to its upstreams or the authoritative name server for the
WEBSITE namespace. This then creates another process of ethernet frames and UDP
packets until it reaches the upstream DNS server which hopefully should reply
with the correct address. Once the DNS server gets the information it needs, it
sends the results back to the client as a wire-format DNS response.
UDP is unreliable by design, so this packet may or may not survive the entire
round trip. It may take one or more retries for the DNS information to get to
the remote server and back, but it usually works the first time. The response to
this request is cached based on the time-to-live specified in the DNS response.
The response also contains the IP address of christine.website.
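In most programming languages all of that machinery hides behind a single call.
Here's a minimal Rust sketch that asks the operating system's resolver to do the
whole dance described above (the port number is only there because the standard
library wants one):
```rust
use std::net::ToSocketAddrs;

fn main() -> std::io::Result<()> {
    // Ask the OS resolver for the addresses behind the name. This kicks off
    // the same kind of DNS lookup a browser does before it can dial a server.
    for addr in ("christine.website", 443).to_socket_addrs()? {
        println!("{}", addr);
    }
    Ok(())
}
```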
## Security
The protocol used in the URL determines which TCP port the browser connects to.
If it is http, it uses port 80. If it is https, it uses port 443. The user
specified HTTPS, so port 443 on whatever IP address DNS returned is dialed using
the operating system's network stack system calls. The [TCP][tcp] three-way
handshake is started with that target IP address and port. The client sends a
SYN packet, the server replies with a SYN ACK packet and the client replies with
an ACK packet. This indicates that the entire TCP session is active and data can
be transferred and read through it.
[tcp]: https://en.wikipedia.org/wiki/Transmission_Control_Protocol
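From a program's point of view, all three packets of that handshake hide behind
one blocking call. Here's a minimal Rust sketch (the hostname and port are just
examples):
```rust
use std::net::TcpStream;

fn main() -> std::io::Result<()> {
    // connect() only returns once the SYN / SYN-ACK / ACK exchange finishes,
    // so a successful return means the TCP session is active.
    let stream = TcpStream::connect("christine.website:443")?;
    println!("{} -> {}", stream.local_addr()?, stream.peer_addr()?);
    Ok(())
}
```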
However, this data is UNENCRYPTED by default. [Transport Layer Security][tls] is
used to encrypt this data so prying eyes can't look into it. TLS has its own
handshake too. The session is established by sending a TLS ClientHello packet
with the domain name (christine.website), the list of ciphers the client
supports, any application layer protocols the client supports (like HTTP/2) and
the list of TLS versions that the client supports. This information is sent over
the wire to the remote server using that entire long and complicated process
that I spelled out for how DNS works, except a TCP session requires the other
side to acknowledge when data is successfully received. The server on the other
end replies with a ServerHello that contains an HTTPS certificate and the
list of protocols and ciphers the server supports. Then they do an [encryption
session setup rain dance][tlsraindance] that I don't completely understand. The
resulting channel carries cipher (or encrypted) text written to and read from
the wire, and a session layer translates that cipher text back to clear text
for the other parts of the browser stack.
[tls]: https://en.wikipedia.org/wiki/Transport_Layer_Security
[tlsraindance]: https://www.cloudflare.com/learning/ssl/what-happens-in-a-tls-handshake/
The browser then uses the information in the ServerHello to decide how
to proceed from here.
## HTTP
If the browser notices the server supports HTTP/2 it sets up a HTTP/2 session
(with a handshake that involves a few roundtrips like what I described for DNS)
and creates a new stream for this request. The browser then formats the request
as HTTP/2 wire format bytes (binary format) and writes it to the HTTP/2 stream,
which writes it to the HTTP/2 framing layer, which writes it to the encryption
layer, which writes it to the network socket and sends it over the internet.
If the browser notices the server DOES NOT support HTTP/2, it formats the
request as HTTP/1.1 wire formatted bytes and writes it to the encryption layer,
which writes it to the network socket and sends it over the internet using that
complicated process I spelled out for DNS.
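For reference, the HTTP/1.1 wire format is plain text. A trimmed-down version of
the request described above looks roughly like this (a real browser sends more
headers than shown here):
```
GET / HTTP/1.1
Host: christine.website
Accept-Encoding: gzip

```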
This then hits the remote load balancer which parses the client HTTP request and
uses site-local configuration to select the best application server to handle
the response. It then forwards the client's HTTP request to the correct server
by creating a TCP session to that backend, writing the HTTP request and waiting
for a response over that TCP session. Depending on site-local configuration
there may be layers of encryption involved.
## Application Server
Now, the request finally gets to the application server. This TCP session is
accepted by the application server and the headers are read into memory. The
path is read by the application server and the correct handler is chosen. The
HTML for the front page of christine.website is rendered and written to the TCP
session and travels to the load balancer, gets encrypted with TLS, the encrypted
HTML gets sent back over the internet to your browser and then your browser
decrypts it and starts to parse and display the website. The browser will run
into places where it needs more resources (such as stylesheets or images), so it will
make additional HTTP requests to the load balancer to grab those too.
---
The end result is that the user sees the website in all its glory. Given all
these moving parts it's astounding that this works as reliably as it does. Each
of the TCP, ARP and DNS requests also happen at each level of the stack. There
are layers upon layers upon layers of interacting protocols and implementations.
This is why it is hard to reliably put a website on the internet. If there is a
god, they are surely the one holding all these potentially unreliable systems
together to make everything appear like it is working.

View File

@ -1,572 +0,0 @@
---
title: "How I Start: Nix"
date: 2020-03-08
series: howto
tags:
- nix
- rust
---
[Nix][nix] is a tool that helps people create reproducible builds. This means that
given a known input, you can get the same output on other machines. Let's build
and deploy a small Rust service with Nix. This will not require the Rust compiler
to be installed with [rustup][rustup] or similar.
[nix]: https://nixos.org/nix/
[rustup]: https://rustup.rs
- Setting up your environment
- A new project
- Setting up the Rust compiler
- Serving HTTP
- A simple package build
- Shipping it in a docker image
## Setting up your environment
The first step is to install Nix. If you are using a Linux machine, run this
script:
```console
$ curl https://nixos.org/nix/install | sh
```
This will prompt you for more information as it goes on, so be sure to follow
the instructions carefully. Once it is done, close and re-open your shell. After
you have done this, `nix-env` should exist in your shell. Try to run it:
```console
$ nix-env
error: no operation specified
Try 'nix-env --help' for more information.
```
Let's install a few other tools to help us with development. First, let's
install [lorri][lorri] to help us manage our development shell:
[lorri]: https://github.com/target/lorri
```
$ nix-env --install --file https://github.com/target/lorri/archive/master.tar.gz
```
This will automatically download and build lorri for your system based on the
latest possible version. Once that is done, open another shell window (the lorri
docs include ways to do this more persistently, but this will work for now) and run:
```console
$ lorri daemon
```
Now go back to your main shell window and install [direnv][direnv]:
[direnv]: https://direnv.net
```console
$ nix-env --install direnv
```
Next, follow the [shell setup][direnvsetup] needed for your shell. I personally
use `fish` with [oh my fish][omf], so I would run this:
[direnvsetup]: https://direnv.net/docs/hook.html
[omf]: https://github.com/oh-my-fish/oh-my-fish
```console
$ omf install direnv
```
Finally, let's install [niv][niv] to help us handle dependencies for the
project. This will allow us to make sure that our builds pin _everything_ to a
specific set of versions, including operating system packages.
[niv]: https://github.com/nmattia/niv
```console
$ nix-env --install niv
```
Now that we have all of the tools we will need installed, let's create the
project.
## A new project
Go to your favorite place to put code and make a new folder. I personally prefer
`~/code`, so I will be using that here:
```console
$ cd ~/code
$ mkdir helloworld
$ cd helloworld
```
Let's set up the basic skeleton of the project. First, initialize niv:
```console
$ niv init
```
This will add the latest versions of `niv` itself and the packages used for the
system to `nix/sources.json`. This will allow us to pin exact versions so the
environment is as predictable as possible. Sometimes the versions of software in
the pinned nixpkgs are too old. If this happens, you can update to the
"unstable" branch of nixpkgs with this command:
```console
$ niv update nixpkgs -b nixpkgs-unstable
```
Next, set up lorri using `lorri init`:
```console
$ lorri init
```
This will create `shell.nix` and `.envrc`. `shell.nix` will be where we define
the development environment for this service. `.envrc` is used to tell direnv
what it needs to do. Let's try and activate the `.envrc`:
```console
$ cd .
direnv: error /home/cadey/code/helloworld/.envrc is blocked. Run `direnv allow`
to approve its content
```
Let's review its content:
```console
$ cat .envrc
eval "$(lorri direnv)"
```
This seems reasonable, so approve it with `direnv allow` like the error message
suggests:
```console
$ direnv allow
```
Now let's customize the `shell.nix` file to use our pinned version of nixpkgs.
Currently, it looks something like this:
```nix
# shell.nix
let
pkgs = import <nixpkgs> {};
in
pkgs.mkShell {
buildInputs = [
pkgs.hello
];
}
```
This currently imports nixpkgs from the system-level version of it. This means
that different systems could have different versions of nixpkgs on them, and that
could make the `shell.nix` file hard to reproduce between machines. Let's import
the pinned version of nixpkgs that niv created:
```nix
# shell.nix
let
sources = import ./nix/sources.nix;
pkgs = import sources.nixpkgs {};
in
pkgs.mkShell {
buildInputs = [
pkgs.hello
];
}
```
And then let's test it with `lorri shell`:
```console
$ lorri shell
lorri: building environment........ done
(lorri) $
```
And let's see if `hello` is available inside the shell:
```console
(lorri) $ hello
Hello, world!
```
You can set environment variables inside the `shell.nix` file. Do so like this:
```nix
# shell.nix
let
sources = import ./nix/sources.nix;
pkgs = import sources.nixpkgs {};
in
pkgs.mkShell {
buildInputs = [
pkgs.hello
];
# Environment variables
HELLO="world";
}
```
Wait a moment for lorri to finish rebuilding the development environment and
then let's see if the environment variable shows up:
```console
$ cd .
direnv: loading ~/code/helloworld/.envrc
<output snipped>
$ echo $HELLO
world
```
Now that we have the basics of the environment set up, let's install the Rust
compiler.
## Setting up the Rust compiler
First, add [nixpkgs-mozilla][nixpkgsmoz] to niv:
[nixpkgsmoz]: https://github.com/mozilla/nixpkgs-mozilla
```console
$ niv add mozilla/nixpkgs-mozilla
```
Then create `nix/rust.nix` in your repo:
```nix
# nix/rust.nix
{ sources ? import ./sources.nix }:
let
pkgs =
import sources.nixpkgs { overlays = [ (import sources.nixpkgs-mozilla) ]; };
channel = "nightly";
date = "2020-03-08";
targets = [ ];
chan = pkgs.rustChannelOfTargets channel date targets;
in chan
```
This creates a nix function that takes in the pre-imported list of sources,
creates a copy of nixpkgs with Rust at the nightly version `2020-03-08` overlaid
into it, and exposes the rust package out of it. Let's add this to `shell.nix`:
```nix
# shell.nix
let
sources = import ./nix/sources.nix;
rust = import ./nix/rust.nix { inherit sources; };
pkgs = import sources.nixpkgs { };
in
pkgs.mkShell {
buildInputs = [
rust
];
}
```
Then ask lorri to recreate the development environment. This may take a bit to
run because it's setting up everything the Rust compiler requires to run.
```console
$ lorri shell
(lorri) $
```
Let's see what version of Rust is installed:
```console
(lorri) $ rustc --version
rustc 1.43.0-nightly (823ff8cf1 2020-03-07)
```
This is exactly what we expect. Rust nightly versions get released with the
date of the previous day in them. To be extra sure, let's see what the shell
thinks `rustc` resolves to:
```console
(lorri) $ which rustc
/nix/store/w6zk1zijfwrnjm6xyfmrgbxb6dvvn6di-rust-1.43.0-nightly-2020-03-07-823ff8cf1/bin/rustc
```
And now exit that shell and reload direnv:
```console
(lorri) $ exit
$ cd .
direnv: loading ~/code/helloworld/.envrc
$ which rustc
/nix/store/w6zk1zijfwrnjm6xyfmrgbxb6dvvn6di-rust-1.43.0-nightly-2020-03-07-823ff8cf1/bin/rustc
```
And now we have Rust installed at an arbitrary nightly version for _that project
only_. This will work on other machines too. Now that we have our development
environment set up, let's serve HTTP.
## Serving HTTP
[Rocket][rocket] is a popular web framework for Rust programs. Let's use that to
create a small "hello, world" server. We will need to do the following:
[rocket]: https://rocket.rs
- Create the new Rust project
- Add Rocket as a dependency
- Write our "hello world" route
- Test a build of the service with `cargo build`
### Create the new Rust project
Create the new Rust project with `cargo init`:
```console
$ cargo init --vcs git .
Created binary (application) package
```
This will create the directory `src` and a file named `Cargo.toml`. Rust code
goes in `src` and the `Cargo.toml` file configures dependencies. Adding the
`--vcs git` flag also has cargo create a [gitignore][gitignore] file so that the
target folder isn't tracked by git.
[gitignore]: https://git-scm.com/docs/gitignore
### Add Rocket as a dependency
Open `Cargo.toml` and add the following to it:
```toml
[dependencies]
rocket = "0.4.3"
```
Then download/build Rocket with `cargo build`:
```console
$ cargo build
```
This will download all of the dependencies you need and precompile Rocket, and it
will help speed up later builds.
### Write our "hello world" route
Now put the following in `src/main.rs`:
```rust
#![feature(proc_macro_hygiene, decl_macro)] // language features needed by Rocket
// Import the rocket macros
#[macro_use]
extern crate rocket;
// Create route / that returns "Hello, world!"
#[get("/")]
fn index() -> &'static str {
"Hello, world!"
}
fn main() {
rocket::ignite().mount("/", routes![index]).launch();
}
```
### Test a build
Rerun `cargo build`:
```console
$ cargo build
```
This will create the binary at `target/debug/helloworld`. Let's run it locally
and see if it works:
```console
$ ./target/debug/helloworld &
$ curl http://127.0.0.1:8000
Hello, world!
$ fg
<press control-c>
```
The HTTP service works. We have a binary that is created with the Rust compiler
that Nix installed.
## A simple package build
Now that we have the HTTP service working, let's put it inside a nix package. We
will need to use [naersk][naersk] to do this. Add naersk to your project with
niv:
[naersk]: https://github.com/nmattia/naersk
```console
$ niv add nmattia/naersk
```
Now let's create `helloworld.nix`:
```nix
# import niv sources and the pinned nixpkgs
{ sources ? import ./nix/sources.nix, pkgs ? import sources.nixpkgs { }}:
let
# import rust compiler
rust = import ./nix/rust.nix { inherit sources; };
# configure naersk to use our pinned rust compiler
naersk = pkgs.callPackage sources.naersk {
rustc = rust;
cargo = rust;
};
# tell nix-build to ignore the `target` directory
src = builtins.filterSource
(path: type: type != "directory" || builtins.baseNameOf path != "target")
./.;
in naersk.buildPackage {
inherit src;
remapPathPrefix =
true; # remove nix store references for a smaller output package
}
```
And then build it with `nix-build`:
```console
$ nix-build helloworld.nix
```
This can take a bit to run, but it will do the following things:
- Download naersk
- Download every Rust crate your HTTP service depends on into the Nix store
- Run your program's tests
- Build your dependencies into a Nix package
- Build your program with those dependencies
- Place a link to the result at `./result`
Once it is done, let's take a look at the result:
```console
$ du -hs ./result/bin/helloworld
2.1M ./result/bin/helloworld
$ ldd ./result/bin/helloworld
linux-vdso.so.1 (0x00007fffae080000)
libdl.so.2 => /nix/store/wx1vk75bpdr65g6xwxbj4rw0pk04v5j3-glibc-2.27/lib/libdl.so.2 (0x0
0007f3a01666000)
librt.so.1 => /nix/store/wx1vk75bpdr65g6xwxbj4rw0pk04v5j3-glibc-2.27/lib/librt.so.1 (0x0
0007f3a0165c000)
libpthread.so.0 => /nix/store/wx1vk75bpdr65g6xwxbj4rw0pk04v5j3-glibc-2.27/lib/libpthread
.so.0 (0x00007f3a0163b000)
libgcc_s.so.1 => /nix/store/wx1vk75bpdr65g6xwxbj4rw0pk04v5j3-glibc-2.27/lib/libgcc_s.so.
1 (0x00007f3a013f5000)
libc.so.6 => /nix/store/wx1vk75bpdr65g6xwxbj4rw0pk04v5j3-glibc-2.27/lib/libc.so.6 (0x000
07f3a0123f000)
/nix/store/wx1vk75bpdr65g6xwxbj4rw0pk04v5j3-glibc-2.27/lib/ld-linux-x86-64.so.2 => /lib6
4/ld-linux-x86-64.so.2 (0x00007f3a0160b000)
libm.so.6 => /nix/store/wx1vk75bpdr65g6xwxbj4rw0pk04v5j3-glibc-2.27/lib/libm.so.6 (0x000
07f3a010a9000)
```
This means that the Nix build created a 2.1 megabyte binary that only depends on
[glibc][glibc], the implementation of the C language standard library that Nix
prefers.
[glibc]: https://www.gnu.org/software/libc/
For repo cleanliness, add the `result` link to the [gitignore][gitignore]:
```console
$ echo 'result*' >> .gitignore
```
## Shipping it in a Docker image
Now that we have a package built, let's ship it in a docker image. nixpkgs
provides [dockerTools][dockertools] which helps us create docker images out of
Nix packages. Let's create `default.nix` with the following contents:
[dockertools]: https://nixos.org/nixpkgs/manual/#sec-pkgs-dockerTools
```nix
{ system ? builtins.currentSystem }:
let
sources = import ./nix/sources.nix;
pkgs = import sources.nixpkgs { };
helloworld = import ./helloworld.nix { inherit sources pkgs; };
name = "xena/helloworld";
tag = "latest";
in pkgs.dockerTools.buildLayeredImage {
inherit name tag;
contents = [ helloworld ];
config = {
Cmd = [ "/bin/helloworld" ];
Env = [ "ROCKET_PORT=5000" ];
WorkingDir = "/";
};
}
```
And then build it with `nix-build`:
```console
$ nix-build default.nix
```
This will create a tarball containing the docker image information as the result
of the Nix build. Load it into docker using `docker load`:
```console
$ docker load -i result
```
And then run it using `docker run`:
```console
$ docker run --rm -itp 52340:5000 xena/helloworld
```
Now test it using curl:
```console
$ curl http://127.0.0.1:52340
Hello, world!
```
And now you have a docker image you can run wherever you want. The
`buildLayeredImage` function used in `default.nix` also makes Nix put each
dependency of the package into its own docker layer. This makes new versions of
your program very efficient to upgrade on your clusters: realistically, this
reduces the amount of data needed for new versions of the program down to what
changed. If nothing but some resources in their own package were changed, only
those packages get downloaded.
This is how I start a new project with Nix. I put all of the code described in
this post in [this GitHub repo][helloworldrepo] in case it helps. Have fun and
be well.
[helloworldrepo]: https://github.com/Xe/helloworld
---
For some "extra credit" tasks, try and see if you can do the following:
- Use the version of [niv][niv] that niv pinned
- Customize the environment of the container by following the [Rocket
configuration documentation](https://rocket.rs/v0.4/guide/configuration/)
- Add some more routes to the program
- Read the [Nix
documentation](https://nixos.org/nix/manual/#chap-writing-nix-expressions) and
learn more about writing Nix expressions
- Configure your editor/IDE to use the `direnv` path

View File

@ -1,556 +0,0 @@
---
title: "How I Start: Rust"
date: 2020-03-15
series: howto
tags:
- rust
- how-i-start
- nix
---
[Rust][rustlang] is an exciting new programming language that makes it easy to
make understandable and reliable software. It is made by Mozilla and is used by
Amazon, Google, Microsoft and many other large companies.
[rustlang]: https://www.rust-lang.org/
Rust has a reputation of being difficult because it makes no effort to hide what
is going on. I'd like to show you how I start with Rust projects. Let's make a
small HTTP service using [Rocket][rocket].
[rocket]: https://rocket.rs
- Setting up your environment
- A new project
- Testing
- Adding functionality
- OpenAPI specifications
- Error responses
- Shipping it in a docker image
## Setting up your environment
The first step is to install the Rust compiler. You can use any method you like,
but since we are requiring the nightly version of Rust for this project, I
suggest using [rustup][rustup]:
[rustup]: https://rustup.rs/
```console
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain nightly
```
If you are using [NixOS][nixos] or another Linux distribution with [Nix][nix]
installed, see [this post][howistartnix] for some information on how to set up
the Rust compiler.
[nixos]: https://nixos.org/nixos/
[nix]: https://nixos.org/nix/
[howistartnix]: https://christine.website/blog/how-i-start-nix-2020-03-08
## A new project
[Rocket][rocket] is a popular web framework for Rust programs. Let's use that to
create a small "hello, world" server. We will need to do the following:
[rocket]: https://rocket.rs/
- Create the new Rust project
- Add Rocket as a dependency
- Write the hello world route
- Test a build of the service with `cargo build`
- Run it and see what happens
### Create the new Rust project
Create the new Rust project with `cargo init`:
```console
$ cargo init --vcs git .
Created binary (application) package
```
This will create the directory `src` and a file named `Cargo.toml`. Rust code
goes in `src` and the `Cargo.toml` file configures dependencies. Adding the
`--vcs git` flag also has cargo create a [gitignore][gitignore] file so that the
target folder isn't tracked by git.
[gitignore]: https://git-scm.com/docs/gitignore
### Add Rocket as a dependency
Open `Cargo.toml` and add the following to it:
```toml
[dependencies]
rocket = "0.4.4"
```
Then download/build [Rocket][rocket] with `cargo build`:
```console
$ cargo build
```
This will download all of the dependencies you need and precompile Rocket, and it
will help speed up later builds.
### Write our "hello world" route
Now put the following in `src/main.rs`:
```rust
#![feature(proc_macro_hygiene, decl_macro)] // Nightly-only language features needed by Rocket
// Import the rocket macros
#[macro_use]
extern crate rocket;
/// Create route / that returns "Hello, world!"
#[get("/")]
fn index() -> &'static str {
"Hello, world!"
}
fn main() {
rocket::ignite().mount("/", routes![index]).launch();
}
```
### Test a build
Rerun `cargo build`:
```console
$ cargo build
```
This will create the binary at `target/debug/helloworld`. Let's run it locally
and see if it works:
```console
$ ./target/debug/helloworld
```
And in another terminal window:
```console
$ curl http://127.0.0.1:8000
Hello, world!
$ fg
<press control-c>
```
The HTTP service works. We have a binary that is created with the Rust compiler.
This binary will be available at `./target/debug/helloworld`. However, it could
use some tests.
## Testing
Rocket has support for [unit testing][rockettest] built in. Let's create a tests
module and verify this route in testing.
[rockettest]: https://rocket.rs/v0.4/guide/testing/
### Create a tests module
Rust allows you to nest modules within files using the [`mod` keyword][rustmod].
Create a `tests` module that will only build when testing is requested:
[rustmod]: https://doc.rust-lang.org/rust-by-example/mod/visibility.html
```rust
#[cfg(test)] // Only compile this when unit testing is requested
mod tests {
use super::*; // Modules are their own scope, so you
// need to explicitly use the stuff in
// the parent module.
use rocket::http::Status;
use rocket::local::*;
#[test]
fn test_index() {
// create the rocket instance to test
let rkt = rocket::ignite().mount("/", routes![index]);
// create a HTTP client bound to this rocket instance
let client = Client::new(rkt).expect("valid rocket");
// get a HTTP response
let mut response = client.get("/").dispatch();
// Ensure it returns HTTP 200
assert_eq!(response.status(), Status::Ok);
// Ensure the body is what we expect it to be
assert_eq!(response.body_string(), Some("Hello, world!".into()));
}
}
```
### Run tests
`cargo test` is used to run tests in Rust. Let's run it:
```console
$ cargo test
Compiling helloworld v0.1.0 (/home/cadey/code/helloworld)
Finished test [unoptimized + debuginfo] target(s) in 1.80s
Running target/debug/deps/helloworld-49d1bd4d4f816617
running 1 test
test tests::test_index ... ok
```
## Adding functionality
Most HTTP services return [JSON][json] or JavaScript Object Notation as a way to
pass objects between computer programs. Let's use Rocket's [JSON
support][rocketjson] to add a `/hostinfo` route to this app that returns some
simple information:
[json]: https://www.json.org/json-en.html
[rocketjson]: https://api.rocket.rs/v0.4/rocket_contrib/json/index.html
- the hostname of the computer serving the response
- the process ID of the HTTP service
- the uptime of the system in seconds
### Encoding things to JSON
For encoding things to JSON, we will be using [serde][serde]. We will need to
add serde as a dependency. Open `Cargo.toml` and put the following lines in it:
[serde]: https://serde.rs/
```toml
[dependencies]
serde_json = "1.0"
serde = { version = "1.0", features = ["derive"] }
```
This lets us use `#[derive(Serialize, Deserialize)]` on our Rust structs, which
will allow us to automate away the JSON generation code _at compile time_. For
more information about derivation in Rust, see [here][rustderive].
[rustderive]: https://doc.rust-lang.org/rust-by-example/trait/derive.html
Let's define the data we will send back to the client using a [struct][ruststruct].
[ruststruct]: https://doc.rust-lang.org/rust-by-example/custom_types/structs.html
```rust
use serde::*;
/// Host information structure returned at /hostinfo
#[derive(Serialize, Debug)]
struct HostInfo {
hostname: String,
pid: u32,
uptime: u64,
}
```
To implement this call, we will need another few dependencies in the `Cargo.toml`
file. We will use [gethostname][gethostname] to get the hostname of the machine
and [psutil][psutil] to get the uptime of the machine. Put the following below
the `serde` dependency line:
[gethostname]: https://crates.io/crates/gethostname
[psutil]: https://crates.io/crates/psutil
```toml
gethostname = "0.2.1"
psutil = "3.0.1"
```
Finally, we will need to enable Rocket's JSON support. Put the following at the
end of your `Cargo.toml` file:
```toml
[dependencies.rocket_contrib]
version = "0.4.4"
default-features = false
features = ["json"]
```
Now we can implement the `/hostinfo` route:
```rust
/// Create route /hostinfo that returns information about the host serving this
/// page.
#[get("/hostinfo")]
fn hostinfo() -> Json<HostInfo> {
    // gets the current machine hostname or "unknown" if the hostname doesn't
    // parse into UTF-8 (very unlikely)
    let hostname = gethostname::gethostname()
        .into_string()
        .unwrap_or_else(|_| "unknown".to_string());
    Json(HostInfo {
        hostname: hostname,
        pid: std::process::id(),
        uptime: psutil::host::uptime()
            .unwrap() // normally this is a bad idea, but this code is
                      // very unlikely to fail.
            .as_secs(),
    })
}
```
And then register it in the main function:
```rust
fn main() {
rocket::ignite()
.mount("/", routes![index, hostinfo])
.launch();
}
```
Now rebuild the project and run the server:
```console
$ cargo build
$ ./target/debug/helloworld
```
And in another terminal test it with `curl`:
```console
$ curl http://127.0.0.1:8000/hostinfo
{"hostname":"shachi","pid":4291,"uptime":13641}
```
You can use a similar process for any kind of other route.
## OpenAPI specifications
[OpenAPI][openapi] is a common specification format for describing API routes.
This allows users of the API to automatically generate valid clients for them.
Writing these by hand can be tedious, so let's pass that work off to the
compiler using [okapi][okapi].
[openapi]: https://swagger.io/docs/specification/about/
[okapi]: https://github.com/GREsau/okapi
Add the following lines to your `Cargo.toml` file in the `[dependencies]` block:
```toml
rocket_okapi = "0.3.6"
schemars = "0.6"
okapi = { version = "0.3", features = ["derive_json_schema"] }
```
This will allow us to generate OpenAPI specifications from Rocket routes and the
types in them. Let's import the rocket_okapi macros and use them:
```rust
// Import OpenAPI macros
#[macro_use]
extern crate rocket_okapi;
use rocket_okapi::JsonSchema;
```
We need to add JSON schema generation abilities to `HostInfo`. Change:
```rust
#[derive(Serialize, Debug)]
```
to
```rust
#[derive(Serialize, JsonSchema, Debug)]
```
to generate the OpenAPI code for our type.
Next we can add the `/hostinfo` route to the OpenAPI schema:
```rust
/// Create route /hostinfo that returns information about the host serving this
/// page.
#[openapi]
#[get("/hostinfo")]
fn hostinfo() -> Json<HostInfo> {
// ...
```
Also add the index route to the OpenAPI schema:
```rust
/// Create route / that returns "Hello, world!"
#[openapi]
#[get("/")]
fn index() -> &'static str {
"Hello, world!"
}
```
And finally update the main function to use openapi:
```rust
fn main() {
rocket::ignite()
.mount("/", routes_with_openapi![index, hostinfo])
.launch();
}
```
Then rebuild it and run the server:
```console
$ cargo build
$ ./target/debug/helloworld
```
And then in another terminal:
```console
$ curl http://127.0.0.1:8000/openapi.json
```
This should return a large JSON object that describes all of the HTTP routes and
the data they return. To see this visually, change main to this:
```rust
use rocket_okapi::swagger_ui::{make_swagger_ui, SwaggerUIConfig};
fn main() {
rocket::ignite()
.mount("/", routes_with_openapi![index, hostinfo])
.mount(
"/swagger-ui/",
make_swagger_ui(&SwaggerUIConfig {
url: Some("../openapi.json".to_owned()),
urls: None,
}),
)
.launch();
}
```
Then rebuild and run the service:
```console
$ cargo build
$ ./target/debug/helloworld
```
And [open the swagger UI](http://127.0.0.1:8000/swagger-ui/) in your favorite
browser. This will show you a graphical display of all of the routes and the
data types in your service. For an example, see
[here](https://printerfacts.cetacean.club/swagger-ui/index.html).
## Error responses
Earlier in the /hostinfo route we glossed over error handling. Let's correct
this by using the [OpenApiError][okapierror] type in the hostinfo function:
[okapierror]: https://docs.rs/rocket_okapi/0.3.6/rocket_okapi/struct.OpenApiError.html
```rust
/// Create route /hostinfo that returns information about the host serving
/// this page.
#[openapi]
#[get("/hostinfo")]
fn hostinfo() -> Result<Json<HostInfo>> {
match gethostname::gethostname().into_string() {
Ok(hostname) => Ok(Json(HostInfo {
hostname: hostname,
pid: std::process::id(),
uptime: psutil::host::uptime().unwrap().as_secs(),
})),
Err(_) => Err(OpenApiError::new(format!(
"hostname does not parse as UTF-8"
))),
}
}
```
When the `into_string` operation fails (because the hostname is somehow invalid
UTF-8), this will result in a non-200 response with the `"hostname does not parse
as UTF-8"` message.
## Shipping it in a docker image
Many deployment systems use [Docker][docker] to describe a program's environment
and dependencies. Create a `Dockerfile` with the following contents:
```Dockerfile
# Use the minimal image
FROM rustlang/rust:nightly-slim AS build
# Where we will build the program
WORKDIR /src/helloworld
# Copy source code into the container
COPY . .
# Build the program in release mode
RUN cargo build --release
# Create the runtime image
FROM ubuntu:18.04
# Copy the compiled service binary
COPY --from=build /src/helloworld/target/release/helloworld /usr/local/bin/helloworld
# Start the helloworld service on container boot
CMD ["usr/local/bin/helloworld"]
```
And then build it:
```console
$ docker build -t xena/helloworld .
```
And then run it:
```console
$ docker run --rm -itp 8000:8000 xena/helloworld
```
And in another terminal:
```console
$ curl http://127.0.0.1:8000
Hello, world!
```
From here you can do whatever you want with this service. You can deploy it to
Kubernetes with a manifest that would look something like [this][k8shack].
[k8shack]: https://clbin.com/zSPDs
---
This is how I start a new Rust project. I put all of the code described in this
post in [this GitHub repo][helloworldrepo] in case it helps. Have fun and be
well.
[helloworldrepo]: https://github.com/Xe/helloworld
---
For some "extra credit" tasks, try and see if you can do the following:
- Customize the environment of the container by following the [Rocket
configuration documentation](https://rocket.rs/v0.4/guide/configuration/) and
docker [environment variables][dockerenvvars]
- Use Rocket's [templates][rockettemplate] to make the host information show up
in HTML
- Add tests for the `/hostinfo` route
- Make a route that always returns errors, what does it look like?
[dockerenvvars]: https://docs.docker.com/engine/reference/builder/#env
[rockettemplate]: https://api.rocket.rs/v0.4/rocket_contrib/templates/index.html
Many thanks to [Coleman McFarland](https://coleman.codes/) for proofreading this
post.

Some files were not shown because too many files have changed in this diff.