Compare commits

28 Commits

Author SHA1 Message Date
2ac002d798 keep working on sidenotes 2026-03-11 05:19:18 -04:00
6e6351f5cf keep working on sidenotes, copy latest real post over to test 2026-03-09 21:50:51 -04:00
e2049c2e29 tweak header bar styling 2026-03-09 07:58:28 -04:00
173b5ba9f4 theme switcher 2026-03-09 07:48:39 -04:00
0070ed1c19 settle on a dark theme and implement with override 2026-03-08 10:55:41 -04:00
0f5dadbf6f add obsidian/mocha/deep-ocean themes (Gemini) 2026-03-07 13:40:33 -05:00
8fc267e6df add mise config which was missing somehow 2026-03-07 13:38:43 -05:00
827a4406bd remove extraneous themes 2026-03-07 13:25:03 -05:00
657ad09a20 all the themes together + switcher 2026-03-07 13:18:56 -05:00
6bf81e0b20 claude dark v9 "northern lights" 2026-03-07 11:09:28 -05:00
6b8c47cbb4 claude dark v8 "copper & slate" 2026-03-07 11:08:08 -05:00
1142003e40 claude dark v7 "stormfront" 2026-03-07 11:06:36 -05:00
365da1f285 claude dark v6 "midnight garden" 2026-03-07 09:27:30 -05:00
e01cf188e2 claude dark v5, salmon accent 2026-03-07 09:21:34 -05:00
6747faeb7a claude dark v4, blue/gold 2026-03-07 09:18:19 -05:00
7acf1f2c9f start work on sidenotes 2026-03-05 21:39:22 -05:00
13d0ac8de7 make dropcap switchable between ascender/descender 2026-03-05 08:45:49 -05:00
b291e93e75 move to Baskervville for headings, with proper smallcaps variant 2026-03-05 08:15:24 -05:00
b0e6576b33 add SmallCaps component and use for title 2026-03-03 20:31:46 -05:00
6b0a985ee1 finish TOC component 2026-03-01 18:34:48 -05:00
dfdf6c6e66 continue working on post layout, add typography styles 2026-02-28 15:21:36 -05:00
c28f340333 start working on posts with placeholder content 2026-02-28 09:26:10 -05:00
95b58b5615 start work on Astro port 2026-02-27 09:10:25 -05:00
c81531a092 commit unsaved work before starting work on astro migration 2026-02-26 15:03:44 -05:00
bef34007d4 advent of languages 2024 day 4 2024-12-11 05:19:17 -05:00
400da4e539 advent of languages 2024 day 3 2024-12-07 09:01:34 -05:00
2b8989e02e advent of languages day 2 2024-12-03 15:50:49 -05:00
dfc09d8861 advent of languages day 1 2024-12-02 10:34:06 -05:00
88 changed files with 2579 additions and 6390 deletions

13
.gitignore vendored
View File

@@ -1,12 +1,3 @@
.DS_Store .astro/
dist/
node_modules node_modules
/build
/.svelte-kit
/package
.env
.env.*
!.env.example
vite.config.js.timestamp-*
vite.config.ts.timestamp-*
**/_test.*
/scratch

2
.npmrc
View File

@@ -1,2 +0,0 @@
engine-strict=true
resolution-mode=highest

View File

@@ -1,38 +0,0 @@
# create-svelte
Everything you need to build a Svelte project, powered by [`create-svelte`](https://github.com/sveltejs/kit/tree/master/packages/create-svelte).
## Creating a project
If you're seeing this, you've probably already done this step. Congrats!
```bash
# create a new project in the current directory
npm create svelte@latest
# create a new project in my-app
npm create svelte@latest my-app
```
## Developing
Once you've created a project and installed dependencies with `npm install` (or `pnpm install` or `yarn`), start a development server:
```bash
npm run dev
# or start the server and open the app in a new browser tab
npm run dev -- --open
```
## Building
To create a production version of your app:
```bash
npm run build
```
You can preview the production build with `npm run preview`.
> To deploy your app, you may need to install an [adapter](https://kit.svelte.dev/docs/adapters) for your target environment.

11
astro.config.mjs Normal file
View File

@@ -0,0 +1,11 @@
import { defineConfig } from "astro/config";
import mdx from '@astrojs/mdx';
import vue from '@astrojs/vue';
export default defineConfig({
integrations: [
mdx(),
vue(),
],
prefetch: true,
});

1291
bun.lock Normal file

File diff suppressed because it is too large Load Diff

3
mise.toml Normal file
View File

@@ -0,0 +1,3 @@
[tools]
bun = "latest"
node = "24"

3210
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,25 +1,23 @@
{ {
"name": "blog.jfmonty2.com", "dependencies": {
"version": "0.0.1", "@astrojs/check": "^0.9.6",
"private": true, "@astrojs/mdx": "^4.3.13",
"scripts": { "@astrojs/rss": "^4.0.15",
"dev": "vite dev", "@astrojs/sitemap": "^3.7.0",
"build": "vite build", "@astrojs/vue": "^5.1.4",
"preview": "vite preview" "@fontsource-variable/baskervville": "^5.2.3",
}, "@fontsource-variable/baskervville-sc": "^5.2.3",
"devDependencies": { "@fontsource-variable/figtree": "^5.2.10",
"@sveltejs/adapter-auto": "^2.0.0", "astro": "^5.18.0",
"@sveltejs/adapter-static": "^2.0.3", "sharp": "^0.34.5"
"@sveltejs/kit": "^1.20.4", },
"hast-util-to-html": "^9.0.0", "name": "astro",
"hast-util-to-text": "^4.0.0", "version": "0.0.1",
"mdast-util-to-string": "^4.0.0", "scripts": {
"mdsvex": "^0.11.0", "dev": "astro dev",
"sass": "^1.69.5", "build": "astro build",
"svelte": "^4.0.5", "preview": "astro preview",
"unist-util-find": "^3.0.0", "astro": "astro"
"unist-util-visit": "^5.0.0", },
"vite": "^4.4.2" "type": "module"
},
"type": "module"
} }

View File

@@ -0,0 +1,363 @@
---
title: 'Advent of Languages 2024, Day 4: Fortran'
date: 2024-12-10
---
import Sidenote from '@components/Sidenote.astro'
Oh, you thought we were done going back in time? Well I've got news for you, Doc Brown, you'd better not mothball the ol' time machine just yet, because we're going back even further. That's right, for Day 4 I've decided to use Fortran!<Sidenote>Apparently it's officially called `Fortran` now and not `FORTRAN` like it was in days of yore, and has been ever since the 1990s. That's right, when most languages I've used were just getting their start, Fortran was going through its mid-life identity crisis.</Sidenote><Sidenote>When I told my wife that I was going to be using a language that came out in the 1950s, she wanted to know if the next one would be expressed in Egyptian hieroglyphs.</Sidenote>
Really, though, it's because this is day _four_, and I had to replace all those missed Forth jokes with _something_.
## The old that is strong does not wither
Fortran dates back to 1958, making it the oldest programming language still in widespread use.<Sidenote>Says Wikipedia, at least. Not in the article about Fortran, for some reason, but in [the one about Lisp](https://en.wikipedia.org/wiki/Lisp_(programming_language)).</Sidenote> Exactly how widespread is debatable--the [TIOBE index](https://www.tiobe.com/tiobe-index/) puts it at #8, but the TIOBE index also puts Delphi Pascal at #11 and Assembly at #19, so it might have a different idea of what makes a language "popular" than you or I.<Sidenote>For contrast, Stack Overflow puts it at #38, right below Julia and Zig, which sounds a little more realistic to me.</Sidenote> Regardless, it's undeniable that it gets pretty heavy use even today--much more than Forth, I suspect--because of its ubiquity in the scientific and HPC sectors. The website mentions "numerical weather and ocean prediction, computational fluid dynamics, applied math, statistics, and finance" as particularly strong areas. My guess is that this largely comes down to inertia, plus Fortran being "good enough" for the things people wanted to use it for that it was easier to keep updating Fortran than to switch to something else wholesale.<Sidenote>Unlike, say, BASIC, which is so gimped by modern standards that it _doesn't even have a call stack_. That's right, you can't do recursion in BASIC, at least not without managing the stack yourself.</Sidenote>
And update they have! Wikipedia lists 12 major versions of Fortran, with the most recent being Fortran 2023. That's a pretty impressive history for a programming language. It's old enough to retire!
The later versions of Fortran have added all sorts of modern conveniences, like else-if conditionals (77), properly namespaced modules (90), growable arrays (also 90), local variables (2008), and finally, just last year, ternary expressions and the ability to infer the length of a string variable from a string literal! Wow!
I have to say, just reading up on Fortran is already feeling more modern than it did for Forth, or even C/C++. It's got a [snazzy website](https://fortran-lang.org/)<Sidenote>With a dark/light mode switcher, so you know it's hip.</Sidenote> with obvious links to documentation, sitewide search, and even an online playground. This really isn't doing any favors for my former impression of Fortran as a doddering almost-septuagenarian with one foot in the grave and the other on a banana peel.
## On the four(tran)th day of Advent, my mainframe gave to me
The Fortran getting-started guide [literally gives you](https://fortran-lang.org/learn/quickstart/hello_world/) hello-world, so I won't bore you with that here. Instead I'll just note some interesting aspects of the language that jumped out at me:
* There's no `main()` function like C and a lot of other compiled languages, but there are mandatory `program <name> ... end program` delimiters at the start and end of your outermost layer of execution. Modules are defined outside of the `program ... end program` block. Not sure yet whether you can have multiple `program` blocks, but I'm leaning towards no?
* Variables are declared up-front, and are prefixed with their type name followed by `::`. You can leave out the type qualifier, in which case the type of the variable will be inferred not from the value to which it is first assigned, but from its _first letter_: variables whose names start with `i`, `j`, `k`, `l`, `m`, `n` are integers, everything else is a `real` (floating-point). Really not sure what drove that decision, but it's described as deprecated, legacy behavior anyway, so I plan to ignore it.
* Arrays are 1-indexed. Also, multi-dimensional arrays are a native feature! I'm starting to see that built-for-numerical-workloads heritage.
* It has `break` and `continue`, but they're named `exit` and `cycle`.
* There's a _built-in_ parallel-loop construct,<Sidenote>It uses different syntax to define its index and limit. That's what happens when your language development is spread over the last 65 years, I guess.</Sidenote> which "informs the compiler that it may use parallelization/SIMD to speed up execution". I've only ever seen this done at the library level before. If you're lucky your language has enough of a macro system to make it look semi-natural, otherwise, well, I hope you like map/reduce.
* It has functions, but it _also_ has "subroutines". The difference is that functions return values and are expected not to modify their arguments, and subroutines don't return values but may modify their arguments. I guess you're out of luck if you want to modify an argument _and_ return a value (say, a status code or something).
* Function and subroutine arguments are mentioned in the function signature (which looks like it does in most languages), but you really get down to brass tacks in the function body itself, which is where you specify the type and in-or-out-ness of the parameters. Reminds me of PowerShell, of all things.
* The operator for accessing struct fields is `%`. Where other languages do `sometype.field`, in Fortran you'd do `sometype%field`.
* Hey look, it's OOP! We can have methods! Also inheritance, sure, whatever.
Ok, I'm starting to get stuck in the infinite docs-reading rut for which I criticized myself at the start of this series, so buckle up, we're going in.
## The Puzzle
We're given a two-dimensional array of characters and asked to find the word `XMAS` everywhere it occurs, like those [word search](https://en.wikipedia.org/wiki/Word_search) puzzles you see on the sheets of paper they hand to kids at restaurants in a vain attempt to keep them occupied so their parents can have a chance to enjoy their meal.
Hey, Fortran might actually be pretty good at this! At least, multi-dimensional arrays are built in, so I'm definitely going to use those.
First things first, though, we have to load the data before we can start working on it.<Sidenote>Getting a Fortran compiler turned out to be as simple as `apt install gfortran`.</Sidenote>
My word-search grid appears to be 140 characters by 140, so I'm just going to hard-code that as the dimensions of my array. I'm sure there's a way to size arrays dynamically, but life's too short.
### Loading data is hard this time
Not gonna lie here, this part took me _way_ longer than I expected it to. See, the standard way to read a file in Fortran is with the `read()` statement. (It looks like a function call, but it's not.) You use it something like this:
```fortran
read(file_handle, *) somevar, anothervar, anothervar2
```
Or at least, that's one way of using it. But here's the problem: by default, Fortran expects to read data stored in a "record-based" format. In short, this means that it's expected to consist of lines, and each line will be parsed as a "record". Records consist of some number of elements, separated by whitespace. The "format" of the record, i.e. how the line should be parsed, can either be explicitly specified in a slightly arcane mini-language reminiscent of string-interpolation placeholders (just in reverse), or it can be inferred from the number and types of the variables specified after `read()`.
Initially, I thought I might be able to do this:
```fortran
character, dimension(140, 140) :: grid
! ...later
read(file_handle, *) grid
```
The top line is just declaring `grid` as a 2-dimensional array of characters, 140 rows by 140 columns. Neat, huh?
But sadly, this kept spitting out errors about how it had encountered the end of the file unexpectedly. I think what was happening was that when you give `read()` an array, it expects to populate each element of the array with one record from the file, and remember records are separated by lines, so this was trying to assign one line per array element. My file had 140 lines, but my array had 140 * 140 elements, so this was never going to work.
My next try looked something like this:
```fortran
do row = 1, 100
read(file_handle, *) grid(row, :)
end do
```
But this also resulted in end-of-file errors. Eventually I got smart and tried running this read statement just _once_, and discovered that it was populating the first row of the array with the first letter of _each_ line in the input file. I think what's going on here is that `grid(1, :)` creates a slice of the array that's 1 row by the full width (so 140), and the `read()` statement sees that and assumes that it needs to pull 140 records from the file _each time this statement is executed_. But records are (still) separated by newlines, so the first call to `read()` pulls all 140 rows, dumps everything but the first character from each (because, I think, the type of the array elements is `character`), puts that in and continues on. So after just a single call to `read()` it's read every line but dumped most of the data.
I'm pretty sure the proper way to do this would be to figure out how to set the record separator, but it's tricky because the "records" (if we want each character to be treated as a record) within each line are smashed right up against each other, but have newline characters in between lines. So I'd have to specify that the separator is sometimes nothing, and sometimes `\n`, and I didn't feel like figuring that out because all of the references I could find about Fortran format specifiers were from ancient plain-HTML pages titled things like "FORTRAN 77 INTRINSIC SUBROUTINES REFERENCE" and hosted on sites like `web.math.utk.edu` where they probably _do_ date back to something approaching 1977.
So instead, I decided to just make it dumber.
```fortran
program advent04
implicit none
character, dimension(140, 140) :: grid
integer :: i
grid = load()
do i = 1, 140
print *, grid(i, :)
end do
contains
function load() result(grid)
implicit none
integer :: handle
character, dimension(140, 140) :: grid
character(140) :: line
integer :: row
integer :: col
open(newunit=handle, file="data/04.txt", status="old", action="read")
do row = 1, 140
! `line` is a `character(140)` variable, so Fortran knows to look for 140 characters I guess
read(handle, *) line
do col = 1, 140
! just assign each character of the line to array elements individually
grid(row, col) = line(col:col)
end do
end do
close(handle)
end function load
end program advent04
```
I am more than sure that there are several dozen vastly better ways of accomplishing this, but look, it works and I'm tired of fighting Fortran. I want to go on to the fun part!
### The fun part
The puzzle specifies that occurrences of `XMAS` can be horizontal, vertical, or even diagonal, and can be written either forwards or backwards. The obvious way to do this would be to scan through the array, stop on every `X` character and check for the complete word `XMAS` in each of the eight directions individually, with a bunch of loops. Simple, easy, and probably more than performant enough because this grid is only 140x140, after all.<Sidenote>Although AoC has a way of making the second part of the puzzle punish you if you were lazy and went with the brute-force approach for the first part, so we'll see how this holds up when we get there.</Sidenote>
But! This is Fortran, and Fortran's whole shtick is operations on arrays, especially multidimensional arrays. So I think we can make this a lot more interesting. Let's create a "test grid" that looks like this:
```
S . . S . . S
. A . A . A .
. . M M M . .
S A M X M A S
. . M M M . .
. A . A . A .
S . . S . . S
```
Which has all 8 possible orientations of the word `XMAS` starting from the central X. Then, we can just take a sliding "window" of the same size into our puzzle grid and compare it to the test grid. This is a native operation in Fortran--comparing two arrays of the same size results in a third array whose elements are the result of each individual comparison from the original arrays. Then we can just call `count()` on the resulting array to get the number of true values, and we know how many characters matched up. Subtract 1 for the central X we already knew about, then divide by 3 since there are 3 letters remaining in each occurrence of `XMAS`, and Bob's your uncle, right?
...Wait, no. That won't work because it doesn't account for partial matches. Say we had a "window" that looked like this (I'm only showing the bottom-right quadrant of the window for simplicity):
```
X M X S
S . . .
A . . .
X . . .
```
If we were to apply the process I just described to this piece of the grid, we would come away thinking there was 1 full match of `XMAS`, because there are one each of `X`, `M`, `A`, and `S` in the right positions. Problem is, they aren't all in the right places to be part of the _same_ XMAS, meaning that there isn't actually a match here at all.
To do this properly, we need some way of distinguishing the individual "rays" of the "star", which is how I've started thinking about the test grid up above, so that we know whether _all_ of any given "ray" is present. So what if we do it this way?
1. Apply the mask to the grid as before, but this time, instead of just counting the matches, we're going to convert them all to 1s. Non-matches will be converted to 0.
2. Pick a prime number for each "ray" of the "star". We can just use the first 8 prime numbers (excluding 1, of course). Create a second mask with these values subbed in for each ray, and 1 in the middle. So the ray extending from the central X directly to the right, for instance, would look like this, assuming we start assigning our primes from the top-left ray and move clockwise: `1 7 7 7`
3. Multiply this array by the array that we got from our initial masking operation. Now any matched characters will be represented by a prime number _specific to that ray of the star_.
4. Convert all the remaining 0s in the resulting array to 1s, then take the product of all values in the array.
5. Test whether that product is divisible by the cube of each of the primes used. E.g. if it's divisible by 8, we _know_ that there must have been three 2's in the array, so we _know_ that the top-left ray is entirely present. So we can add 1 to our count of valid `XMAS`es originating at this point.
Will this work? Is it even marginally more efficient than the stupidly obvious way of just using umpty-gazillion nested for loops--excuse me, "do loops"--to test each ray individually? No idea! It sure does sound like a lot more fun, though.
Ok, first things first. Let's adjust the data-loading code to pad the grid with 3 bogus values on each edge, so that we can still generate our window correctly when we're looking at a point near the edge of the grid.
```fortran
grid = '.' ! probably wouldn't matter if we skipped this, uninitialized memory just makes me nervous
open(newunit=handle, file="data/04.txt", status="old", action="read")
do row = 4, 143
read(handle, *) line
do col = 1, 140
grid(row, col + 3) = line(col:col)
end do
end do
```
Turns out assigning a value element to an array of that type of value (like `grid = '.'` above) just sets every array element to that value, which is very convenient.
Now let's work on the whole masking thing.
Uhhhh. Wait. We might have a problem here. When we take the product of all values in the array after the various masking and prime-ization stuff, we could _conceivably_ end up multiplying the cubes of the first 8 prime numbers. What's the product of the cubes of the first 8 prime numbers?
```
912585499096480209000
```
Hm, ok, and what's the max value of a 64-bit integer?
```
9223372036854775807
```
Oh. Oh, _noooo_.
It's okay, I mean, uh, it's not _that_ much higher. Only two orders of magnitude, and what are the odds of all eight versions of `XMAS` appearing in the same window, anyway? Something like 1/4<sup>25</sup>? Maybe we can still make this work.
```fortran
integer function count_xmas(row, col) result(count)
implicit none
integer, intent(in) :: row, col
integer :: i
integer(8) :: prod
integer(8), dimension(8) :: primes
character, dimension(7, 7) :: test_grid, window
integer(8), dimension(7, 7) :: prime_mask, matches, matches_prime
test_grid = reshape( &
[&
'S', '.', '.', 'S', '.', '.', 'S', &
'.', 'A', '.', 'A', '.', 'A', '.', &
'.', '.', 'M', 'M', 'M', '.', '.', &
'S', 'A', 'M', 'X', 'M', 'A', 'S', &
'.', '.', 'M', 'M', 'M', '.', '.', &
'.', 'A', '.', 'A', '.', 'A', '.', &
'S', '.', '.', 'S', '.', '.', 'S' &
], &
shape(test_grid) &
)
primes = [2, 3, 5, 7, 11, 13, 17, 19]
prime_mask = reshape( &
[ &
2, 1, 1, 3, 1, 1, 5, &
1, 2, 1, 3, 1, 5, 1, &
1, 1, 2, 3, 5, 1, 1, &
19, 19, 19, 1, 7, 7, 7, &
1, 1, 17, 13, 11, 1, 1, &
1, 17, 1, 13, 1, 11, 1, &
17, 1, 1, 13, 1, 1, 11 &
], &
shape(prime_mask) &
)
window = grid(row - 3:row + 3, col - 3:col + 3)
matches = logical_to_int64(window == test_grid)
matches_prime = matches * prime_mask
prod = product(zero_to_one(matches_prime))
count = 0
do i = 1, 8
if (mod(prod, primes(i) ** 3) == 0) then
count = count + 1
end if
end do
end function count_xmas
elemental integer(8) function logical_to_int64(b) result(i)
implicit none
logical, intent(in) :: b
if (b) then
i = 1
else
i = 0
end if
end function logical_to_int64
elemental integer(8) function zero_to_one(x) result(y)
implicit none
integer(8), intent(in) :: x
if (x == 0) then
y = 1
else
y = x
end if
end function zero_to_one
```
Those `&`s are line-continuation characters, by the way. Apparently you can't have newlines inside a function call or array literal without them. And the whole `reshape` business is a workaround for the fact that there _isn't_ actually a literal syntax for multi-dimensional arrays, so instead you have to create a 1-dimensional array and "reshape" it into the desired shape.
Now we just have to put it all together:
```fortran
total = 0
do col = 4, 143
do row = 4, 143
if (grid(row, col) == 'X') then
total = total + count_xmas(row, col)
end if
end do
end do
print *, total
```
These `elemental` functions, by the way, are functions you can ~~explain to Watson~~ apply to an array element-wise. So `logical_to_int64(array)` returns an array of the same shape with all the "logical" (boolean) values replaced by 1s and 0s.
This actually works! Guess I dodged a bullet with that 64-bit integer thing.<Sidenote>Of course I discovered later, right before posting this article, that Fortran totally has support for 128-bit integers, so I could have just used those and not worried about any of this.</Sidenote>
I _did_ have to go back through and switch out all the `integer` variables in `count_xmas()` with `integer(8)`s (except for the loop counter, of course). This changed my answer significantly. I can only assume that calling `product()` on an array of 32-bit integers, then sticking the result in a 64-bit integer, does the multiplication as 32-bit first and only _then_ converts to 64-bit, after however much rolling-over has happened. Makes sense, I guess.
Ok, great! On to part 2!
## Part 2
It's not actually too bad! I was really worried that it was going to tell me to discount all the occurrences of `XMAS` that overlapped with another one, and that was going to be a royal pain in the butt with this methodology. But thankfully, all we have to do is change our search to look for _two_ occurrences of the sequence `M-A-S` arranged in an X shape, like this:
```
M . S
. A .
M . S
```
This isn't too difficult with our current approach. Unfortunately it will require four test grids applied in sequence, rather than just one, because again the sequence can be written either forwards or backwards, and we have to try all the permutations. On the plus side, we can skip the whole prime-masking thing, because each test grid is going to be all-or-nothing now. In fact, we can even skip checking any remaining test grids whenever we find a match, because there's no way the same window could match more than one.
Hmm, I wonder if there's a way to take a single starting test grid and manipulate it to reorganize the characters into the other shapes we need?
Turns out, yes! Yes there is. We can use a combination of slicing with a negative step, and transposing, which switches rows with columns, effectively rotating and flipping the array. So setting up our test grids looks like this:
```fortran
character, dimension(3, 3) :: window, t1, t2, t3, t4
t1 = reshape( &
[ &
'M', '.', 'S', &
'.', 'A', '.', &
'M', '.', 'S' &
], &
shape(t1) &
)
t2 = t1(3:1:-1, :) ! flip t1 top-to-bottom
t3 = transpose(t1) ! swap t1 rows for columns
t4 = t3(:, 3:1:-1) ! flip t3 left-to-right
```
Then we can just compare the window to each test grid:
```fortran
window = grid(row - 1:row + 1, col - 1:col + 1)
if ( &
count_matches(window, t1) == 5 &
.or. count_matches(window, t2) == 5 &
.or. count_matches(window, t3) == 5 &
.or. count_matches(window, t4) == 5 &
) then
count = 1
else
count = 0
end if
```
To my complete and utter astonishment, this actually worked the first time I tried it, once I had figured out all of the array-flipping-and-rotating I needed to create the test grids. It always makes me suspicious when that happens, but Advent of Code confirmed it, so I guess we're good!<Sidenote>Or I just managed to make multiple errors that all cancelled each other out.</Sidenote>
It did expose a surprisingly weird limitation in the Fortran parser, though. Initially I kept trying to write the conditions like this: `if(count(window == t1) == 5)`, and couldn't understand the syntax errors it was throwing. Finally I factored out `count(array1 == array2)` into a separate function, and everything worked beautifully. My best guess is that the presence of two `==` operators inside a single `if` condition, not separated by `.and.` or `.or.`, is just a no-no. The things we learn.
## Lessons ~~and carols~~
(Whoa now, we're not _that_ far into Advent yet.)
Despite being one of the oldest programming languages still in serious use, Fortran manages to feel surprisingly familiar. There are definite archaisms, like having to define the types of all your variables at the start of your program/module/function,<Sidenote>Even throwaway stuff like loop counters and temporary values.</Sidenote> having to declare function/subroutine names at the beginning _and end_, and the use of the word "subroutine". But overall it's kept up surprisingly well, given--and I can't stress this enough--that it's _sixty-six years old_. It isn't even using `CAPITAL LETTERS` for everything any more,<Sidenote>Although the language is pretty much case-insensitive so you can still use CAPITALS if you want.</Sidenote> which puts it ahead of SQL,<Sidenote>Actually, I suspect the reason the CAPITALS have stuck around in SQL is that more than most languages, you frequently find yourself writing SQL _in a string_ from another language. Occasionally editors will be smart enough to syntax-highlight it as SQL for you, but for the times they aren't, using `CAPITALS` for all the `KEYWORDS` serves as a sort of minimal DIY syntax highlighting. That's what I think, at least.</Sidenote> and SQL is 10+ years younger.
It still has _support_ for a lot of really old stuff. For instance, you can label statements with numbers and then `go to` a numbered statement, but there's really no use for that in new code. We have functions, subroutines, loops, if-else-if-else conditionals--basically everything you would (as I understand it) use `goto` for back in the day.
Runs pretty fast, too. I realized after I already had a working solution that I had been compiling without optimizations the whole time, so I decided to try enabling them, only to discover that the actual execution time wasn't appreciably different. I figured the overhead of spawning a process was probably eating the difference, so I tried timing just the execution of the main loop and sure enough, without optimizations it took about 2 milliseconds whereas with optimizations it was 690 microseconds. Whee! Native-compiled languages are so fun. I'm too lazy to try rewriting this in Python just to see how much slower it would be, but I'm _pretty_ sure that this time it would be quite noticeable.
Anyway, that about wraps it up for Fortran. My only remaining question is: What is the appropriate demonym for users of Fortran? Python has Pythonistas, Rust has Rustaceans, and so on. I was going to suggest "trannies" for Fortran users, but everyone kept giving me weird looks for some reason.

8
posts/after.mdx Normal file
View File

@@ -0,0 +1,8 @@
---
title: Example after post
date: 2026-02-28
---
## After
Lorem ipsum dolor sit amet.

8
posts/before.mdx Normal file
View File

@@ -0,0 +1,8 @@
---
title: Example previous post
date: 2026-02-21
---
## Before
Lorem ipsum dolor sit amet.

50
posts/test.mdx Normal file
View File

@@ -0,0 +1,50 @@
---
title: This Is A Top-Level Heading
date: 2026-02-27
---
import Sidenote from '@components/Sidenote.astro';
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut ac consectetur mi. Phasellus non risus vitae<Sidenote>hello world</Sidenote> lorem scelerisque semper vel eget arcu. Nulla id malesuada velit. Pellentesque eu aliquam nisi. Cras lacinia enim sit amet ante tincidunt convallis. Donec leo nibh, posuere nec arcu in, congue tempus turpis. Maecenas accumsan mauris ut libero molestie, eget ultrices est faucibus. Donec sed ipsum eget erat pharetra tincidunt. Integer faucibus diam ut cursus cursus.
## A Second-level heading
Nulla at pulvinar quam. Interdum et malesuada fames ac ante ipsum primis in faucibus. In pretium laoreet egestas. Phasellus ut congue ligula, ut egestas sapien. Etiam congue dui at libero placerat, vel accumsan nunc ultrices. In ullamcorper ut nunc a elementum. Vivamus vehicula ut urna sed congue.
### Now let's try a third-level heading
Fusce varius lacinia ultrices. Cras ante velit, sagittis a commodo ac, faucibus nec tortor. Proin auctor, sapien nec elementum vestibulum, neque dolor consectetur lectus, a luctus ante nunc eget ex. Vestibulum bibendum lacus nec convallis bibendum. Nunc tincidunt elementum nulla, sit amet lacinia libero cursus non. Nam posuere ipsum sit amet elit accumsan, cursus euismod ligula scelerisque. Nam mattis sollicitudin semper. Morbi lacinia nec mi vel tempus. Cras auctor dui et turpis laoreet, ut vehicula magna dapibus. Fusce sit amet elit eget dolor varius tempus sed sit amet massa.
Fusce blandit scelerisque massa nec ultrices. Phasellus a cursus ex, sed aliquet justo. Proin sit amet lorem et urna viverra consectetur. In consectetur facilisis nulla, id accumsan metus lacinia quis. Sed nec magna pellentesque, ultricies nisi in, maximus sem. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aliquam facilisis viverra metus, vitae auctor mi venenatis id. Fusce et risus non leo iaculis lacinia vitae ut metus. Phasellus id suscipit nisl, nec sodales velit. Morbi aliquet eros a est condimentum convallis.
#### Heading 4 coming up
Aenean facilisis eu velit vel semper. Sed imperdiet, lorem ut sagittis laoreet, turpis lorem venenatis justo, vel rhoncus enim lorem nec leo. Vestibulum sagittis orci nisl, vulputate tempor sem mattis eget. Pellentesque volutpat turpis sit amet est ultricies maximus. Aliquam sollicitudin semper enim, quis viverra mauris congue blandit. Vestibulum massa dui, efficitur quis lectus eu, bibendum vehicula dui. Suspendisse elementum, tellus a facilisis tincidunt, nulla leo viverra lacus, at lobortis massa ante non ex. Duis sed pretium nibh, eget molestie diam. Suspendisse congue augue metus, pellentesque faucibus magna auctor a. Praesent et sapien quis urna sollicitudin dapibus a ac justo. Integer lobortis, magna ac consectetur egestas, sapien dolor aliquet diam, ut tempor lectus metus sit amet libero. Fusce neque dui, mollis ac dui eget, iaculis semper lectus. Donec porttitor ante mauris, id condimentum neque ultrices in. Suspendisse commodo congue posuere.
##### Finally, we get to heading 5
Praesent non dignissim purus. Ut pharetra lectus sit amet tempor dapibus. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Cras dapibus libero vel enim consequat interdum. Aliquam ac est mollis, ornare dui sed, efficitur felis. Morbi aliquam neque neque, at facilisis arcu suscipit convallis. Integer pulvinar dui lectus, et luctus sapien porttitor nec. Vivamus placerat ultrices consectetur. Etiam molestie non nibh ut viverra. Suspendisse potenti. In sagittis leo non commodo ultricies. Donec vitae ultrices lacus, id mattis purus. Nulla ante lacus, auctor vitae enim sit amet, placerat placerat orci. In tempor eget nunc eget accumsan.
Fusce venenatis dolor at tincidunt commodo. Ut lacinia eu arcu eget pellentesque. Integer a nibh nisi. Phasellus semper quam at lacus finibus pellentesque. Sed porta varius imperdiet. Aenean tempor tellus odio, id sollicitudin neque pellentesque nec. Pellentesque vel ultrices felis. Vivamus eleifend quis leo nec tincidunt. Etiam magna quam, viverra non est et, vestibulum porta tellus. Nam nisi orci, pretium a quam id, malesuada sollicitudin mauris.
Nulla placerat, sem eget bibendum tincidunt, dui tortor ullamcorper ipsum, ut sollicitudin nibh tortor ac mi. Donec efficitur interdum ullamcorper. Cras ac molestie risus, non volutpat erat. Donec vel dignissim velit. Aliquam et turpis eget lorem cursus porttitor eu at ipsum. Nunc vitae leo non quam pharetra iaculis auctor et diam. Nullam convallis quam eu aliquet elementum. Etiam consectetur maximus tincidunt. Vivamus nulla risus, viverra nec mauris at, aliquet sagittis lorem. Duis tempor nunc sem, eget euismod urna porta sed.
## Another second-level heading to test TOC
Phasellus fermentum turpis vel porta vestibulum. Cras sed nisl at magna lacinia finibus tincidunt vitae massa. Maecenas lobortis, sapien non interdum placerat, arcu massa molestie lectus, eget ultricies dolor tortor nec nisl. Aliquam elementum facilisis nisi. Quisque lobortis tincidunt mauris, vel facilisis felis faucibus id. Sed pharetra ante ut quam fringilla fermentum. Nunc porta laoreet dui, ac vestibulum elit varius non. Quisque at risus cursus, mollis nisi vel, lacinia erat. In metus erat, iaculis vitae velit et, gravida hendrerit enim. Integer eget ipsum sit amet tellus ultrices congue. Curabitur ullamcorper vehicula eros, vel aliquam est egestas quis. Cras volutpat, nisi eu ultrices pretium, diam sapien dapibus orci, et tristique nulla lectus semper lectus.
Quisque ut pellentesque eros. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec blandit orci quis iaculis dignissim. Nulla facilisi. Suspendisse vehicula aliquet odio quis feugiat. Maecenas sed aliquam lectus. Nulla ut cursus dolor. Sed iaculis vulputate finibus. Proin non posuere nunc. Sed blandit nisi et euismod hendrerit. Nullam id urna dapibus, placerat mauris auctor, tristique leo. Phasellus mollis, sem vel viverra blandit, nisl nisi suscipit lorem, ut varius orci enim non sapien. Quisque at tempus ipsum. Nulla sit amet accumsan lacus.
### And another third-level heading
Mauris a porttitor justo. Sed eu maximus turpis, eu porta lectus. Morbi quis ullamcorper libero. Cras vehicula quis lorem non varius. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse sit amet egestas odio. Quisque consectetur nunc enim, a molestie nibh congue id. Quisque efficitur accumsan luctus.
Duis viverra odio at dolor semper eleifend. Pellentesque tincidunt augue ultrices lobortis sodales. In semper felis lacus, vel fermentum dolor aliquet at. Aliquam tristique sagittis consequat. Maecenas sodales odio et mauris pulvinar varius. Phasellus imperdiet, magna id gravida efficitur, dui ipsum viverra odio, ut porttitor elit elit sed nisl. Vestibulum vestibulum eros et nisl tempus, ut dictum libero blandit. Sed tempor scelerisque elit non accumsan.
Fusce eget posuere diam, ut venenatis tellus. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Nam quis maximus sem. Nam convallis euismod odio, feugiat volutpat risus sollicitudin eget. Vivamus et imperdiet tortor. Mauris fringilla metus eu eros convallis fringilla. Quisque sodales consequat auctor. Ut vitae ligula porttitor, consectetur mauris sit amet, scelerisque leo. Donec accumsan libero vel nulla placerat, a sagittis turpis pharetra. Fusce rutrum metus nunc. Etiam consequat lobortis blandit. Nunc sed odio ullamcorper, hendrerit dolor vel, euismod dui. Donec id bibendum est. Suspendisse massa ante, pretium ac orci et, tempus vehicula velit.
Aliquam feugiat interdum suscipit. Donec ac erat maximus, aliquam ligula sed, lacinia velit. Praesent a commodo ligula. Phasellus sed felis vel quam dictum facilisis. Nulla justo diam, tempus a efficitur ut, euismod non nisi. Pellentesque tristique rutrum erat, eu placerat tortor malesuada eu. Maecenas mollis mauris metus, sed pellentesque nisl posuere ut.
Donec malesuada sit amet diam rhoncus porttitor. Integer arcu elit, vestibulum ac condimentum a, scelerisque eget ligula. Aenean ultricies suscipit urna, at commodo tellus interdum nec. Sed feugiat nunc urna, non tincidunt libero elementum vel. Vestibulum non tincidunt ligula. Ut eu consectetur risus. Mauris vehicula ligula eget lacinia tempus.
Donec suscipit erat dui, eu porttitor augue condimentum vel. Sed et massa lacinia purus fermentum ultrices elementum sit amet tortor. Quisque feugiat, nulla non auctor egestas, sem odio mollis nisi, eget accumsan ante ipsum ac ex. Integer lobortis mi nunc, interdum blandit erat feugiat ac. Donec aliquam, felis in ultricies rhoncus, sem eros elementum lorem, non congue enim nunc quis ligula. Nulla facilisi. Vestibulum vitae justo ac justo porttitor rutrum a nec tellus. Duis congue lorem a semper maximus. Quisque consectetur dictum tellus, vel lobortis lorem sodales nec. Pellentesque sed enim felis. Aliquam non mattis sapien.

View File

@@ -1,13 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<link rel="preload" href="/Tajawal-Regular.woff2" as="font" type="font/woff2" />
<link rel="alternate" type="application/atom+xml" href="/feed">
<meta name="viewport" content="width=device-width" />
%sveltekit.head%
</head>
<body data-sveltekit-preload-data="hover">
<div style="display: contents">%sveltekit.body%</div>
</body>
</html>

23
src/components/Icon.astro Normal file
View File

@@ -0,0 +1,23 @@
---
// Inline an SVG icon from src/components/icons/ by name, e.g. <Icon name="sun" />.
export interface Props {
	name: string,
};
const { name } = Astro.props;
// With `query: '?raw'` + `import: 'default'`, each matched module resolves to the
// file's raw source text, so the glob's type parameter is `string` (the original
// `<{string: string}>` described an object and made `icon` the wrong type).
const icons = import.meta.glob<string>('@components/icons/*.svg', { query: '?raw', import: 'default' });
const path = `/src/components/icons/${name}.svg`;
if (icons[path] === undefined) {
	throw new Error(`Icon ${name} does not exist.`);
}
const icon = await icons[path]();
---
<Fragment set:html={icon} />
<style>
	/* NOTE(review): Astro scopes this <style> to elements rendered in the template,
	   but markup injected via set:html does not receive the scope attribute — confirm
	   this rule actually matches the injected <svg>, or mark it is:global. */
	svg {
		width: 100%;
		height: 100%;
	}
</style>

124
src/components/Post.astro Normal file
View File

@@ -0,0 +1,124 @@
---
// Renders a single blog post: title header, sticky TOC in the left gutter,
// the post body in the center column, and older/newer navigation links.
import '@fontsource-variable/baskervville-sc';
import type { CollectionEntry } from 'astro:content';
import { render } from 'astro:content';
import Toc from '@components/Toc.vue';
import { formatDate } from '@lib/datefmt';

export interface Props {
	entry: CollectionEntry<'posts'>,
	prevSlug: string | null,   // slug of the older post, if any
	nextSlug: string | null,   // slug of the newer post, if any
};

const { entry, prevSlug, nextSlug } = Astro.props;
const { Content, headings } = await render(entry);
---
<style>
	/* 3-column grid: left gutter, center content, and right gutter */
	article {
		display: grid;
		grid-template-columns: minmax(0, 1fr) minmax(0, var(--content-width)) minmax(0, 1fr);
		/* a bit of breathing room for narrow screens */
		padding: 0 var(--content-padding);
	}
	h1 {
		font-family: 'Baskervville SC Variable';
	}
	#left-gutter {
		grid-column: 1 / 2;
		justify-self: end;
	}
	#right-gutter {
		grid-column: 3 / 4;
		justify-self: start;
	}
	.title {
		grid-column: 2 / 3;
	}
	.subtitle {
		font-size: 0.85em;
		font-style: italic;
		margin-top: -0.25rem;
	}
	.post {
		grid-column: 2 / 3;
	}
	footer {
		grid-column: 2 / 3;
		margin-bottom: 2.5rem;
		display: flex;
		justify-content: space-between;
		& a {
			font-size: 1.25rem;
			color: var(--content-color-faded);
			text-decoration: underline;
			text-underline-offset: 0.25em;
			text-decoration-color: transparent;
			transition: 150ms;
			&:hover {
				text-decoration-color: currentColor;
				text-decoration: underline;
			}
		}
		/* push the "Newer" link to the right edge even when there is no "Older"
		   link; justify-content: space-between alone would leave a lone "Newer"
		   sitting on the left */
		& a.next {
			margin-left: auto;
		}
	}
	article {
		& :global(section.post::first-letter) {
			font-family: 'Baskervville';
			color: var(--accent-color);
		}
		&[data-dropcap-style="descender"] :global(section.post::first-letter) {
			initial-letter: 2;
			margin-right: 0.5rem;
		}
		&[data-dropcap-style="ascender"] :global(section.post::first-letter) {
			font-size: 2em;
			line-height: 1;
		}
	}
</style>
<article class="prose" data-dropcap-style={entry.data.dropcap}>
	<header class="title">
		<h1>
			<!-- <SmallCaps text={entry.data.title} upperWeight={500} lowerWeight={800} /> -->
			{ entry.data.title }
		</h1>
		<p class="subtitle">{ formatDate(entry.data.date) }</p>
	</header>
	<div id="left-gutter">
		<Toc client:load {headings} />
	</div>
	<section class="post">
		<Content />
	</section>
	<div id="right-gutter" />
	<footer>
		{prevSlug && (
			<a href={`/${prevSlug}`} data-astro-prefetch>Older</a>
		)}
		{nextSlug && (
			<a href={`/${nextSlug}`} class="next" data-astro-prefetch>Newer</a>
		)}
	</footer>
</article>

View File

@@ -0,0 +1,128 @@
---
import Icon from '@components/Icon.astro';

// Unique id ties the visible counter <label> to the hidden checkbox below,
// which drives the mobile show/hide state in pure CSS (works without JS).
const id = crypto.randomUUID();
// NOTE(review): SIDENOTE_COUNT is a global declared in src/env.d.ts; this
// assumes something initializes/resets it per page (e.g. middleware) — confirm,
// otherwise the first increment throws ReferenceError.
SIDENOTE_COUNT += 1
---
<label for={id} class="counter anchor">{ SIDENOTE_COUNT }</label>
<input {id} type="checkbox" class="toggle" />
<!-- we have to use spans for everything, otherwise Astro "helpfully" inserts
	ending </p> tags before every sidenote because you technically can't have
	another block-level element inside a <p> -->
<span class="sidenote">
	<span class="content">
		<span class="counter floating">{ SIDENOTE_COUNT }</span>
		<slot />
	</span>
	<!-- mobile-only control: the inner <label> unchecks the toggle, sliding the panel away -->
	<button class="dismiss">
		<label for={id}>
			<Icon name="chevron-down" />
		</label>
	</button>
</span>
<style>
	.sidenote {
		display: block;
		position: relative;
		font-size: var(--content-size-sm);
		hyphens: auto;
		/* note: our minimum desirable sidenote width is 15rem, and the gutters are symmetrical, so our
			breakpoint between desktop/mobile will be content-width + 2(gap) + 2(15rem) + (scrollbar buffer) */
		/* desktop: float the note into the right gutter next to its anchor */
		@media(min-width: 89rem) {
			--gap: 2.5rem;
			--gutter-width: calc(50vw - var(--content-width) / 2);
			--scrollbar-buffer: 1.5rem;
			--sidenote-width: min(
				24rem,
				calc(var(--gutter-width) - var(--gap) - var(--scrollbar-buffer))
			);
			width: var(--sidenote-width);
			float: right;
			clear: right;
			/* pull the note out of the content column into the gutter */
			margin-right: calc(-1 * var(--sidenote-width) - var(--gap));
			margin-bottom: 0.75rem;
		}
		/* mobile: a bottom sheet toggled by the checkbox */
		/* NOTE(review): min-width: 89rem above and max-width: 89rem here BOTH match
			at exactly 89rem, so the float and fixed styles combine at that one width —
			consider max-width: 88.999rem (or a non-overlapping pair). */
		@media(max-width: 89rem) {
			position: fixed;
			left: 0;
			right: 0;
			bottom: 0;
			/* horizontal buffer for the counter and dismiss button */
			--padding-x: calc(var(--content-padding) + 1.5rem);
			padding: 1rem var(--padding-x);
			background-color: var(--bg-color);
			box-shadow: 0 -2px 4px -1px rgba(0, 0, 0, 0.06), 0 -2px 12px -2px rgba(0, 0, 0, 0.1);
			/* show the sidenote only when the corresponding checkbox is checked */
			transform: translateY(calc(100% + 2rem));
			transition: transform 0.125s;
			/* when moving from shown -> hidden, ease-in */
			transition-timing-function: ease-in;
			.toggle:checked + & {
				border-top: 2px solid var(--accent-color);
				transform: translateY(0);
				/* when moving hidden -> shown, ease-out */
				transition-timing-function: ease-out;
				/* the active sidenote should be on top of any other sidenotes as well
					(this isn't critical unless you have JS disabled, but it's still annoying) */
				z-index: 20;
			}
		}
	}
	.content {
		display: block;
		max-width: var(--content-width);
		margin: 0 auto;
	}
	/* superscript-style number in the running text */
	.counter.anchor {
		color: var(--accent-color);
		font-size: 0.75em;
		margin-left: 0.065rem;
		position: relative;
		bottom: 0.375rem;
	}
	/* matching number hanging to the left of the note body */
	.counter.floating {
		position: absolute;
		/* move it out to the left by its own width + a fixed gap */
		transform: translateX(calc(-100% - 0.4em));
		color: var(--accent-color);
	}
	.dismiss {
		display: block;
		width: 2rem;
		margin: 0.5rem auto 0;
		color: var(--neutral-gray);
		border-radius: 100%;
		background: transparent;
		border: 1px solid var(--neutral-gray);
		padding: 0.25rem;
		&:hover, &:active {
			color: var(--accent-color);
			border-color: var(--accent-color);
		}
		cursor: pointer;
		& label {
			cursor: pointer;
		}
	}
	/* this is just to track the state of the mobile sidenote, it doesn't need to be seen */
	.toggle {
		display: none;
	}
</style>

View File

@@ -0,0 +1,73 @@
---
// Light/dark theme toggle. Exactly one button is visible at a time, chosen by
// the current html[data-theme] value; clicking it persists and applies the
// opposite theme.
import Icon from '@components/Icon.astro';
---
<div class="theme-switcher">
	<button id="switch-to-dark">
		<Icon name="sun" />
	</button>
	<button id="switch-to-light">
		<Icon name="moon" />
	</button>
</div>
<style>
	.theme-switcher {
		position: relative;
		isolation: isolate;
		width: 1.5rem;
		height: 1.5rem;
		transform: translateY(0.1rem);
	}
	/* both buttons are stacked in the same spot; visibility is driven purely
	   by the html[data-theme] attribute below */
	button {
		position: absolute;
		inset: 0;
		background-color: transparent;
		padding: 0;
		color: var(--nav-link-color);
		border: none;
		&:hover {
			cursor: pointer;
			color: var(--accent-color);
		}
		/* hide by default, i.e. if JS isn't enabled and the data-theme attribute didn't get set */
		visibility: hidden;
		opacity: 0;
		transition:
			color 0.2s ease,
			opacity 0.5s ease,
			transform 0.5s ease;
	}
	:global(html[data-theme="light"]) button#switch-to-dark {
		opacity: 1;
		visibility: visible;
		transform: rotate(360deg);
		/* whichever one is currently active should be on top */
		z-index: 10;
	}
	:global(html[data-theme="dark"]) button#switch-to-light {
		opacity: 1;
		visibility: visible;
		transform: rotate(-360deg);
		z-index: 10;
	}
</style>
<script>
	// Persist the user's explicit choice and flip the attribute that the theme
	// CSS (and the button visibility rules above) key off of.
	function applyTheme(theme: string) {
		localStorage.setItem('theme-preference', theme);
		document.documentElement.dataset.theme = theme;
	}
	document.getElementById('switch-to-dark')?.addEventListener('click', () => applyTheme('dark'));
	document.getElementById('switch-to-light')?.addEventListener('click', () => applyTheme('light'));
</script>

163
src/components/Toc.vue Normal file
View File

@@ -0,0 +1,163 @@
<script setup lang="ts">
// Table of contents with scroll-spy: highlights the heading whose section the
// reader is currently in, using an IntersectionObserver cutoff line.
import type { MarkdownHeading } from 'astro';
import { onBeforeUnmount, onMounted, ref } from 'vue';

const props = defineProps<{ headings: MarkdownHeading[] }>();

// headings deeper than h3 don't display well because they are too deeply indented
const headings = props.headings.filter(h => h.depth <= 3);

// for each heading slug, track whether the corresponding heading is above the cutoff point
// (the cutoff point being a hypothetical line 2/3 of the way up the viewport)
const headingStatuses = Object.fromEntries(headings.map(h => ([h.slug, false])));

// we need to store a reference to the observer so we can dispose of it on resize/unmount
let headingObserver: IntersectionObserver | null = null;

// the final slug that should be highlighted as "current" in the TOC
const currentSlug = ref('');

function handleIntersectionUpdate(entries: IntersectionObserverEntry[], headingElems: HTMLElement[]) {
	for (const entry of entries) {
		headingStatuses[entry.target.id] = entry.isIntersecting;
	}
	// headings are in DOM order, so this gives us the last heading that's still above the cutoff point
	for (const elem of headingElems) {
		if (headingStatuses[elem.id]) {
			currentSlug.value = elem.id;
		}
		else {
			break;
		}
	}
}

function setupObserver() {
	// if there was already an observer, turn it off
	if (headingObserver) {
		headingObserver.disconnect();
	}
	// skip headings whose rendered element can't be found instead of asserting
	// non-null — observing a missing element would throw at runtime
	const headingElems = headings
		.map(h => document.getElementById(h.slug))
		.filter((el): el is HTMLElement => el !== null);
	const obs = new IntersectionObserver(
		entries => handleIntersectionUpdate(entries, headingElems),
		// top margin equal to body height means that the intersection zone extends up beyond
		// the top of the document, i.e. elements can only enter/leave the zone at the bottom
		{ rootMargin: `${document.body.clientHeight}px 0px -66% 0px` },
	);
	for (const elem of headingElems) {
		obs.observe(elem);
	}
	headingObserver = obs;
}

onMounted(() => {
	// create the observer once on component startup
	setupObserver();
	// any time the window resizes, the document height could change, so we need to recreate it
	window.addEventListener('resize', setupObserver);
});

onBeforeUnmount(() => {
	window.removeEventListener('resize', setupObserver);
	headingObserver?.disconnect();
});
</script>

<template>
	<div id="toc">
		<h5>C<span class="lower">ontents</span></h5>
		<ul id="toc-list">
			<li
				v-for="heading in headings"
				:key="heading.slug"
				:data-current="heading.slug == currentSlug"
				:style="`--depth: ${heading.depth}`"
			>
				<span v-show="heading.slug == currentSlug" class="marker"></span>
				<a :href="`#${heading.slug}`">
					{{ heading.text }}
				</a>
			</li>
		</ul>
	</div>
</template>

<style scoped>
	#toc {
		position: sticky;
		top: 1.5rem;
		margin-left: 1rem;
		margin-right: 4rem;
		max-width: 18rem;
		font-size: var(--content-size-sm);
		color: var(--content-color-faded);
		/*
			minimum desirable TOC width is 8rem
			add 4rem for margins, giving total gutter width of 12.5rem
			multiply by 2 since there are two equally-sized gutters, then add content-width (52.5rem)
		*/
		@media(max-width: 77.5rem) {
			display: none;
		}
	}
	h5 {
		font-variant: petite-caps;
		font-weight: 350;
		font-size: var(--content-size);
		font-family: 'Figtree Variable';
		color: var(--content-color-faded);
		max-width: fit-content;
		margin-top: 0;
		margin-bottom: 0.35em;
		/*padding-bottom: 0.25em;*/
		border-bottom: 1px solid currentcolor;
		/* make the border stretch beyond the text just a bit, because I like the effect */
		padding-right: 1.5rem;
		/* SmallCaps is an Astro component so we can't use it here, but we can fake it */
		& .lower {
			font-weight: 500;
		}
	}
	li {
		position: relative;
		margin-top: 0.45em;
		margin-left: calc(0.75em * (var(--depth) - 2));
		font-size: var(--content-size-sm);
		/* make sure that one item wrapped across multiple lines doesn't just look like multiple items */
		line-height: 1.15;
		&[data-current="true"], &:hover {
			color: var(--content-color);
		}
	}
	.marker {
		position: absolute;
		left: -0.6rem;
		top: 0.05em;
		bottom: 0.2em;
		width: 0.125rem;
		background-color: var(--accent-color);
	}
	a {
		color: inherit;
		text-decoration: none;
	}
	ul {
		margin: 0;
		padding: 0;
		list-style: none;
	}
</style>

View File

@@ -0,0 +1,3 @@
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor">
<path stroke-linecap="round" stroke-linejoin="round" d="m19.5 8.25-7.5 7.5-7.5-7.5" />
</svg>

After

Width:  |  Height:  |  Size: 210 B

View File

@@ -0,0 +1,10 @@
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor">
<path stroke-linecap="round" stroke-linejoin="round" d="M21.752 15.002A9.72 9.72 0 0 1 18 15.75c-5.385 0-9.75-4.365-9.75-9.75 0-1.33.266-2.597.748-3.752A9.753 9.753 0 0 0 3 11.25C3 16.635 7.365 21 12.75 21a9.753 9.753 0 0 0 9.002-5.998Z" />
</svg>
<style>
svg {
width: 100%;
height: 100%;
}
</style>

After

Width:  |  Height:  |  Size: 425 B

View File

@@ -0,0 +1,3 @@
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor">
<path stroke-linecap="round" stroke-linejoin="round" d="M12 3v2.25m6.364.386-1.591 1.591M21 12h-2.25m-.386 6.364-1.591-1.591M12 18.75V21m-4.773-4.227-1.591 1.591M5.25 12H3m4.227-4.773L5.636 5.636M15.75 12a3.75 3.75 0 1 1-7.5 0 3.75 3.75 0 0 1 7.5 0Z" />
</svg>

After

Width:  |  Height:  |  Size: 377 B

View File

@@ -0,0 +1,3 @@
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor">
<path stroke-linecap="round" stroke-linejoin="round" d="M6 18 18 6M6 6l12 12" />
</svg>

After

Width:  |  Height:  |  Size: 204 B

19
src/content.config.ts Normal file
View File

@@ -0,0 +1,19 @@
import { defineCollection } from 'astro:content';
import { glob } from 'astro/loaders';
import { z } from 'astro/zod';

// Blog posts loaded from the repo-level ./posts directory.
// NOTE(review): pattern '*.mdx' matches only files directly under ./posts,
// not subdirectories — confirm that is intended.
const posts = defineCollection({
	loader: glob({ pattern: '*.mdx', base: './posts' }),
	schema: z.object({
		title: z.string(),
		date: z.date(),
		// posts are published unless explicitly marked as drafts
		draft: z.boolean().default(false),
		// which drop-cap rendering the post layout should apply
		dropcap: z.enum(['ascender', 'descender']).default('descender'),
		// whether to render a table of contents for this post
		toc: z.boolean().default(true),
	})
});

export const collections = { posts };

1
src/env.d.ts vendored Normal file
View File

@@ -0,0 +1 @@
declare var SIDENOTE_COUNT: number;

View File

@@ -0,0 +1,82 @@
---
// Base page layout: global styles, theme bootstrap, and the top nav bar.
import '@styles/main.css';
import '@fontsource-variable/baskervville-sc';
import ThemeSwitcher from '@components/ThemeSwitcher.astro';
---
<html lang="en">
<head>
	<meta charset="utf-8" />
	<meta name="viewport" content="width=device-width" />
	<!-- avoid FOUC by setting the color scheme here in the header -->
	<script>
		// explicit user choice (saved by ThemeSwitcher) wins over the OS preference
		const explicitPref = localStorage.getItem('theme-preference');
		if (explicitPref) {
			document.documentElement.dataset.theme = explicitPref;
		} else {
			const isLight = window.matchMedia('(prefers-color-scheme: light)').matches;
			document.documentElement.dataset.theme = isLight ? 'light' : 'dark';
		}
	</script>
	{/* Note: The styles are inside the document here because otherwise it breaks Astro's parsing */}
	<style>
		header {
			background-color: var(--primary-color-faded);
			padding: 0.5rem var(--content-padding);
		}
		nav {
			max-width: var(--content-width);
			margin: 0 auto;
			display: flex;
			gap: 1.5rem;
			align-items: baseline;
			& a {
				font-family: 'Figtree Variable';
				font-weight: 500;
				font-size: 1.3rem;
				color: var(--nav-link-color);
				text-decoration: underline;
				text-underline-offset: 0.5rem;
				text-decoration-color: transparent;
				transition: text-decoration-color 0.2s ease, opacity 0.2s ease;
				&.home {
					font-family: 'Baskervville SC Variable';
					font-size: 2rem;
					text-decoration-thickness: 0.125rem;
					/* push everything after the home link to the right side of the bar */
					margin-right: auto;
				}
				&:hover, &:active {
					text-decoration-color: var(--accent-color);
				}
			}
		}
		.switcher-container {
			align-self: center;
		}
	</style>
</head>
<body>
	<header>
		<nav>
			<a href="/" class="home" data-astro-prefetch>Joe's Blog</a>
			<div class="switcher-container">
				<ThemeSwitcher />
			</div>
			<a href="/posts" data-astro-prefetch>Posts</a>
			<a href="/about" data-astro-prefetch>About</a>
		</nav>
	</header>
	<main>
		<slot />
	</main>
</body>
</html>

View File

@@ -1,9 +0,0 @@
<script>
let classes = '';
export {classes as class};
</script>
<p>Hello world!</p>
<pre class={classes}>
<slot></slot>
</pre>

View File

@@ -1,52 +0,0 @@
<script>
// Usage: <Dropcap word="Lorem">ipsum dolor sit amet...</Dropcap>
export let word;
const initial = word.slice(0, 1).toUpperCase();
const remainder = word.slice(1);
// a few letters are narrower at the top, so we need to shift the remainder to compensate
const shiftValues = {
A: '-0.45em',
L: '-0.3em',
R: '-0.25em',
};
const shift = shiftValues[initial] || '0em';
</script>
<style>
@font-face {
font-family: 'Baskerville';
font-style: normal;
font-weight: 400;
src: url(/Baskerville-Regular.woff2) format('woff2');
font-display: block;
}
.drop-cap {
display: block;
float: left;
margin-right: 0.1em;
color: var(--accent-color);
font-size: calc(var(--content-size) * 1.5 * 2);
line-height: 0.8;
font-family: 'Baskerville';
text-transform: uppercase;
}
.first-word {
margin-left: var(--shift);
font-variant: petite-caps;
}
</style>
<svelte:head>
<link rel="preload" href="/Baskerville-Regular.woff2" as="font" type="font/woff2">
</svelte:head>
<span class="drop-cap">{initial}</span>
{#if remainder.length}
<span class="first-word" style:--shift={shift}>{remainder}</span>
{/if}

View File

@@ -1,68 +0,0 @@
<script>
export let level;
export let id = '';
const tag = `h${level}`;
</script>
<style lang="scss">
.h {
position: relative;
}
// shift the anchor link to hang off the left side of the content when there's room
.anchor-wrapper {
// slightly overlap the span with the heading so that it doesn't
// lose its hover state as the cursor moves between them
position: absolute;
padding-right: 0.5em;
left: -1.25em;
@media(max-width: 58rem) {
position: revert;
}
}
a {
// works better to set the size here for line-height reasons
font-size: 0.9em;
// give the anchor link a faded appearance by default
color: hsl(0deg, 0%, 29%);
opacity: 40%;
transition: opacity 150ms, color 150ms;
&:hover {
border-bottom: 0.05em solid currentcolor;
}
}
// emphasize anchor link when heading is hovered or when clicked (the latter for mobile)
.h:hover a, .anchor-wrapper:hover a, .h a:active {
color: var(--accent-color);
opacity: 100%;
}
svg {
// undo the reset that makes images block
display: inline;
width: 1em;
// tiny tweak for optical alignment
transform: translateY(2px);
}
</style>
<svelte:element this={tag} {id} class="h">
<span>
<slot></slot>
</span>
<!-- Icon from https://heroicons.com/ -->
<span class="anchor-wrapper">
<a href="#{id}" >
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" class="w-6 h-6">
<path stroke-linecap="round" stroke-linejoin="round" d="M13.19 8.688a4.5 4.5 0 011.242 7.244l-4.5 4.5a4.5 4.5 0 01-6.364-6.364l1.757-1.757m13.35-.622l1.757-1.757a4.5 4.5 0 00-6.364-6.364l-4.5 4.5a4.5 4.5 0 001.242 7.244" />
</svg>
</a>
</span>
</svelte:element>

View File

@@ -1,44 +0,0 @@
<script context="module">
import { page } from '$app/stores';
function host(url) {
try {
let u = new URL(url);
return u.host;
}
catch {
return null;
}
}
function ext(url) {
}
</script>
<script>
export let href;
export let rel = null;
let url = null;
try {
url = new URL(href);
}
catch {}
let isLocal = false;
if (href.startsWith('/') || url?.host === $page.url.host) {
isLocal = true;
}
// if href is not a valid url, assume that it's a relative link
const path = url?.pathname || href;
// set rel="external" on links to static files (i.e. local links with a dot in them)
if (isLocal && path.search(/\.\w+$/) > -1) {
rel = 'external';
}
</script>
<a data-sveltekit-preload-data={isLocal ? 'hover' : null} {href} {rel}>
<slot></slot>
</a>

View File

@@ -1,150 +0,0 @@
<script context="module">
import '$styles/prose.scss';
import '$styles/code.scss';
import { onMount } from 'svelte';
import { formatDate } from './datefmt.js';
import { makeSlug } from '$lib/utils.js';
import Toc from './Toc.svelte';
import Link from './Link.svelte';
export { Link as a };
</script>
<script>
export let title, date;
export let description = '';
export const draft = false;
export let toc = null;
export let slug;
export let prev = null;
export let next = null;
</script>
<style lang="scss">
.page {
/* 3-column grid: left gutter, center content, and right gutter */
display: grid;
grid-template-columns: minmax(0, 1fr) minmax(0, var(--content-width)) minmax(0, 1fr);
/* a bit of breathing room for narrow screens */
padding: 0 var(--content-padding);
}
/* container for the table of contents */
.left-gutter {
grid-column: 1 / 2;
justify-self: end;
}
.title {
grid-column: 2 / 3;
}
.subtitle {
font-size: 0.9em;
font-style: italic;
margin-top: -0.75rem;
}
.post {
grid-column: 2 / 3;
}
hr {
grid-column: 2 / 3;
width: 100%;
border-top: 1px solid hsl(0 0% 75%);
border-bottom: none;
margin: 2.5rem 0;
}
.footer {
grid-column: 2 / 3;
margin-bottom: 2.5rem;
display: flex;
& a {
display: flex;
align-items: center;
gap: 0.45em;
font-size: 1.25rem;
color: var(--content-color-faded);
text-decoration: underline;
text-underline-offset: 0.25em;
text-decoration-color: transparent;
transition: 150ms;
&:hover {
text-decoration-color: currentColor;
text-decoration: underline;
}
}
& svg {
width: 1em;
transition: 150ms;
}
& .prev:hover svg {
transform: translateX(-50%);
}
& .next:hover svg {
transform: translateX(50%);
}
}
</style>
<svelte:head>
<title>{title} | Joe's Blog</title>
<meta property="og:title" content="{title} | Joe's Blog">
<meta property="og:type" content="article">
<meta property="og:url" content="https://blog.jfmonty2.com/{slug}">
<meta property="og:description" content={description}>
<meta property="og:site_name" content="Joe's Blog">
<!-- Put this here for now, until I can get custom components working for codeblocks -->
<link rel="preload" href="/Hack-Regular.woff2" as="font" type="font/woff2">
</svelte:head>
<div class="page prose">
<div class="title">
<h1 id="{makeSlug(title)}">{title}</h1>
<p class="subtitle">{formatDate(date)}</p>
</div>
<div class="left-gutter">
{#if toc && toc.length !== 0}
<Toc items={toc} />
{/if}
</div>
<div class="post">
<slot></slot>
</div>
<hr>
<div class="footer">
{#if prev}
<a href="/{prev}" class="prev" data-sveltekit-preload-data="hover">
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor">
<path stroke-linecap="round" stroke-linejoin="round" d="M10.5 19.5L3 12m0 0l7.5-7.5M3 12h18" />
</svg>
Previous
</a>
{/if}
{#if next}
<!-- we use margin-left rather than justify-content so it works regardless of whether the "previous" link exists -->
<a href="/{next}" class="next" style="margin-left: auto;" data-sveltekit-preload-data="hover">
<span style:vertical-align={'center'}>Next</span>
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor">
<path stroke-linecap="round" stroke-linejoin="round" d="M13.5 4.5L21 12m0 0l-7.5 7.5M21 12H3" />
</svg>
</a>
{/if}
</div>
</div>

View File

@@ -1,219 +0,0 @@
<!--
  Sidenote.svelte — a numbered note tied to an anchor in the body text.
  Wide viewports: the note floats in the right-hand gutter beside the text.
  Narrow viewports: the note becomes a bottom sheet, shown/hidden by a
  hidden checkbox so it still works with JavaScript disabled; the
  module-level store (JS only) keeps at most one sheet open at a time.
-->
<style lang="scss">
// minimum desirable sidenote width is 15rem, so breakpoint is
// content-width + 2(gap) + 2(15rem) + 2(scrollbar buffer)
$sidenote-breakpoint: 89.5rem;
// this has to be global because otherwise we can't target the body
:global(body) {
counter-reset: sidenote;
}
// the superscript number in the running text that marks the note
.counter.anchor {
color: #444;
margin-left: 0.065rem;
font-size: 0.75em;
position: relative;
bottom: 0.375rem;
// NOTE(review): this overrides the #444 above — one of the two color
// declarations is dead
color: var(--accent-color);
@media(max-width: $sidenote-breakpoint) {
&:hover {
color: var(--content-color);
cursor: pointer;
}
// only top-level anchors get brackets
&:not(.nested)::before {
content: '[';
}
&:not(.nested)::after {
content: ']';
}
}
}
// copy of the number that hangs to the left of the note body itself
.counter.floating {
position: absolute;
transform: translateX(calc(-100% - 0.4em));
color: var(--accent-color);
}
// hidden checkbox that tracks the state of the mobile sidenote
.sidenote-toggle {
display: none;
}
.sidenote {
// anchor the counter, which is absolutely positioned
position: relative;
color: #555;
font-size: var(--content-size-sm);
line-height: 1.25;
// desktop display, this can't coexist with mobile styling
@media(min-width: $sidenote-breakpoint) {
// max sidenote width is 20rem, if the window is too small then it's
// the width of the gutter, minus the gap between sidenote and gutter,
// minus an extra 1.5rem to account for the scrollbar on the right
--gap: 2.5rem;
--gutter-width: calc(50vw - var(--content-width) / 2);
--sidenote-width: min(
24rem,
calc(var(--gutter-width) - var(--gap) - 1.5rem)
);
width: var(--sidenote-width);
float: right;
clear: right;
// negative margin pulls the floated note out into the gutter
margin-right: calc(-1 * var(--sidenote-width) - var(--gap));
margin-bottom: 0.75rem;
}
@media(max-width: $sidenote-breakpoint) {
position: fixed;
left: 0;
right: 0;
bottom: 0;
// since headings have relative position, any that come after
// the current sidenote in the DOM get stacked on top by default
z-index: 10;
// give us a horizontal buffer for the counter and dismiss button
--padding-x: calc(var(--content-padding) + 1.5rem);
padding: 1rem var(--padding-x);
background-color: white;
box-shadow: 0 -2px 4px -1px rgba(0, 0, 0, 0.06), 0 -2px 12px -2px rgba(0, 0, 0, 0.1);
// show the sidenote only when the corresponding checkbox is checked
transform: translateY(calc(100% + 2rem));
transition: transform 125ms;
// when moving from shown -> hidden, ease-in
transition-timing-function: ease-in;
.sidenote-toggle:checked + & {
transform: translateY(0);
// when moving hidden -> shown, ease-out
transition-timing-function: ease-out;
// the active sidenote should be on top of any other sidenotes as well
// (this isn't critical unless you have JS disabled, but it's still annoying)
z-index: 20;
}
}
}
.sidenote-content {
max-width: var(--content-width);
margin: 0 auto;
&.nested {
margin-right: 0;
margin-top: 0.75rem;
margin-bottom: 0;
}
}
// circular chevron button that closes the mobile bottom sheet
.dismiss {
display: block;
width: max-content;
margin: 0.5rem auto 0;
border-radius: 100%;
background: white;
border: 1px solid hsl(0deg, 0%, 75%);
box-shadow: 1px 1px 4px -1px rgba(0, 0, 0, 0.1);
padding: 0.25rem;
color: hsl(0deg, 0%, 50%);
&:hover, &:active {
color: var(--accent-color);
border: 1px solid var(--accent-color);
}
// NOTE(review): this display:none overrides the display:block at the top of
// the rule, so the button is only ever visible inside the media query below
display: none;
@media(max-width: $sidenote-breakpoint) {
display: block;
}
cursor: pointer;
& label {
cursor: pointer;
}
& svg {
height: 1.5rem;
}
}
// nesting still needs work
/* @media(min-width: $sidenote-breakpoint) {
.nested.sidenote {
margin-right: 0;
margin-top: 0.7rem;
margin-bottom: 0;
}
} */
</style>
<script context="module">
import { writable } from 'svelte/store';
// count of the currently-open mobile sidenote, shared by every Sidenote
// instance on the page so only one sheet is open at once
let activeSidenote = writable(null);
</script>
<script>
import { onMount } from 'svelte';
// 1-based note number — presumably injected at build time by the rehype
// plugin's addSidenoteCount; confirm against the plugin
export let count;
let noteBody;
let nested = false;
onMount(() => {
// check to see if the parent node is also a sidenote, if so move this one to the end
let parentContent = noteBody.parentElement.closest('div.sidenote-content');
if (parentContent) {
// extract just the content of the nested note, ditch the rest (i.e. the button)
const noteContent = noteBody.firstChild;
noteBody.remove();
parentContent.appendChild(noteContent);
nested = true;
}
});
let toggle;
activeSidenote.subscribe(activeCount => {
// if we were the active toggle, but are no longer, hide
if (toggle?.checked && activeCount !== count) {
toggle.checked = false;
}
})
function toggleState() {
// if we are the active sidenote, deactivate us (updating the store will trigger subscription)
if ($activeSidenote === count) {
$activeSidenote = null;
}
// otherwise, we are becoming active
else {
$activeSidenote = count;
}
}
</script>
<label for={count} class="counter anchor" class:nested>{count}</label>
<input id={count} bind:this={toggle} on:click={toggleState} type="checkbox" class="sidenote-toggle" />
<!-- outer element so that on mobile it can extend the whole width of the viewport -->
<div class="sidenote" bind:this={noteBody}>
<!-- inner element so that content can be centered -->
<div class="sidenote-content" class:nested>
<span class="counter floating">{count}</span>
<slot></slot>
</div>
<!-- mobile-only close button; the label targets the hidden checkbox above -->
<button class="dismiss">
<label for={count}>
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" class="w-6 h-6">
<path stroke-linecap="round" stroke-linejoin="round" d="m19.5 8.25-7.5 7.5-7.5-7.5" />
</svg>
</label>
</button>
</div>

View File

@@ -1,166 +0,0 @@
<!--
  Toc.svelte — sticky table of contents for the left gutter. As the reader
  scrolls, the heading/subheading closest above the top third of the
  viewport is highlighted.
-->
<script>
import { onMount } from 'svelte';
import { makeSlug } from '$lib/utils.js';
// [{ text, depth }] entries (from frontmatter); slugs are derived here with
// the same makeSlug used for heading ids, so the anchor links line up
export let items;
items.forEach(i => i.slug = makeSlug(i.text));
let headings = [];
let currentHeadingSlug = null;
let currentSubheadingSlug = null;
// walk the cached heading elements top-to-bottom; the last one above the
// top third of the viewport becomes current
function setCurrentHeading() {
for (const h of headings) {
const yPos = h.getBoundingClientRect().y;
if (yPos > (window.innerHeight / 3)) {
break;
}
if (h.tagName === 'H2') {
currentHeadingSlug = h.id;
currentSubheadingSlug = null;
}
if (h.tagName === 'H3') {
currentSubheadingSlug = h.id
}
}
}
// currently a no-op passthrough; truncation logic kept below for later
function ellipsize(text) {
return text;
// not sure about this, decide on it later
// if (text.length > 40) {
// text = text.slice(0, 40);
// // looks weird when we have an ellipsis following a space
// if (text.slice(-1) === ' ') {
// text = text.slice(0, -1);
// }
// return text + '…';
// }
// return text;
}
onMount (() => {
// These shouldn't change over the life of the page, so we can cache them
headings = Array.from(document.querySelectorAll('h2[id], h3[id]'));
setCurrentHeading();
});
</script>
<svelte:window on:scroll={setCurrentHeading} />
<style lang="scss">
#toc {
position: sticky;
top: 1.5rem;
margin-left: 1rem;
margin-right: 4rem;
max-width: 18rem;
color: var(--content-color-faded);
// minimum desirable TOC width is 8rem
// add 4rem for margins, giving total gutter width of 12.5rem
// multiply by 2 since there are two equally-sized gutters, then add content-width (52.5rem)
@media(max-width: 77.5rem) {
display: none;
}
}
// margin-left is to match the padding on the top-level list items,
// but here it needs to be margin so that the border is also shifted
h5 {
font-variant: petite-caps;
font-weight: 500;
max-width: fit-content;
margin-top: 0;
margin-bottom: 0.25em;
padding-bottom: 0.25em;
border-bottom: 1px solid currentcolor;
// make the border stretch beyond the text just a bit, because I like the effect
padding-right: 1.5rem;
}
ul {
margin: 0;
padding: 0;
list-style: none;
}
li {
position: relative;
margin-top: 0.45em;
font-size: var(--content-size-sm);
// make sure that one item wrapped across multiple lines doesn't just look like multiple items
line-height: 1.1;
&.depth-2 {
align-items: stretch;
margin-bottom: 0.2rem;
}
&.depth-3 {
align-items: center;
margin-bottom: 0.05rem;
}
&.current, &:hover {
color: var(--content-color);
}
}
// bar (h2) or dot (h3) to the left of each entry; filled with the accent
// color on the current or hovered item
.marker {
position: absolute;
left: -0.6rem;
.current &, li:hover & {
background-color: var(--accent-color);
}
&.bar {
width: 0.125rem;
height: 100%;
}
&.dot {
width: 0.2rem;
height: 0.2rem;
border-radius: 50%;
// vertically center within its containing block
top: 0;
bottom: 0;
margin: auto 0;
}
}
// default link styling messes everything up again
a {
color: inherit;
text-decoration: none;
}
</style>
<div id="toc">
<h5>
<span class="heading">Contents</span>
</h5>
<ul>
{#each items as item}
{#if item.depth === 2}
<li class="depth-2" class:current={item.slug === currentHeadingSlug} style:align-items="stretch">
<span class="marker bar"></span>
<a href="#{item.slug}">{ellipsize(item.text)}</a>
</li>
{:else if item.depth === 3}
<li class="depth-3" class:current={item.slug === currentSubheadingSlug} style:align-items="center" style:margin-left="0.75em">
<span class="marker dot"></span>
<a href="#{item.slug}">{ellipsize(item.text)}</a>
</li>
{/if}
{/each}
</ul>
</div>

View File

@@ -1,48 +0,0 @@
<!--
  CSS-counter-based sidenote: renders a superscript number in the running
  text, plus a note span whose own copy of the number hangs in the margin.
  Numbering comes from the `unstyled-sidenote` CSS counter rather than a
  prop, so notes are numbered in document order automatically.
-->
<script>
// when true, repeat the counter to the left of the note body
export let floatingCounter = true;
// extra classes callers pass via class="..." (aliased below since `class`
// is a reserved word)
export let classes = '';
export {classes as class};
</script>
<style>
:global(body) {
counter-reset: sidenote unstyled-sidenote;
}
.counter {
counter-increment: unstyled-sidenote;
margin-left: 0.05rem;
}
.counter::after {
content: counter(unstyled-sidenote);
font-size: 0.75em;
position: relative;
bottom: 0.3em;
color: #0083c4;
}
.sidenote {
color: var(--content-color-faded);
font-size: 0.8rem;
}
.sidenote.floatingCounter::before {
content: counter(unstyled-sidenote);
font-size: 0.75rem;
color: #0083c4;
/* Since the sidenote is floated it counts as a positioned element,
so this would make the counter overlap the start of the text... */
position: absolute;
/* ...except that we move it out to the left and up a bit, so
it's hanging out in space. 100% refers to the width of this
pseudo-element, so we handle different-sized counters the same. */
transform: translate(
calc(-100% - 0.16em),
-0.12em
);
}
</style>
<span class="counter"></span>
<span class="sidenote {classes}" class:floatingCounter={floatingCounter}>
<slot></slot>
</span>

View File

@@ -17,15 +17,11 @@ const weekdays = [
]; ];
export function formatDate(timestr) { export function formatDate(date: Date) {
const datestr = timestr.slice(0, 10); const year = date.getFullYear();
const [year, month, monthday] = datestr.split('-').map(n => parseInt(n)); const month = months[date.getMonth() - 1];
// for some reason the Date constructor expects the month index instead of ordinal const monthday = ordinals[date.getDate() - 1];
const weekdayIdx = new Date(year, month - 1, monthday).getDay(); const weekday = weekdays[date.getDay() - 1];
const names = {
month: months[month - 1], return `${weekday}, the ${monthday} of ${month}, A.D. ${year}`;
monthday: ordinals[monthday - 1],
weekday: weekdays[weekdayIdx],
}
return `${names.weekday}, the ${names.monthday} of ${names.month}, A.D. ${year}`;
} }

View File

@@ -1,8 +0,0 @@
// Characters allowed to remain in a slug: ASCII letters, digits, hyphen.
const DISALLOWED = /[^A-Za-z0-9\-]/g;
// One or more consecutive whitespace characters.
const WHITESPACE = /\s+/g;

/**
 * Convert heading text into a URL-safe anchor slug: lowercase it, collapse
 * each whitespace run into a single hyphen, then strip every remaining
 * character that is not alphanumeric or a hyphen.
 */
export function makeSlug(text) {
    const lowered = text.toLowerCase();
    const hyphenated = lowered.replace(WHITESPACE, '-');
    return hyphenated.replace(DISALLOWED, '');
}

View File

@@ -1,94 +0,0 @@
// const Node = {
// addChild(child) {
// this.children.push(child);
// return child;
// }
// }
/**
 * Create an XML element node.
 * @param {string} name - element name
 * @param {object} [attrs] - attribute name/value map (defaults to {})
 * @param {Array} [children] - initial child nodes (defaults to [])
 * @returns {object} node with an addTag() helper for building trees
 */
export function tag(name, attrs, children) {
    const node = {
        type: 'tag',
        tag: name,
        attrs: attrs || {},
        children: children || [],
        // Append a new child element and return the child, so callers can
        // keep building depth-first.
        addTag(childName, childAttrs, childChildren) {
            const child = tag(childName, childAttrs, childChildren);
            node.children.push(child);
            return child;
        },
    };
    return node;
}
/** Create an XML text node wrapping the given string content. */
export function text(content) {
    return { text: content, type: 'text' };
}
/**
 * Serialize a node tree (built with tag()/text()) to an XML string with
 * 4-space indentation. The XML declaration is emitted only at depth 0.
 *
 * Fixes vs. previous version: text nodes are handled before any tag
 * fragments are built (previously the opening-tag fragment was pushed and
 * then discarded), the leftover debug console.log for undefined children
 * is removed, and the manual depth default is a default parameter.
 *
 * @param {object} node - {type: 'tag'|'text', ...} tree node
 * @param {number} [depth=0] - current indentation level
 * @returns {string} serialized XML
 */
export function serialize(node, depth = 0) {
    const indent = ' '.repeat(depth * 4);
    // a text node among multiple siblings becomes its own indented line
    if (node.type === 'text') {
        return `${indent}${escape(node.text)}`;
    }
    let fragments = [];
    // version declaration, if this is the top level
    if (depth === 0) {
        fragments.push('<?xml version="1.0" encoding="UTF-8"?>\n');
    }
    // opening tag <element attr="value">
    fragments.push(`${indent}<${node.tag}`);
    for (const attr in node.attrs) {
        // NOTE: attribute values are deliberately not escaped (see escape());
        // callers must not pass values containing quotes or angle brackets
        fragments.push(` ${attr}="${node.attrs[attr]}"`);
    }
    // childless elements are self-closed
    if (node.children.length === 0) {
        fragments.push(' />');
        return fragments.join('');
    }
    fragments.push('>');
    // if the only child is a single text node, skip recursion and just dump contents directly
    if (node.children.length === 1 && node.children[0].type === 'text') {
        fragments.push(escape(node.children[0].text));
    }
    // otherwise, start a new line for each child node, then recurse
    else {
        for (const child of node.children) {
            fragments.push('\n');
            fragments.push(serialize(child, depth + 1));
        }
        // close on its own line, back at this element's indent
        fragments.push(`\n${indent}`);
    }
    fragments.push(`</${node.tag}>`);
    return fragments.join('');
}

/**
 * Escape text content for XML. We aren't going to bother with escaping
 * attributes, so we won't worry about quotes.
 */
function escape(text) {
    return text
        .replaceAll('&', '&amp;')
        .replaceAll('<', '&lt;')
        .replaceAll('>', '&gt;');
}

7
src/middleware.ts Normal file
View File

@@ -0,0 +1,7 @@
import { defineMiddleware } from 'astro:middleware';

// Reset the global sidenote counter before every request. Sidenote
// components increment SIDENOTE_COUNT as they render, so zeroing it here
// keeps the numbering scoped to the current page.
export const onRequest = defineMiddleware(function (_context, next) {
    globalThis.SIDENOTE_COUNT = 0;
    return next();
});

27
src/pages/[slug].astro Normal file
View File

@@ -0,0 +1,27 @@
---
import { getCollection } from 'astro:content';
import BaseLayout from '@layouts/BaseLayout.astro';
import Post from '@components/Post.astro';

// One route per post, sorted oldest-first so prev/next follow chronology.
export async function getStaticPaths() {
    const entries = await getCollection('posts');
    entries.sort((a, b) => a.data.date.getTime() - b.data.date.getTime())
    // for each route, the page gets passed the entry itself, plus the previous and next slugs
    // (if any), so that it can render links to them
    return entries.map((entry, idx) => {
        // `??` rather than `||` so only a genuinely missing neighbor maps to null
        const prevSlug = entries[idx - 1]?.id ?? null;
        const nextSlug = entries[idx + 1]?.id ?? null;
        return {
            params: { slug: entry.id },
            props: { entry, prevSlug, nextSlug },
        }
    });
}
---
<BaseLayout>
<Post {...Astro.props} />
</BaseLayout>

7
src/pages/index.astro Normal file
View File

@@ -0,0 +1,7 @@
---
import BaseLayout from '@layouts/BaseLayout.astro';
---
<!-- Placeholder home page; real index content still to come -->
<BaseLayout>
<p>Index file</p>
</BaseLayout>

View File

@@ -1,116 +0,0 @@
import { visit, CONTINUE, EXIT, SKIP, } from 'unist-util-visit';
import { find } from 'unist-util-find';
import { toText } from 'hast-util-to-text';
import { makeSlug } from '../lib/utils.js';
import {writeFileSync} from 'node:fs';
import {toHtml} from 'hast-util-to-html';
// mdsvex rehype plugin. Post-processes each post's HAST to:
//   * swap h1–h6 elements for the <Heading> component (adding slug ids),
//   * wrap the first word of the first paragraph in <Dropcap>, unless the
//     post's frontmatter sets dropcap: false,
//   * number every <Sidenote> component by injecting a count prop,
// then splices the required component imports into the module script.
export function localRehype() {
return (tree, vfile) => {
const needsDropcap = vfile.data.fm.dropcap !== false
let dropcapAdded = false;
let sidenotesCount = 0;
let moduleScript;
// import statements to add, deduplicated across headings etc.
let imports = new Set();
if (needsDropcap) {
imports.add("import Dropcap from '$lib/Dropcap.svelte';");
}
visit(tree, node => {
// add slugs to headings
if (isHeading(node)) {
processHeading(node);
imports.add("import Heading from '$lib/Heading.svelte';");
return SKIP;
}
// mdsvex adds a <script context="module"> so we just hijack that for our own purposes
if (isModuleScript(node)) {
moduleScript = node;
}
// convert first letter/word of first paragraph to <Dropcap word="{whatever}">
if (needsDropcap && !dropcapAdded && isParagraph(node)) {
addDropcap(node);
dropcapAdded = true;
}
// add `count` prop to each <Sidenote> component
if (isSidenote(node)) {
// increment the counter first so that the count starts at 1
sidenotesCount += 1;
addSidenoteCount(node, sidenotesCount);
}
});
// insert our imports at the top of the `<script context="module">` tag
// NOTE(review): if no module-script node was found, moduleScript is
// undefined and the next line throws — relies on mdsvex always emitting one
if (imports.size > 0) {
const script = moduleScript.value;
// split the script where the opening tag ends
const i = script.indexOf('>');
const openingTag = script.slice(0, i + 1);
const remainder = script.slice(i + 1);
// mdsvex uses tabs so we will as well
const importScript = Array.from(imports).join('\n\t');
moduleScript.value = `${openingTag}\n\t${importScript}${remainder}`;
}
// const name = vfile.filename.split('/').findLast(() => true);
// writeFileSync(`scratch/${name}.json`, JSON.stringify(tree, undefined, 4));
}
}
// Replace an <hN> element with <Heading level={N} id={slug}>, deriving the
// slug from the heading's text content.
function processHeading(node) {
    const headingLevel = node.tagName.slice(1);
    node.tagName = 'Heading';
    node.properties.level = headingLevel;
    node.properties.id = makeSlug(toText(node));
}
// Split the first word off the paragraph's first text node and prepend a
// raw <Dropcap> component carrying that word.
function addDropcap(par) {
    const txtNode = find(par, { type: 'text' });
    const splitAt = txtNode.value.search(/\s/);
    const firstWord = txtNode.value.slice(0, splitAt);
    txtNode.value = txtNode.value.slice(splitAt);
    par.children.unshift({
        type: 'raw',
        value: `<Dropcap word="${firstWord}" />`,
    });
}
// Inject a `count={n}` prop into a raw `<Sidenote ...>` tag string,
// immediately before the closing angle bracket.
function addSidenoteCount(node, count) {
    // locate the final > (trailing whitespace allowed)
    const closeIdx = node.value.search(/>\s*$/);
    if (closeIdx < 0) {
        throw new Error('Failed to add counter to element, closing angle bracket not found.');
    }
    const head = node.value.slice(0, closeIdx);
    node.value = `${head} count={${count}}>`;
}
// True for h1–h6 element nodes. The regex is anchored (the previous
// unanchored /h[1-6]/ would match any tag name merely *containing* an
// "h<digit>" sequence).
function isHeading(node) {
    return node.type === 'element' && /^h[1-6]$/.test(node.tagName);
}
// mdsvex emits the component's <script context="module"> as a raw node;
// detect it by its opening tag.
function isModuleScript(node) {
    if (node.type !== 'raw') {
        return false;
    }
    return /^<script context="module">/.test(node.value);
}
// True for <p> element nodes.
function isParagraph(node) {
    if (node.type !== 'element') {
        return false;
    }
    return node.tagName === 'p';
}
// True for raw nodes that contain a <Sidenote ...> component tag.
function isSidenote(node) {
    return node.type === 'raw' && /<\s*Sidenote/.test(node.value);
}

View File

@@ -1,52 +0,0 @@
import { visit } from 'unist-util-visit';
import { toString } from 'mdast-util-to-string';
import fs from 'node:fs';
// build table of contents and inject into frontmatter
// mdsvex remark plugin: walks the MDAST to collect headings into a table of
// contents and derive a plain-text description from the first paragraph,
// storing both in the file's frontmatter. Skipped entirely when the
// frontmatter sets toc: false.
export function localRemark() {
return (tree, vfile) => {
if (vfile.data.fm.toc === false) {
return;
}
let toc = [];
let description = null;
visit(tree, ['heading', 'paragraph'], node => {
// build table of contents and inject into frontmatter
if (node.type === 'heading') {
toc.push({
text: toString(node),
depth: node.depth,
});
}
// inject description (first 25 words of the first paragraph)
// NOTE(review): summarize() actually returns the whole paragraph text,
// not 25 words — comment and implementation disagree; confirm intent
if (node.type === 'paragraph' && description === null) {
description = summarize(node);
}
});
vfile.data.fm.toc = toc;
vfile.data.fm.description = description;
}
}
// convert paragraph to single string after stripping everything between html tags
// Flatten a paragraph node to a single string, dropping everything between
// html-node pairs: the keep flag flips on every html child, so content
// bracketed by an opening and closing html tag is skipped along with the
// tags themselves.
function summarize(par) {
    const kept = [];
    let keeping = true;
    for (const child of par.children) {
        if (child.type === 'html') {
            keeping = !keeping;
            continue;
        }
        if (keeping) {
            kept.push(child);
        }
    }
    return toString({ type: 'paragraph', children: kept });
}

View File

@@ -1,169 +0,0 @@
<!--
  BookPreview.svelte — wraps a link to a book so that hovering (desktop) or
  tapping (mobile) shows a floating preview card with cover image, title,
  and description. Book data comes from books.json keyed by `ref`; the
  shared activePreview store ensures only one preview is open at a time.
-->
<script context="module">
import { writable } from 'svelte/store';
// ref of the preview currently shown anywhere on the page
let activePreview = writable(null);
</script>
<script>
import { tick } from 'svelte';
import data from './books.json';
const images = import.meta.glob('./images/*.jpg', {eager: true});
// key into books.json (and the matching ./images/<ref>.jpg cover image)
export let ref;
const {type, title, author, description, url} = data[ref];
const imageUrl = images[`./images/${ref}.jpg`].default;
$: visible = $activePreview === ref;
let mousePresent = false;
let offset, popover;
// open our preview, then (after the DOM updates) nudge it right if it
// would overflow the left edge of the viewport
async function show() {
$activePreview = ref;
mousePresent = true;
await tick();
const rect = popover.getBoundingClientRect();
// 12px is approximately var(--content-padding)
if (rect.x < 12) {
offset = `${12 - rect.x}px`;
}
}
function hide() {
mousePresent = false;
// mouseenter fires when the mouse moves into the floating div as well,
// so this gives us a "grace period" that applies to either anchor or popover
setTimeout(
() => {
if (!mousePresent && $activePreview === ref) {
$activePreview = null;
}
},
300
);
}
// on touch devices there is no hover, so the first tap opens the preview
// instead of following the link and a second tap closes it
function clickLink(evt) {
// if click happened without hover, then we must be on mobile
if (!visible) {
$activePreview = ref;
evt.preventDefault();
}
// if visible, but mouse is not present, also mobile
else if (visible && !mousePresent) {
$activePreview = null;
evt.preventDefault();
}
}
let detailsLink;
function blurLink(evt) {
// do this in the next task, in case the click was inside the popover
setTimeout(
() => {
// check this here in case it got changed by a different event handler
if ($activePreview == ref) {
$activePreview = null;
}
},
0
)
// if ($activePreview == ref) {
// setTimeout(() => $activePreview = null, 0);
// }
}
</script>
<style lang="scss">
.base {
position: relative;
// on mobile, we want the popover's position to be calculated
// relative to the whole document, not the link text
@media(max-width: 27rem) {
position: static;
}
}
.popover {
position: absolute;
// popover should float above the link text by a bit
bottom: calc(100% + 0.5rem);
// and be centered relative to the link, unless that would put it off screen
left: 50%;
transform: translateX(
calc(-50% + var(--offset, 0px))
);
@media(max-width: 27rem) {
// bounding box is now the whole document
// we want to start from its initial vertical position
bottom: unset;
// center it horizontally, with some space on the sides
width: unset;
left: 1rem;
right: 1rem;
margin-left: auto;
margin-right: auto;
// and move it back up so it's above the text again
transform: translateY(
calc(-100% - 1.5em - 0.5rem)
);
}
// visibility is controlled by the .visible class
display: none;
&.visible {
display: flex;
}
// two-column layout, one for image and one for text
gap: 1rem;
width: 25rem;
height: 192px;
overflow-y: auto;
padding: 0.35rem;
background: white;
box-shadow: 1px 2px 6px rgba(0, 0, 0, 0.1);
border: 1px solid var(--content-color);
z-index: 1;
font-size: var(--content-size-sm);
}
img {
height: 100%;
// sticky position ensures that the image stays visible when we scroll the text
position: sticky;
top: 0;
}
a.details {
color: var(--primary-color);
&:visited {
color: var(--accent-color);
}
}
a:active {
color: var(--accent-color);
}
</style>
<!-- the empty HTML comments below swallow inter-element whitespace so the
anchor and popover render flush with the surrounding prose -->
<span class="base" on:mouseenter={show} on:mouseleave={hide}><!-- get rid of whitespace
--><a href={url} target="_blank" on:click={clickLink} on:blur={blurLink}>
<slot></slot><!--
--></a><!--
--><div class="popover" bind:this={popover} class:visible style:--offset={offset}>
<img src={imageUrl}>
<div>
<h4>{title}</h4>
<p>
{description}
<a class="details" href={url} target="_blank" bind:this={detailsLink}>More</a>
</p>
</div>
</div><!--
--></span>

View File

@@ -1,23 +0,0 @@
{
"lotr": {
"type": "trilogy",
"title": "The Lord of the Rings",
"author": "J. R. R. Tolkien",
"description": "Epic fantasy trilogy written by Oxford professor and linguist J. R. R. Tolkien. Considered by many to be the major trend-setter for the modern fantasy genre.",
"url": "https://www.goodreads.com/series/66175-the-lord-of-the-rings"
},
"neverwhere": {
"type": "book",
"title": "Neverwhere",
"author": "Neil Gaiman",
"description": "Under the streets of London there's a world most people could never dream of. A city of monsters and saints, murderers and angels, knights in armour and pale girls in black velvet. \"Neverwhere\" is the London of the people who have fallen between the cracks.",
"url": "https://www.goodreads.com/book/show/14497.Neverwhere"
},
"earthsea": {
"type": "series",
"title": "Earthsea Cycle",
"author": "Ursula K. Le Guin",
"description": "Series of high fantasy stories set in an archipelago world where names are power and dragons roam the skies.",
"url": "https://www.goodreads.com/series/40909-earthsea-cycle"
}
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 103 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 69 KiB

View File

@@ -1,2 +0,0 @@
// root layout module: pull in the global stylesheet and statically
// prerender every page under this layout
import '../styles/main.scss';
export const prerender = true;

View File

@@ -1,40 +0,0 @@
<!-- Site-wide layout: colored header bar with a centered nav, then the page body -->
<style lang="scss">
.header {
background: var(--primary-color-faded);
}
nav {
max-width: 30rem;
margin: 0 auto;
display: flex;
justify-content: space-between;
& a {
flex: 1;
max-width: 8rem;
padding: 0.25rem 1rem;
font-size: 1.75rem;
color: white;
text-decoration: none;
text-align: center;
&:hover {
// translucent darkening on hover
background: hsl(0deg 0% 0% / 10%);
}
}
}
</style>
<div class="header">
<nav>
<a data-sveltekit-preload-data="hover" href="/">Home</a>
<a data-sveltekit-preload-data="hover" href="/posts">Posts</a>
<a data-sveltekit-preload-data="hover" href="/about">About</a>
</nav>
</div>
<main>
<slot></slot>
</main>

View File

@@ -1,8 +0,0 @@
// Load the post component for the slug chosen server-side, attaching the
// slug and next-post link the page needs onto its metadata.
export async function load({ data }) {
    const module = await import(`./_posts/${data.slug}.svx`);
    // metadata is a mutable exported object, so stamping extra fields on it
    // here is visible to whoever renders the post
    module.metadata.slug = data.slug;
    module.metadata.next = data.next;
    return { post: module.default };
}

View File

@@ -1,11 +0,0 @@
// NOTE(review): siblingPosts is imported but unused here
import { postData, siblingPosts } from './_posts/all.js';
// this is in a "server-side" loader so that we don't end up embedding the metadata
// for every post into the final page
export function load() {
// postData is sorted newest-first, so [0] is the latest post and [1] the one before it
return {
slug: postData[0].slug,
next: postData[1].slug,
};
}

View File

@@ -1,5 +0,0 @@
<script>
// data.post is the compiled .svx post component supplied by this route's load()
export let data;
</script>
<svelte:component this={data.post} />

View File

@@ -1,14 +0,0 @@
<!-- 404 error page: centered heading and apology -->
<style>
h1 {
margin-top: 6rem;
}
h1, p {
text-align: center;
}
</style>
<h1>404</h1>
<p>That page doesn't exist. Sorry!</p>

View File

@@ -1,24 +0,0 @@
import { error } from '@sveltejs/kit';
// Load the post module matching the requested slug. Vite's "unknown
// variable dynamic import" failure (thrown when no file matches the
// template) is translated into a 404; anything else is rethrown. The
// slug and prev/next links are stamped onto the post's metadata.
export async function load({ url, params, data }) {
let post;
try {
post = await import(`../_posts/${params.slug}.svx`);
}
catch (err) {
// NOTE(review): matching on Vite's error message text is brittle across
// Vite versions — confirm on upgrade
if (err.message.match(/Unknown variable dynamic import/)) {
throw error(404, `Not found: ${url.pathname}`);
}
else {
throw err;
}
}
post.metadata.slug = params.slug;
post.metadata.prev = data.prev;
post.metadata.next = data.next;
return {
post: post.default,
}
}

View File

@@ -1,10 +0,0 @@
import { postData } from '../_posts/all.js';

// Return the slugs of the current post's chronological neighbors. postData
// is sorted newest-first, so the *previous* (older) post sits at a higher
// index and the *next* (newer) post at a lower one; either is null at the
// ends of the list.
export function load({ params }) {
    const idx = postData.findIndex((p) => p.slug === params.slug);
    const prev = idx < postData.length - 1 ? postData[idx + 1].slug : null;
    const next = idx > 0 ? postData[idx - 1].slug : null;
    return { prev, next };
}

View File

@@ -1,5 +0,0 @@
<script>
export let data;
</script>
<svelte:component this={data.post} />

View File

@@ -1,26 +0,0 @@
import { dev } from '$app/environment';

// Eagerly import every post module so its frontmatter metadata is available
// at build time. import.meta.globEager is deprecated; the { eager: true }
// form is the supported equivalent (and matches usage elsewhere in the
// project, e.g. BookPreview).
const posts = import.meta.glob('./*.svx', { eager: true });

let postData = [];
for (const path in posts) {
    // skip draft posts in production mode
    if (!dev && posts[path].metadata.draft) {
        continue;
    }
    // slice off the ./ and the .svx
    const slug = path.slice(2, -4);
    posts[path].metadata.slug = slug;
    postData.push(posts[path].metadata);
}

postData.sort((a, b) => {
    // sorting in reverse (newest first), so we flip the intuitive order
    if (a.date > b.date) return -1;
    if (a.date < b.date) return 1;
    return 0;
});

export { postData };

View File

@@ -1,113 +0,0 @@
---
title: Axes of Fantasy
date: 2023-12-26
draft: true
---
<script>
import Sidenote from '$lib/Sidenote.svelte';
import BookPreview from '$projects/fantasy/BookPreview.svelte';
</script>
For a while now, I've had a private taxonomy of fantasy books, based on the distinction (or lack thereof) between the fantasy world and our own. It goes something like this:
* *High Fantasy* is set in a world completely separate from our own, with no passage from one to the other. At the most, there might be faint hints that the fantasy world represents ours in the distant past.
* *Low Fantasy* is set in a fantasy world that is separate from ours, but that can be reached (at least some of the time) by some means, such as a portal, magic object, ritual, etc.
* *Urban Fantasy* is fantasy in which the fantasy world is contained _within_ our world, but typically hidden from the prying eyes of mere mortals in some fashion.
I refer to this as a "personal" taxonomy because as far as I can tell, nobody else shares it. The terms are well-known, of course, and there's some overlap--what most people call "High Fantasy" certainly does tend to be set in fantasy worlds with no connection to our own - but "Urban Fantasy" in particular means a whole lot more to most people than just "fantasy set in our world." Most people seem to agree that urban fantasy should be, well, urban - not that it has to take place in _cities_ strictly speaking, but it should at least portray the fantastical elements in and around the real world, and pay some attention to the question of how it stays out of sight of Muggles.
Obviously, my personal classification system is much simpler and stricter than this. To be honest, it's not terribly useful on its own - while the relationship between a fantasy world and our own is certainly _an_ attribute worth considering for classification purposes, it's far from the only one.
So then I got to thinking: If world-overlap is one "axis" along which fantasy can be ranked, what others are there? If we come up with a sufficiently comprehensive set of axes, can we start identifying existing labels (High Fantasy, Urban Fantasy, Epic Fantasy, etc.) as "clusters" of stories which share the same position on _multiple_ axes?
This means that the ideal basis for an axis should be:
* One-dimensional: Ideally, we'd like to be able to give each fantasy work a number, say from 1-100, so that any one is easily relatable to any other. This can't be a true ratiometric scale, obviously, but having something numeric makes it much easier to do fun stuff like searching for "neighbor" stories that sit near a given story on multiple axes.
* Orthogonal: Axes should be _conceptually_ unrelated to one another. Obviously a lot of axes will _tend_ to cluster, just like certain ingredients are commonly paired across a variety of dishes, but it should be possible _in principle_ for a story to occupy any positions on any given pair of axes.
* Objective: As much as possible, at least. Our existing axis of world overlap does well on this metric: it's usually pretty clear where a given story should fall. Sure, there are a few cases where different people might disagree about which of two stories has more or less overlap, but only when they have a very similar amount of overlap to start with. It doesn't seem likely that different people could end up placing the same story on opposite ends of the axis.
* Impactful: A story's position on the axis should go at least some way toward determining what kind of story it is. For example, the climate of the fantasy world would _not_ do well on this metric, since it doesn't matter a lot whether the world is hot or cold when you're asking how it should be classified.<Sidenote>The most impact I can imagine a fantasy world's climate having is something like the situation in A Song of Ice and Fire, where the extremely-long cycle of seasons (each cycle takes decades, if I recall correctly) lead to political differences because e.g. people younger than a certain age have never experienced a winter. But even then, it isn't the climate itself that most people would base their classification on, it's the political situation. It still seems largely incidental, _for classification purposes_, that the political complexity comes partly from environmental factors.</Sidenote>
Okay, so what different axes can we come up with? Obviously we can start with the original one that I wanted to base my taxonomy on:
## World overlap
This is an easy aspect to use for classification, because it's usually quite clear where a given setting should fall. At the left-most extreme you have what I'll call "Otherworld" fantasy, where the fantasy world has absolutely no connection at all to our own world. At the opposite end you'll find most "Urban" fantasy, where the world depicted _is_ the real world, just with added fantastical bits.
Notable subregions include:
### Otherworld Fantasy
No overlap at all. I think most fantasy that's written tends to fall here. At least, it's what most people think of when you say "fantasy book," and the Wikipedia definition of "Fantasy" specifies that it's "typically set in a fictional universe," so I think it's fair to say that this is the "standard" position for a fantasy story to occupy on this axis.
Examples: <BookPreview ref="lotr">_The Lord of the Rings_</BookPreview>, <BookPreview ref="earthsea">_Earthsea_</BookPreview>, _The Prydain Chronicles_, _Wheel of Time_, _Belgariad_, _A Song of Ice and Fire_, etc. Pick up a book from the fantasy section of a bookstore and there's at least a 50% chance it will fall into this category.
### Mythopoeic Fantasy
Much rarer than the previous category, stories of this type are set in the real world, but in a long-forgotten vanished age of which only the faintest echoes are now known. Think of the "A long time ago, in a galaxy far far away" intro to _Star Wars_<Sidenote>_Star Wars_, of course, isn't typically categorized as fantasy, but rather sci-fi - which is funny because _Star Wars_ has a lot more in common with most fantasy than most sci-fi. Starting with straight-up magic, i.e. the Force.</Sidenote> - the fantasy world has _some_ relation to our own, but for all practical purposes it might as well not exist.
The _Conan_ stories are the only ones I know of that fall clearly into this category, although I'm sure there must be others.
Interestingly enough, Tolkien's original goal in developing his legendarium was to construct this type of setting. In his own words:<Sidenote>This comes from a letter that Tolkien wrote to Milton Waldman, who I believe was his publisher, in 1951.</Sidenote>
> I was from early days grieved by the poverty of my own beloved country: it had no stories of its own (bound up with its tongue and soil), not of the quality that I sought, and found (as an ingredient) in legends of other lands ... Do not laugh! But once upon a time (my crest has long since fallen) I had a mind to make a body of more or less connected legend, ranging from the large and cosmogonic, to the level of romantic fairy-story - the larger founded on the lesser in contact with the earth, the lesser drawing splendour from the vast backcloths - which I could dedicate simply to: to England, to my country.
Unfortunately, perhaps, for Tolkien, but quite fortunately for fans of modern fantasy as we see it today, he wound up creating what is pretty undeniably an Otherworld fantasy. There are occasional references to "nowadays" or "in later times" but even then, the conceit seems to be that the narrator is writing from the later days _of Middle Earth_.
### Portal Fantasy
We now make a rather significant jump into what I'm pretty sure is the second-largest category on this scale, which I'm calling "portal fantasy."<Sidenote>This is actually a term that I've seen used elsewhere, in contrast to the previous two which I just made up.</Sidenote> The "codifying" work for this category<Sidenote>In the same way the LOTR was codifying for much of otherworld fantasy, i.e. it's extremely common now for fantasy worlds to feature Elves, Dwarves, and Men, who all share roughly the same set of characteristics as Tolkien's versions. Even the spelling is Tolkien's - previously, "dwarves" would have been considered incorrect; the standard spelling was "dwarfs."</Sidenote> is, of course, the _Chronicles of Narnia_, but there are plenty of other examples. Apparently there's even a Japanese word for it, _isekai_.<Sidenote>I'm not at all familiar with this subgenre, so I don't know if it's exactly the same thing as what I'm calling "portal fantasy" or just shares some key traits with it.</Sidenote>
Note that the mere existence of portals between worlds, or some sort of established "multiverse," doesn't by itself qualify a story for this category. It's required that one of the worlds in question be _the real world_. Otherwise it's just a different flavor of otherworld fantasy. So no _Riftwar_, _Skyhold_, _Traitor Son Cycle_, etc.
One of the fun things about this classification is that it's a mini-axis in its own right, differentiated primarily by how easy or difficult it is to cross from the real world into the fantasy world or back again:
* On the left or "less overlapping" side you have stories like _Narnia_ or _The Last Rune_, where passage between worlds is spotty and _mostly_ doesn't happen at the behest of the characters, but by happenstance or by the action of some Greater Power that overstrides both worlds.
* Moving rightward, you find stories where points of passage are rare but knowable, usually requiring both a certain time and a certain place. _The Paradise War_ is a good example of this.
* Next you have stories where passage can seemingly be accomplished at any time, but requires a great deal of effort and/or arcane knowledge - think large assemblies of wizards gathered together, chanting in unison around a rune-inscribed circle that glows with eerie light, that sort of thing. _The Wizardry Compiled_ is pretty close to this, from what I remember.
* Finally there are some portal fantasies where the portal exists in a fixed location and can be crossed at any time, e.g. _Stardust_. This seems to be the rarest version, at least based on my own reading.
Other examples of portal fantasy include _The Chronicles of Amber_,<Sidenote>Originally I actually had this split into a separate category that I was going to call "nested-world fantasy", but on further reflection I realized that didn't make sense because a) if the fantasy world is nested inside the real world then it's just some version of [urban fantasy](#urban-fantasy), and b) if it's the other way around, well, every portal fantasy already postulates the existence of some sort of "magical multiverse" that also contains the real world, and it's fundamentally no different whether the main story is set in the multiverse as a whole or just in some particular part of it.</Sidenote> _The Fionavar Tapestry_, the _Oz_ books, _Droon_,<Sidenote>I'm only putting this here for completeness, not because I've read a bunch of them or anything. _furtive glances from side to side_</Sidenote> the _Fairyland_ books, _The Phantom Tollbooth_,<Sidenote>I think, at least? I never actually finished this one.</Sidenote> _Shades of Magic_, and _The Keys to the Kingdom_. There are buckets more, but that's all I can think of right now. Plus, this isn't meant to be an exhaustive catalog or anything.
### Alternate History
Fantasy that's set in our world, but with magic.<Sidenote>Or other fantastical elements, of course. Doesn't have to be literally magic-with-an-M.</Sidenote> I hemmed and hawed a lot about whether to even give this category a spot on this scale. You could easily make the argument that a fantasy version of the real world is just a different world, and all of these stories belong in the Otherworld category.
In the end, though, I decided that the point of this axis is to classify fantasy according to how much the fantasy world overlaps with our own, and alternate history involves _quite a lot of overlap_, even though the end result is a world that's not _quite_ identical with the real world.
Broadly speaking there are two variants of alternate history: 1) Either the fantastic has always been a part of life, or 2) it was suddenly introduced into the world by some (usually fairly cataclysmic) event.
Examples of the first variant include _Cecelia and Kate_, the _Temeraire_ books, _Jonathan Strange and Mr Norrell_, etc. An interesting quirk of this variant is that it's almost always set significantly in the past, but for some reason not _quite_ as far back as the quasi-Medieval era that is the bread and butter of most "standard" fantasy. The Napoleonic/Regency era is popular, as is the Victorian era. Modern-day alternate-history stories of this type seem fairly uncommon--_Bartimaeus_ is the only example I can think of off the top of my head.
Examples of the second variant include _Unsong_, _Reckoners_, and _The Tapestry_. Stories of this type are much more commonly set in the modern day--understandably so, since "what would happen to society if magic were suddenly introduced" is a pretty interesting question to explore.
### Urban Fantasy
This is another term that you'll run across a lot if you do any research at all into fantasy subgenres. Here I'm using it in a very restricted sense, that is, _only_ to refer to the integration of the fantastical elements with the real world, without any of the other themes that are often indicated by the term.
To me, the defining characteristic of urban fantasy is that it's set in the real world, where the fantastical is _present_, but _hidden_. It _has_ to be hidden, because if it weren't then it would unavoidably have a major impact on the world, at which point we'd be back to alternate history.<Sidenote>The ever-relevant TVTropes has [some things to say](https://tvtropes.org/pmwiki/pmwiki.php/Main/Masquerade) on this subject as well.</Sidenote>
So urban fantasy depicts a world where there's magic<Sidenote>Or dragons, or fairies, or whatever fantastical elements the author wants. I'll just use "magic" generally to refer to "the fantastical" for the rest of this section.</Sidenote> but for whatever reason this is completely unknown to most people. Occasional exceptions may be made for top-secret government programs - it isn't that much of a stretch to imagine that if there were magic in the world, then at least some of the powers that be would be aware of it and using it to their advantage. The _Milkweed Triptych_ and the _Checquey Files_ are both examples of this variant.
For the most part, though, the people who know about magic are the people who have magic, plus the occasional Ascended Muggle Sidekick who's there for flavor (and to act as an audience surrogate, probably.) In fact, quite frequently the main conflict of the story is about _preventing_ the magical part of the world from being exposed, either because the magicians are afraid that a world full of angry normies would actually pose a threat to them<Sidenote>In this case the Salem witch trials and similar events are frequently invoked, in-universe as cautionary tales of what might happen "if _they_ find out about us."</Sidenote>, or because the wise and benevolent Wizards' Council has declared that even though they _could_ rule the world, it wouldn't be fair to the poor normies.
Other notable examples of the genre include _The Dresden Files_, _Percy Jackson and the Olympians_, _The Laundry Files_, _Neverwhere_, _American Gods_, the _Artemis Fowl_ books, the _Mither Mages_ series, the _Iron Druid_ series, _Monster Hunter International_, and of course _Harry Potter_.
## Quasi-Historical Era
Another good dimension for differentiating fantasy stories is their "era," so to speak: What real-world historical period provides the basis for the level of technology, social structures, etc?<Sidenote>This is necessarily bound up with the question of _where_ the fantastical cultures get their inspiration, but unfortunately that side of it isn't nearly as easy to map onto a single numerical scale, so I'm going to mostly ignore it for now.</Sidenote>
I don't think it's worth trying to be too precise with the exact historical placement of most fantasy works because most are filled with anachronisms and/or cobbled together from patchworks of different specific times and places,<Sidenote>E.g. the _Belgariad_, which has one country filled with more-or-less French knights of the late Medieval era, and another country populated by not-quite Vikings, in the same world.</Sidenote> so they don't really belong to _one_ precise era.<Sidenote>Exceptions, of course, being things like the _Traitor Son Cycle_, which is _very_ clearly set in an analogue of the late 14th century.</Sidenote> But you can usually get a rough sense of the aesthetic that the author is going for, in broad strokes.
### Antiquity
This is pretty rare, but there are a few examples. The _Codex Alera_ is set in something approximating Imperial Rome, and though I haven't read them I've heard that David Gemmell has written some in an Ancient-Greece-ish setting. I've heard rumors of some ancient Egyptian ones as well.
### Dark Ages
If anything, even rarer than the above. The only ones of which I am aware are the _Belisarius_ series,<Sidenote>Wikipedia terms this "Alternate history science fiction" but it isn't very science-y at all, from what I remember, and it _is_ more than a little magic-y, so I'll call it fantasy.</Sidenote> and (although I haven't read this one) the _Sarantine Mosaic_, both set around 500-600 AD.<Sidenote>So post-fall-of-Rome, which I think counts as Dark Ages.</Sidenote>
### Middle Ages
The _vast_ majority of fantasy is set in something with approximately the aesthetics of Medieval Europe. Tolkien, obviously, was the trend-setter here,<Sidenote>Although interestingly, Tolkien's work seems to clock in rather earlier than your Standard Formulaic Fantasy Setting 1A. Tolkien's world is closest to the _early_ middle ages (circa 1050 or so), from what I understand - e.g. his characters consistently use chain mail rather than plate armor; presumably plate armor doesn't exist in Middle Earth. The obvious reason for this, of course, is that this was the era Tolkien himself had studied most intensively - mostly its literature, from what I understand, but you can't become an expert in the literature of a period without developing at least _some_ sense of what life was like then. So naturally, this was what he drew on when crafting his fantasy settings. Later fantasy, on the other hand, seems to draw most heavily on the _late_ middle ages, circa 1300-1500, with plate armor, chivalry, the occasional joust, etc.</Sidenote>

View File

@@ -1,45 +0,0 @@
---
title: Exposing Docker Containers to your LAN
description: If, for some strange reason, you should want to do such a thing.
date: 2022-03-21
---
<script>
import Sidenote from '$lib/Sidenote.svelte';
</script>
A while back I had occasion to make a number of docker containers directly accessible on the LAN, i.e. without all the usual ceremony of port-forwardism that Docker requires. In retrospect I made it a lot more complicated than it had to be, but I wanted to document the process anyway because you never know when that sort of thing might come in handy.
## Aside: You Probably Don't Want This
In my case, the reason for doing this was so that I could expose multiple different services that all wanted to bind the same port. In other words, given that I was going to be hosting more than one HTTP-based application, I didn't want to have to remember (and type out all the time) a bunch of different ports to distinguish between the services I wanted to talk to. DNS is great, but it only points to IP addresses<Sidenote>Well, SRV records can include ports, but browsers don't pay attention to those.</Sidenote>, after all.
That said, had I only realized it at the time, there's a much better way to accomplish this than exposing entire containers to the LAN, and much less... questionable from a security standpoint: **Just bind multiple IPs on the host**. Docker allows you to specify what IP address to bind when forwarding a port to a container, so you can forward e.g. 192.168.50.21:80 to App 1, and 192.168.50.22:80 to App 2, and neither the apps nor the users need ever worry their pretty little heads about a thing. This is better than exposing the container directly - containerized applications generally expect to be pretty isolated from a networking point of view, with external traffic only hitting the one or two ports that they specify as their window to the outside world. So if some packaged application has to run its own Redis server<Sidenote>Because some people just can't help jamming Redis into every app they write, it's like a spinal reflex or something.</Sidenote>, it might not take the extra step of only binding to localhost, and congratulations now anyone on the LAN can read your session cookies or whatever.<Sidenote>Alternatively you can do what I did: Set up a _shared_ Redis server for a _bunch_ of different applications, in Docker of course, and then _knowingly_ expose that to the entire LAN, and damn the torpedoes. I cannot legally recommend this course of action.</Sidenote>
The caveat here is of course that you need to be sure the IP addresses you use aren't going to be stolen out from under you by somebody's iPad or something next time it connects to the network. This is easy if you control the DHCP server, and either easy or impossible if you don't. For reasons that I've never fully understood, but _probably_ boil down to leaving room for people to do exactly this sort of thing, many standard DHCP configurations assign IPs from just a portion of the available range. `.100` is a common start point in a /24 network, so you can usually expect that `.2`-`.99`<Sidenote>Someday I'm going to set up a network where the router is at, like, .233 or something instead of .1, just to freak out the one or two people who might ever notice.</Sidenote> will be available for you to work your will upon.
The worse solution (exposing containers directly to the LAN) has this same caveat, so it's just worse in every way, there's really no advantage except that _maybe_ it's lower-overhead, since not as much forwarding of packets needs to take place. So yeah, probably just don't unless your containerized application _really needs_ Layer 2 access to the network, like it's an intrusion detection system and needs to keep an eye on broadcast traffic or something.
## Anyway
With that all out of the way, having hopefully convinced you that this is almost never a good idea, here's how to do it:
```
docker network create \\
-d ipvlan \\
--subnet 192.168.50.0/24 \\
--gateway 192.168.50.1 \\
-o parent=eth0 \\
lan
docker run --network lan --ip 192.168.50.24 some/image:tag
```
That's it! You're done, congratulations. (Obviously `--subnet`, `--gateway`, and `-o parent` should be fed values appropriate to your network.)
This isn't actually what the first draft of this post said. Initially I was going to suggest using the `macvlan` driver, and then go into a whole spiel about how if you do this and you also want the host to be able to talk to its containers, then you have to create _another_ (non-Docker-managed) `macvlan` interface in `bridge` mode, then route an IP range or two via that interface, as described [here](https://blog.oddbit.com/post/2018-03-12-using-docker-macvlan-networks/).
`ipvlan` is a lot easier, though, and gives you almost exactly the same result. The only difference is that with `macvlan` Docker will actually make up a MAC address for the virtual interface and respond to ARP queries and so on with that. With `ipvlan` it just uses the host MAC. My suspicion is that this is probably another argument _for_ `ipvlan`, as I think I remember reading that multiple MAC addresses on one physical interface is considered a Bad Sign by some network watchdog types of things.
I'm really not sure why I ended up going for `macvlan` in my own case. Maybe `ipvlan` was a later invention so the guides I came across weren't aware of it? Anyway it's there, and it works a lot better than `macvlan` for most use cases, so it's almost certainly what you should use.<Sidenote>In the event that you need to use either of them, that is. Which you probably [don't](#aside-you-probably-dont-want-this).</Sidenote>
So there you have it. You can dump containers on your LAN, and they will (from a networking standpoint) behave as if they were their own machines. But you probably don't want to.

View File

@@ -1,171 +0,0 @@
---
title: The Hitchhiker's Guide to Mesh VPNs
description: The golden age of VPNery is upon us.
date: 2022-03-17
---
<script>
import Sidenote from '$lib/Sidenote.svelte';
</script>
Recently at work we've been moving to a new VPN, and naturally as part of that process we did a bunch of research into the available options before settling on one. Mostly I want to document that for my own future reference, so that if this question comes up again I don't have to go redo it all, but if it ends up being helpful to someone else someday then that's great too. (If I ever get this blog site launched, that is. Currently it's not looking too good.)
TL;DR: We ended up going with [Tailscale](https://tailscale.com), because it looked the most user-friendly, had the security features we wanted, and was something I had already used personally so it was more of a known quantity than some of the others.
## A Brief History of VPNing
There are a lot of different VPN softwares out there. Traditionally there were two main types: site-to-site and client-server. Site-to-site VPNs were for connecting geographically separated LANs into one big super-LAN, useful if you had one company with two offices in different cities or something. Client-server VPNs were for hooking individual users outside the office into your corporate network so that they could access the fileshare, locally-hosted whatevers, and so on. Maybe you could even enforce traffic filtering policies by forcing all of their traffic to go through the VPN first, where it could be inspected and potentially blocked if it were determined to be non-kosher. Seems a bit control-freaky to me, but maybe if I were responsible for the network administration of thousands of users I'd feel differently.
More recently, things have started to change in the VPN world. A new power is rising; its victory is at hand. This night, the land will be stained with the blood of IPSec. Erm. Ahem. The new breed is "mesh VPNs," and they're really starting to take hold.<Sidenote>To be fair, they're not _new_ exactly; the oldest one of which I am aware has been around since 1998. It's just that for some reason nobody paid them much attention until more recently.</Sidenote> The main difference is that instead of being site-to-site or client-server (also known as hub-and-spoke), mesh VPNs establish a _direct_ network transit between any pair of devices that want to communicate. Which is great; it means you can send a packet straight to _any other machine on your network_. You can extend your LAN across any geographical boundaries and (almost) any network conditions, while still remaining secure in the knowledge that your communication is totally encrypted and eavesdropper-proof.
Actually, it's even _better_ than a LAN, because you can enforce access control rules on packets flowing between any two nodes, rather than just packets that cross a network boundary. This is a Big Deal, because it means that your virtual "LAN" is no longer the soft underbelly of your network security. In the olden days, someone who managed to get a foothold in your network was pretty much at liberty to talk to anyone and anything, because what was your firewall going to do about it? ARP-spoof every client on the network so it can inspect the traffic? Sounds like a fast track to a flaky and congested network to me. With a mesh VPN, on the other hand, since every packet between hosts is passing "through" the VPN, it's free to enforce whatever access controls your heart desires.<Sidenote>You may point out that the only way to do this is to leave it up to the individual nodes to enforce these ACLs, and you'd be right. But that's not really a problem, either. Yes, two nodes could collaborate to twiddle with their local copy of the ACL and pass traffic that you haven't permitted. But you know what else two collaborators could do? Send each other emails. Or chat on Discord. Or mail USB sticks across the country. Your firewall isn't there to prevent communication between two consenting parties, it's there to prevent communication between one consenting and one _unconsenting_ party.</Sidenote>
If you've worked with cloud services much you'll notice this is more or less exactly what "security groups" do, and that's no accident. The big public clouds have been using software-defined networking since before everybody else, because you kind of have to when you sell virtual servers. You're already halfway there because if the servers are virtual, then so are their network interfaces, right? And you don't want to just dump them onto a physical LAN because that's just asking for any Tom, Dick or Harry with a credit card to come along and sniff your network traffic. So it's security groups and "virtual private clouds" all the way.
## Table Stakes
All of which is to say, in a somewhat meandering way, that we decided pretty early on that we wanted a mesh VPN solution to replace our existing hub-and-spoke architecture. For us the security implications (as discussed above) were the main draw, but a mesh VPN has other advantages over the more classical type. For one thing, it's a lot easier to scale your VPN up when all the network has to do is route packets, and individual hosts are responsible for the encryption/decryption part. Also, mesh VPNs can have better latency because they're a lot more flexible with routing - you're able to take full advantage of the internet's existing mechanisms for minimizing transit time, instead of having to make detours through a small set of required nodes. Also, NAT holepunching. _Technically_ not required for a mesh VPN, but pretty useless without it, since the majority of internet-connected devices in the world tend to be behind NATs.<Sidenote>I haven't checked this. Don't quote me on it.</Sidenote>
So for us, the boxes that a VPN needed to tick were:
* Mesh topology
* NAT holepunching
* With ACLs
* User-friendly enough that we could feasibly expect people to install it on their own machines
## Interlude: Wireguard
If you've been following the state of the art in VPNery for the last few years, then you've heard of [Wireguard](https://wireguard.com). It first started making serious waves (to my knowledge) in 2018, when Linus Torvalds referred to it as a "work of art" (as compared to OpenVPN and IPSec) on the Linux kernel mailing list. Given Torvalds' reputation for acerbic comments regarding code quality, the fact that he was referring to _someone else's code_ as a "work of art" raised a few eyebrows. One thing led to another, eventually Wireguard was adopted into the mainline Linux kernel, and Jason A. Donenfeld became the herald of the new Golden Age of Networking.
Wireguard is relevant to our discussion for being an encrypted tunnel protocol that Works Really Well, which is why at least three of the options I've looked at are based on it. I say "based on", however, because Wireguard is _not_ a mesh VPN on its own. By itself, Wireguard gives you nothing more than an encrypted tunnel between two points. It's fast and low-latency and (can be) in-kernel so it's very low-overhead, and the connections are all secured with public/private keypairs like SSH. Also like SSH, however, it gives you exactly zero help when it comes to distributing those keys, and if you're looking for some form of automatic peer discovery you're barking up the wrong tree.
## The Field
That's ok, though, because there are a lot of mesh VPNs out there that do all those things, some of them built on Wireguard and some not, so let's talk about them!
### ZeroTier
I'm starting with this one because it's one of the most well-established players (been around since 2011, in fact) and was the one I personally discovered first. ZeroTier is a mesh VPN that provides ACLs and NAT holepunching, like everything that we're interested in. Unlike _any_ of the others, though, it actually emulates at layer *2* rather than layer 3, meaning that it can have _broadcast traffic_. This immediately makes it interesting from a user-friendliness standpoint, since how great would it be if your fileshare automatically showed up on your VPN via its built-in mDNS (or whatever) advertisement features?
Another nice feature of Zerotier is that connecting to a network requires a lot less ceremony than some of the other options. Just enter the 16-digit network id, then wait for the network admin to approve your join request. Or, if it's a public<Sidenote>Yes, a "public virtual private network". No, it doesn't have to make sense.</Sidenote> ZeroTier network, you get in immediately.
That's the theory, at least. In practice - well, in practice I haven't tried it with broadcast traffic. I have, however, tried it to connect my own personal network of devices (desktops, laptop, Raspberry Pi, a server or two, and some cloud VMs). Short story: It didn't work all that well for me. To be fair, I could usually get _some_ kind of connectivity, but it was very unpredictable in both bandwidth and latency. In a particularly frustrating twist, the two nodes that I had the _most_ trouble connecting were cloud VMs from different providers, which makes _no sense_ because the main thing that kills these sorts of mesh VPNs is NAT, and the VMs all had _public IPv4 addresses._ This should have been _easy!_
Anyway, although I no longer use it, I do retain a soft spot in my heart for Zerotier, and it has some characteristics (the aforementioned VLAN properties) that really set it apart from the rest. If I were trying to set up a virtual LAN party with a group of friends to play a local-network-only game, I'd probably try Zerotier first.
Also you can self-host the network controller, although I think you lose the shiny web interface if you do that and have to use the API to configure it.
### Nebula
[Nebula](https://github.com/slackhq/nebula) is one of the newer crop of mesh VPNs that seem to be popping up like weeds lately. It ticks most of our boxes (mesh, ACLs, NAT holepunching) but does so in ways that all seem just _ever_ so slightly sub-optimal (for us, at least). It's based on the Noise protocol framework<Sidenote>Which I understand at only the most basic level. Something something ChaCha Poly1305 elliptic curves?</Sidenote>, on which Wireguard is also based, making them... sibling protocols, I guess?
Nebula was developed by Slack to support their... somewhat _interesting_ [architecture](https://slack.engineering/building-the-next-evolution-of-cloud-networks-at-slack/),<Sidenote>Look, I don't work at Slack, I'm not terribly familiar with their requirements... but is it really the simplest solution to use _hundreds of AWS accounts_ to manage your resources? At that scale, can't you just... rent a bunch of bare metal servers and hook them into a big cluster with, like, Nomad and Consul or something? I dunno. Maybe it's all justified, I'm just not convinced.</Sidenote> and seems like a pretty solid piece of work. It's completely self-hostable, which I consider a plus, it uses modern cryptography, and it probably works very well for the use case for which it was designed. Unfortunately for our use case, it's not really designed to be used directly by end-users, e.g. the only way to configure it seems to be through its main config file, and the only way to operate it is through the CLI. Not a problem when all you need to do is hook together a bunch of cloud VMs and the odd dev machine or two, but not great if you want Janice over in HR to be able to talk to the network share.
The other thing I'm not a huge fan of is that as far as I can tell, firewall rules are configured individually on each host. Again, not a problem when you're spinning up VMs from some kind of master image that has the rules all baked in, but not something I want to repeat 50 times on everybody's laptop (or worse, walk them through writing YAML over screen-sharing or something.) I'm sure it wouldn't be too hard to build some kind of automation to work around that, but if we were looking to build our own thing we would have just started with vanilla Wireguard and built up from there.
### Innernet
Which leads us to [Innernet](https://blog.tonari.no/introducing-innernet), which is pretty much just exactly that. The introductory blog post says it better than I can:
> _*In the beginning*_, we had a shared manually-edited WireGuard config file and many sighs were heard whenever we needed to add a new peer to the network.
> _*In the middle ages*_, there were bash scripts and a weird Vault backend with questionable-at-best maintainability that got new machines on the network and coordinated things like IP allocation. Many groans could be heard whenever these flimsy scripts broke for any reason.
> _*In the end*_, we decided to sit down, sigh one long and hopefully final time, and write `innernet`.
So, great! What's more, it's self-hosted, built in Rust (with ♥, no doubt) and uses kernel-mode Wireguard (actually I think it uses "whatever Wireguard is available on the host system", which is kernel-mode if you're on Linux and not otherwise). Unfortunately, it's still a fairly immature project, so it's lacking things like (again) user-friendliness, which may or may not be a dealbreaker depending on your wants and needs.
Even more unfortunately, it bases its security model around CIDR network segments, just like old-skool corporate networks, which to my mind is a huge step backwards from the more flexible "security group" model that the other candidates use. The critical difference is that a given device has only _one_ "targetable attribute" with which to specify it in your firewall rules. This tends to lead to over-proliferation of access because Device A is in Group Z but needs access to Thing Q, which the rest of Group Z doesn't _really_ need but you also don't want to move Device A into its own special group because now you have to duplicate the access rules for Group Z, and then if they change you have to remember to update the new group too, and who wants to deal with that? So you give all of Group Z access to Thing Q, and before you know it you're back to having a "soft underbelly" of a LAN where an attacker who gets in can talk to virtually anything they want to if they jump through a few hoops.
The Innernet documentation points out that CIDRs can be nested, which is true, so I guess you can have an `engineering` CIDR and then within that an `engineering-managers` CIDR that has all the access of `engineering` plus a few. But what happens when you have a `sales` CIDR with a `sales-manager` who needs the managery bits to match `engineering-managers`, but not the engineering bits, and oh no you're back to duplicating firewall rules because you've locked yourself into an arbitrary limit of one "role" per device?
In theory you could solve this by allowing a single device to have multiple IPs in multiple different CIDRs, but it's apparently a core principle of Innernet's design that "Peers always have only one assigned IP address, and that address is permanently associated with them." So that's out.
(I'm also less than entirely comfortable with fixed-size address spaces in an environment where they're not _really_ necessary, because what happens when the /24 you've allocated for `doodad-watchers` needs its 257th member? But that's an ancillary concern and could probably be managed fairly easily by careful allocation of address blocks.)
In conclusion, I'm conflicted. There's a lot to like about Innernet, and I'm interested to see where they take it as time goes on, but I find myself disagreeing just a little too much with some of the fundamental design choices. I may still end up trying it out some day, since setting up a new VPN for my personal fleet of network-connected thingies is my idea of a fun weekend, but I doubt I'll ever use it seriously unless there's some significant change in how access control works.
Oh yeah, and there's no Windows client as yet. Hard to sell switching your whole workforce to Linux just so you can use a cool VPN thingy.
### Cloudflare One
Ok, I'm cheating a little bit. [Cloudflare One](https://www.cloudflare.com/cloudflare-one/) technically isn't a mesh VPN, because it always routes your traffic through a Cloudflare gateway, rather than establishing direct links between devices and letting them do the communicating. I'm including it here anyway, because the _result_ is pretty comparable to what you get from these mesh VPNs: A logically "flat" network in which any node can communicate with any other node, subject to centrally-administered access control rules. It even gets you _most_ of the latency and throughput advantages you'd get from a true mesh VPN, because Cloudflare's edge is basically everywhere and its capacity is effectively infinite, as far as the lowly user is concerned.
It's surprisingly inexpensive, as well, with a free tier for up to 50 users, a $7/user/month tier for intermediate cases, and a "call us for pricing" option if you tend to use scientific notation when you talk about your company's market cap. We ended up deciding against it anyway, largely because of some anecdotal claims about its user-friendliness being not-so-great, and the fact that... well, Cloudflare already gets their greasy paws<Sidenote>He said, on the blog site hosted behind Cloudflare's CDN.</Sidenote> on something like 15% of internet traffic as it stands, and do we really want to contribute to that?<Sidenote>Not that I have anything against Cloudflare, mind. They seem great so far. They just give me the same feeling as 2010-era Google, and look how that turned out.</Sidenote>
Also, the one place where you'd feel the lack of true mesh-ness would be LAN communication, which was actually a concern for us. Proper mesh VPNs can detect when two clients are on the same LAN and route their traffic accordingly, so lower latency, higher throughput, yadda yadda. As far as I can tell, Cloudflare's offering needs every packet to pass through the Cloudflare edge (aka "the internet"), meaning it turns LAN hops into WAN hops. Probably not a big deal for their customers, since this product is pretty clearly targeting Proper Enterprise types, and they undoubtedly have built-up layers of LAN cruft that you couldn't dig your way out of with a backhoe and so wouldn't be using it within their LAN anyway. A slightly bigger deal for us, since "route even LAN traffic through the VPN so we can enforce ACLs" was one of our stated goals.
### Netmaker
Netmaker is a newcomer to this space; the first commit in their Github repo is from March of 2021. It looks to be quite functional, though, with the whole nine yards - full mesh, NAT holepunching, ACLs, and traffic relays for those stubborn NATs that just can't be punched. Pretty impressive for a year and change, which is probably why they got funded by YCombinator.
It's fully self-hostable, with some fancy options for HA cluster-type setups if you want to do that. (The Netmaker docs also introduced me to [rqlite](https://github.com/rqlite/rqlite), which looks like quite an interesting project.) We probably came closer to settling on this one than any others in this list (other than the one we did settle on), and I'd still really like to play with it at some point.
It seems to use kernel-mode Wireguard, which is a big plus in my book. Presumably that's platform-dependent, e.g. I don't think MacOS and maybe Windows have kernel-mode Wireguard yet, but presumably it will be easy to slot in once it does arrive on a given platform.
My one gripe is with the way it does ACLs. It looks like the ACL configuration is just a simple yes/no to every distinct pair of peers in your network, the question being "can these two peers communicate directly?" No mention of ports, either source<Sidenote>To be fair, the concept of the "source port" is largely irrelevant when dealing with software-defined networking. In my experience you tend to think about _flows_ more than individual packets (ZeroTier being the exception), so the source port is just whatever ephemeral port gets assigned to the connection.</Sidenote> or destination. Also no mention of groups/roles/tags/etc, which means that the number of buttons to click is going to scale with the square of your network size. Not my idea of fun. On the other hand, ACLs are a very new feature (just added in the last release), so maybe they will improve over time.
Regardless, Netmaker looks like an extremely interesting project and I'd very much like to try it out at some point.
### [Tailscale](https://tailscale.com/)
Obviously, this is the one we settled on. The Cadillac of the bunch. Although not the oldest, I'd probably call Tailscale the most well-established of the candidates in this list. It didn't take them very long (I think they started in 2018 or 2019?) because their product is just really damn good. It slices, it dices, it meshes, it firewalls, and it even twiddles with your DNS settings so that you can type `ping homepi` and `homepi` will resolve to the Tailscale-internal IP of the raspberry pi that's hanging out with the dust bunnies next to your cable modem.
So why did we like it? Well, for one I had been using it for about a year and a half to connect my personal devices, so I knew it would get the job done. That's not the only reason, though. A few of the others:
**User-friendliness:** Installing Tailscale is basically just downloading the app and logging in. There's practically nothing to it. After that it just hums along quietly in the background, and your things are magically connected to your other things whenever you want them to be. This is what networking should feel like. Too bad script kiddies with DDoS botnets have ruined it all for us over the last 20 years.
**The Best NAT holepunching:** I don't think I'm exaggerating here. As [they explain](https://tailscale.com/blog/how-nat-traversal-works/), Tailscale goes a lot further than "try sending packets both ways and give up if it doesn't work." Among the various tricks it pulls is sending a whole bunch of packets and hoping the birthday paradox kicks in and one of them gets through, which I think is pretty clever.
**Magic DNS:** To be fair, I haven't looked super deeply into what all of the competitors do for this, but it's a pretty big quality-of-life feature. Admittedly Tailscale IPs are stable (as long as you don't clear the device's local state), so you could just stick a public DNS record somewhere that points `devicename.yourdomain.net` to a Tailscale IP. You could even automate it, if you really felt like it. Still, _not_ having to do that is worth something, especially given [how much of a pain it is](https://tailscale.com/blog/2021-09-private-dns-with-magicdns/) to manage split-horizon DNS<Sidenote>Which is why this is the Achilles heel of Magic DNS. Immediately upon starting to set up Tailscale we spent an entire morning trying to debug why DNS queries for single-label names on Windows were taking 2+ seconds to resolve. However, since Magic DNS is still officially in beta, I'll give it a pass on that for the time being.</Sidenote> (it's even worse on other platforms, from what I hear.)
Looking back over these I realize that I might be slightly underselling it: it's hard to overemphasize how well Tailscale _just works_. You kind of have to use it to appreciate it - Tailscale discussions are chock-full of people saying variations on "I never understood why everyone was so crazy about it, I mean it's just a mesh VPN right? There's a bunch of those. But then I tried it and OMG THIS IS THE BEST THING EVER TELL EVERYONE!!!" The attention paid to the little details at every level is just phenomenal. If Apple (old Apple, under Steve Jobs) had decided to go after networking rather than laptops and phones, they might have come up with something like Tailscale.
Of course, it's not _perfect_. What ever is? I have a few (minor) nitpicks:
**Cost:** This is probably the one that comes up the most. Tailscale plans start at $5/user/month (except for the free tier, which is only suitable for a single user) and go up from there. Any reasonably-complex network will need the $15/user/month plan, which is (I think) more than any other VPN on this list. You get what you pay for, of course, but that doesn't change the fact that you do pay for it. Absolutely worth it, in my opinion, but it does make it a harder sell to a lot of people.
**Usermode Wireguard:** Obviously this currently only applies to Linux (and maybe BSD?) as far as I'm aware. Still, it would be nice if Tailscale could make use of kernel-mode Wireguard where available, since otherwise you're leaving throughput on the table. For example, between two fairly beefy machines I get about 680 Mb/s throughput when testing with iPerf. Between one beefy machine and one Synology NAS with a wimpy CPU, I get about 300. Obviously the extent to which this matters depends on what you're trying to do, and it's more than fast enough for most use cases. It just bugs me that it could be better.
**Data Sovereignty:** (Network sovereignty?) Different people will weight this one differently, but at the end of the day it's true that Tailscale runs a coordination server that is responsible for telling your network who's in it and what kind of access they get. If they decide to add an invisible node that can talk to any of your devices on any port, there's not really anything you can do about it.<Sidenote>Note that this still doesn't mean they can eavesdrop on network traffic between two nodes you _do_ control. Even if you can't make NAT traversal work and end up using a relay, the actual network flows are encrypted with Wireguard. Effectively, each packet is encrypted with its destination's public key. And since private keys are generated on the client, the control server has no ability to decrypt them.</Sidenote> It's not quite as much control over your infrastructure as a third-party SSO service gets, but it's up there. Oh, and I don't think it's officially mentioned on their site, but I've seen comments from Tailscale employees that they can do an on-premise control server for big enough enterprise installs.
### Headscale
No discussion of Tailscale would be complete without mentioning [Headscale](https://github.com/juanfont/headscale), a community-driven re-implementation of the Tailscale control plane. You can point the official Tailscale clients at it, although they may require [a bit of hackery](https://github.com/juanfont/headscale/blob/main/docs/windows-client.md) to work properly. And the Tailscale people have said that although it's not officially supported, they are personally in favor of its existence, which I take to mean that they _probably_ won't intentionally break its functionality with an update within the immediate future.
It solves the cost issue of Tailscale, although it introduces the cost of having to maintain it yourself, which may or may not be something you'd worry about. It does introduce a UX penalty, and I doubt that's going to change any time soon - the Tailscale people don't seem to mind its existence, but I can't see them going very far out of their way to make it easier for something that exists specifically so that people can avoid paying for their service. Still, if you _really really_ want Tailscale, but you simply can't justify the cost, or you're _especially_ paranoid about the control plane, it's worth a shot.
## The Rest of the Iceberg
The above options are what I've researched in depth, but they're far from the only mesh VPN solutions out there. I've come across others, but didn't look into them closely for one reason or another - they were either missing some critical component of what we needed, or I didn't discover them until too late, or I just got a weird feeling from them for whatever reason. Still, I'll mention them here in case they happen to be what anybody else is looking for:
### Tinc
Tinc is the OG. It's been around since 1998 and still has a community of dedicated users to this day. It does full-mesh, NAT traversal, and even (apparently) some LAN stuff, like ZeroTier.<Sidenote>I don't get the impression it fully emulates Layer 2 the way ZeroTier does, rather it just has the ability to "bridge" LANs together, which I assume just means "forward broadcast traffic over the tunnel." Probably works ok for small LANs, but I'd hate to see how it scales.</Sidenote>
It doesn't do ACLs, as far as I am aware, which made it a non-starter for us, so that's why it's down here rather than up in the previous section. Moreover, I can't help wondering - if Tinc has been doing this so long, why is it still so niche? Mesh VPNs are obviously great, so why hasn't Tinc eaten the world?
One possibility (borne out by a few anecdotes that I've seen online) is that Tinc just doesn't perform very well. And I don't just mean in terms of raw bandwidth<Sidenote>Although its bandwidth doesn't seem to be great, from the few benchmarks I've seen.</Sidenote>, I mean everything. How often does NAT traversal fail? How long does it take state changes to propagate through the network? How often does it randomly disconnect without saying anything?
From a brief glance at its documentation it also seems that it might be a bit of a pain to manage. E.g. the documentation recommends manually distributing configuration by sending config files back and forth, which doesn't sound terribly pleasant.
### PeerVPN
I don't really know too much about this one, it just popped up when I was Googling around. It looks like it has the basics, i.e. peer discovery and NAT traversal, and probably not any kind of access control, but the site is extremely minimal so I can't get much of a read on it.
### FreeLAN
Much like the above, just something that showed up while I was looking around. It looks to be a bigger project than PeerVPN, or at least the website is a little more fleshed out. I honestly can't quite parse out all of its features - I don't _think_ it does NAT traversal? I can't quite tell for sure, though. The documentation is a little light. Although it does mention that it uses X.509 certificates, which is an instant turnoff for me because messing with X.509 is a _pain_.
### VPNCloud
VPNCloud is a little more fully-featured, like the bigger players I've mentioned. It doesn't seem to do access control, so it's not a true contender for our use-case, but it does look like it works fairly well for what it does do. Their site claims that they've gotten multiple gigabits of throughput between m5.large AWS instances (so, not terribly beefy) which is better than pretty much anything else I've seen other than vanilla Wireguard.
### Netbird
The first time I ran across [this one](https://netbird.io), it was called "Wiretrustee". A change for the better, I think. It looks to be pretty much exactly "open-source Tailscale", so my guess is it will entirely live or die by how well it executes on that. Obviously Tailscale is great, and Headscale proves that there are people who would like to run the control plane themselves, so there's a market for them. Unfortunately it looks like their monetization scheme is "be Tailscale" (i.e. run a hosted version and charge for anything over a single user), at which point why wouldn't you just use Tailscale?
### And More
There's a handy [list](https://github.com/HarvsG/WireGuardMeshes) on Github of Wireguard mesh things, some of which I've already mentioned. And I'm sure even more will continue to pop up like weeds, since everybody seems to want one and a surprisingly large number of people are happy to just sit down and write their own. I guess that's proof that Wireguard made good choices about what problems to address and what to ignore - not an easy task, especially the latter.
## Where Do We Go From Here
It's an exciting time in the world of networking. The Tailscale people talk a lot about this on their blog, because of course they do, but the advent of high-performance, low-overhead VPNery has opened up some pretty interesting possibilities in the world of how we interact with computers. Most excitingly it promises something of a return to the Good Old LAN Days, where every device on the network was trusted by default and no one ever worried about things like authentication and encryption, because why would anyone want to do anything unpleasant to your computer? The Internet made that position untenable, but Tailscale and its ilk hope to bring it back again, with some added benefits from modern cryptography. I can't say whether they'll succeed, but if nothing else it's looking like a fun ride.

View File

@@ -1,36 +0,0 @@
---
title: Imagining A Passwordless Future
description: Can we replace passwords with something more user-friendly?
date: 2021-04-30
draft: true
dropcap: false
---
<script>
import Sidenote from '$lib/Sidenote.svelte';
</script>
Passwords are the *worst*.
How many times have you groaned because *yet another* password-related thing
was messed up? Forgotten passwords, passwords that you're *sure* you wrote
down but can't find for some reason, passwords that you definitely *did* make
a record of but the site is inexplicably refusing to accept, passwords that
get silently truncated because your bank is still using 3DES for some reason,
the list goes on. It's a constant point of pain for almost everyone, and even
after 20+ years of trying to make it work we *still* haven't figured out a
foolproof method. Password managers help, but they aren't perfect. How many
times have you created a password for a new account somewhere, saved it, and
then discovered that your save didn't go through - maybe it didn't meet the
requirements (because your 24-character string of gibberish didn't include a s
p e c i a l c h a r a c t e r), or maybe your cable box got hit by
lightning just as you clicked Save, or *whatever*. The fact is that passwords
are a pain, and it seems to be a pretty intractable problem.
You know what aren't a pain, or at least not nearly to the same extent? Keys.
That's right, physical stick-em-in-a-lock-and-turn metal keys. They've been
around since forever,<Sidenote>This is an example sidenote.</Sidenote>
and I doubt they'll be going anywhere any time soon.
I really hate passwords.
I use them, of course, because I can't not. And I use a password manager, because to my mind that's the current best compromise between being secure and absolutely losing your mind, but it still isn't great. Sometimes my password manager bugs out and refuses to auto-fill the password box, so I have to go hunt it down and copy-paste it in.<Sidenote>If I'm lucky. If I'm unlucky, the site will have disabled pasting into password inputs because "security," and I'm stuck having to type in a 16-character string of gibberish on a mobile phone, because that's how life is.</Sidenote> Other times I'll create a password, the password manager will happily file it away, and then I'll discover that it didn't meet the site's requirements,<Sidenote>Another test</Sidenote> because my auto-generated gibberish string didn't include the *right* special characters, and now I have the wrong password saved.

View File

@@ -1,101 +0,0 @@
---
title: The Kubernetes Alternative I Wish Existed
date: 2023-10-01
draft: true
---
<script>
import Sidenote from '$lib/Sidenote.svelte';
</script>
I use Kubernetes on my personal server, largely because I wanted to get some experience working with it. It's certainly been helpful in that regard, but after a year and a half or so I think I can pretty confidently say that it's not the ideal tool for my use-case. Duh, I guess? But I think it's worth talking about _why_ that's the case, and what exactly _would_ be the ideal tool.
## The Kubernetes Way™
Kubernetes is a very intrusive orchestration system. It would very much like the apps you're running to be doing things _its_ way, and although that's not a _hard_ requirement it tends to make everything subtly more difficult when that isn't the case. In particular, Kubernetes is targeting the situation where you:
* Have a broad variety of applications that you want to support,
* Have written all or most of those applications yourself,<Sidenote>"You" in the organizational sense, not the personal one.</Sidenote>
* Need those applications to operate at massive scale, e.g. concurrent users in the millions.
That's great if you're Google, and surprise! Kubernetes is a largely Google-originated project,<Sidenote>I'm told that it's a derivative of Borg, Google's in-house orchestration platform.</Sidenote> but it's absolute _garbage_ if you (like me) are just self-hosting apps for your own personal use and enjoyment. It's garbage because, while you still want to support a broad variety of applications, you typically _didn't_ write them yourself and you _most definitely don't_ need to scale to millions of concurrent users. More particularly, this means that the Kubernetes approach of expecting everything to be aware that it's running in Kubernetes and make use of the platform (via cluster roles, CRD's etc) is very much _not_ going to fly. Instead, you want your orchestration platform to be as absolutely transparent as possible: ideally, a running application should need to behave no differently in this hypothetical self-hosting-focused orchestration system than it would if it were running by itself on a Raspberry Pi in your garage. _Most especially_, all the distributed-systems crap that Kubernetes forces on you is pretty much unnecessary, because you don't need to support millions<Sidenote>In fact, typically your number of concurrent users is going to be either 1 or 0.</Sidenote> of concurrent users, and you don't care if you incur a little downtime when the application needs to be upgraded or whatever.
## But Wait
So then why do you need an orchestration platform at all? Why not just use something like [Harbormaster](https://gitlab.com/stavros/harbormaster) and call it a day? That's a valid question, and maybe you don't! In fact, it's quite likely that you don't - orchestration platforms really only make sense when you want to distribute your workload across multiple physical servers, so if you only have the one then why bother? However, I can still think of a couple of reasons why you'd want a cluster even for your personal stuff:
* You don't want everything you host to become completely unavailable if you bork up your server somehow. Yes, I did say above that you can tolerate some downtime, and that's still true - but especially if you like tinkering around with low-level stuff like filesystems and networking, it's quite possible that you'll break things badly enough<Sidenote>And be sufficiently busy with other things, given that we're assuming this is just a hobby for you.</Sidenote> that it will be days or weeks before you can find the time to fix them. If you have multiple servers to which the workloads can migrate while one is down, that problem goes away.
* You don't want to shell out up front for something hefty enough to run All Your Apps, especially as you add more down the road. Maybe you're starting out with a Raspberry pi, and when that becomes insufficient you'd like to just add more Pis rather than putting together a beefy machine with enough RAM to feed your [Paperless](https://github.com/paperless-ngx/paperless-ngx) installation, your [UniFi controller](https://help.ui.com/hc/en-us/articles/360012282453-Self-Hosting-a-UniFi-Network-Server), your Minecraft server(s), and your [Matrix](https://matrix.org) server.
* You have things running in multiple geographical locations and you'd like to be able to manage them all together. Maybe you built your parents a NAS with Jellyfin on it for their files and media, or you run a tiny little proxy (another Raspberry Pi, presumably) in your grandparents' network so that you can inspect things directly when they call you for help because they can't print their tax return.
Okay, sure, maybe this is still a bit niche. But you know what? This is my blog, so I get to be unrealistic if I want to.
## So what's different?
Our hypothetical orchestrator system starts out in the same place as Kubernetes--you have a bunch of containerized applications that need to be run, and a pile of physical servers on which you'd like to run them. You want to be able to specify at a high level in what ways things should run, and how many of them, and so on. You don't want to worry about the fiddly details like deciding which container goes on which host, or manually moving all of `odin`'s containers to `thor` when the Roomba runs over `odin`'s power cable while you're on vacation on the other side of the country. You _might_ even want to be able to specify that a certain service should run _n_ replicas, and be able to scale that up and down as needed, though that's a decidedly less-central feature for our orchestrator than it is for Kubernetes. Like I said above, you don't typically need to replicate your services for traffic capacity, so _if_ you're replicating anything it's probably for availability reasons instead. But true HA is usually quite a pain to achieve, especially for anything that wasn't explicitly designed with that in mind, so I doubt a lot of people bother.
So that much is the same. But we're going to do everything else differently.
Where Kubernetes is intrusive, we want to be transparent. Where Kubernetes is flexible and pluggable, we will be opinionated. Where Kubernetes wants to proliferate statelessness and distributed-systems-ism, we will be perfectly content with stateful monoliths.<Sidenote>And smaller things, too. Microliths?</Sidenote> Where Kubernetes expects cattle, we will accept pets. And so on.
The basic resources of servering are ~~wheat~~ ~~stone~~ ~~lumber~~ compute, storage, and networking, so let's look at each in detail.
## Compute
"Compute" is an amalgamate of CPU and memory, with a side helping of GPU when necessary. Obviously these are all different things, but they tend to work together more directly than either of them does with the other two major resources.
### Scheduling
Every orchestrator of which I am aware is modeled as a first-class distributed system: It's assumed that it will consist of more than one instance, often _many_ more than one, and this is baked in at the ground level.<Sidenote>Shoutout to [K3s](https://k3s.io) here for bucking this trend a bit: while it's perfectly capable of functioning in multi-node mode, it's capable of running as a single node and just using SQLite as its storage backend, which is actually quite nice for the single-node use case.</Sidenote>
I'm not entirely sure this needs to be the case! Sure, for systems like Kubernetes that are, again, intended to map _massive_ amounts of work across _huge_ pools of resources it definitely makes sense; the average `$BIGCORP`-sized Kubernetes deployment probably couldn't even _fit_ the control plane on anything short of practically-a-supercomputer. But for those of us who _don't_ have to support massive scale, I question how necessary this is.
The obvious counterpoint is that distributing the system isn't just for scale, it's also for resiliency. Which is true, and if you don't care about resiliency at all then you should (again) probably just be using Harbormaster or something. But here's the thing: We care about stuff running _on_ the cluster being resilient, but how much do we care about the _control plane_ being resilient? If there's only a single control node, and it's down for a few hours, can't the workers just continue happily running their little things until told otherwise?
We actually have a large-scale example of something sort of like this in the recent Cloudflare outage.
### Virtualization
The de facto standard unit of virtualization in the orchestration world is the container. Containers have been around in one form or another for quite a while, but they really started to take off with the advent of Docker, because Docker made them easy. I want to break with the crowd here, though, and use a different virtualization primitive, namely:
#### AWS Firecracker
You didn't write all these apps yourself, and you don't trust them any further than you can throw them. Containers are great and all, but you'd like a little more isolation. Enter Firecracker. This does add some complexity where resource management is concerned, especially memory, since by default Firecracker wants you to allocate everything up front. But maybe that's ok, or maybe we can build in some [ballooning](https://github.com/firecracker-microvm/firecracker/blob/main/docs/ballooning.md) to keep things under control.
VM's are (somewhat rightfully) regarded as being a lot harder to manage than containers, partly because (as mentioned previously) they tend to be less flexible with regard to memory requirements, but also because it's typically a lot more difficult to do things like keep them all up to date. Managing a fleet of VM's is usually just as operationally difficult as managing a fleet of physical machines.
But [it doesn't have to be this way!](https://fly.io/blog/docker-without-docker/) It's 2023 and the world has more or less decided on Docker<Sidenote>I know we're supposed to call them "OCI Images" now, but they'll always be Docker images to me. Docker started them, Docker popularized them, and then Docker died because it couldn't figure out how to monetize an infrastructure/tooling product. The least we can do is honor its memory by keeping the name alive.</Sidenote> images as the preferred format for packaging server applications. Are they efficient? Hell no. Are they annoying and fiddly, with plenty of [hidden footguns](https://danaepp.com/finding-api-secrets-in-hidden-layers-within-docker-containers)? You bet. But they _work_, and they've massively simplified the process of getting a server application up and running. As someone who has had to administer a Magento 2 installation, it's hard not to find that appealing.
They're especially attractive to the self-hosting-ly inclined, because a well-maintained Docker image tends to keep _itself_ up to date with a bare minimum of automation. I know "automatic updates" are anathema to some, but remember, we're talking self-hosted stuff here--sure, the occasional upgrade may break your Gitea<Sidenote>Or maybe not. I've been running Gitea for years now and never had a blip.</Sidenote> server, but I can almost guarantee that you'll spend less time fixing that than you would have manually applying every update to every app you ever wanted to host, forever.
So we're going to use Docker _images_ but we aren't going to use Docker to run them. This is definitely possible, as alluded to above. Aside from the linked Fly post though, other [attempts](https://github.com/weaveworks-liquidmetal/flintlock) in the same [direction](https://github.com/firecracker-microvm/firecracker-containerd) don't seem to have taken off, so there's probably a fair bit of complexity here that needs to be sorted out.
## Networking
Locked-down by default. You don't trust these apps, so they don't get access to the soft underbelly of your LAN. So it's principle-of-least-privilege all the way. Ideally it should be possible when specifying a new app that it gets network access to an existing app, rather than having to go back and modify the existing one.
## Storage
Kubernetes tends to work best with stateless applications. It's not entirely devoid of [tools](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) for dealing with state, but state requires persistent storage and persistent storage is hard in clusters.<Sidenote>In fact, I get the sense that for a long time you were almost completely on your own with storage, unless you were using a managed Kubernetes project like GKE where you're just supposed to use whatever the provider offers for storage. More recently things like Longhorn have begun improving the situation, but "storage on bare-metal Kubernetes" still feels decidedly like a second-class citizen to me.</Sidenote>
Regardless, we're selfhosting here, which means virtually _everything_ has state. But fear not! Distributed state is hard, yes, but most of our apps aren't going to be truly distributed. That is, typically there's only going to be one instance running at a time, and it's acceptable to shut down the existing instance before spinning up a new one. So let's look at what kind of complexity we can avoid by keeping that in mind.
**We don't need strong consistency:** You're probably just going to be running a single instance of anything that involves state. Sure, you can have multiple SQLite processes writing to the same database and it _can_ be ok with that, unless it isn't.<Sidenote> From the SQLite FAQ: "SQLite uses reader/writer locks to control access to the database. But use caution: this locking mechanism might not work correctly if the database file is kept on an NFS filesystem. This is because `fcntl()` file locking is broken on many NFS implementations."</Sidenote> But you _probably_ don't want to run the risk of data corruption just to save yourself a few seconds of downtime.
This means that whatever solution we come up with for storage is going to be distributed and replicated almost exclusively for durability reasons, rather than for keeping things in sync. Which in turn means that it's _probably fine_ to default to an asynchronous-replication mode, where (from the application's point of view) writes complete before they're confirmed to have safely made it to all the other replicas in the cluster. This is good because the storage target will now appear to function largely like a local storage target, rather than a networked one, so applications that were written with the expectation of using local storage for their state will work just fine. _Most especially_, this makes it _actually realistic_ to distribute our storage across multiple geographic locations, whereas with a synchronous-replication model the latency impact of doing that would make it a non-starter.
**Single-writer, multi-reader is default:** With all that said, inevitably people are going to find a reason to try mounting the same storage target into multiple workloads at once, which will eventually cause conflicts. There's only so much we can do to prevent people from shooting themselves in the foot, but one easy win would be to default to a single-writer, multi-reader mode of operation. That way at least we can prevent write conflicts unless someone intentionally flips the enable-write-conflicts switch, in which case, well, they asked for it.
## Configuration
YAML, probably? It's fashionable to hate on YAML right now, but I've always found it rather pleasant.<Sidenote>Maybe people hate it because their primary experience of using it has been in Kubernetes manifests, which, fair enough.</Sidenote> JSON is out because no comments. TOML is out because nesting sucks. Weird niche supersets of JSON like HuJSON and JSON5 are out because they've been around long enough that if they were going to catch on, they would have by now. Docker Swarm config files<Sidenote>which are basically just Compose files with a few extra bits.</Sidenote> are my exemplar par excellence here. (comparison of Kubernetes and Swarm YAML?) (Of course they are, DX has always been Docker's Thing.)
We are also _definitely_ going to eschew the Kubernetes model of exposing implementation details in the name of extensibility.<Sidenote>See: ReplicaSets, EndpointSlices. There's no reason for these to be first-class API resources like Deployments or Secrets, other than to enable extensibility. You never want users creating EndpointSlices manually, but you might (if you're Kubernetes) want to allow an "operator" service to fiddle with them, so you make them first-class resources because you have no concept of the distinction between external and internal APIs.</Sidenote>
## Workload Grouping
It's always struck me as odd that Kubernetes doesn't have a native concept for a heterogenous grouping of pods. Maybe it's because Kubernetes assumes it's being used to deploy mostly microservices, which are typically managed by independent teams--so workloads that are independent but in a provider/consumer relationship are being managed by different people, probably in different cluster namespaces anyway, so why bother trying to group them?
Regardless, I think Nomad gets this exactly right with the job/group/task hierarchy. I'd like to just copy that wholesale, but with more network isolation.

View File

@@ -1,24 +0,0 @@
---
title: 'Languages: High and Low'
description: How high is up?
date: 2022-08-19
draft: true
---
<script>import Sidenote from '$lib/Sidenote.svelte';</script>
Here's a fun way to start an argument: Go ask on a programming forum whether C should be considered a "high-level" or "low-level" language. The majority of responses will be along the lines of "low-level, obviously, you have to call `malloc()` all the time", but every once in a while you'll find some crusty old wizard who earned his stripes on MULTICS grousing about how kids these days don't understand what a _real_ low-level language looks like, and back in my day we had to flip individual bits with our teeth, and so on.
The truth is, asking whether a language is high- or low-level unqualifiedly is sort of like asking whether an object is heavy. Heavy relative to what? The question is open-ended without some frame of reference.<Sidenote>Of course, most people have an implicit frame of reference for this question that resolves to "heavy relative to the objects I commonly encounter in my day-to-day life". As it turns out (in my opinion), most programmers have a similar implicit frame of reference when it comes to languages, which is where you get the disagreement I mentioned earlier.</Sidenote> A boulder is heavy relative to a peanut, but quite light compared to an oil tanker.
A better question, in my opinion, is whether one language is _higher-_ or _lower-level_ than another, and this is where it gets interesting. Most people will probably agree that higher-level languages are _more abstract_ than lower-level ones, i.e. they take more of the fiddly details of what's actually going on inside the computer and hide them away so that you, the programmer, don't have to worry about them.
You can probably throw little language-specific details back and forth until the cows come home, but I think there are a few common "break-points" that you can use to group languages into buckets of similarly high- or low-level-ness. To me, they look something like this:
* **Level 1** languages give you control over individual registers (just assembly, basically)
* **Level 2** languages give you control over memory allocation and raw pointers. E.g. C and C++, and Zig I guess? Also a lot of older languages like Pascal and Ada.
* **Level 3** languages handle memory allocation and deallocation for you, but still distinguish between the stack and the heap. Java, C#, and Go fall here, as do Nim, Crystal, probably Pony, V, and others.<Sidenote>You can even make the argument that Rust belongs at this level. Even though it's often referred to as a "systems" language, which most people associate with "low level", it _kind of_ abstracts away the work of allocating and deallocating memory. It just doesn't abstract it quite as _much_ as other languages, since you do still have to think about it.</Sidenote> You can further split this level into those that require a runtime (Java, C#) and those that don't (Go, Nim, Crystal, etc.) Although more properly I guess the latter category just embed their runtime, rather than requiring it to exist separately.
* **Level 4** languages give you control over things like intermediate variables and so on - there are probably a lot of sub-levels here, like having to instantiate loop variables vs. having them handled for you,<Sidenote>Or both! Hi Javascript!</Sidenote> but I think this is the core of it. Most dynamic/interpreted languages probably fall here: Python, JS, Ruby, PHP, Perl, etc.
* **Level 5** languages don't give you control over any of those. The only one I can think of off the top of my head is SQL, but there are probably others. I have a hunch that at this point you tend to see a lot more domain-specific stuff, since it's easier to abstract away details when you know what the use-case will look like.
I can also imagine a Level 0 which gives you control over things that aren't currently exposed by the underlying hardware. For instance, you could have a language construct that allows you to "pin" memory regions to a certain cache level, guaranteeing that you can always access it quickly. Or you could attach a "hint" to a branching codepath that allows you to override the CPU's branch predictor, if you know that one case will be overwhelmingly more common than the other. I wonder whether we'll start to see this sort of thing in the future, as Moore's Law continues to slow down and people start looking for more and more ways to squeeze maximum performance out of their hardware.

View File

@@ -1,30 +0,0 @@
---
title: 'Mixing GUIs and CLIs on Windows: A Cautionary Tale'
date: 2024-06-17
---
<script>import Sidenote from '$lib/Sidenote.svelte';</script>
If you've used desktop Linux, then I'm sorry for you.<Sidenote>I also use desktop Linux. I'm sorry for me, too.</Sidenote> You will, however, most likely be familiar with the practice of using the same app from either the CLI or the GUI, depending on how you invoke it and what you want to do with it. In some cases, the CLI merely replicates the functionality of the GUI, but (due to being, you know, a CLI) is much easier to incorporate in scripts and such. In other cases ([Wezterm](https://wezfurlong.org/wezterm/) is a good example) the GUI app acts as a "server" with which the CLI communicates to cause it to do various things while it runs.
On Linux, this is as natural as breathing. There's nothing in particular that distinguishes a "GUI app" from a "CLI app", other than that the GUI app _happens_ to ultimately call whatever system APIs are involved in creating windows, drawing text, and so on. Moreover, even when running its GUI, a Linux app always has stdout, stderr, etc. In most day-to-day usage, these get stashed in some inscrutable location at the behest of Gnome or XFCE or whatever ends up being responsible for spawning and babysitting GUI apps most of the time, but you can always see them if you launch the app from a terminal instead. In fact, this is a common debugging step when an app is misbehaving: launch it from a terminal so you can see whether it's spitting out errors to console that might help diagnose the problem.
Since Windows also has both GUIs and CLIs, you might naively expect the same sort of thing to work there, but woe betide you if you try to do this. Windows thinks every app must be _either_ a GUI app or a CLI app, and never the twain shall meet, or at least never the twain shall meet without quite a significant degree of jank.
Every Windows executable is flagged somehow<Sidenote>I don't know how precisely, probably a magic bit somewhere in the executable header or something like that.</Sidenote> as "GUI app" or "CLI app", and this results in different behavior on launch. CLI apps are allocated a [console](https://learn.microsoft.com/en-us/windows/console/definitions), on which concept I'm not _entirely_ clear but which seems somewhat similar to a [pty](https://man7.org/linux/man-pages/man7/pty.7.html) on Linux. GUI apps, on the other hand, are not expected to produce console output and so are not allocated a console at all, which means that if they try to e.g. write to stdout they just... don't. I'm not sure what exactly happens when they try: in my experience e.g. `println!()` in Rust just becomes a no-op, but it's possible that this is implemented on the Rust side because writing to stdout from a GUI app would crash your program otherwise, or something.
Aha, says the clever Windows developer, but I am a practitioner of the Deep Magicks, and I know of APIs such as [`AllocConsole`](https://learn.microsoft.com/en-us/windows/console/allocconsole) and [`FreeConsole`](https://learn.microsoft.com/en-us/windows/console/freeconsole) which allow an app to control the existence and attached-ness of its Windows consoles. But not so fast, my wizardly acquaintance. Yes, you can do this, but it's still _janky as hell_. There are two basic approaches: you can either a) flag the executable as a GUI app, then call `AllocConsole` and `AttachConsole` to get a console which can then be used for stdout/err/etc, or you can b) flag the executable as a CLI app, so it gets allocated a console by default, then call `FreeConsole` to get rid of it if you decide you don't want it.
If you do a), the problem is that the app doesn't have a console at its inception, so `AllocConsole` creates an entirely _new_ console, with no connection to the console from which you invoked the app. So it pops up in a new window, which is typically the default terminal emulator<Sidenote>On Windows 10 and earlier, this defaults to `conhost.exe`, which is the terminal emulator equivalent of a stone knife chipped into shape by bashing it against other stones.</Sidenote> rather than whatever you have set up, and - even worse - _it disappears as soon as your app exits_, because of course its lifecycle is tied to that of the app. So the _extremely standard_ CLI behavior of "execute, print some output, then exit" doesn't work, because there's no time to _read_ that output before the app exits and the window disappears.
Alternatively, you can call `AttachConsole` with the PID of the parent process, or you can just pass `-1` instead of a real PID to say "use the console of the parent process". But this is almost as terrible, because - again - the app _doesn't have a console when it launches_, so whatever shell you used to launch it will just assume that it doesn't need to wait for any output and blithely continue on its merry way. If you then attempt to write to stdout, you _will_ see the output, but it will be interleaved with your shell prompt, keyboard input, and so on, so again, not really usable.
Ok, so you do b) - flag your app as a CLI app, then call `FreeConsole` as soon as it launches to detach from the console that gets automatically assigned to it. Unfortunately this doesn't work either. When you launch a CLI app in a context that expects a GUI, such as the Start menu, it gets assigned a brand-new console window, again using whatever is the default terminal emulator. In my experience, it isn't consistently possible (from within the process at least) to call `FreeConsole` quickly enough to prevent this window from at least flashing briefly on the desktop. Livable? Sure, I guess, but it would be a sad world indeed if we never aimed higher than just _livable_.
Up until now, my solution has been to simply create two copies of my executable, one GUI and one CLI, put them in different directories, and add the directory of the CLI executable to my `PATH` so that it's the one that gets invoked when I run `mycommand` in a terminal. This works ok, despite being fairly inelegant, but just today I discovered a better way via [this rant](https://www.devever.net/~hl/win32con).<Sidenote>With whose sentiments I must agree in every particular.</Sidenote> Apparently you can specify the `CREATE_NO_WINDOW` flag when creating the process, which prevents it from creating a new window. Unfortunately, as that page notes, this requires you to control the invocation of the process, so the only way to make proper use of it is to create a "shim" executable that calls your main executable (which will be, in this case, the CLI-first version) with the `CREATE_NO_WINDOW` flag, for when you want to run in GUI mode. That post also points out that if you have a `.exe` file and a `.com` file alongside each other, Windows will prefer the `.com` file when the app is invoked via the CLI, so your shim can be `app.exe` and your main executable `app.com`. I haven't tried this yet myself, but it sounds like it would work, and it's a general enough solution<Sidenote>One could even imagine a generalized "shim" executable which, when executed, simply looks at its current executable path, then searches in the same directory for another executable with the same name but the `.com` extension, and executes that with the `CREATE_NO_WINDOW` flag.</Sidenote> that app frameworks such as [Tauri](https://tauri.app/)<Sidenote>Building a Tauri app is how I encountered this problem in the first place, so I would be _quite_ happy if Tauri were to provide a built-in solution.</Sidenote> might eventually handle it for you.
Another solution that was suggested to me recently (I think this is the more "old school" way of handling this problem) is to create a new virtual desktop, then ensure that the spurious console window gets created there so that it's out of sight. I haven't tried this myself, so I'm not familiar with the details, but my guess is that like the above it would require you to control the invocation of the process, so there isn't really any advantage over the other method, and it's still hacky as hell.
Things like this really drive home to me how thoroughly Windows relegates the CLI to being a second-class citizen. In some ways it almost feels like some of the products of overly-optimistic 1960s-era futurism, like [designing a fighter jet without a machine gun](https://en.wikipedia.org/wiki/McDonnell_Douglas_F-4_Phantom_II?useskin=vector) because we have guided missiles now, and _obviously_ those are better, right? But no, in fact it turns out that sometimes a gun was _actually_ preferable to a guided missile, because surprise! Different tools have different strengths and weaknesses.
Of course, if Microsoft had been in charge of the F-4 it would have [taken them 26 years](https://devblogs.microsoft.com/commandline/windows-command-line-introducing-the-windows-pseudo-console-conpty/) to finally add the machine gun, and when they did it would have fired at a 30-degree angle off from the heading of the jet, so I guess we can be thankful that we don't have to use our terminal emulators for air-to-air dogfights, at least.

View File

@@ -1,32 +0,0 @@
---
title: Password Strength, Hackers, and You
date: 2023-10-21
draft: true
---
<script>
import Sidenote from '$lib/Sidenote.svelte';
</script>
Every once in a while, as my friends and family can attest, I go off on a random screed about passwords, password strength, password cracking, logins, etc. To which they listen with polite-if-increasingly-glassy-eyed expressions, followed by an equally polite change of conversational topic. To avoid falling into this conversational tarpit _quite_ so often, I've decided to write it all up here, so that instead of spewing it into an unsuspecting interlocutor's face I can simply link them here.<Sidenote>Maybe I can get business cards printed, or something.</Sidenote> Whereupon they can say "Thanks, that sounds interesting," and proceed to forget that it ever existed. So it's a win-win: I get to feel like I've Made A Difference, and they don't have to listen to a half-hour of only-marginally-interesting infosec jargon.
So.
## Password Strength
Everyone knows that the "best" password is at least 27 characters long and contains a mix of upper and lowercase letters, symbols, atomic symbols, and ancient Egyptian hieroglyphs. What may be slightly less known is exactly _why_ this is the recommended approach to picking passwords, and how the same goal might be accomplished by other, less eye-gougingly awful means.
So how do we measure the "strength" of a password? Ultimately, for the purposes of our discussion here, password strength comes down to one thing: How many tries<Sidenote>On average, that is. Obviously (especially with randomly-chosen passwords) the _exact_ number of tries is going to be somewhat random.</Sidenote> would it take for someone to guess this password? There are two facets to this question: 1) How many possible passwords are there (this is sometimes referred to as the "key space"), and 2) How likely is each of them to be the correct password?
The first of those questions is pretty easy to answer in the most basic sense: The number of possible passwords is the number of possible characters, raised to the power of the maximum password length. For instance, if the maximum password length is 16 characters, and the number of possible characters is 95<Sidenote>I.e. uppercase + lowercase + symbols.</Sidenote>, then the number of possible passwords is 95^16, or roughly 4 × 10^31.
So what makes a "strong" password? Most people have a pretty good intuition for this, I think: A strong password is one that can't be easily guessed. The absolute _worst_ password is something that might be guessed by someone who knows nothing at all about you, such as `password` or `123456`.<Sidenote>This is, in fact, the most common password (or was last I checked), according to [Pwned Passwords](https://haveibeenpwned.com/passwords).</Sidenote> Only slightly stronger is a password that's obvious to anyone who knows the slightest bit about its circumstances, such as your first name or the name of the site/service/etc. to which it logs you in.
Ok, so it's pretty clear what makes a _really_ bad password. But what about an only-sort-of-bad password? This is where intuition starts to veer off the rails a little bit, I think. The "guessability" of a password might be quantified as "how long, on average, would it take to guess"? Unfortunately, the intuitive situation of "guessing" a password is pretty divergent from the reality of what a password cracker is actually doing when they try to crack passwords. Most people, based on the conversations I've had, envision "password guessing" as someone sitting at a computer, typing in potential passwords one by one. Or, maybe slightly more sophisticatedly, they imagine a computer firing off attempted logins from a list of potential passwords, but critically, _against the live system that is under attack._ This is a problem, because most password cracking (at least, the kind you have to worry about) _doesn't_ take place against live login pages. Instead, it happens in what's known as an "offline" attack, when the password cracker has managed to obtain a copy of the password database and starts testing various candidates against it. To explain this, though, we have to take a little detour into...
## Password storage
Unless the system in question is hopelessly insecure (and there are such systems; we'll talk about that in a bit) it doesn't store a copy of your password in plain text. Instead it stores what's called a _hash_, which is what you get when you run the password through a particular type of data-munging process called a _hashing algorithm_. A good password hashing algorithm has two key properties that make it perfect for this use case: It's _non-reversible_, and it's _computationally expensive_.
### One-way hashing
Suppose your password is `password`, and its hash is something like `X03MO1qnZdYdgyfeuILPmQ`. The non-reversibility of the hashing algorithm means that given the second value, there isn't any direct way to derive the first again. The only way to figure it out is to, essentially, guess-and-check against a list of potential candidate inputs. If that sounds a little bit like black magic, don't worry - I felt the same way when I first encountered the concept. How can a hash be irreversible _even if you know the algorithm_?

View File

@@ -1,200 +0,0 @@
---
title: Sidenotes
description: An entirely-too-detailed dive into how I implemented sidenotes for this blog.
date: 2023-08-14
draft: true
---
<script>
import Sidenote from '$lib/Sidenote.svelte';
import UnstyledSidenote from '$lib/UnstyledSidenote.svelte';
</script>
<style>
.counter {
counter-increment: sidenote;
}
.counter::before {
content: counter(sidenote);
color: var(--accent-color);
font-size: 0.75rem;
position: relative;
bottom: 0.2rem;
margin-left: 0.1rem;
}
.sidenote-absolute {
position: absolute;
left: calc(50% + min(100%, var(--content-width)) / 2 + 1rem);
max-width: 12rem;
font-size: 0.75rem;
}
.sidenote-absolute::before {
content: counter(sidenote);
color: var(--accent-color);
font-size: 0.75rem;
position: relative;
bottom: 0.2rem;
margin-right: 0.1rem;
}
:global(.sn-float) {
float: right;
}
:global(.sn-clear) {
float: right;
clear: right;
}
:global(.sn-gutter) {
float: right;
width: 14rem;
margin-right: -14rem;
}
:global(.sn-gap) {
float: right;
width: 14rem;
margin-right: -16rem;
}
:global(.sn-var-width) {
float: right;
--width: min(14rem, calc(50vw - var(--content-width) / 2) - 2rem);
width: var(--width);
margin-right: calc(0rem - var(--width) - 2rem);
}
</style>
One of my major goals when building this blog was to have sidenotes. I've always been a fan of sidenotes on the web, because the most comfortable reading width for a column of text is <em>far</em> less than the absurd amounts of screen width we tend to have available, and what else are we going to use it for?<Sidenote>Some sites use it for ads, of course, which is yet another example of how advertising ruins everything.</Sidenote>
Footnotes don't really work on the web the way they do on paper, since the web doesn't have page breaks. You _can_ stick your footnotes in a floating box at the bottom of the page, so they're visible at the bottom of the text just like they would be on a printed page, but this sacrifices precious vertical space.<Sidenote>On mobile, it's _horizontal_ space that's at a premium, so I do use this approach there. Although I'm a pretty heavy user of sidenotes, so I have to make them toggleable as well or they'd fill up the entire screen.</Sidenote> Plus, you usually end up with the notes further away from the point of divergence than they would be as sidenotes anyway.
I'm also not a huge fan of show-on-hover/click for marginalia, because it requires an extra interaction--and often a fairly precise one, which is always annoying.<Sidenote>This is especially true on mobile, where I've found myself selecting text instead of showing/hiding a note because I didn't get my finger in quite the right place.</Sidenote> Admittedly this style _does_ get you the absolute minimum distance between the marginalia and the main content, but I think the extra interaction is too heavy a price to pay.<Sidenote>Except on mobile, as mentioned. Mobile displays just don't have _any_ extra space at all, so you're left choosing between various unappealing options.</Sidenote>
So we're left with sidenotes, which I consider the crème de la crème of web-based marginalia. So okay, sure, sidenotes are great and all, but how do we actually _do_ them? Well! _wipes imaginary sweat from brow_ It sure was tough, and for a while there I thought I'd never make it through, but I done did figgered it out in the end!<Sidenote>_Narrator:_ He had not figured it out. He had googled around until he found someone else who had figured it out, and then copied their solution.</Sidenote>
## The Suboptimal Solution: Absolute Positioning
I'm naturally lazy, so I wanted the authoring experience to be as low-friction as possible so that I wouldn't end up foregoing sidenotes just because they were too much of a pain to put in. Since I had already settled on [mdsvex](https://mdsvex.pngwn.io/docs) for authoring my posts, I wanted sidenotes to be just another component that I could throw in mid-stream whenever I had an ancillary thought to express. This meant that DOM-wise, the sidenotes were going to be mixed right in with the main body text. Since I was also hoping to do this in pure CSS,<Sidenote>Because as much as I claim not to care, I secretly fear the Hacker News anti-Javascript brigade and desperately crave their approval.</Sidenote> meant that I was going to have to do something that removed the sidenote from the normal document flow, such as `position: absolute`.
My first approach was something like this:
```css
.sidenote {
position: absolute;
/* 50% takes us to the midpoint of the page,
half of content-width gets out out to the gutter,
and the extra 1rem gives us some breathing room. */
left: calc(50% + var(--content-width) / 2 + 1rem);
max-width: 12rem;
font-size: 0.75rem;
}
```
And it worked! Sort of. Here's an example.<span class="counter"></span><span class="sidenote-absolute">My initial take on sidenotes. Seems to be working, right?</span> Unfortunately it has a major flaw: Absolute positioning removes an element from the document flow _entirely_, while I wanted sidenotes to still flow with _each other_. That doesn't happen with this solution--if you have multiple sidenotes too close together, they will overlap because absolute positioning Just Doesn't Care.<span class="counter"></span><span class="sidenote-absolute">Like this one.</span><span class="counter"></span><span class="sidenote-absolute" style="transform: translateY(0.2rem)">And this one, which I've moved down just a smidge to make the overlap more apparent.</span>
Obviously, it isn't that hard to just scan through the page looking for sidenotes, detect when they overlap, and then (since they're already absolutely positioned) adjust their `top` values appropriately to get rid of the overlap. But I didn't want to do this for a variety of reasons.
* I wanted to write this as a Svelte component, which means that's the obvious place to put this logic. But because there are many instances of the component and I only want to run the collision-detection logic once, it has to be coordinated across multiple instances of the same component, which is painful.
* Because we have to wait for the sidenote elements to _have_ concrete positions before we can detect whether they collide, we can't do this until they are mounted (i.e. inserted into the DOM). I was concerned that this would cause [FOUC](https://en.wikipedia.org/wiki/Flash_of_unstyled_content)-like problems, although in retrospect I don't actually recall it happening.<Sidenote>Possibly it was mitigated by the way Svelte batches DOM updates.</Sidenote>However, since I was always planning on static-rendering the site and letting SvelteKit do client-side hydration on page load, I don't think the possibility could ever be ruled out entirely.
* Anything that triggered a reflow could cause the text to move around, but the sidenotes might not follow suit.<Sidenote>Specifically: sidenotes that had been adjusted to get rid of overlap would stay where they were, because they would already have an explicit `top` property. Sidenotes that hadn't been adjusted would move up and down as text reflowed, but this meant they could end up overlapping again.</Sidenote> [There are a lot of things that can cause a reflow](https://gist.github.com/paulirish/5d52fb081b3570c81e3a),<Sidenote>And this is just the ones that come from Javascript! It doesn't even address stuff like resizing the window or expanding/collapsing a `<details>` element.</Sidenote> and I'd have to listen to all of them if I wanted this to be a fully general solution. Sure, I could just be aware of this problem and avoid using reflow-causing events where possible--but I wanted the freedom to be able to add as much interactivity as I felt like to any given blog post without having to worry.
None of these problems are _completely_ inaddressible, but it was all going to be very fiddly to fix properly, so I decided to do a bit more research before throwing in the towel. And boy am I glad that I did, because it turns out that with enough...
## CSS Wizardry
...anything is possible.
Eventually I ran across [this post](https://scripter.co/sidenotes-using-only-css/), which solved my problem almost perfectly. The basic idea is extremely straightforward:
1. Give your sidenotes a `float` and `clear` in the same direction, so that they are removed from the regular document flow _but_ (and this is crucial) _they will still take each other into account for layout purposes._
2. Give them a fixed width, and then:
3. Give them a negative margin equal to the max-width, so that they are pulled out of the body of the text and hang out in the gutter.
It's shockingly simple, to be honest--I would never have thought of it myself, but I'm glad somebody out there did.<Sidenote>It's worth noting that this same approach seems to be used by [Tufte CSS](https://edwardtufte.github.io/tufte-css/), which I had looked at previously but had failed to comprehend, possibly because it doesn't really go into detail about its sidenote mechanism.</Sidenote> The only problem is that you can't nest sidenotes, which is something I had hoped to support, but we'll get to that in a bit.
## Implementation
It took me quite a while (longer than it should have, probably) to really grok this, so I wanted to go through the implementation step-by-step and show the effect of each component part. For starters, let's just get the basic appearance out of the way:
```css
body {
counter-reset: sidenote;
}
.counter {
counter-increment: sidenote;
margin-left: 0.05rem;
}
.counter::after {
content: counter(sidenote);
font-size: 0.75em;
position: relative;
bottom: 0.3em;
color: var(--accent-color);
}
.sidenote {
color: var(--content-color-faded);
font-size: 0.8rem;
}
.sidenote::before {
content: counter(sidenote);
font-size: 0.75rem;
color: var(--accent-color);
/* Since the sidenote is floated it counts as a positioned element,
so this would make the counter overlap the start of the text... */
position: absolute;
/* ...except that we move it out to the left and up a bit, so
it's hanging out in space. 100% refers to the width of this
pseudo-element, so we handle different-sized counters the same. */
transform: translate(
calc(-100% - 0.16em),
-0.12em
);
}
```
This handles font size, color, and counters--CSS counters are very convenient for this, because they automatically adjust themselves whenever I go back and add or remove a sidenote earlier in the page. That gives us a sidenote that looks like this:<UnstyledSidenote floatingCounter={false}>We're going to use a different color counter for these ones, so they can be more easily distinguished.</UnstyledSidenote>
It's still in flow, so our first change will be to remove it from the standard flow with `float: right`. Doing that moves it over to the side, like so.<UnstyledSidenote class="sn-float">The float also unmoors it from the text baseline.</UnstyledSidenote> Notice how it still takes up space in the body text, even though it's happening in a different place than its DOM location.
To keep it from doing that, we'll add a combination of a fixed width and a negative margin. The fixed width is primarily to give us a "target" number for the negative margin, since there isn't a good way to reference the width of the _current_ item when defining margins. (`margin-right: 100%` would move it by the width of the _containing_ block, which is not what we want.) With that in place, here's what we get.<UnstyledSidenote class="sn-gutter">Looking pretty good!</UnstyledSidenote> Unfortunately this example and subsequent ones don't work on mobile, since there are no gutters. Sorry about that! You'll have to view the desktop version to make them show up.
The next step is to keep the sidenotes from overlapping when there are multiple of them in quick succession, like these two.<UnstyledSidenote class="sn-gutter">This is one sidenote.</UnstyledSidenote><UnstyledSidenote class="sn-gutter">Another sidenote, which overlaps the first.</UnstyledSidenote> We do that with the `clear` property, which, when applied to a floated element, causes it to drop below any preceding floated elements on the specified side with which it would otherwise share a line.
This is easiest to show with an example, so let's do that. Here are two sidenotes with just `float: right` and no negative margin.<UnstyledSidenote class="sn-float">One.</UnstyledSidenote><UnstyledSidenote class="sn-float">Two.<span style="margin-right: 0.75rem"></span></UnstyledSidenote> [[Click here]] to animate the negative margin being applied to first the one, then the other. Applying negative margin to the first sidenote creates space for the other one to move to the side, since by nature floats want to form a horizontal row against the side of their containing block. Once we start applying negative margin to the second sidenote, though, normal flow rules don't apply, and they start to overlap.
This is fixed by `clear` because it changes the behavior of floats. Here are the same two sidenotes as above, but with `clear: right` applied to the second.<UnstyledSidenote class="sn-float">One.</UnstyledSidenote><UnstyledSidenote class="sn-clear">Two.</UnstyledSidenote> The `clear` property causes the second sidenote to drop below the first, which happens to be exactly the behavior that we want. All that's left is to apply the negative margin like so<UnstyledSidenote class="sn-clear sn-gutter">Three.</UnstyledSidenote><UnstyledSidenote class="sn-clear sn-gutter">Four.</UnstyledSidenote>and the whole stack will slide right over into the gutter.
It's smack up against the body text, though. In fact, since the floating counter hangs off to the left, it actually overlaps with the body text. (Depending on line wrapping, this may not be immediately apparent from the above.)
We can fix that in one of two ways. 1) We can increase the negative margin so that it's _greater_ than the width of the sidenote, or 2) We can just stick in some padding.<UnstyledSidenote class="sn-gap">Voila! Collision avoided.</UnstyledSidenote> I like the first option better, because it better reflects what we're actually doing here--margin is for creating gaps _outside_ and _between_ elements, while padding is for gaps _inside_.
Here's what we have so far:
```css
.sidenote {
float: right;
width: 14rem;
margin-right: -16rem;
}
```
We still have a bit of a problem, though. Because we've assigned the sidenote a fixed width, it doesn't automatically shrink when the window gets too small for it. Obviously, of course, at _some_ point we're going to switch to the mobile version, which displays at the bottom of the screen and can be toggled on or off. But there are plenty of widths where sidenotes would still work perfectly well, just with a slightly narrower width than our initial `14rem`.
Fortunately, CSS `calc()` is widely supported and does exactly what we need.<UnstyledSidenote class="sn-var-width">Here we are! You may need to resize your window to get full effect.</UnstyledSidenote> Let's take a look:
```css
.sidenote {
float: right;
--width: min(
14rem,
calc( (100vw - var(--content-width) ) / 2 - 2rem )
);
width: var(--width);
margin-right: calc(0rem - var(--width) - 2rem);
}
```
To calculate the width, we take the full viewport (`100vw`) and subtract the width of the main column (`var(--content-width)`). This gives us the combined width of both gutters, but since we only want the width of a single gutter we divide by 2. Then we subtract a further `2rem` so that our width is a little less than the full width of the gutter, to give us some breathing room.
For the margin, we just take the value we calculated for the width and subtract it from 0 (to make it negative), then subtract a further 2rem to pull the sidenote out by that much more to give us breathing room.

View File

@@ -1,130 +0,0 @@
---
title: Let's Design A Simpler SocketIO
date: 2021-10-16
description: SocketIO is packed with features. But do we really need all of them all the time?
draft: true
---
Listen, don't get me wrong. SocketIO is great. It provides tons of features,
fantastic platform support, is widely deployed by a hugely diverse set of
companies, and has been around long enough that it probably has most of the
easily-encountered bugs ironed out.
So why wouldn't you want to use it? Well, a couple of reasons occur to me.
One, it's not exactly small. The unpacked library weighs in at just over 1MB,
which isn't a lot if it's a core component of your application (e.g. if
you're building a real time chat app) but is a bit much if you're only
looking for a small subset of its features.
Two, it's reasonably complex. Again, not insurmountably so, but complex enough
that it probably isn't worth hacking together a basic SocketIO client in your
REPL of choice if you just want to test something real quick. And on the
server side, it's complex enough that you'll probably want to avoid rolling
your own if possible. This becomes especially troublesome if you already have
a working application and you just want to sprinkle in a little real-time
interactivity, rather than building your whole application around that
assumption. In my (admittedly limited) experience, the existing SocketIO
integrations don't always play very nicely with the existing server
frameworks, although if you stick to the most widely-used stuff you're
probably fine.
And honestly, it's just a lot of complexity to introduce if you just want a
simple event stream. You could argue that you don't even need websockets for
this - Server Sent Events are a thing, as are simple HTTP requests with a
streaming response - but in this day and age, the set of solutions to the
problem of "persistent communication between server and client" has pretty
firmly coalesced around websockets. They're there, they're supported, there
are lots of libraries - you might as well just go with the flow.
## Ok, so what are you saying?
Basically, that we need something that's _like_ SocketIO but lighter-weight,
and solves a more limited set of problems. Specifically, the problem I'm
looking to solve is _event streams_ - given a web service and a client, how
does the client detect that _things are happening_ in the background so that
it can update itself accordingly?
The use cases for this are pretty extensive. Most obviously, you can use it to
implement a notifications system on pretty much any webapp that
involves "users want to know when things happen," which is pretty broad.
Maybe you're running an ecommerce shop and you want to notify your customers
that the item they've had their eye on is back in stock. Or you've just
opened up a new promotion and they should check it out. Maybe you're running
a website that displays a stock ticker, and you need up-to-the-second data on
stock prices. Or you've got a dashboard with some kind of real-time
monitoring chart, whatever it's measuring, and you want to keep it up to date
with a minimum of overhead. Pub/sub is a powerful concept, which is why
people keep re-implementing it. Like I'm doing here. Get over it.
## But why can't I just use SocketIO for this, again?
I mean, you _can._ But SocketIO does so much _more_ than simple pub/sub.
Connection multiplexing, automatic reconnects, receipt acknowledgements, the
list goes on. All of these features are great if, again, you are implementing
an instant messenger. They even have a feature called "rooms," which mentions
in its documentation that it "makes it easy to implement private messages,"
among other things, so it's pretty clear who their target is.
And it's a great target! Lots of people need instant messaging. Every website
in the world seems to pop up a bubble saying "Hi, I'm [friendly-sounding
name]! Do you want to talk about giving us money?" 30 seconds after you hit
their page. Everyone with a customer-service team has discovered or will soon
discover that most issues are easier to resolve over text than over the
phone, especially if your CS is outsourced to some foreign country so your
reps all have an accent. SocketIO exists for a reason, and it's a very good
reason, and if that's what you need then go for it, knock yourself out.
But if all you need is a simple event stream with pub/sub semantics, then keep
reading, because that's what I want to talk about.
## Fine. Make your pitch, I'll listen.
The protocol I'm imagining should solve three basic problems:
* Authentication
* Connection management (keepalive, automatic reconnects)
* ...and the actual pub/sub itself, of course.
Let's go through each of these in turn.
### Authentication
The protocol purists might start to set up a bit of a racket here. Ignore
those guys, they suck. Listen, every web-based protocol in the world should
at least be _aware_ of the question of authentication. Maybe the awareness
should stop at "that's being handled so I don't need to think about it," but
at least that much is pretty necessary. I don't know exactly how much Web
traffic is authenticated vs. unauthenticated (ask Cloudflare, they might) but
according to some quick Googling an Akamai bigwig said in 2019 that 83% of
the traffic they see is API traffic. I imagine that API traffic is
overwhelmingly authenticated, and when you factor in the fact that a large
part of the rest is social media, which is also going to be
mostly-authenticated, I imagine you'll end up with somewhere between "a whole
lot" and "the vast majority."
So you need authentication, and websockets don't give it to you. Well, they
leave it open, kinda - RFC 6455 says that the websocket opening handshake
(which starts with an HTTP request) can include
> Optionally, other header
> fields, such as those used to send cookies or request authentication to a
> server.
But in practice, this still kinda sucks. It sucks because you can't
have _one_ authentication method that's dead-simple and works for everybody.
Either you're coming from the browser, in which case you're stuck with
session cookies or URL params and that's it, or you're coming from some kind
of scripting environment where you'd love to be able to just stick a bearer
token in the `Authorization` header like everybody else does, but that's not
how the browser does it so tough luck.
The only solution that works easily with all clients is to put the
auth in a URL param. So let's just do that. Unfortunately, that creates a new
issue: we can't just use a plain bearer token any more, because now it's in
the URL and URLs go all sorts of places - server logs, CDNs, browser address
bars, etc. Probably the best thing to do here is to simply sign the URL
[a la AWS](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth).
Fortunately, since we're only dealing with a very specific type of request,
we don't need to bother with the authenticated headers business that AWS
does.
The browser has very limited capabilities when it comes to modifying the request, so we should probably stick to a signature that can be included directly in the URL as a couple of querystring params.

View File

@@ -1,84 +0,0 @@
---
title: 'Converting ssh keys from old formats'
date: 2024-07-06
---
<script>
import Sidenote from '$lib/Sidenote.svelte';
</script>
Like a lot of people, my main experience with private keys has come from using them for SSH. I'm familiar with the theory, of course - I know generally what asymmetric encryption does,<Sidenote>Although exactly _how_ it does so is still a complete mystery to me. I've looked up descriptions of RSA several times, and even tried to work my way through a toy example, but it's never helped. And I couldn't even _begin_ to explain elliptic curve cryptography beyond "black math magic".</Sidenote> and I know that it means a compromised server can't reveal your private key, which is nice although if you only ever use a given private key to SSH into your server and the server is already compromised, is that really so helpful?<Sidenote>Yes, yes, I know that it means you can use the same private key for _multiple_ things without having to worry, but in practice a lot of people seem to use separate private keys for separate things, and even though I'm not entirely sure why I feel uncomfortable doing otherwise.</Sidenote>
What I was less aware of, however, was the various ways in which private keys can be _stored_, which rather suddenly became a more-than-purely-academic concern to me this past week. I had an old private key lying around which had originally been generated by AWS, and used a rather old format,<Sidenote>The oldest, I believe, that's in widespread use still.</Sidenote> and I needed it to be comprehensible by newer software which loftily refused to have anything to do with such outdated ways of expressing itself.<Sidenote>Who would write such obdurately high-handed software, you ask? Well, uh. Me, as it turns out. In my defense, though, I doubt it would have taken _less_ time to switch to a different SSH-key library than to figure out the particular magic incantation needed to get `ssh-keygen` to do it.</Sidenote> No problem, thought I, I'll just use `ssh-keygen` to convert the old format to a newer format! Unfortunately this was frustratingly<Sidenote>And needlessly, it seems to me?</Sidenote> difficult to figure out, so I'm writing it up here for posterity and so that I never have to look it up again.<Sidenote>You know how it works. Once you've taken the time to really describe a process in detail, you have it locked in and never have to refer back to your notes.</Sidenote>
## Preamble: Fantastic Formats and Where to Find Them
I was aware, of course, that private keys are usually delivered as files containing big blobs of Base64-encoded text, prefaced by headers like `-----BEGIN OPENSSH PRIVATE KEY-----`, and for whatever reason lacking file extensions.<Sidenote>Well, for the ones generated by `ssh-keygen`, at least. OpenSSL-generated ones often use `.key` or `.pem`, but those aren't typically used for SSH, so are less relevant here.</Sidenote> What I wasn't aware of is that there are actually several _different_ such formats, which although they look quite similar from the outside are internally pretty different. There are three you're likely to encounter in the wild:
1. OpenSSH-formatted keys, which start with `BEGIN OPENSSH PRIVATE KEY`<Sidenote>Plus the leading and trailing five dashes, but I'm tired of typing those out.</Sidenote> and are the preferred way of formatting private keys for use with SSH,
2. PKCS#8-formatted keys, which start with `BEGIN PRIVATE KEY` or `BEGIN ENCRYPTED PRIVATE KEY`, and
3. PEM or PKCS#1 format, which starts with `BEGIN RSA PRIVATE KEY`.
The oldest of these is PEM/PKCS#1 - the naming is a bit wishy-washy here. "PEM" when applied specifically to key files _for use with SSH_, i.e. the way that `ssh-keygen` uses it, seems to refer to this specific format. But "PEM" more generally is actually just a [generic container for binary data](https://en.wikipedia.org/w/index.php?title=Privacy-Enhanced_Mail&useskin=vector) that gets used a lot whenever it's helpful for binary data to be expressible as plaintext. In fact, _all_ of the private key formats I've ever seen used with OpenSSH<Sidenote>PuTTY does its own thing, which is why it's a major pain in the neck to convert between them, especially from the OpenSSH side. Fortunately [this is much less of a problem than it used to be.](https://github.com/PowerShell/Win32-OpenSSH?tab=readme-ov-file)</Sidenote> are some form of PEM file in the end.
Whatever you call it, however, this format has the major limitation that it can only handle RSA keys, which is why it fell out of favor when elliptic-curve cryptography started becoming more popular. The successor seems to have been PKCS#8, which is pretty similar but can hold other types of private keys. I haven't researched this one in quite as much detail, but I'm guessing that it also is a little nicer in how it handles encrypting private keys, since when you encrypt a PKCS#1 key it gets a couple of extra headers at the top, like this:
```
-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: AES-128-CBC,0B0A76ABB134DAFEB5C94C71760442EB
tOSEoYVcYcVEXl6TfBRjFRSihE3660NGRu692gAOqdYayozIvU9xpfeVCSlYO...
```
whereas when you encrypt a PKCS#8 private key the header just changes from `BEGIN PRIVATE KEY` to `BEGIN ENCRYPTED PRIVATE KEY` before starting in on the Base64 bit.
Both PKCS#1 and PKCS#8 use the same method for encoding the actual key data (which, when you get right down to it, is usually just a set of numbers with particular properties and relations between them): ASN.1, or Abstract Syntax Notation One. ASN.1 is... [complicated](https://en.wikipedia.org/wiki/ASN.1#Example). It seems very flexible, but it also seems like overkill unless you're trying to describe a very complex document, such as an X.509 certificate, or the legal code of the United States of America. But OpenSSH, it seems, longed for simpler days, when just reading a blasted private key didn't require pulling in a whole pile of parsing machinery, so it struck out on its own, and thereby was born the OpenSSH Private Key Format. This format does _not_ seem to be described in any RFCs, in fact the only detailed descriptions I can find are a [couple](https://coolaj86.com/articles/the-openssh-private-key-format/) of [blog posts](https://dnaeon.github.io/openssh-private-key-binary-format/) from people who figured it out for themselves from the OpenSSH source code.<Sidenote>Presumably this format is much simpler to parse and allows OpenSSH to do away with all the cumbersome drudgery of dealing with ASN.1... or would, except that OpenSSH is still perfectly happy to read PKCS#1 and #8 keys. Maybe the eventual plan is to stop supporting the older formats? It's been 10 years, according to [another random blog post](https://www.thedigitalcatonline.com/blog/2021/06/03/public-key-cryptography-openssh-private-keys/), but maybe give it another 10 and we'll see some change?</Sidenote> I think this format can support every type of key that OpenSSH does, although I haven't personally confirmed that.<Sidenote>This is coming across as sarcastic, but I actually don't blame OpenSSH for coming up with its own private key format. If I had to deal with parsing ASN.1 I'd probably be looking for ways out too.</Sidenote>
## The `ssh-keygen` Manpage is a Tapestry of Lies
So, I thought, I can use `ssh-keygen` to convert between these various and sundry formats, right? It can do that, it _has_ to be able to do that, right?
Well, yes. It _can_, but good luck figuring out _how_. For starters, like many older CLI tools, `ssh-keygen` has an awful lot of flags and options, and it's hard to distinguish between which are _modifiers_ - "do the same thing, but differently" - and _modes of operation_ - "do a different thing entirely". The modern way to handle this distinction is with subcommands which take entirely different sets of arguments, but `ssh-keygen` dates back to a time before that was common.
It also dates back to a time when manpages were the primary way of communicating detailed documentation for CLI tools,<Sidenote>These days it seems more common to provide a reasonably-detailed `--help` output and then just link to web-based docs for more details.</Sidenote> which you'd _think_ would make it possible to figure out how to convert from one private key format to another, but oh-ho-ho! Not so fast, my friend. Here, feast your eyes on this:
```
-i This option will read an unencrypted private (or public) key file in the format specified by the -m option and print an
OpenSSH compatible private (or public) key to stdout. This option allows importing keys from other software, including
several commercial SSH implementations. The default import format is “RFC4716”.
```
Sounds great, right? Import private keys from other formats, i.e. convert them to our format, right? But it's _lying_. The `-i` mode doesn't accept private keys _at all_, that I've been able to tell, whatever their format. Giving it one will first prompt you for the passphrase, if any (so it's lying about needing an unencrypted input, although that's not a big deal) and then tell you bluntly that your file is not in a valid format. The specific error message varies slightly with the particular format - attempting to give it a PEM file (with the appropriate option) returns `<file> is not a recognized public key format`, PKCS8 gets `unrecognised raw private key format`, and specifying OpenSSH format just says `parse key: invalid format`. So really, the only thing this mode is useful for is reading a _public_ key in some PEM-ish format, and spitting out the line that you can use in `authorized_keys` - the one that starts with `ssh-rsa`, `ssh-ed25519`, etc.
## Enlightenment Ensues
But wait! The `-i` option mentions that the formats accepted are specified by the `-m` option, so let's take a look there:
```
-m key_format
Specify a key format for key generation, the -i (import), -e (export) conversion options, and the -p change passphrase
operation. The latter may be used to convert between OpenSSH private key and PEM private key formats. The supported
key formats are: “RFC4716” (RFC 4716/SSH2 public or private key), “PKCS8” (PKCS8 public or private key) or “PEM” (PEM
public key). By default OpenSSH will write newly-generated private keys in its own format, but when converting public
keys for export the default format is “RFC4716”. Setting a format of “PEM” when generating or updating a supported pri
vate key type will cause the key to be stored in the legacy PEM private key format.
```
Notice anything? I didn't, the first eleventy-seven times I read through this, because I was looking for a _list of known formats_, not a hint that you might be able to use _yet another option_ to do something _only marginally related to its core functionality_. So I missed that "The latter may be used to convert between OpenSSH private key and PEM private key<Sidenote>By PEM here OpenSSH apparently means both PKCS#1 _and_ PKCS#8, since it works perfectly well for both.</Sidenote> formats", and instead spent a while chasing my tail about RFC4716. This turns out to be not very helpful because it's _exclusively_ about a PEM-type encoding for _public_ keys, and doesn't mention private keys at all! OpenSSH seems to internally consider RFC4716 as the public-key counterpart to its home-grown private-key format, but this isn't explicitly laid out anywhere. Describing it as "RFC 4716/SSH2 public or private key" is confusing at best, because as we've established RFC 4716 doesn't mention private keys at all. "SSH2 private key" isn't obviously a thing: RFC 4716 public keys have the header `BEGIN SSH2 PUBLIC KEY`, but OpenSSH-format private keys don't say anything about SSH2. The only way to figure out how it's being interpreted is to note that whenever `ssh-keygen` accepts the `-m` option _and_ happens to condescend to operate on private keys instead of public keys, giving it `-m RFC4716` produces and consumes private keys of the OpenSSH flavor.
_Anyway_. The documentation is so obtuse here that I didn't even discover this from the manpage at all, in the end. I had to get it from some random Github gist that I unfortunately can't find any more, but was probably written by someone just as frustrated as I am over the ridiculousness of this whole process. The _only_ way to change the format of a private key is to tell `ssh-keygen` that you want to _change its passphrase_? It's [git checkout](https://stevelosh.com/blog/2013/04/git-koans/#s2-one-thing-well) all over again.
At first I wondered whether maybe this is intentional, for security reasons, so that you don't accidentally remove the password from a private key while changing its format. But finally I don't think that makes sense, since if it's encrypted to start with then `ssh-keygen` is going to need its passphrase before it can make the conversion anyway, in which case there's no reason it can't just keep it hanging around and re-encrypt with the same passphrase after converting.
Most probably this is just a case of a tool evolving organically over time rather than being intentionally designed from the top down, and sure, that's understandable. Nobody sets out to create a tool that lies to you on its manpage<Sidenote>Maybe the `-i` option _did_ work with private keys at some point, although I have difficulty imagining why that functionality might have been removed.</Sidenote> and re-purposes modes of operation for other, only marginally-related operations, it just happens gradually over time because updates and new features are made in isolation, without consideration of the whole.
## Imagining a Brighter Tomorrow
But it doesn't have to be this way! Nothing (that I can see) prevents the `-i` option from being updated to accept private keys as well as public keys: it's clearly perfectly capable of telling when a file _isn't_ a valid _public_ key in the specified format, so it seems like it could just parse it as a private key instead, and keep going if successful. Or an entirely new option could be added for converting private keys. `-c` is already taken for changing comments, but there are a few letters remaining. I don't see a `-j` on the manpage, for instance, or an `-x`.
I realize that an unforgiving reading of my travails in this endeavour might yield the conclusion that I'm an idiot with no reading comprehension, and that the manpage _clearly_ stated the solution to my problem _all along_, and if I had just RTFM<Sidenote>Noob.</Sidenote> then I could have avoided all this frustration, but that seems [a little unfair](https://xkcd.com/293/) to me.<Sidenote>Besides, I'm annoyed, and it's more satisfying to blame others than admit any fault of my own.</Sidenote> When you're writing the help message or manpage for your tool, you should _expect_ that people will be skimming it, looking to pick out the tidbits that are important to them right now, since for any tool of reasonable complexity 95% of the documentation is going to be irrelevant to any single user in any single situation.
How could the manpage be improved? Well, for starters, it could _not lie_ about the fact that the `-i` option doesn't do anything with private keys at all. If it were _really_ trying to be helpful it could even throw in a hint that if you want to work with private keys, you're barking up the wrong tree. Maybe it could say "Note: This option only accepts public keys. For private key conversions see -p and -m." or something like that. The `-p` option should probably also mention somewhere in its description that it happens to also be the preferred method for converting formats, since right now it only talks about changing passphrases.
Anyway, thanks for listening to my TED talk. At least now I'll never forget how to convert a private key again.

View File

@@ -1,58 +0,0 @@
---
title: Sufficiently Advanced Technology Is Often Distinguishable From Magic
description: I see what Arthur C. Clarke was getting at, but I don't think I agree.
date: 2022-05-14
draft: true
---
<script>
import Sidenote from '$lib/Sidenote.svelte';
</script>
Arthur C. Clarke is famous for many things, among them being his dictum that "Sufficiently advanced technology is indistinguishable from magic."<Sidenote>Which I had always thought of as "Clarke's Law", I looked it up when I went to write this and discovered that it's actually Clarke's _Third_ Law. Apparently he has two others. I don't know that I've ever run into them, though. Maybe he should have led with this one?</Sidenote> I'm often happy to take issue with Clarke's opinions, so today I want to put this one on the chopping block.
Before we get started, a note: the pedantic may argue that by "indistinguishable" he simply means "from the outside," as it were. That is, if you're an untrained citizen of the Galactic Empire or whatever, your knowledge of how a hyperdrive works is so far removed from the reality that it might as well be magic - certainly you couldn't build a new one from scratch, or even fix a broken one. That's as may be, but it's not the interpretation that I want to address here, for two reasons: 1) it's boring, and 2) I don't think that's what Clarke actually meant when he coined the phrase.
In fact, I think Clarke was thinking more along these lines: Advanced technology (by which I mean, technology that is more advanced than anything we have today) and magic both postulate that the impossible is possible. In the case of non-fundamental advances you can kind of see how this might come about, like an economically feasible Mach 3 passenger aircraft, for instance. But as technology gets further and further from the current state of the art, and encompasses more and more that's not just "we don't know if it's possible" but "we actually think it's impossible" the divorce from reality becomes so complete that it's _just as profound_ as a wizard who levitates and summons fireballs out of the air by sheer power of will.<Sidenote>It's also interesting to note that Clarke wrote primarily _near-future_ sci-fi - his most famous work is set _twenty years ago_, for goodness sake! But perhaps that has something to do with his apparent disdain for grander flights of sci-fi fancy.</Sidenote>
That's what I'm disagreeing with. In particular I take issue with the term "indistinguishable," because it implies there is _no basis_ by which the technology in question can be distinguished from magic. I think, however, that there usually _is_ such a distinction, and in fact it's quite easy to make. And it doesn't have anything to do with _how_ advanced the technology is, which may be why it didn't occur to Clarke, but this is my blog so I get to be as nitpicky as I like.<Sidenote>Yes, I know that I just implied above that I'm above pedantry, so what? Still my blog, and I don't have to be consistent unless I want to.</Sidenote>
To me, the distinction between technology and magic has always hinged more on the _mechanism_ of the thing than its degree of connection with reality. To wit: Does the [magic, technology] constitute an _intrinsic_ or _extrinsic_ means for someone to influence the world around them? If intrinsic, then it's magic. If extrinsic, then it's technology.
When I say "intrinsic" and "extrinsic" I'm mostly thinking of the origin of the power. An intrinsic power is something natural that the user is born with, while an extrinsic power is conferred by artifice - usually some sort of constructed device. There are definitely edge cases that blur the line between these distinctions, but they're mostly pretty clear when you're looking at individual examples.
For example, a wizard who moves objects by focusing his mind and exerting his will on the universe is using magic. A starship captain who moves objects using a tractor beam is using technology, because the tractor beam constitutes an _extrinsic_ means of interacting with the world. The only intrinsic tools that the starship captain uses are (presumably) hands to manipulate controls of some sort.<Sidenote>There could, of course, be some kind of computer-mediated direct-brain interface, but that definitely still counts as technology since, again, the computer is merely reacting to _normal human actions_, in this case thoughts.</Sidenote>
Interestingly, by this definition there's no need for magic to be restricted to a certain subset of the population. You could easily imagine a world where _everyone_ has the power of telekinesis or something, or even one of many powers, and in fact it would be quite interesting to explore the ramifications of such a world. Mostly, however, stories that portray magic as we've defined it portray it as something available to only a few.
## Original form
Clarke's Law<Sidenote>Actually it's Clarke's Third Law, there are two others. Shows what I know. I will, however, continue to refer to it as "Clarke's Law" for the time being, since it's easier to type and I'm lazy.</Sidenote>, i.e. "Sufficiently advanced technology is indistinguishable from magic," is a well-known dictum in science fiction. I've never had a significant reason to disagree with it in the past, but recently I read _Elder Race_ by Adrian Tchaikovsky and it got me thinking. The upshot is, I've come to the conclusion that (within the world of fiction, of course) sufficiently advanced technology actually _is_ distinguishable from magic, in fact almost always so. Moreover, the distinction is really quite simple: Does the "magic" operate through _extrinsic_ or _intrinsic_ means? Does the magic-user act by operating a device that acts on the natural world, or does he simply exert his will and the world conforms to his desire? If the former, it's probably technology, and if the latter, it's probably magic.
Before I get started though, the book: _Elder Race_ is quite enjoyable, and not very long either, so you should definitely read it if you're into either sci-fi _or_ fantasy, because it manages to be both. In the interest of avoiding too many spoilers I won't go into too much detail, but the main conceit of the book is spoiled by the jacket blurb anyway, so I won't worry too much about that one. In brief: _Elder Race_ is an enjoyable and fairly in-depth exploration of Clarke's Law. It spends a lot of time considering not just the basic aspects (Look, flying machines! Must be magic!) but deeper questions, like: how would you even go about explaining technology to someone from an un-technological society?
Unsurprisingly, it comes away with more or less the conclusion that you can't really: the technologically unaware will continue to regard your flying machines as magical conveyances held aloft by arcane powers, your radio as deep wizardry that allows you to commune with distant spirits, and so on. You can try to explain it all you like, but if you say "science" your listener will hear "magic," and if you say "it's just an understanding of natural forces built up over generations" they will hear "it's just hidden knowledge of the secrets of the universe, handed down from the ancients." There is a communications barrier that is, according to this view, insurmountable without starting at the very beginning and working your way up.
Now, this may or may not be true, but I'd like to take issue with the more general formulation of Clarke's Law. I've always taken the "indistinguishable" bit to mean that _no one_ can distinguish the two, not just that _those unfamiliar with technology_ cannot make the distinction. I don't think that's the case, though. I think that you _can_ distinguish between magic and technology, and that the distinction is trivial at least in many cases. The question you can usually ask, and often get a clear answer to, is: "Does the [magic/technology] operate by means of devices, or does it rely on internal powers of the user?" If the former, it's technology. If the latter, it's magic.
Let's take some examples. On the magic side, think of some of the classic swords-and-sorcery canon: _Earthsea_, _Wheel of Time_<Sidenote>Much as I dislike it, it's undeniably genre-defining.</Sidenote>, _Prydain Chronicles_, _Chronicles of Amber_, _Belgariad_, and so on.<Sidenote>You might notice that I've skipped LOTR here: don't worry, it will show up later.</Sidenote> All of these have in common that magic is effected by a _direct act of will_. There is no mediating device or artifice, the magician simply exerts his will on the universe. There may be techniques involved, or limits to what the magic can accomplish, but there's fundamentally just some direct connection between the wizard's will and the natural world that other people don't have, and that's what makes him a wizard.
On the other hand, we have sci-fi. Note that a sci-fi story's position on the technology/magic scale is distinct from where it sits on the "sci-fi hardness" scale, although the two are often correlated: sci-fi that incorporates magic, as defined here, tends to be on the softer side. Still, I can name some examples of sci-fi that's unquestionably pure technology. A lot of Heinlein's stuff qualifies, for example _The Moon is a Harsh Mistress_. Bujold's "Vorkosiverse" (I don't think that's the official name) also qualifies, as far as I can remember, and serves as a good example of the distinction between "soft" sci-fi and "magical" sci-fi: it's very soft, but doesn't incorporate any magic. _Ender's Game_. _Snow Crash_ (ok, that one wasn't too hard, most near-future sci-fi is necessarily free of magic.) Plenty of short stories, although for some reason I can't think of any right now except for _Nerves_.
I'm cherry-picking here, of course. That's ok though, I'm not intending these examples to be an argument, more a set of examples that you can nod your head to and think "Yes, these clearly deal with magic/technology." But there are plenty of things that aren't so clear-cut, so let's take a look at those and see what we make of them.
# Built-in technology
What do you call it when the magic-or-technology operates by means of something that _is_ a natural part of the person or animal? What if there was a massive multi-generational genetic engineering effort that resulted in a race of psionic people? Is that magic, or technology?
This one's tough, but I think I have to come down on the side of "it's still technology."
Sci-fi, but actually magic
- Psi in Federation stories
- MCU
- Star Wars
- Cloak of Aesir
- Madeleine L'Engle stuff
- The Stars My Destination / The Demolished Man
Magic, but really technology
- Harry Potter?

View File

@@ -1,23 +0,0 @@
---
title: The Enduring Shell
date: 2023-11-26
draft: true
---
<script>
import Sidenote from '$lib/Sidenote.svelte';
</script>
Over twenty years ago, Neal Stephenson wrote an essay/pamphlet/novella/whatever-you-want-to-call-it titled [_In the beginning was the Command Line_](http://project.cyberpunk.ru/lib/in_the_beginning_was_the_command_line/). It's worth reading, and you should definitely do that at some point, but you should finish this first because it's quite long and Neal Stephenson is a much better writer than I am, so I worry you wouldn't come back.<Sidenote>I should probably also mention that it's Stephenson at his, ah, least restrained, so it's rather meandering. Don't get me wrong, it's _well-written_ meandering, but I don't think you can argue that an essay about command lines isn't meandering when it includes a thousand-word segment about Disney World.</Sidenote> As you might expect, Stephenson spends a lot of that material talking about the CLI versus the GUI, as though they were opposite poles of some abstract computational magnet. It's been a while since I read it, but I distinctly remember him describing the advent of the GUI as a sort of impending inevitability, an unfortunate but unstoppable end to which all things in time will eventually come. It's a little like watching [_Valkyrie_](https://www.imdb.com/title/tt0985699/), actually--you know the whole time how it's going to turn out, but you can't keep yourself from watching it anyway.
The impending doom in this case is the ultimate triumph of the GUI over the CLI. Reading Stephenson's essay, you would be excused in coming away with the impression that the GUI is the way of the future, and that the CLI will eventually be relegated to the status of a quaint, old-timey practice and fall out of use except as a curiosity.<Sidenote>This isn't the only place I've run across this line of thought, either. David Edelman's [Jump 225 trilogy](https://www.goodreads.com/series/45075-jump-225) is set in a world where programming is no longer text-based but accomplished by manipulating a 3-dimensional model of the program; the programmer's tools are a set of physical instruments that he uses to manipulate the program-model in various ways.</Sidenote>
He might have been surprised, then<Sidenote>He's still alive, I guess I could just ask him.</Sidenote> if he had known that today, in the far-distant future of 2023, many people (mostly technical people, it is to be admitted) use the command line every day, and that in some ways it's more alive and well than it ever has been. It's still not the dominant paradigm of computer interfaces for most people of course, and never will be again--that ship has most definitely sailed. But at the same time it's not going away any time soon, because there are aspects of the CLI that make it _better_ than a GUI for many uses.
A long time ago, the first time I needed to encode or transcode a video, I [downloaded Handbrake](https://handbrake.fr/downloads2.php).<Sidenote>I'm pretty sure the download page looked exactly the same then as it does now (except for the cookie warning, of course). It's nice that there are a few islands of stability in the sea of change that is the Internet.</Sidenote> I think I had read about it on Lifehacker, back when Lifehacker was good. I remember at the time being vaguely surprised that it came in both GUI and CLI flavors,<Sidenote>And you can thank Microsoft for that, as they have in their infinite wisdom decided that a given executable should function as either a CLI app or a GUI app, but never, ever be permitted to do both.</Sidenote> since it had never occurred to me for even the barest moment that I might want to use Handbrake via anything other than a GUI.
A lot of time has passed since then, and now I can easily imagine situations where I'd want the CLI version of Handbrake rather than the GUI. So what are those situations? What is it about the CLI that has kept it hanging around all these years, hanging on grimly by its fingertips in some cases, while generation after generation of graphical whizmos have come and gone? There are a number of reasons, I think.
## CLI apps are easier to write
blah blah words here

View File

@@ -1,176 +0,0 @@
---
title: Thoughts on Vue vs Svelte
description: They're more similar than they are different, but they say the most bitter enemies are those who have the fewest differences.
date: 2023-06-29
---
<script>
import Sidenote from '$lib/Sidenote.svelte';
</script>
Recently I've had a chance to get to know Vue a bit. Since my frontend framework of choice has previously been Svelte (this blog is built in Svelte, for instance) I was naturally interested in how they compared.
This is necessarily going to focus on a lot of small differences, because Vue and Svelte are really much more similar than they are different. Even among frontend frameworks, they share a lot of the same basic ideas and high-level concepts, which means that we get to dive right into the nitpicky details and have fun debating `bind:attr={value}` versus `:attr="value"`. In the meantime, a lot of the building blocks are basically the same or at least have equivalents, such as:
* Single-file components with separate sections for markup, style, and logic
* Automatically reactive data bindings
* Two-way data binding (a point of almost religious contention in certain circles)
* An "HTML-first" mindset, as compared to the "Javascript-first" mindset found in React and its ilk. The best way I can describe this is by saying that in Vue and Svelte, the template wraps the logic, whereas in React, the logic wraps the template.
I should also note that everything I say about Vue applies to the Options API unless otherwise noted, because that's all I've used. I've only seen examples of the Composition API (which looks even more like Svelte, to my eyes), I've never used it myself.
With that said, there are plenty of differences between the two, and naturally I find myself in possession of immediate and vehement Preferences.<Sidenote>Completely arbitrary, of course, so feel free to disagree!</Sidenote> Starting with:
## Template Syntax
Overall I think I favor Vue here. Both Vue and Svelte expect you to write most of your code in "single-file components", which are collections of markup, style, and logic<Sidenote>Much like a traditional HTML page.</Sidenote> that work together to describe the appearance and behavior of a component. But naturally, they do it slightly differently. Vue adds custom vue-specific attributes directly to the HTML elements, such as:
```markup
<div v-if="items.length">
<p>Please choose an item.</p>
<ul>
<li v-for="item in items">{{ item.name }}</li>
</ul>
</div>
<div v-else>
<p>There are no items available.</p>
</div>
```
While Svelte takes the more common approach of wrapping bits of markup in its own templating constructs:
```svelte
{#if items.length}
<div>
<p>Please choose an item</p>
<ul>
{#each items as item}
<li>{item.name}</li>
{/each}
</ul>
</div>
{:else}
<div>
<p>There are no items available.</p>
</div>
{/if}
```
While Vue's approach may be a tad unorthodox, I find that I actually prefer it in practice. It has the killer feature that, by embedding itself inside the existing HTML, it doesn't mess with my indentation - which is something that has always bugged me about Mustache, Liquid, Jinja, etc.<Sidenote>Maybe it's silly of me to spend time worrying about something so trivial, but hey, this whole post is one big bikeshed anyway.</Sidenote>
Additionally (and Vue cites this as the primary advantage of its style, I think) the fact that Vue's custom attributes are all syntactically valid HTML means that you can actually embed Vue templates directly into your page source. Then, when you mount your app to an element containing Vue code, it will automatically figure out what to do with it.<Sidenote>AlpineJS also works this way, but this is the *only* way that it works - it doesn't have an equivalent for Vue's full-fat "app mode" as it were.</Sidenote> This strikes me as a fantastic way to ease the transition between "oh I just need a tiny bit of interactivity on this page, so I'll just sprinkle in some inline components" and "whoops it got kind of complex, guess I have to factor this out into its own app with a build step and all now."
Detractors of this approach might point out that it's harder to spot things like `v-if` and `v-for` when they're hanging out inside of existing HTML tags, but that seems like a problem that's easily solved with a bit of syntax highlighting. However I do have to admit that it's a reversal of the typical order in which you read code: normally you see the control-flow constructs _first_, and only _after_ you've processed those do you start to worry about whatever they're controlling. So you end up with a sort of [garden-path-like](https://xkcd.com/2793/) problem where you have to mentally double back and re-read things in a different light. I still don't think it's a huge issue, though, because in every case I've come across the control flow bits (so `v-if`, `v-for`, and `v-show`) are specified _immediately_ after the opening tag. So you don't really have to double back by an appreciable amount, and it doesn't take too long to get used to it.
Continuing the exploration of template syntax, Vue has some cute shorthands for its most commonly-used directives, including `:` for `v-bind` and `@` for `v-on`. Svelte doesn't really have an equivalent for this, although it does allow you to shorten `attr={attr}` to `{attr}`, which can be convenient. Which might as well bring us to:
## Data Binding
I give this one to Svelte overall, although Vue has a few nice conveniences going for it.
Something that threw me a tiny bit when I first dug into Vue was that you need to use `v-bind` on any attribute that you want to have a dynamic value. So for instance, if you have a data property called `isDisabled` on your button component, you would do `<button v-bind:disabled="isDisabled">` (or the shorter `<button :disabled="isDisabled">`).
The reason this threw me is that Svelte makes the very intuitive decision that since we already have syntax for interpolating variables into the text contents of our markup, we can just reuse the same syntax for attributes. So the above would become `<button disabled={isDisabled}>`, which I find a lot more straightforward.<Sidenote>If your interpolation consists of a single expression you can even leave off the quote marks (as I did here), which is pleasant since you already have `{}` to act as visual delimiters.</Sidenote> I also find it simpler in cases where you want to compose a dynamic value out of some fixed and some variable parts, e.g. `<button title="Save {itemsCount} items">` vs. `<button :title="&#96;Save ${itemsCount} items&#96;">`.
Two-way bindings in Svelte are similarly straightforward, for example: `<input type="checkbox" bind:checked={isChecked}>` In Vue this would be `<input type="checkbox" v-model="isChecked">`, which when you first see it doesn't exactly scream that the value of `isChecked` is going to apply to the `checked` property of the checkbox. On the other hand, this does give Vue the flexibility of doing special things for e.g. the values of `<select>` elements: `<select v-model="selectedOption">` is doing quite a bit of work, since it has to interact with not only the `<select>` but the child `<option>`s as well. Svelte just throws in the towel here and tells you to do `<select bind:value={selectedOption}>`, which looks great until you realize that `value` isn't technically a valid attribute for a `<select>`. So Svelte's vaunted principle of "using the platform" does get a _little_ bent out of shape here.
Oh, and two-way bindings in Vue get _really_ hairy if it's another Vue component whose attribute you want to bind, rather than a built-in form input. Vue enforces that props be immutable from the inside, i.e. a component isn't supposed to mutate its own props. So from the parent component it doesn't look too bad:
```markup
<ChildComponent v-model="childValue" />
```
But _inside_ the child component:
```js
export default {
props: ['modelValue'],
emits: ['update:modelValue'],
methods: {
doThing() {
this.$emit('update:modelValue', newValue)
}
}
}
```
In Svelte, you just `bind:` on a prop of a child component, and then if the child updates the prop it will be reflected in the parent as well. I don't think there's any denying that's a lot simpler.<Sidenote>I think this is where the "two-way data binding" holy wars start to get involved, but I actually really like the way Svelte does things here. I think most of the furor about two-way data binding refers to bindings that are _implicitly_ two-way, i.e. anyone with a reference to some value can mutate it in ways the original owner didn't expect or intend it to. (KnockoutJS observables work this way, I think?) In Svelte's case, though, this is only possible if you explicitly pass the state with `bind:`, which signifies that you _do_ want this state to be mutated by the child and that you have made provisions therefor. My understanding is that in React you'd just be emitting an event from the child component and handling that event up the tree somewhere, so in practice it's basically identical. That said, I haven't used React so perhaps I'm not giving the React Way™ a fair shake here.</Sidenote>
Vue does have some lovely convenience features for common cases, though. One of my favorites is binding an object to the `class` of an HTML element, for example: `<button :class="{btn: true, primary: false}">` Which doesn't look too useful on its own, but move that object into a data property and you can now toggle classes on the element extremely easily by just setting properties on the object. The closest Svelte comes is `<button class:btn={isBtn} class:primary={isPrimary}>`, which is a lot more verbose. Vue also lets you bind an array to `class` and the elements of the array will be treated as individual class names, which can be convenient in some cases if you have a big list of classes and you're toggling them all as a set. <Sidenote>Since I'm a fan of TailwindCSS, this tends to come up for me with some regularity.</Sidenote>
The other area where I vastly prefer Vue's approach over Svelte's is in event handlers. Svelte requires that every event handler be a function, either named or inline, so with simple handlers you end up with a lot of `<button on:click={() => counter += 1}>` situations. Vue takes the much more reasonable approach of letting you specify a plain statement as your event handler, e.g. `<button @click="counter += 1">`. For whatever reason this has always particularly annoyed me about Svelte, so Vue's take is very refreshing.
Admittedly, the Svelte approach does lead more gracefully into more complex scenarios where you need to capture the actual JS event: it just gets passed to the function. Vue kind of capitulates on consistency here and _also_ lets you pass the name of a function to an event handler, which is then called with the event as an argument. _Oooor_, you can reference the event via the special variable `$event`, which is convenient but feels a bit shoehorned in.
I'm ragging on Vue for its inconsistency here but I should note that I still do prefer the Vue approach, warts and all. "A foolish consistency is the hobgoblin of small minds," after all, and Vue's syntax is just so _convenient_. Besides, it optimizes for the 95% of the time I don't care about capturing the event, because realistically when am I going to want to do that? In both Vue and Svelte, all the traditional use cases for capturing an event are solved in other ways:
* You don't usually need `event.target`, because you can just give yourself a handle to the element directly (via `ref` in Vue, `bind:this=` in Svelte)
* You don't need to use it to get the value of an input (common with events like `change`), because you're just going to use a two-way binding for that
* In Vue, you don't even need it to check for modifier keys, because Vue gives you special syntax for this like `@event.shift`. (Svelte doesn't have an equivalent for this, so advantage Vue here again.)
You really only need to access the event when you're doing something more exotic, e.g. handling a bubbling event on a parent element and you need to check which child was actually the target, which does happen but again not the _majority_ of the time.
## Declaring Reactive Values
In Vue, reactive values (by which I mean "values that can automatically trigger a DOM update when they change") are either passed in as `props`, or declared in `data`. Or derived from either of those sources in `computed`. Then you reference them, either directly in your template or as properties of `this` in your logic. Which works fine, more or less, although you can run into problems if you're doing something fancy with nested objects or functions that get their own `this` scope.<Sidenote>It's worth noting that the Composition API avoids this, at the cost of having to call `ref()` on everything and reference `reactiveVar.value` rather than `reactiveVar` by itself.</Sidenote> The split between how you access something from the template and how you access it from logic was a touch surprising to me at first, though.
In Svelte, variables are just variables, you reference them the same way from everywhere, and if they need to be reactive it (mostly) just happens automagically.<Sidenote>And of course, after I first wrote this but just before I was finally ready to publish, Svelte went ahead and [changed this on me](https://svelte.dev/blog/runes). I'll leave my comments here as I originally wrote them, just keep in mind that if these changes stick then Svelte becomes even _more_ similar to Vue's composition API.</Sidenote> Svelte has a lot more freedom here because it's a compiler, rather than a library, so it can easily insert calls to its special `$$invalidate()` function after any update to a value that needs to be reactive.
Both frameworks allow you to either derive reactive values from other values, or just execute arbitrary code in response to data updates. In Vue these are two different concepts - derived reactive values are declared in `computed`, and reactive statements via the `watch` option. In Svelte they're just the same thing: Prefix any statement with `$:` (which is actually valid JS, as it turns out) and it will automatically be re-run any time one of the reactive values that it references gets updated. So both of the following:
```js
$: fullname = `${firstname} ${lastname}`;
$: console.log(firstname, lastname);
```
would re-run any time `firstname` or `lastname` is updated, assuming those are reactive values to begin with.
Overall I tend to prefer the simplicity of Svelte's approach to reactivity, although I do find the `$:` syntax a little weird. It may be valid JS, but it's not valid JS that anybody actually _uses_. Moreover its official meaning doesn't have anything to do with what Svelte is using it for, so the fact that iT's vAliD jAVaSCriPt doesn't really do much for me. I think Vue's `computed` and `watch` options are much more obvious, if only from how they're named.
That said, I don't have any better ideas for marking reactive statements in Svelte, especially given that sometimes you _want_ a statement to ignore updates even if it does reference a value that might be updated. So maybe this is just one of those compromises you have to make.
## Code Structure
I go back and forth on this one, but I _think_ I have a slight preference for Svelte (at least, at the moment.) The major difference is that Vue<Sidenote>If you're using the Options API, at least.</Sidenote> enforces a lot more structure than Svelte: Data is in `props`/`data`/`computed`, logic is in `methods`, reactive stuff is in `watch`, etc. Svelte, by contrast, just lets you do basically whatever you want. It does require that you have only one `<script>` tag, so all your logic ends up being co-located, but that's pretty much it. Everything else is just a convention, like declaring props at the top of your script.
The advantage of Vue's approach is that it can make it easier to find things when you're jumping from template to logic: you see `someFunction(whatever)`, you know it's going to be under `methods`. With Svelte, `someFunction` could be defined anywhere in the script section.<Sidenote>Code structure is actually one area that I think might be improved by the recently-announced Svelte 5 changes: Because you can now declare reactive state anywhere, rather than just at the top level of your script, you can take all the discrete bits of functionality within a single component and bundle each one up in its own function, or even factor them out into different files entirely. I can imagine this being helpful, but I haven't played with it yet so I don't know for sure how it will shake out.</Sidenote>
On the other hand, this actually becomes a downside once your component gets a little bit complex. Separation of concerns is nice and all, but sometimes it just doesn't work very well to split a given component, and it ends up doing several unrelated or at least clearly distinct things. In Vue-land, the relevant bits of state, logic, etc. are all going to be scattered across `data`/`methods`/etc, meaning you can't really see "all the stuff that pertains to this one bit of functionality" in one place. It's also very clunky to split the logic for a single component across multiple JS files, which you might want to do as another way of managing the complexity of a large component. If you were to try, you'd end up with a big "skeleton" in your main component file, e.g.
```js
import {doThing, mungeData} from './otherfile.js';
export default {
// ...
computed: {
mungeData,
// ...
},
methods: {
doThing,
// ...
},
}
```
which doesn't seem very pleasant.
As a matter of fact, this was one of the primary [motivations](https://web.archive.org/web/20201109010309/https://composition-api.vuejs.org/#logic-reuse-code-organization)<Sidenote>Archive link, since that url now redirects to the [current Composition API FAQ](https://vuejs.org/guide/extras/composition-api-faq.html).</Sidenote> for the introduction of the Composition API in the first place. Unfortunately it also includes the downside that you have to call `ref()` on all your reactive values, and reference them by their `.value` property rather than just using the main variable. It's funny that this bothers me as much as it does, given that `this.someData` is hardly any more concise than `someData.value`, but there's no accounting for taste, I guess. Using `this` just feels more natural to me, although what feels most natural is Svelte's approach where you don't have to adjust how you reference reactive values at all.
Also, as long as we're harping on minor annoyances: For some reason I cannot for the life of me remember to put commas after all my function definitions in `computed`, `methods` etc. in my Vue components. It's such a tiny thing, but it's repeatedly bitten me because my workflow involves Vue automatically rebuilding my app every time I save the file, and I'm not always watching the console output because my screen real estate is in use elsewhere.<Sidenote>E.g. text editor on one screen with two columns of text, web page on one half of the other screen and dev tools on the other half. Maybe I need a third monitor?</Sidenote> So I end up forgetting a comma, the rebuild fails but I don't notice, and then I spend five minutes trying to figure out why my change isn't taking effect before I think to check for syntax errors.
It would be remiss of me, however, not to point out that one thing the Vue Options API enables<Sidenote>Kind of its initial _raison d'être_, from what I understand.</Sidenote> which is more or less impossible<Sidenote>I mean, you could do it, but you'd have to ship the entire Svelte compiler with your page.</Sidenote> with Svelte is at-runtime or "inline" components, where you just stick a blob of JS onto your page that defines a Vue component and where it should go, and Vue does the rest on page load. Svelte can't do this because it's a compiler, so naturally it has to compile your components into a usable form. This has many advantages, but sometimes you don't want to or even _can't_ add a build step, and in those cases Vue can really shine.
## Miscellany
### Performance
Performance isn't really a major concern for me when it comes to JS frameworks, since I don't tend to build the kind of extremely-complex apps where the overhead of the framework starts to make a difference. For what it's worth, though, the [Big Benchmark List](https://krausest.github.io/js-framework-benchmark/current.html) has Vue slightly ahead of Svelte when it comes to speed.<Sidenote>Although [recent rumors](https://twitter.com/Rich_Harris/status/1688581184018583558) put the next major version of Svelte _very_ close to that of un-framework'd vanilla JS, so this might change in the future.</Sidenote> I don't know how representative this benchmark is of a real-world workload.
As far as bundle size goes, it's highly dependent on how many components you're shipping - since Svelte compiles everything down to standalone JS and there's no shared framework, the minimum functional bundle can be quite small indeed. The flipside is that it grows faster with each component than Vue, again because there's no shared framework to rely on. So a Svelte app with 10 components will probably be a lot smaller than the equivalent Vue app, but scale that up to 1000 components and the advantage will most likely have flipped. The Svelte people say that this problem doesn't tend to crop up a lot in practice, but I have yet to see real-world examples for the bundle size of a non-trivial<Sidenote>Probably because no one wants to bother implementing the exact same app in two different frameworks just to test a theory.</Sidenote> app implemented in Vue vs. Svelte.
### Ecosystem
Vue has been around longer than Svelte, so it definitely has the advantage here. That said, Svelte has been growing pretty rapidly in recent years and there is a pretty decent ecosystem these days. This blog, for instance, uses [SvelteKit](https://kit.svelte.dev) and [mdsvex](https://mdsvex.pngwn.io/). But there are definitely gaps, e.g. I wasn't able to find an RSS feed generator when I went looking.<Sidenote>Arguably this is a lack in the SvelteKit ecosystem rather than the Svelte ecosystem, but I think it's fair to lump it together. SvelteKit is dependent on Svelte, so naturally it inherits all of Svelte's immaturity issues plus more of its own.</Sidenote> If I'd been using Vue/Nuxt it would have been available as a [first-party integration](https://content.nuxtjs.org/v1/community/integrations). All in all I'd say if a robust ecosystem is important to you then Vue is probably the better choice at this point.
### Stability
Not in terms of "will it crash while you're using it," but in terms of "will code that you write today still be usable in five years." This is always a bit of a big ask in the JS world, because everyone is always pivoting to chase the new shiny. As I write this now (and as I referenced above), Svelte has just announced a [change](https://svelte.dev/blog/runes) to how reactivity is done. The new style is opt-in for the moment, but that's never completely reassuring--there are plenty of examples of opt-in features that became required eventually. Vue had a similar moment with their 2-to-3 switch,<Sidenote>Just like Python, hmm. What is it about the 2-to-3 transition? Maybe we should call it Third System Effect?</Sidenote> but to be fair they have so far stuck to their promise to keep the Options API a first-class citizen.
I think that means I have to give Vue the edge on this one, because while both frameworks now have an "old style" vs. a "new style" Vue at least has proven their willingness to continue supporting the old style over the last few years.
## What's Next
I don't think we've reached the "end-game" when it comes to UI paradigms, either on the web or more generally. I _do_ think that eventually, _probably_ within my lifetime, we will see a stable and long-lasting consensus emerge, and the frenetic pace of "framework churn" in the frontend world will slow down somewhat. What exact form this will take is very much up in the air, of course, but I have a sneaking suspicion that WebAssembly will play a key part, if it can ever get support for directly communicating with the DOM (i.e. without needing to pass through the JS layer). _If_ and when that happens, it will unlock a huge new wave of frontend frameworks that don't have to rely on Javascript at all, and won't that be interesting?
But for now I'll stick with Svelte, although I think Vue is pretty good too. Just don't make me use React, please.

View File

@@ -1,72 +0,0 @@
---
title: Why the Internet is Terrible
date: 2024-11-16
---
<script>import Sidenote from '$lib/Sidenote.svelte';</script>
I just got done deleting ~30 bogus user accounts from my [personal Gitea instance](https://git.jfmonty2.com). They all had reasonable-ish-sounding names, one empty repository, and profiles that looked like [this](/bogus_user_profile.jpg). Note the exceedingly spammy link to a real site (still up as of writing) and the ad-copy bio.
Obviously this is just SEO spam. My Gitea instance got found by some automated system that noticed it had open registration,<Sidenote>The more fool I.</Sidenote> so it registered a bunch of bogus user accounts, added links to whatever sites it was trying to pump, added related text in the bio, and then sat back and waited for search engines to pick up on these new backlinks and improve the reputation of said sites, at least until the search engines catch on and downgrade the reputation of my Gitea instance.
This particular problem was easy enough to deal with: Just remove the offending users, and all their works, and all their empty promises. But it got me thinking about the general online dynamic that _everybody online is out to get you._
## The Internet is terrible, and everyone knows it
This isn't news, of course. People go around [saying things like](https://www.stilldrinking.org/programming-sucks):
>Here are the secret rules of the internet: five minutes after you open a web browser for the first time, a kid in Russia has your social security number. Did you sign up for something? A computer at the NSA now automatically tracks your physical location for the rest of your life. Sent an email? Your email address just went up on a billboard in Nigeria.
and everyone just smiles and nods, because that's what they've experienced. I've encountered people who are highly reluctant to pay for anything online via credit card--they would much rather use the phone and give their credit card number to a real person who is presumably capable of stealing it, should they so desire--because the general terribleness of the internet has become so ingrained into their psyche that this feels like the better option, and you know what? I can't even blame them.
Anyone who works on web applications for a living (or a hobby) is _especially_ aware of this, because odds are that they've been burned by it already or at least are familiar with any number of existing examples. The very existence of sites like [Have I Been Pwned](https://haveibeenpwned.com) is predicated on the inescapable terribleness that permeates every nook and cranny of the Internet.
Of course, people trying to take advantage of the careless and clueless isn't a new phenomenon. The term "snake oil salesman" dates back to the 18th century and refers to people who would go around selling _literal snake oil_<Sidenote>Probably not harvested from actual snakes, but they sure told people it was.</Sidenote> as a miracle cure, hair restorative, and whatever else. I'm fairly confident that as long as money has existed, there have been unscrupulous people making a living off of tricking it out of other people.
But something about the Internet makes it much more _present_, more in your face, than old-timey snake-oil salesmen. I've seen no hard numbers on this, and I don't know how you would even begin to estimate it, but I would guess that the incidence rate of this sort of thing is vastly higher online than it's ever been in meatspace.
So what is it about the Internet that makes deception so much more prevalent? Ultimately, I think it boils down to three things: availability, automation, and anonymity. The Three A's of Awfulness, if you will.
## You're in the bad part of town
Have you ever wondered why physical locks are so easy to pick? It takes some know-how, but from what I can tell, most commonly-sold locks [can be bypassed within a minute](https://www.youtube.com/@lockpickinglawyer/videos). I'm just going to say it right here, and I don't think this is a controversial take: For a web application that would be an unacceptably low level of security. If it took an attacker less than a minute to, say, gain administrative access to a web application, I'd consider it just this side of "completely unsecured".
But! Meatspace is not the internet. The constraints are different. Over the lifetime of a given lock, the number of people who will ever be in a position to attempt to pick it is usually quite low, compared to the number of people who exist in the world. Of course, the circumstances matter a lot too: A lock in a big city is within striking distance of many more potential lock-pickers than the lock on a farm out in corn country somewhere, which is part of why people in cities are frequently much more concerned about keeping their doors locked than people in rural areas. And within a single city, people who live in the bad parts of town tend to worry more than people who don't, etc.
But on the Internet, everyone is in the bad part of town _all the time!_ That's right, there's nothing separating your podunk website from every aspiring journeyman member of Evil Inc. except a few keystrokes and a click or two. It doesn't take Sir Scams-A-Lot any longer to send an email to you than to your less-fortunate neighbors in the housing projects, and so on.<Sidenote>This is also my beef with [this xkcd comic](https://xkcd.com/1958/). The real danger isn't that people will do things to the _physical_ environment to mess with self-driving cars (like repainting lines on the road), but that they'll do something remotely from the other side of the world, and no one will know until their car drives off a bridge or whatever. And sure, most people aren't murderers. But even if there are only a few people in the world who are sufficiently unhinged as to set up fatal traffic accidents between total strangers, _if your self-driving car is Internet-connected then those people might have the opportunity._</Sidenote>
In other words, the size of the "target pool" for someone who has a) an Internet connection and b) no conscience is _literally everyone else with an internet connection._ At last count, that number was in the billions and rising. This alone would make "online scurrilousness" a far more attractive career choice than "cat thief", but don't worry, it gets even worse!
## Their strength is as the strength of ten
You might be tempted to think something like "Sure, being online gives the seamier sort of people immediate access to basically everyone in the world. But that shouldn't really change the overall incidence of these sorts of things, because after all, there are only so many hours in the day. A hard-working evildoer can still only affect a certain number of people per unit time, right? _right?_" But alas, even this limitation pales before the awesome might of modern communications infrastructure.
In meatspace, you can only be in one place at a time. If you're over on Maple Street burglarizing Mr. and Mrs. Holyoke's home, you can't also be selling fake stock certificates on Jefferson Ave, or running a crooked blackjack game in the abandoned warehouse off Stilton. But we aren't in meatspace any more, ~~Toto~~. We're _online_, where everything is done with computers. You know what computers really love doing? _Endlessly repeating the same boring repetitive task forever._ The Internet is a medium uniquely suited to automated consumption. In fact, approximately 30% of all internet traffic comes from automated systems, [according to Cloudflare](https://radar.cloudflare.com/traffic#bot-vs-human), and they should know.
So what does a clever-but-unscrupulous technologist do? That's right, he goes looking for vulnerabilities in widely-used platforms like Wordpress, finds one, then sets up an automated system to identify and exploit vulnerable Wordpress installs. Or he uses an open-source large language model like [Llama](https://www.llama.com/) to send phishing emails to every email address he can get his hands on, and maybe even correspond with susceptible people across multiple messages,<Sidenote>This is something I'm sure we'll see more and more of as time goes on. I'm sure it's already happening, and it's only going to get worse.</Sidenote> or just tricks people into clicking on a link to a fake Log In With Google page where he snarfs up their usernames and passwords, or _whatever_. There are a million and one ways an unethical person can take advantage of others _without ever having to personally interact with them._ This acts as a force-multiplier for evil people, and I think it's a major contributor to the overwhelming frequency with which you encounter this sort of thing online.<Sidenote>Astute readers may realize that while you can't automate meatspace in exactly the same way as you can automate computers, you can still do the next-best thing: _get other people to do it for you._ This is the fundamental insight of the Mafia don, and organized crime more generally. Thing is, though, all of these subsidiary evildoers have to be just as willing to break the law as the kingpin string-puller, so it doesn't quite act as a force-multiplier for evil in the same way.</Sidenote>
Interestingly, the automate-ability of anything that happens over the Internet seems to have leaked back into the phone system as well. I don't think anybody would disagree that scam phone calls are far more common than they used to be.<Sidenote>Unless "Dealer Services" has developed a truly pathological level of concern for the vehicle warranty I didn't even know I had.</Sidenote> I suspect, although I don't have any hard evidence to back it up, that this is largely due to the ease with which you can automate phone calls these days via internet-to-phone bridge services like [Twilio](https://twilio.com). The hit rate for this sort of thing has to be incredibly low--especially as people start to catch on and stop answering calls from numbers they don't know--so it only makes sense for the scammer if it costs them _virtually nothing_ to attempt.
One might ask why this wasn't the case before the Internet, since auto-dialing phone systems certainly predate the widespread use of the Internet,<Sidenote> The [Telephone Consumer Protection Act](https://en.wikipedia.org/wiki/Telephone_Consumer_Protection_Act_of_1991) attempted to regulate them as far back as 1991!</Sidenote> so why didn't this happen then? I suspect that again, this comes down to ease of automation. In the 90s, you needed expensive dedicated equipment to set up a robocalling operation, but today you can just do it from your laptop.
## The scammer with no name
There's a third contrast with meatspace that makes life easier for people whose moral compass has been replaced by, say, an avocado: _Nobody knows who you are online._ In real life, being physically present at the scene of a crime exposes you to some degree of risk. There might be witnesses or security cameras, your coat might snag on a door and leave some fibers behind for the forensic team to examine, you might drop some sweat somewhere and leave DNA lying around, and of course there are always good ol' fingerprints.<Sidenote>Once again, the Mafia model demonstrates how you might insulate yourself from some of these risks, but again, it's not quite as complete because _somebody_ has to be there, and that somebody might talk. And yes, the Mafia [took steps](https://en.wikipedia.org/wiki/Omert%C3%A0) to remedy that problem as well, but that's why Witness Protection was invented.</Sidenote>
All of this is much less of an issue online. In fact, one of the loudest and most attention-seeking hacking groups literally just called themselves [Anonymous](https://en.wikipedia.org/wiki/Anonymous_(hacker_group)). Of course, [then a bunch of them got arrested](https://www.bbc.com/news/world-latin-america-17195893), so maybe they weren't _quite_ as anonymous as they seemed to think they were. Still, I think it's safe to say that it's a lot easier to stay anonymous when you're committing crimes online vs. in person. Or from another angle, it takes (on average) significantly more law-enforcement effort to de-anonymize a criminal online than in person.<Sidenote>I can't seem to find it any more, but I'm pretty sure I remember reading an article a while back that talked about how the NSA/FBI/etc. managed to identify people like [Silk Road](https://en.wikipedia.org/wiki/Silk_Road_(marketplace)) higher-ups. From what I recall, it was pretty resource-intensive and not really realistic except for high-priority targets.</Sidenote>
I'm pointing out the downsides here, of course, but it's worth noting that online anonymity is a coin with two faces. It's fundamental to the question of privacy, especially from governments who would love nothing better than to know every sordid detail of their citizens' lives forever.<Sidenote>Don't believe me? Just look at how hard any number of major governments have been trying to effectively outlaw things like end-to-end encrypted chat apps. Here's the [UK](https://www.wired.com/story/britain-admits-defeat-online-safety-bill-encryption/), [US](https://www.eff.org/deeplinks/2020/06/senates-new-anti-encryption-bill-even-worse-earn-it-and-thats-saying-something), [Australia](https://www.schneier.com/blog/archives/2024/09/australia-threatens-to-force-companies-to-break-encryption.html), etc. They don't give a crap about "safety" or "exploitative content". This is about surveillance. </Sidenote> In general, anything that improves privacy (such as end-to-end encryption, VPNs, proxies, etc.) also makes anonymity easier for people whose motives are less laudable than "I don't think the government should know everything about me."
## The economics of evil
In the end, you can think of this all as a question of economics.<Sidenote>Seems like you can think of anything as a question of economics, if you try hard enough. [Even theology](https://en.wikipedia.org/wiki/Economy_of_Salvation).</Sidenote> The Internet is rife with scams, thievery, and general [scum and villainy](https://www.youtube.com/watch?v=Xcb4_QwP6fE) because it brings down the cost of doing such things to the point that it becomes worth it. There's no need to spend time or money moving from place to place, because you can do it all from the comfort of your own home. Instead of spending time on each individual operation you can put in the effort to automate it up-front and then sit back and reap the benefits (or keep finding more things to automate). The risk of doing all of this (which is a form of cost) is significantly lower than it would be to do something equivalent in real life. And all of this you get for the low, low price of your immortal soul! What's not to like?
## Will it ever change?
The Internet has often reminded me, alternately, of a) the Industrial Revolution and b) the Wild West. It reminds me of the Industrial Revolution because there are great examples of unscrupulous people taking advantage of a new set of economic realities to make tons of money at the expense of poor everyday folk who are just trying to live their lives. And not just straight-up criminals like we've been discussing, but also exploitative businesses and corporations (adtech, anybody?) that hearken back to the days of e.g. factory owners profiting from the slow destruction of their workers' lives. But the Internet also calls to mind the Wild West of the mid-to-late 1800s. Like the Wild West, it's a huge new swathe of unexplored territory rich with opportunity, if a little uncivilized.
But eventually, both the Industrial Revolution and the Wild West settled down and got a little more civilized. Eventually people developed things like labor unions and OSHA regulations,<Sidenote>Which I never thought I'd be holding up as a _good_ thing, because in my personal experience they've mostly been a source of frustration. But something tells me that if I were a worker in a 19th-century textile factory, I would have been very glad for some basic safety requirements.</Sidenote> and the world of heavy industry got a little more equitable. And eventually, the Wild West became civilized enough that you couldn't just walk into a saloon and shoot someone just because you felt like it.<Sidenote>Please note, I have no idea if this was ever _really_ possible, I'm basing it mostly on spaghetti Westerns and the like.</Sidenote>
Will the same thing happen to the Internet? I don't know. It might! Already you can start to see a sort of social "immune system" developing with regard to things like phishing emails and calls. For instance, I know plenty of people who have a policy of never answering their phone at all if the call is from a number they don't recognize.<Sidenote>Consumer Reports [claims](https://www.consumerreports.org/robocalls/mad-about-robocalls/) that this is actually 70% of US adults, which is a staggering number. Heaven help us if the scammers figure out how to reliably spoof numbers from people you know.</Sidenote> Unfortunately it's harder to make this work for something like poorly-secured web services, because it isn't easy to tell before you sign up for a service whether it's likely to get breached and leak your personal info in six months.
Ultimately the only workable solutions will have to a) increase the cost of carrying out these attacks, or b) reduce (on average) the reward. In the end it probably won't be _solved_ completely, much like crime isn't _solved_ today. But I'm hopeful that, much like today's Texans don't have to worry much about their stagecoach being waylaid by bandits, we'll see less and less of it as time goes on.

View File

@@ -1,27 +0,0 @@
<script>
import '$styles/prose.scss';
</script>
<style>
.content {
max-width: var(--content-width);
margin: 0 auto;
}
</style>
<svelte:head>
<title>About Me | Joe's Blog</title>
</svelte:head>
<div class="prose content">
<h1>About Me</h1>
<p>(Joe's wife wrote this because Joe feels weird writing about himself.)</p>
<p>Joe is a quirky, techy Tolkienite with a beautiful singing voice, an uncanny ability to do mental math, a bony, un-cuddleable frame, and a big mushy heart. He enjoys bike riding, computers, watching TV, reading about computers, playing Breath of the Wild, building computers, talking about something called "programming languages", and spending time with his family (which often involves fixing their computers). He graduated with a Liberal Arts degree from Thomas Aquinas College, the school of his forebears. He often remarks that he has greatly benefitted from the critical thinking skills he acquired at his alma mater in his current line of work.</p>
<p>He has spent, at the current time, about 2 years working on this blog. Most of his posts are about all of the work it took and everything he learned making this blog. Unlike most "bloggers", he has started with many blog posts and no blog, rather than a blog without posts. "Someday", he says, "I will actually get that blog up". I always nod encouragingly.</p>
<p>If you are reading this, then that day has arrived. We hope you enjoy it, and maybe even learn something along the way.</p>
</div>

View File

@@ -1,53 +0,0 @@
import { tag, text, serialize } from '$lib/xml.js';
import { postData } from '../_posts/all.js';
export const prerender = true;
export function GET() {
return new Response(renderFeed(), {
headers: {'Content-Type': 'application/atom+xml'}
});
}
function renderFeed() {
const feed = tag('feed', {xmlns: 'http://www.w3.org/2005/Atom'});
feed.addTag('id', {}, [text('https://blog.jfmonty2.com/')])
feed.addTag('title', {}, [text("Joe's Blog")]);
feed.addTag('link', {rel: 'alternate', href: 'https://blog.jfmonty2.com/'});
feed.addTag('link', {rel: 'self', href: 'https://blog.jfmonty2.com/feed/'});
const lastUpdate = iso(postData[0].updated || postData[0].date);
feed.addTag('updated', {}, [text(lastUpdate)]);
const author = feed.addTag('author');
author.addTag('name', {}, [text('Joseph Montanaro')]);
for (const post of postData) {
const url = `https://blog.jfmonty2.com/${post.slug}`
const entry = feed.addTag('entry');
entry.addTag('title', {}, [text(post.title)]);
entry.addTag('link', {rel: 'alternate', href: url});
entry.addTag('id', {}, [text(url)]);
const publishedDate = iso(post.date);
entry.addTag('published', {}, [text(publishedDate)])
const updatedDate = iso(post.updated || post.date);
entry.addTag('updated', {}, [text(updatedDate)]);
entry.addTag('content', {type: 'html'}, [text(renderDescription(post))]);
}
return serialize(feed);
}
function renderDescription(post) {
return `<p>${post.description} <a href="https://blog.jfmonty2.com/${post.slug}">Read more</a></p>`;
}
function iso(datetimeStr) {
return new Date(datetimeStr).toISOString();
}

View File

@@ -1,86 +0,0 @@
<script>
import '$styles/prose.scss';
import { formatDate } from '$lib/datefmt.js';
import { postData } from '../_posts/all.js';
</script>
<style lang="scss">
.wrapper {
padding: 0 var(--content-padding);
}
.posts {
max-width: var(--content-width);
margin: 0 auto;
}
hr {
margin: 2.5rem 0;
border-color: #eee;
}
.post-date {
color: var(--content-color-faded);
}
.draft-notice {
vertical-align: middle;
font-size: 0.75rem;
padding: 0 0.3rem;
color: #e00;
background-color: #ffd9d9;
border: 1px solid red;
border-radius: 20% / 50%;
}
.post-link {
text-decoration: none;
&:hover {
text-decoration: underline;
}
}
h2 {
display: flex;
align-items: center;
gap: 0.75rem;
margin-top: 0.5rem;
margin-bottom: 0.75rem;
font-size: 1.5rem;
& a {
color: currentcolor;
}
}
</style>
<svelte:head>
<title>Posts</title>
</svelte:head>
<div class="wrapper">
<div class="posts prose">
<h1 style:text-align="center">All Posts</h1>
{#each postData as post, idx}
<div class="post">
<div class="post-date">{new Date(post.date).toISOString().split('T')[0]}</div>
<h2 class="prose">
<a data-sveltekit-preload-data="hover" class="post-link" href="/{post.slug}">
{post.title}
</a>
{#if post.draft}
<span class="draft-notice">Draft</span>
{/if}
</h2>
<p>{post.description}</p>
</div>
{#if idx < postData.length - 1}
<hr>
{/if}
{/each}
</div>
</div>

View File

@@ -1,27 +0,0 @@
@import 'prism-dracula';
@font-face {
font-family: 'Hack';
font-style: normal;
font-weight: 400;
src: url(/Hack-Regular.woff2) format('woff2');
font-display: block;
}
code {
padding: 0.05rem 0.2rem 0.1rem;
background: #eee;
border-radius: 0.2rem;
font-size: 0.75em;
font-family: 'Hack', monospace;
}
pre[class*="language-"] {
line-height: 1.25;
}
pre > code[class*="language-"] {
font-size: 0.75em;
font-family: 'Hack', monospace;
}

17
src/styles/main.css Normal file
View File

@@ -0,0 +1,17 @@
@import '@fontsource-variable/figtree';
@import 'reset.css';
@import 'vars.css';
@import 'prose.css';
body {
font-family: 'Figtree Variable', sans-serif;
font-weight: 350;
font-size: var(--content-size);
line-height: var(--content-line-height);
color: var(--content-color);
background-color: var(--bg-color);
}
a {
color: var(--link-color);
}

View File

@@ -1,39 +0,0 @@
@import 'reset';
@font-face {
font-family: 'Tajawal';
font-style: normal;
font-weight: 400;
src: url(/Tajawal-Regular.woff2) format('woff2');
font-display: block;
}
:root {
--content-size: 1.25rem;
--content-size-sm: 1rem;
--content-line-height: 1.4;
--content-width: 52.5rem;
--content-padding: 0.65rem;
--content-color: #1e1e1e;
--content-color-faded: #555;
--primary-color: hsl(202deg 72% 28%);
--primary-color-faded: hsl(202deg 14% 36%);
--accent-color: hsl(0deg, 92%, 29%);
--accent-color-faded: hsl(0deg, 25%, 55%);
@media(max-width: 640px) {
--content-line-height: 1.25;
--content-size: 1.15rem;
--content-size-sm: 0.9rem;
}
}
body {
font-family: 'Tajawal', sans-serif;
font-size: var(--content-size);
line-height: var(--content-line-height);
letter-spacing: -0.005em;
color: var(--content-color);
}

View File

@@ -1,121 +0,0 @@
/**
* Dracula Theme originally by Zeno Rocha [@zenorocha]
* https://draculatheme.com/
*
* Ported for PrismJS by Albert Vallverdu [@byverdu]
*/
code[class*="language-"],
pre[class*="language-"] {
color: #f8f8f2;
background: none;
text-shadow: 0 1px rgba(0, 0, 0, 0.3);
font-family: Consolas, Monaco, 'Andale Mono', 'Ubuntu Mono', monospace;
text-align: left;
white-space: pre;
word-spacing: normal;
word-break: normal;
word-wrap: normal;
-moz-tab-size: 4;
-o-tab-size: 4;
tab-size: 4;
-webkit-hyphens: none;
-moz-hyphens: none;
-ms-hyphens: none;
hyphens: none;
}
/* Code blocks */
pre[class*="language-"] {
padding: 1em;
margin: 1em 0;
overflow: auto;
border-radius: 0.3em;
}
:not(pre) > code[class*="language-"],
pre[class*="language-"] {
background: #282a36;
}
/* Inline code */
:not(pre) > code[class*="language-"] {
padding: .1em;
border-radius: .3em;
white-space: normal;
}
.token.comment,
.token.prolog,
.token.doctype,
.token.cdata {
color: #6272a4;
}
.token.punctuation {
color: #f8f8f2;
}
.namespace {
opacity: .7;
}
.token.property,
.token.tag,
.token.constant,
.token.symbol,
.token.deleted {
color: #ff79c6;
}
.token.boolean,
.token.number {
color: #bd93f9;
}
.token.selector,
.token.attr-name,
.token.string,
.token.char,
.token.builtin,
.token.inserted {
color: #50fa7b;
}
.token.operator,
.token.entity,
.token.url,
.language-css .token.string,
.style .token.string,
.token.variable {
color: #f8f8f2;
}
.token.atrule,
.token.attr-value,
.token.function,
.token.class-name {
color: #f1fa8c;
}
.token.keyword {
color: #8be9fd;
}
.token.regex,
.token.important {
color: #ffb86c;
}
.token.important,
.token.bold {
font-weight: bold;
}
.token.italic {
font-style: italic;
}
.token.entity {
cursor: help;
}

View File

@@ -1,30 +1,35 @@
@import '@fontsource-variable/baskervville';
.prose { .prose {
h1, h2, h3, h4, h5, h6 { h1, h2, h3, h4, h5, h6 {
font-family: -apple-system, system-ui, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Ubuntu, Arial, sans-serif; font-family: 'Baskervville Variable', serif;
font-weight: 600; font-weight: 650;
color: #464646; margin-bottom: 0.25rem;
color: var(--heading-color);
letter-spacing: 0.015em;
line-height: 1.25
} }
h1 { h1 {
margin-top: 0.5em; margin-top: 0.5em;
font-size: 2em; font-size: 2.25em;
font-variant: petite-caps;
} }
h2 { h2 {
font-size: 1.5em; font-size: 1.75em;
} }
h3 { h3 {
font-size: 1.2em; font-size: 1.4em;
} }
h4 { h4 {
font-size: 1.1em; font-size: 1.2em;
} }
h1, h2, h3, h4 { h5, h6 {
margin-bottom: 0.5em; font-size: 1em;
font-weight: 700;
} }
p, ul, ol { p, ul, ol {

22
src/styles/reset.css Normal file
View File

@@ -0,0 +1,22 @@
/* This reset lifted largely from Josh Comeau's "CSS for JS Devs" course */
/* Use a more-intuitive box-sizing model. */
*, *::before, *::after {
box-sizing: border-box;
}
/* Remove default margin */
* {
margin: 0;
}
/* Allow percentage-based heights in the application */
html, body {
min-height: 100%;
}
/* Improve media defaults */
img, picture, video, canvas, svg {
display: block;
max-width: 100%;
}

View File

@@ -1,22 +0,0 @@
// This reset lifted largely from Josh Comeau's "CSS for JS Devs" course
// Use a more-intuitive box-sizing model.
*, *::before, *::after {
box-sizing: border-box;
}
// Remove default margin
* {
margin: 0;
}
// Allow percentage-based heights in the application
html, body {
min-height: 100%;
}
// Improve media defaults
img, picture, video, canvas, svg {
display: block;
max-width: 100%;
}

75
src/styles/vars.css Normal file
View File

@@ -0,0 +1,75 @@
:root {
--content-size: 1.25rem;
--content-size-sm: 1rem;
--content-line-height: 1.5;
--content-width: 52.5rem;
--content-padding: 0.65rem;
/* squish things down a little on mobile so more text fits on the screen */
@media(max-width: 640px) {
--content-line-height: 1.25;
--content-size: 1.15rem;
--content-size-sm: 0.9rem;
}
/* light-mode colors */
--bg-color: hsl(0deg 0% 100%);
/* text */
--content-color: hsl(0deg 0% 20%);
--content-color-faded: #555;
/* links */
--primary-color: hsl(202deg 72% 28%);
--primary-color-faded: hsl(202deg 14% 36%);
/* indicators, hover effects, etc */
--accent-color: hsl(0deg 92% 29%);
--accent-color-faded: hsl(0deg 25% 55%);
/* misc */
--heading-color: hsl(0deg 0% 27%);
--link-color: var(--primary-color);
--nav-link-color: white;
--neutral-gray: hsl(0deg 0% 30%);
/* dark-mode colors (defined here so that we only have to update them in one place) */
--dark-bg-color: hsl(220deg 10% 13%);
--dark-content-color: hsl(30deg 10% 75%);
--dark-content-color-faded: hsl(25deg 6% 50%);
--dark-primary-color: hsl(220deg 15% 40%);
--dark-primary-color-faded: hsl(220deg 12% 18%);
--dark-accent-color: hsl(18deg 70% 55%);
--dark-accent-color-faded: hsl(18deg 30% 45%);
--dark-heading-color: hsl(35deg 25% 88%);
--dark-link-color: hsl(202deg 50% 50%);
--dark-nav-link-color: var(--dark-heading-color);
--dark-neutral-gray: hsl(220deg 10% 45%);
&[data-theme="dark"] {
--bg-color: var(--dark-bg-color);
--content-color: var(--dark-content-color);
--content-color-faded: var(--dark-content-color-faded);
--primary-color: var(--dark-primary-color);
--primary-color-faded: var(--dark-primary-color-faded);
--accent-color: var(--dark-accent-color);
--accent-color-faded: var(--accent-color-faded);
--heading-color: var(--dark-heading-color);
--link-color: var(--dark-link-color);
--nav-link-color: var(--dark-nav-link-color);
--neutral-gray: var(--dark-neutral-gray);
}
&:not([data-theme="light"]) {
@media(prefers-color-scheme: dark) {
color-scheme: dark;
--bg-color: var(--dark-bg-color);
--content-color: var(--dark-content-color);
--content-color-faded: var(--dark-content-color-faded);
--primary-color: var(--dark-primary-color);
--primary-color-faded: var(--dark-primary-color-faded);
--accent-color: var(--dark-accent-color);
--accent-color-faded: var(--accent-color-faded);
--heading-color: var(--dark-heading-color);
--link-color: var(--dark-link-color);
--nav-link-color: var(--dark-nav-link-color);
--neutral-gray: var(--dark-neutral-gray);
}
}
}

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 119 KiB

View File

@@ -1,33 +0,0 @@
import { resolve } from 'node:path';
import staticAdapter from '@sveltejs/adapter-static';
import { vitePreprocess } from '@sveltejs/vite-plugin-svelte';
import { mdsvex } from 'mdsvex';
import { localRemark } from './src/plugins/remark.js';
import { localRehype } from './src/plugins/rehype.js';
/** @type {import('@sveltejs/kit').Config} */
const config = {
extensions: ['.svelte', '.svx'],
preprocess: [
mdsvex({
layout: './src/lib/Post.svelte',
remarkPlugins: [localRemark],
rehypePlugins: [localRehype],
}),
vitePreprocess(),
],
kit: {
// adapter-auto only supports some environments, see https://kit.svelte.dev/docs/adapter-auto for a list.
// If your environment is not supported or you settled on a specific environment, switch out the adapter.
// See https://kit.svelte.dev/docs/adapters for more information about adapters.
adapter: staticAdapter(),
alias: {
'$styles': 'src/styles',
'$projects': 'src/projects',
}
}
};
export default config;

13
tsconfig.json Normal file
View File

@@ -0,0 +1,13 @@
{
"extends": "astro/tsconfigs/strictest",
"include": [".astro/types.d.ts", "**/*"],
"exclude": ["dist"],
"compilerOptions": {
"paths": {
"@components/*": ["./src/components/*"],
"@layouts/*": ["./src/layouts/*"],
"@lib/*": ["./src/lib/*"],
"@styles/*": ["./src/styles/*"]
}
}
}

View File

@@ -1,6 +0,0 @@
import { sveltekit } from '@sveltejs/kit/vite';
import { defineConfig } from 'vite';
export default defineConfig({
plugins: [sveltekit()]
});