Compare commits


36 Commits

Author SHA1 Message Date
0f90778b53 refactor!: deploy headscale 2026-02-15 16:06:54 -08:00
dec32b9766 fix: pin sonarr version 2026-02-14 14:18:47 -08:00
64c13da521 fix: add no chat reports to minecraft-main 2026-02-13 20:26:17 -08:00
13f8c64f29 feat: add axiom to minecraft creative 2026-02-12 11:14:50 -08:00
27681c3ff5 fix: update pearce udm router endpoint 2026-02-06 13:40:34 -08:00
f83dca42ea feat: add pearce udm router 2026-02-05 18:27:16 -08:00
94e550787e fix!: only create mesh to routers 2026-02-04 21:48:00 -08:00
508d5a3525 fix: re add firewall configuration 2026-02-04 21:29:19 -08:00
a42e02514e feat: add tux to mesh 2026-02-04 21:21:22 -08:00
413f16fb6f fix: remove firewall configuration 2026-02-04 21:14:41 -08:00
c85cf06186 feat(wg): add mesh tunneling to devices & routers 2026-02-04 20:57:09 -08:00
63f9d3418c fix(safety): add fallback dns servers and fallback terminal 2026-02-03 22:58:30 -08:00
46460039af fix: jj sign commits 2026-02-03 21:43:05 -08:00
b9709dd655 feat: add jj 2026-02-03 21:29:33 -08:00
165eee9dd7 fix(homelab): add more resources to creative world 2026-02-01 16:13:09 -08:00
fd28865071 feat(homelab): add more optimization to servers 2026-01-31 15:22:56 -08:00
51afe1240d feat(homelab): add elytra trims 2026-01-30 19:54:02 -08:00
74909b9cd4 feat(homelab): add carpet & monitor keybinds 2026-01-30 14:35:50 -08:00
bd4dd7ba23 feat(homelab): migrate server to fabric 2026-01-29 17:20:25 -08:00
ae407c99c1 fix(homelab): use new fqdn for creative world 2026-01-25 14:29:25 -08:00
d086bb61ed feat(homelab): add old server 2026-01-25 00:13:23 -08:00
4d003329c7 feat(homelab): add minecraft management interface 2026-01-24 11:08:01 -08:00
80ae32d799 refactor!: initial commit, setup cli 2026-01-23 22:30:14 -08:00
58ead2bb23 feat: quickshell multi monitor 2026-01-19 18:16:08 -08:00
5c3252746f feat: add quickshell 2026-01-18 23:37:26 -08:00
68588f5d72 feat: use fenix for cargo, add dynamic linking and pkg-config paths 2026-01-17 12:52:13 -08:00
ee0cb47e34 fix(homelab): use db session provider 2026-01-16 19:18:25 -08:00
c9866fd424 feat(homelab): add fast leaf decay 2026-01-16 19:12:44 -08:00
b1ecb9f021 feat: add manual backup job 2026-01-15 23:33:07 -08:00
da17ccc446 feat(homelab)!: migrate to svelte kit, add more server stats endpoints 2026-01-12 00:19:11 -08:00
73f8cb91c4 feat(homelab)!: create interface for homelab management, use templating for route generation & support more options and route types 2026-01-12 00:18:53 -08:00
29cff3bf84 feat(homelab)!: parse from env or config.toml to allow containerization 2026-01-08 23:06:08 -08:00
dd904c151d feat(homelab): start containerizing homelab binary 2026-01-06 09:01:45 -08:00
b02a06faa7 feat(homelab): add more aliases, setup ssh keys on yubikey 2026-01-04 20:56:25 -08:00
7b76ffd34f feat(homelab)!: setup pihole entry generation, add treeminer to minecraft-main 2026-01-03 23:45:41 -08:00
4f79df9bf2 feat: add custom nixos minimal iso 2026-01-03 01:56:35 -08:00
84 changed files with 8127 additions and 215 deletions

View File

@@ -6,12 +6,13 @@ return {
lua = { "stylua" },
luau = { "stylua" },
rust = { "rustfmt", lsp_format = "fallback" },
javascript = { "prettier" },
typescript = { "prettier" },
json = { "prettier" },
tsx = { "prettier" },
javascript = { "prettierd" },
typescript = { "prettierd" },
json = { "prettierd" },
tsx = { "prettierd" },
nix = { "nixfmt" },
go = { "gofmt" },
svelte = { "prettierd" },
},
format_on_save = {
-- These options will be passed to conform.format()

View File

@@ -69,6 +69,16 @@ local function setup_java()
})
end
local function setup_svelte()
require("lspconfig").svelte.setup({
root_dir = require("lspconfig.util").root_pattern("svelte.config.js", "svelte.config.ts", "package.json"),
})
end
local function setup_qml()
require("lspconfig").qmlls.setup({})
end
return {
{
"neovim/nvim-lspconfig",
@@ -101,14 +111,18 @@ return {
"jdtls",
"clangd",
"cmake",
"cssls",
"qmlls",
},
automatic_enable = { exclude = { "luau_lsp", "lua_ls" } },
automatic_enable = { exclude = { "luau_lsp", "lua_ls", "svelte" } },
})
setup_luau()
setup_lua()
setup_ts()
setup_nix()
setup_java()
setup_svelte()
setup_qml()
end,
},
}

View File

@@ -20,6 +20,7 @@ return {
"html",
"templ",
"go",
"qmljs",
},
auto_install = true,
highlight = {

View File

@@ -0,0 +1 @@
/run/user/1000/quickshell/vfs/d51aaef3451280dfdf2f7cf03412fd13/.qmlls.ini

View File

@@ -0,0 +1,52 @@
//qmllint disable unqualified
//qmllint disable unused-imports
//qmllint disable uncreatable-type
import QtQuick
import QtQuick.Layouts
import Quickshell
import "components"
PanelWindow {
id: root
anchors.top: true
anchors.left: true
anchors.right: true
implicitHeight: 34
color: "#1a1b26"
required property var modelData
screen: modelData
property int fontSize: 18
property color colBg: "#1a1b26"
property color colFg: "#a9b1d6"
property color colMuted: "#444b6a"
property color colCyan: "#0db9d7"
property color colPurple: "#ad8ee6"
property color colRed: "#f7768e"
property color colYellow: "#e0af68"
property color colBlue: "#7aa2f7"
RowLayout {
anchors.fill: parent
Rectangle {
Layout.preferredWidth: 30
Layout.preferredHeight: 28
color: "transparent"
Image {
anchors.fill: parent
source: "file:///home/luca/dotfiles/.config/quickshell/icons/nixos.png"
fillMode: Image.PreserveAspectFit
}
}
Workspaces {}
Item {
Layout.fillWidth: true
}
Clock {}
}
}

View File

@@ -0,0 +1,34 @@
import QtQuick
import QtQuick.Layouts
Item {
id: clockRoot
implicitWidth: timeText.implicitWidth
implicitHeight: timeText.implicitHeight
property string dateTime: ""
function updateTime() {
dateTime = Qt.formatDateTime(new Date(), "ddd, MMM dd | HH:mm:ss");
}
Timer {
interval: 1000
running: true
repeat: true
triggeredOnStart: true
onTriggered: clockRoot.updateTime()
}
Text {
id: timeText
text: clockRoot.dateTime
color: "#a9b1d6"
font {
pixelSize: 16
family: "Comic Relief"
bold: true
}
anchors.centerIn: parent
}
}

View File

@@ -0,0 +1,84 @@
//qmllint disable unqualified
//qmllint disable unused-imports
//qmllint disable uncreatable-type
import QtQuick
import QtQuick.Layouts
import Quickshell
import Quickshell.Hyprland
import Quickshell.Io
Repeater {
model: Hyprland.workspaces.values
delegate: Rectangle {
id: workspaceComponent
Layout.preferredWidth: 30
Layout.preferredHeight: parent.height
color: "transparent"
property bool isActive: modelData.focused
property int lastActive: 1
property int wsId: modelData.id
property bool isHovered: mouseHandler.containsMouse
states: [
State {
name: "active"
when: isActive
PropertyChanges {
target: workspaceComponent
color: Qt.rgba(1, 1, 1, 0.2)
}
PropertyChanges {
target: underline
color: root.colPurple
}
},
State {
name: "hovered"
when: isHovered && !isActive
PropertyChanges {
target: workspaceComponent
color: Qt.rgba(1, 1, 1, 0.1)
}
PropertyChanges {
target: underline
color: "#7aa2f7"
}
}
]
transitions: Transition {
ColorAnimation {
duration: 200
}
}
Text {
text: modelData.id
color: isActive ? "#a9b1d6" : "#7aa2f7"
font {
pixelSize: root.fontSize
bold: true
family: "Comic Relief"
}
anchors.centerIn: parent
}
Rectangle {
id: underline
width: 30
height: 3
color: root.colBg
anchors.bottom: parent.bottom
anchors.horizontalCenter: parent.horizontalCenter
}
MouseArea {
id: mouseHandler
hoverEnabled: true
anchors.fill: parent
onClicked: Hyprland.dispatch("workspace " + modelData.id)
}
}
}

(new binary image, 97 KiB)

View File

@@ -0,0 +1,15 @@
//qmllint disable unqualified
//qmllint disable unused-imports
//qmllint disable uncreatable-type
import QtQuick
import QtQuick.Layouts
import Quickshell
Scope {
Variants {
model: Quickshell.screens
delegate: Component {
Bar {}
}
}
}

View File

@@ -1,5 +1,5 @@
keys:
- &luca age1qu9y0dn5a704dggwmpaaurxqrhxm0qn8czgv5phka56y48sw7u8qkyn637
- &luca age13rqgrxh0fm23n3krf6v7yrrlnhhvs8256cusxqfs2l5xz8rgavssdhte4r
creation_rules:
- path_regex: secrets/[^/]+\.(yaml|json|env|ini)$

aliases/happly.sh (new executable file, 1 line)

@@ -0,0 +1 @@
helmfile apply -f ~/dotfiles/nix/homelab/helm "$@"

aliases/kapply.sh (new executable file, 1 line)

@@ -0,0 +1 @@
kubectl apply -k ~/dotfiles/nix/homelab/kustomize

icons (new binary file, 97 KiB)

nix/flake.lock (generated, 60 lines changed)

@@ -81,6 +81,27 @@
},
"parent": []
},
"fenix": {
"inputs": {
"nixpkgs": [
"nixpkgs"
],
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1768632427,
"narHash": "sha256-Y6kP10exkn5UiK9ead2Gky8TFsFZSsyT4f69DMKm0Wo=",
"owner": "nix-community",
"repo": "fenix",
"rev": "edd560269f0d9ad75bd3da292ce4d9d27efdd22a",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "fenix",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
@@ -186,17 +207,56 @@
"type": "github"
}
},
"quickshell": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1768689040,
"narHash": "sha256-Tlnr5BulJcMers/cb+YvmBQW4nKHjdKo9loInJkyO2k=",
"ref": "refs/heads/master",
"rev": "7a427ce1979ce7447e885c4f30129b40f3d466f5",
"revCount": 729,
"type": "git",
"url": "https://git.outfoxxed.me/outfoxxed/quickshell"
},
"original": {
"type": "git",
"url": "https://git.outfoxxed.me/outfoxxed/quickshell"
}
},
"root": {
"inputs": {
"custom-fonts": "custom-fonts",
"fenix": "fenix",
"home-manager": "home-manager",
"nixos-wsl": "nixos-wsl",
"nixpkgs": "nixpkgs_2",
"nixpkgs-before": "nixpkgs-before",
"quickshell": "quickshell",
"sops-nix": "sops-nix",
"status-bar": "status-bar"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1768468158,
"narHash": "sha256-DfifO/Se9ogmp5rxe/OwmRIz20/w6BsbWC1s4kL1Bzc=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "adbff8baedae53f9955fe60c0d470ecd77b4f548",
"type": "github"
},
"original": {
"owner": "rust-lang",
"ref": "nightly",
"repo": "rust-analyzer",
"type": "github"
}
},
"sops-nix": {
"inputs": {
"nixpkgs": [

View File

@@ -21,6 +21,15 @@
url = "github:Mic92/sops-nix";
inputs.nixpkgs.follows = "nixpkgs";
};
fenix = {
url = "github:nix-community/fenix";
inputs.nixpkgs.follows = "nixpkgs";
};
quickshell = {
url = "git+https://git.outfoxxed.me/outfoxxed/quickshell";
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs =
@@ -70,6 +79,7 @@
inherit inputs;
meta = {
hostname = host.name;
architecture = host.architecture;
};
pkgs-before = import inputs.nixpkgs-before { system = host.architecture; };
};

nix/homelab/Cargo.lock (generated, 2827 lines changed; diff suppressed because it is too large)

View File

@@ -5,9 +5,23 @@ edition = "2024"
[dependencies]
anyhow = "1.0.100"
askama = "0.15.1"
clap = { version = "4.5.53", features = ["derive"] }
nom = "8.0.0"
reqwest = { version = "0.13.1", features = ["json"] }
serde = { version = "1.0.228", features = ["serde_derive"] }
ssh2 = "0.9.5"
thiserror = "2.0.17"
toml = "0.9.10"
tokio = { version = "1.49.0", features = ["macros", "rt"] }
toml = { version = "0.9.10", optional = true }
urlencoding = "2"
[workspace]
members = [
".",
"cli"
]
[features]
default = ["file-config"]
file-config = ["dep:toml"]

nix/homelab/api/.gitignore (new vendored file, 2 lines)

@@ -0,0 +1,2 @@
target
.env

nix/homelab/api/Cargo.lock (new generated file, 2842 lines; diff suppressed because it is too large)

View File

@@ -0,0 +1,19 @@
[package]
name = "homelab-api"
version = "0.1.0"
edition = "2024"
[dependencies]
actix-web = "4.12.1"
dotenvy = "0.15.7"
nom = "8.0.0"
rcon = { version = "0.6.0", features = ["rt-tokio"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
thiserror = "2.0.17"
tokio = "1.49.0"
tokio-util = { version = "0.7", features = ["io"] }
kube = { version = "2.0.1", features = ["client", "runtime", "ws"] }
k8s-openapi = { version = "0.26", features = ["v1_32"] }
chrono = "0.4"
futures = "0.3"

nix/homelab/api/flake.lock (new generated file, 27 lines)

@@ -0,0 +1,27 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1767799921,
"narHash": "sha256-r4GVX+FToWVE2My8VVZH4V0pTIpnu2ZE8/Z4uxGEMBE=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "d351d0653aeb7877273920cd3e823994e7579b0b",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-25.11",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

nix/homelab/api/flake.nix (new file, 38 lines)

@@ -0,0 +1,38 @@
{
description = "Homelab api";
inputs = {
nixpkgs.url = "github:nixos/nixpkgs?ref=nixos-25.11";
};
outputs =
{ nixpkgs, ... }@inputs:
let
systems = [
"x86_64-linux"
"aarch64-linux"
];
forAllSystems =
f:
nixpkgs.lib.genAttrs systems (
system:
f {
inherit system;
pkgs = nixpkgs.legacyPackages.${system};
}
);
in
{
devShells = forAllSystems (
{ system, pkgs }:
{
default = pkgs.mkShell {
buildInputs = with pkgs; [
openssl
pkgconf
];
};
}
);
};
}

View File

@@ -0,0 +1,36 @@
use actix_web::{HttpResponse, web};
use serde::{Deserialize, Serialize};
use crate::AppState;
use crate::error::Result;
use crate::kubernetes;
#[derive(Deserialize)]
pub struct RestoreRequest {
pub backup_file: String,
}
#[derive(Serialize)]
pub struct RestoreResponse {
pub server: String,
pub job_name: String,
pub backup_file: String,
pub status: String,
}
/// POST /api/minecraft/{server}/restore
pub async fn create_restore(
app_state: web::Data<AppState>,
path: web::Path<ServerPath>,
body: web::Json<RestoreRequest>,
) -> Result<HttpResponse> {
let job_name =
kubernetes::create_restore_job(&app_state.kube, &path.server, &body.backup_file).await?;
Ok(HttpResponse::Created().json(RestoreResponse {
server: path.server.clone(),
job_name,
backup_file: body.backup_file.clone(),
status: "created".to_string(),
}))
}
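For reference, the JSON body this handler deserializes is just the RestoreRequest struct above. A minimal sketch of the expected payload, using the serde_json dependency already listed in the api crate (the file name is illustrative, borrowed from the manual backup job elsewhere in this change):
// Sketch only: body shape for POST /api/minecraft/{server}/restore.
let body = serde_json::json!({ "backup_file": "minecraft-main-manual.tar.gz" });
assert!(serde_json::from_value::<RestoreRequest>(body).is_ok());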

View File

@@ -0,0 +1,5 @@
pub mod server_stats;
pub mod server_uptime;
pub mod world_size;
pub(self) const MINECRAFT_NAMESPACE: &str = "minecraft";

View File

@@ -0,0 +1,36 @@
use actix_web::{HttpResponse, web};
use serde::Serialize;
use crate::{
AppState,
endpoints::server_uptime::{PodUptime, get_pod_uptime},
error::Result,
rcon::parse_online_list,
};
#[derive(Serialize)]
struct ServerStats {
pub status: String,
pub players_online: u16,
pub max_players: u16,
pub uptime: PodUptime,
pub world_size: Option<String>,
}
pub async fn get_server_stats(app_state: web::Data<AppState>) -> Result<HttpResponse> {
let list_response = app_state.rcon.cmd("list").await?;
let (_, (players_online, max_players)) =
parse_online_list(&list_response).map_err(|e| crate::error::Error::Parse(e.to_string()))?;
let uptime = get_pod_uptime(&app_state.kube, "main").await?;
let stats = ServerStats {
status: "Online".to_string(),
players_online,
max_players,
uptime,
world_size: None,
};
Ok(HttpResponse::Ok().json(stats))
}
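As a rough sketch of what this handler serializes (all values here are invented; world_size stays null because the lookup is not wired in yet):
// Illustrative response shape for GET /api/minecraft-server-stats.
let _example = serde_json::json!({
    "status": "Online",
    "players_online": 2,
    "max_players": 20,
    "uptime": { "seconds": 3600, "started_at": "2026-01-12T08:00:00+00:00" },
    "world_size": null
});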

View File

@@ -0,0 +1,56 @@
use actix_web::{HttpResponse, web};
use k8s_openapi::api::core::v1::Pod;
use kube::{Api, Client, api::ListParams};
use crate::{
AppState,
endpoints::MINECRAFT_NAMESPACE,
error::{Error, Result},
};
#[derive(serde::Serialize)]
pub(super) struct PodUptime {
pub seconds: i64,
pub started_at: String,
}
pub async fn get_uptime(
app_state: web::Data<AppState>,
path: web::Path<String>,
) -> Result<HttpResponse> {
let server = path.into_inner();
let uptime = get_pod_uptime(&app_state.kube, &server).await?;
Ok(HttpResponse::Ok().json(uptime))
}
pub(super) async fn find_minecraft_pod(client: &Client, server_name: &str) -> Result<Pod> {
let pods: Api<Pod> = Api::namespaced(client.clone(), MINECRAFT_NAMESPACE);
let label_selector = format!("app=minecraft-{server_name}");
let lp = ListParams::default().labels(&label_selector);
let pod_list = pods.list(&lp).await?;
pod_list
.items
.into_iter()
.next()
.ok_or_else(|| Error::PodNotFound(format!("minecraft-{server_name}")))
}
pub(super) async fn get_pod_uptime(client: &Client, server_name: &str) -> Result<PodUptime> {
let pod = find_minecraft_pod(client, server_name).await?;
let start_time = pod
.status
.and_then(|s| s.start_time)
.ok_or_else(|| Error::PodExec("pod has no start time".into()))?;
let now = chrono::Utc::now();
let duration = now.signed_duration_since(&start_time.0);
Ok(PodUptime {
seconds: duration.num_seconds(),
started_at: start_time.0.to_rfc3339(),
})
}

View File

@@ -0,0 +1,56 @@
use actix_web::{HttpResponse, web};
use futures::TryStreamExt;
use k8s_openapi::api::core::v1::Pod;
use kube::{Api, Client, api::AttachParams};
use serde::Serialize;
use crate::{
AppState,
endpoints::{MINECRAFT_NAMESPACE, server_uptime::find_minecraft_pod},
error::{Error, Result},
};
#[derive(Serialize)]
pub struct WorldSizeResponse {
pub server: String,
pub size: String,
}
pub async fn get_world_size(
app_state: web::Data<AppState>,
path: web::Path<String>,
) -> Result<HttpResponse> {
let server = path.into_inner();
let size = get_size_inner(&app_state.kube, &server).await?;
Ok(HttpResponse::Ok().json(WorldSizeResponse {
server: server.clone(),
size,
}))
}
pub async fn get_size_inner(client: &Client, server_name: &str) -> Result<String> {
let pod = find_minecraft_pod(client, server_name).await?;
let pod_name = pod
.metadata
.name
.ok_or_else(|| Error::PodNotFound("no pod name".into()))?;
let output = exec_in_pod(client, &pod_name, vec!["du", "-sh", "/data"]).await?;
let size = output
.split_whitespace()
.next()
.unwrap_or("unknown")
.to_string();
Ok(size)
}
async fn exec_in_pod(client: &Client, pod_name: &str, command: Vec<&str>) -> Result<String> {
    let pods: Api<Pod> = Api::namespaced(client.clone(), MINECRAFT_NAMESPACE);
    // Capture stdout of the exec'd command (e.g. `du -sh /data`); no stdin is needed.
    let params = AttachParams { stdout: true, stderr: true, ..Default::default() };
    let mut attached = pods.exec(pod_name, command, &params).await?;
    let stdout = attached.stdout().ok_or_else(|| Error::PodExec("no stdout on exec".into()))?;
    let bytes: Vec<u8> = tokio_util::io::ReaderStream::new(stdout)
        .try_collect::<Vec<_>>()
        .await
        .map_err(|e| Error::PodExec(e.to_string()))?
        .into_iter()
        .flatten()
        .collect();
    attached.join().await.map_err(|e| Error::PodExec(e.to_string()))?;
    Ok(String::from_utf8_lossy(&bytes).trim().to_string())
}

View File

@@ -0,0 +1,33 @@
use actix_web::{HttpResponse, ResponseError, body::BoxBody, http::StatusCode};
use thiserror::Error;
pub type Result<T> = core::result::Result<T, Error>;
#[derive(Debug, Error)]
pub enum Error {
#[error("rcon error: {0}")]
Rcon(#[from] rcon::Error),
#[error("parse error: {0}")]
Parse(String),
#[error("kubernetes error: {0}")]
Kube(#[from] kube::Error),
#[error("pod not found: {0}")]
PodNotFound(String),
#[error("pod exec error: {0}")]
PodExec(String),
}
impl ResponseError for Error {
fn status_code(&self) -> StatusCode {
match self {
Self::Rcon(_) => StatusCode::SERVICE_UNAVAILABLE,
Self::PodNotFound(_) => StatusCode::NOT_FOUND,
Self::Kube(_) | Self::PodExec(_) => StatusCode::BAD_GATEWAY,
_ => StatusCode::INTERNAL_SERVER_ERROR,
}
}
fn error_response(&self) -> HttpResponse<BoxBody> {
HttpResponse::build(self.status_code()).body(self.to_string())
}
}

View File

@@ -0,0 +1,219 @@
use futures::TryStreamExt;
use k8s_openapi::api::batch::v1::Job;
use k8s_openapi::api::core::v1::Pod;
use kube::Client;
use kube::api::{Api, AttachParams, ListParams, PostParams};
use crate::error::{Error, Result};
const NFS_SERVER: &str = "192.168.27.2";
const NFS_BACKUP_PATH: &str = "/backup/minecraft";
/// Find the Minecraft server pod by server name (e.g., "main", "creative")
/// Execute a command in a pod and return stdout
pub async fn exec_in_pod(client: &Client, pod_name: &str, command: Vec<&str>) -> Result<String> {
let pods: Api<Pod> = Api::namespaced(client.clone(), MINECRAFT_NAMESPACE);
let ap = AttachParams {
stdout: true,
stderr: true,
..Default::default()
};
let mut attached = pods.exec(pod_name, command, &ap).await?;
let mut stdout_str = String::new();
if let Some(stdout) = attached.stdout() {
let bytes: Vec<u8> = tokio_util::io::ReaderStream::new(stdout)
.try_collect::<Vec<_>>()
.await
.map_err(|e| Error::PodExec(e.to_string()))?
.into_iter()
.flatten()
.collect();
stdout_str = String::from_utf8_lossy(&bytes).to_string();
}
// Wait for exec to finish
attached
.join()
.await
.map_err(|e| Error::PodExec(e.to_string()))?;
Ok(stdout_str.trim().to_string())
}
/// Get the world size by running du -sh /data in the pod
/// Get pod uptime by calculating time since pod started
/// Create a restore job for a Minecraft server
pub async fn create_restore_job(
client: &Client,
server_name: &str,
backup_file: &str,
) -> Result<String> {
let jobs: Api<Job> = Api::namespaced(client.clone(), MINECRAFT_NAMESPACE);
let job = build_restore_job(server_name, backup_file);
let job_name = job
.metadata
.name
.clone()
.unwrap_or_else(|| "unknown".into());
jobs.create(&PostParams::default(), &job).await?;
Ok(job_name)
}
fn build_restore_job(server_name: &str, backup_file: &str) -> Job {
use k8s_openapi::api::core::v1::{
Container, EnvVar, NFSVolumeSource, PersistentVolumeClaimVolumeSource, PodSecurityContext,
PodSpec, PodTemplateSpec, Volume, VolumeMount,
};
let job_name = format!(
"minecraft-restore-{server_name}-{}",
chrono::Utc::now().timestamp()
);
let restore_script = r#"
set -e
echo "=========================================="
echo "Minecraft World Restore Job"
echo "=========================================="
echo ""
SERVER_NAME="${SERVER_NAME}"
BACKUP_FILE="${BACKUP_FILE:-latest.tgz}"
BACKUP_PATH="/backups/${BACKUP_FILE}"
DATA_DIR="/data"
echo "Configuration:"
echo " Server: ${SERVER_NAME}"
echo " Backup file: ${BACKUP_FILE}"
echo " Backup path: ${BACKUP_PATH}"
echo " Data directory: ${DATA_DIR}"
echo ""
if [ ! -f "${BACKUP_PATH}" ]; then
echo "ERROR: Backup file not found: ${BACKUP_PATH}"
echo ""
echo "Available backups:"
ls -lh /backups/ | grep "minecraft-${SERVER_NAME}" || echo " (none found)"
exit 1
fi
echo "Backup file found"
echo " Size: $(du -hL ${BACKUP_PATH} | cut -f1)"
echo ""
if [ -d "${DATA_DIR}/world" ]; then
echo "WARNING: Existing world data found!"
echo "Removing existing world data..."
rm -rf "${DATA_DIR}/world" "${DATA_DIR}/world_nether" "${DATA_DIR}/world_the_end"
echo "Old world data removed"
fi
echo ""
echo "Extracting backup..."
tar -xzf "${BACKUP_PATH}" -C "${DATA_DIR}/"
echo ""
echo "=========================================="
echo "Restore Complete!"
echo "=========================================="
"#;
Job {
metadata: kube::core::ObjectMeta {
name: Some(job_name),
namespace: Some(MINECRAFT_NAMESPACE.to_string()),
labels: Some(
[("app".to_string(), "minecraft-restore".to_string())]
.into_iter()
.collect(),
),
..Default::default()
},
spec: Some(k8s_openapi::api::batch::v1::JobSpec {
backoff_limit: Some(0),
ttl_seconds_after_finished: Some(3600),
template: PodTemplateSpec {
metadata: Some(kube::core::ObjectMeta {
labels: Some(
[("app".to_string(), "minecraft-restore".to_string())]
.into_iter()
.collect(),
),
..Default::default()
}),
spec: Some(PodSpec {
restart_policy: Some("Never".to_string()),
security_context: Some(PodSecurityContext {
fs_group: Some(2000),
run_as_user: Some(1000),
run_as_group: Some(3000),
..Default::default()
}),
containers: vec![Container {
name: "restore".to_string(),
image: Some("busybox:latest".to_string()),
command: Some(vec!["sh".to_string(), "-c".to_string()]),
args: Some(vec![restore_script.to_string()]),
env: Some(vec![
EnvVar {
name: "SERVER_NAME".to_string(),
value: Some(server_name.to_string()),
..Default::default()
},
EnvVar {
name: "BACKUP_FILE".to_string(),
value: Some(backup_file.to_string()),
..Default::default()
},
]),
volume_mounts: Some(vec![
VolumeMount {
name: "data".to_string(),
mount_path: "/data".to_string(),
..Default::default()
},
VolumeMount {
name: "backups".to_string(),
mount_path: "/backups".to_string(),
read_only: Some(true),
..Default::default()
},
]),
..Default::default()
}],
volumes: Some(vec![
Volume {
name: "data".to_string(),
persistent_volume_claim: Some(PersistentVolumeClaimVolumeSource {
claim_name: format!("minecraft-{server_name}-datadir"),
..Default::default()
}),
..Default::default()
},
Volume {
name: "backups".to_string(),
nfs: Some(NFSVolumeSource {
server: NFS_SERVER.to_string(),
path: NFS_BACKUP_PATH.to_string(),
..Default::default()
}),
..Default::default()
},
]),
..Default::default()
}),
},
..Default::default()
}),
..Default::default()
}
}

View File

@@ -0,0 +1,83 @@
mod endpoints;
mod error;
mod rcon;
use std::env;
use actix_web::{App, HttpServer, web};
use kube::Client;
use crate::rcon::RconClient;
pub struct AppState {
rcon: RconClient,
kube: Client,
}
struct Env {
rcon_password: String,
}
#[cfg(debug_assertions)]
fn load_env() -> Env {
dotenvy::dotenv().ok();
Env {
rcon_password: env::var("RCON_PASSWORD")
.expect("environment variable RCON_PASSWORD must be set"),
}
}
#[cfg(not(debug_assertions))]
fn load_env() -> Env {
Env {
rcon_password: env::var("RCON_PASSWORD")
.expect("environment variable RCON_PASSWORD must be set"),
}
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
let env = load_env();
// Initialize Kubernetes client
// Uses in-cluster config when running in k3s, falls back to ~/.kube/config locally
let kube_client = Client::try_default()
.await
.expect("failed to create Kubernetes client");
let app_state = web::Data::new(AppState {
rcon: RconClient::new(env.rcon_password),
kube: kube_client,
});
HttpServer::new(move || {
App::new()
.app_data(app_state.clone())
.route(
"/",
web::get()
.to(async || concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"))),
)
.service(
web::scope("/api")
.route(
"/minecraft-server-stats",
web::get().to(endpoints::server_stats::get_server_stats),
)
// .route(
// "/minecraft/{server}/world-size",
// web::get().to(endpoints::kubernetes::get_world_size),
// )
.route(
"/minecraft/{server}/uptime",
web::get().to(endpoints::server_uptime::get_uptime),
), // .route(
// "/minecraft/{server}/restore",
// web::post().to(endpoints::kubernetes::create_restore),
// ),
)
})
.bind(("127.0.0.1", 8080))?
.run()
.await
}

View File

@@ -0,0 +1,53 @@
use nom::{
IResult, Parser, bytes::complete::tag, character::complete::digit1, combinator::map_res,
sequence::preceded,
};
use rcon::Connection;
use tokio::{net::TcpStream, sync::Mutex};
use crate::error::Result;
fn parse_u16(input: &str) -> IResult<&str, u16> {
map_res(digit1, |s: &str| s.parse::<u16>()).parse(input)
}
pub fn parse_online_list(input: &str) -> IResult<&str, (u16, u16)> {
let (remaining, (online, max)) = (
preceded(tag("There are "), parse_u16),
preceded(tag(" of a max of "), parse_u16),
)
.parse(input)?;
Ok((remaining, (online, max)))
}
pub struct RconClient {
connection: Mutex<Option<Connection<TcpStream>>>,
rcon_password: String,
}
impl RconClient {
pub fn new(rcon_password: String) -> Self {
Self {
connection: None.into(),
rcon_password,
}
}
pub async fn cmd(&self, command: &str) -> Result<String> {
let mut connection = self.connection.lock().await;
if connection.is_none() {
let conn = create_connection(&self.rcon_password).await?;
*connection = Some(conn)
}
Ok(connection.as_mut().unwrap().cmd(command).await?)
}
}
async fn create_connection(rcon_password: &str) -> Result<Connection<TcpStream>> {
Ok(Connection::<TcpStream>::builder()
.enable_minecraft_quirks(true)
.connect("192.168.27.12:25575", rcon_password)
.await?)
}
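A quick sanity check of the parser above, assuming the stock vanilla wording of the /list reply (the player names are made up):
// Sketch: parse_online_list on a typical `/list` response.
let input = "There are 3 of a max of 20 players online: alice, bob, carol";
let (_rest, (online, max)) = parse_online_list(input).expect("list output should parse");
assert_eq!((online, max), (3, 20));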

nix/homelab/askama.toml (new file, 2 lines)

@@ -0,0 +1,2 @@
[general]
dirs = ["src/templates"]

View File

@@ -0,0 +1,21 @@
[package]
name = "homelab-cli"
version = "0.1.0"
edition = "2024"
[dependencies]
anyhow = "1.0.100"
askama = "0.15.1"
chrono = "0.4.43"
clap = { version = "4.5.54", features = ["derive"] }
dialoguer = "0.12.0"
futures = "0.3.31"
indicatif = "0.18.3"
k8s-openapi = { version = "0.27.0", features = ["latest", "schemars", "v1_35"] }
kube = { version = "3.0.0", features = ["runtime", "derive"] }
schemars = "1.2.0"
serde_json = "1.0.149"
serde_yaml = "0.9.34"
thiserror = "2.0.18"
thiserror-ext = "0.3.0"
tokio = { version = "1.49.0", features = ["macros", "rt"] }

View File

@@ -0,0 +1,3 @@
[[escaper]]
path = "askama::filters::Text"
extensions = ["yaml"]

View File

@@ -0,0 +1,144 @@
use askama::Template;
use clap::{Args, Subcommand};
use futures::{AsyncBufReadExt, StreamExt, TryStreamExt};
use k8s_openapi::api::apps::v1::Deployment;
use k8s_openapi::api::batch::v1::Job;
use k8s_openapi::api::core::v1::Pod;
use kube::api::{Api, LogParams, Patch, PatchParams, PostParams};
use kube::runtime::{WatchStreamExt, watcher};
use serde_json::json;
use crate::State;
use crate::error::{ErrorKind, Result};
use crate::reporter::Reporter;
const NAMESPACE: &str = "minecraft";
#[derive(Debug, Args)]
pub struct MinecraftCommand {
#[command(subcommand)]
command: MinecraftSubcommand,
}
#[derive(Debug, Subcommand)]
enum MinecraftSubcommand {
/// Backup a minecraft world
Backup {
/// the world to backup
#[arg(short, long)]
world: String,
},
}
#[derive(Template)]
#[template(path = "backup-job.yaml")]
struct BackupJobTemplate<'a> {
world: &'a str,
}
impl MinecraftCommand {
pub async fn run(&self, state: State) -> Result<()> {
match &self.command {
MinecraftSubcommand::Backup { world } => backup_world(state, world).await,
}
}
}
pub async fn backup_world(state: State, world: &str) -> Result<()> {
let reporter = Reporter::new();
let job_name = format!("minecraft-{}-backup", world);
reporter.log(format!("Scaling deployment minecraft-{world}"));
scale_deployment(&state.client, NAMESPACE, &format!("minecraft-{world}"), 0).await?;
reporter.status("Creating backup job...");
let job = build_backup_job(world)?;
let jobs: Api<Job> = Api::namespaced(state.client.clone(), NAMESPACE);
jobs.create(&PostParams::default(), &job).await?;
reporter.status("Waiting for pod to start...");
let pods: Api<Pod> = Api::namespaced(state.client.clone(), NAMESPACE);
let pod_name = wait_for_job_pod(&pods, &job_name).await?;
reporter.status("Running backup...");
stream_pod_logs(&pods, &pod_name, &reporter).await?;
let job = jobs.get(&job_name).await?;
let status = job.status.as_ref();
let succeeded = status.and_then(|s| s.succeeded).unwrap_or(0);
let failed = status.and_then(|s| s.failed).unwrap_or(0);
reporter.log(format!("Scaling deployment minecraft-{world}, replicas: 1"));
scale_deployment(&state.client, NAMESPACE, &format!("minecraft-{world}"), 1).await?;
if succeeded > 0 {
reporter.success("Backup complete");
Ok(())
} else if failed > 0 {
reporter.fail("Backup job failed");
Err(ErrorKind::BackupFailed("Job failed".to_string()).into())
} else {
reporter.fail("Backup job status unknown");
Err(ErrorKind::BackupFailed("Unknown status".to_string()).into())
}
}
async fn scale_deployment(
client: &kube::Client,
namespace: &str,
name: &str,
replicas: i32,
) -> Result<()> {
let deployments: Api<Deployment> = Api::namespaced(client.clone(), namespace);
let patch = json!({ "spec": { "replicas": replicas } });
deployments
.patch(name, &PatchParams::default(), &Patch::Merge(&patch))
.await?;
Ok(())
}
async fn wait_for_job_pod(pods: &Api<Pod>, job_name: &str) -> Result<String> {
let label_selector = format!("job-name={}", job_name);
let config = watcher::Config::default().labels(&label_selector);
let mut stream = watcher(pods.clone(), config).applied_objects().boxed();
while let Some(pod) = stream.try_next().await? {
let name = pod.metadata.name.as_deref().unwrap_or_default();
let phase = pod
.status
.as_ref()
.and_then(|s| s.phase.as_deref())
.unwrap_or_default();
if phase == "Running" || phase == "Succeeded" || phase == "Failed" {
return Ok(name.to_string());
}
}
Err(ErrorKind::BackupFailed("Pod never started".to_string()).into())
}
fn build_backup_job(world: &str) -> Result<Job> {
let template = BackupJobTemplate { world };
let yaml = template.render()?;
Ok(serde_yaml::from_str(&yaml)?)
}
async fn stream_pod_logs(pods: &Api<Pod>, pod_name: &str, reporter: &Reporter) -> Result<()> {
let params = LogParams {
follow: true,
..Default::default()
};
let stream = pods.log_stream(pod_name, &params).await?;
let mut lines = stream.lines();
while let Some(line) = lines.try_next().await? {
reporter.log_event(&line);
}
Ok(())
}

View File

@@ -0,0 +1,8 @@
use clap::Subcommand;
mod minecraft;
#[derive(Subcommand, Debug)]
pub enum Commands {
/// minecraft management
Minecraft(minecraft::MinecraftCommand),
}

View File

@@ -0,0 +1,20 @@
use thiserror::Error;
#[derive(Error, Debug, thiserror_ext::Box)]
#[thiserror_ext(newtype(name = Error))]
pub enum ErrorKind {
#[error("kube error: {0}")]
Kube(#[from] kube::Error),
#[error("watcher error: {0}")]
Watcher(#[from] kube::runtime::watcher::Error),
#[error("io error: {0}")]
Io(#[from] std::io::Error),
#[error("backup failed: {0}")]
BackupFailed(String),
#[error("template error: {0}")]
Template(#[from] askama::Error),
#[error("error deserializing yaml: {0}")]
Yaml(#[from] serde_yaml::Error),
}
pub type Result<T> = core::result::Result<T, Error>;

View File

@@ -0,0 +1,57 @@
mod commands;
mod error;
mod reporter;
use std::{ops::Deref, sync::Arc};
use clap::{CommandFactory, Parser};
use crate::{commands::Commands, error::Result};
#[derive(Parser, Debug)]
#[command(version, long_about = "homelab cli")]
struct Cli {
#[command(subcommand)]
command: Option<Commands>,
}
struct AppState {
client: kube::Client,
}
#[derive(Clone)]
struct State(Arc<AppState>);
impl State {
pub fn new(inner: AppState) -> Self {
Self(Arc::new(inner))
}
}
impl Deref for State {
type Target = AppState;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl AppState {
pub async fn new() -> Result<Self> {
Ok(Self {
client: kube::Client::try_default().await?,
})
}
}
#[tokio::main(flavor = "current_thread")]
async fn main() -> anyhow::Result<()> {
let cli = Cli::parse();
let app_state = State::new(AppState::new().await?);
match cli.command {
Some(Commands::Minecraft(cmd)) => cmd.run(app_state.clone()).await?,
_ => Cli::command().print_long_help()?,
};
Ok(())
}

View File

@@ -0,0 +1,52 @@
use std::fmt::Display;
use indicatif::{ProgressBar, ProgressStyle};
pub struct Reporter {
spinner: ProgressBar,
}
pub const TICK_CHARS: &str = "⣷⣯⣟⡿⢿⣻⣽⣾";
impl Reporter {
pub fn new() -> Self {
let spinner = ProgressBar::new_spinner();
spinner.set_style(
ProgressStyle::with_template(
"{prefix:.dim}{msg:>8.214/yellow} {spinner} [{elapsed_precise}]",
)
.unwrap()
.tick_chars(TICK_CHARS),
);
spinner.enable_steady_tick(std::time::Duration::from_millis(100));
Self { spinner }
}
pub fn status(&self, msg: impl Into<String>) {
self.spinner.set_message(msg.into());
}
pub fn log_event(&self, line: &str) {
self.spinner.suspend(|| {
println!("{}", line);
});
}
pub fn log<T: Display>(&self, text: T) {
self.spinner.suspend(|| println!("{}", text))
}
pub fn success(&self, msg: &str) {
self.spinner.finish_with_message(format!("{}", msg));
}
pub fn fail(&self, msg: &str) {
self.spinner.finish_with_message(format!("{}", msg));
}
}
impl Default for Reporter {
fn default() -> Self {
Self::new()
}
}
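For orientation, this is roughly how backup_world drives the Reporter (the status and log messages mirror the ones above; the streamed log line is hypothetical):
// Sketch of the Reporter lifecycle used by the backup flow.
let reporter = Reporter::new();
reporter.status("Creating backup job...");          // updates the spinner message
reporter.log("Scaling deployment minecraft-main");  // prints a line above the spinner
reporter.log_event("tar: ./world/level.dat");       // a hypothetical pod log line
reporter.success("Backup complete");                // finishes the spinner with a final message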

View File

@@ -0,0 +1,40 @@
apiVersion: batch/v1
kind: Job
metadata:
name: minecraft-{{ world }}-backup
namespace: minecraft
labels:
app: minecraft-backup
world: {{ world }}
spec:
ttlSecondsAfterFinished: 300
backoffLimit: 0
template:
metadata:
labels:
job-name: minecraft-{{ world }}-backup
spec:
restartPolicy: Never
containers:
- name: backup
image: busybox
command:
- "sh"
- "-c"
- |
tar -czvf /backups/minecraft-{{ world }}-manual.tar.gz -C /data .
volumeMounts:
- name: data
mountPath: /data
readOnly: true
- name: backups
mountPath: /backups
volumes:
- name: data
persistentVolumeClaim:
claimName: minecraft-{{ world }}-datadir
readOnly: true
- name: backups
nfs:
server: 192.168.27.2
path: /backup/minecraft
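Rendering this template is what build_backup_job in the CLI does; a small sketch (the world name "main" matches the values files in this change):
// Sketch: the rendered manifest deserializes into a Job with the expected name.
let job = build_backup_job("main")?;
assert_eq!(job.metadata.name.as_deref(), Some("minecraft-main-backup"));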

View File

@@ -0,0 +1 @@
apiVersion: batch/v1

View File

@@ -53,6 +53,15 @@ routes = [
port = 25565,
private = false
},
{
kind = "TCP",
name = "minecraft-rcon",
namespace = "minecraft",
entrypoint = "rcon",
service = "minecraft-main-rcon",
port = 25575,
private = true
},
{
name = "prowlarr",
namespace = "media",
@@ -93,4 +102,29 @@ routes = [
port = 80,
private = true
},
{
name = "mesh",
namespace = "networking",
service = "headscale",
port = 8080,
private = false
}
]
[pihole]
url = "https://pihole.lucalise.ca"
password_file = "/run/secrets/pihole_password"
extra_hosts = [
"192.168.27.12 mc-rocket.privatedns.org",
"192.168.27.12 mc-rocket-creative.privatedns.org",
"192.168.27.12 mc-rocket-creative.duckdns.org",
"192.168.27.12 git.lucalise.ca",
"192.168.27.2 rufus",
"192.168.27.11 kube"
]
[router]
host = "192.168.15.1:22"
user = "luca"
key_path = "/home/luca/.ssh/id_ed25519"
lease_file = "/var/dhcpd/var/db/dhcpd.leases"

View File

@@ -45,6 +45,13 @@ releases:
values:
- values/gitea.yaml
- name: gitea-runners
namespace: git
chart: gitea-charts/actions
version: 0.0.2
values:
- values/gitea-runners.yaml
# Storage
- name: longhorn
namespace: longhorn-system
@@ -53,6 +60,8 @@ releases:
values:
- defaultSettings:
defaultReplicaCount: 1
- defaultBackupStore:
backupTarget: nfs://192.168.27.2:/backup/longhorn
- persistence:
defaultClassReplicaCount: 1
@@ -93,6 +102,13 @@ releases:
values:
- values/minecraft/creative.yaml
- name: minecraft-old
namespace: minecraft
chart: minecraft-charts/minecraft
version: 5.0.0
values:
- values/minecraft/old.yaml
- name: home-assistant
namespace: home
chart: home-assistant/home-assistant

View File

@@ -0,0 +1,5 @@
enabled: true
statefulset:
nodeSelector:
kubernetes.io/hostname: rufus
giteaRootURL: https://git.lucalise.ca

View File

@@ -15,10 +15,11 @@ gitea:
ROOT: /mnt/git-data/git/repositories
server:
ROOT_URL: https://git.lucalise.ca
SSH_DOMAIN: git.lucalise.ca
database:
DB_TYPE: sqlite3
session:
PROVIDER: memory
PROVIDER: db
cache:
ADAPTER: memory
queue:

View File

@@ -3,23 +3,37 @@ resources:
cpu: 1
memory: 500Mi
limits:
memory: 4Gi
memory: 5Gi
cpu: 2000m
minecraftServer:
eula: "TRUE"
type: "PAPER"
type: "FABRIC"
version: "1.21.11"
difficulty: hard
motd: "A Minecraft Server."
gameMode: creative
memory: 4G
memory: 5G
rcon:
enabled: true
withGeneratedPassword: false
port: 25575
existingSecret: rcon-credentials
secretKey: rcon-password
modrinth:
projects:
- fabric-api
- tree-vein-miner
- lithium
- servux
- ferrite-core
- carpet
- elytra-trims
- fabric-language-kotlin
- c2me-fabric
- scalablelux
- axiom
nodeSelector:
kubernetes.io/hostname: kube

View File

@@ -8,7 +8,7 @@ resources:
minecraftServer:
eula: "TRUE"
type: "PAPER"
type: "FABRIC"
version: "1.21.11"
difficulty: hard
motd: "A Minecraft Server."
@@ -19,6 +19,19 @@ minecraftServer:
port: 25575
existingSecret: rcon-credentials
secretKey: rcon-password
modrinth:
projects:
- fabric-api
- tree-vein-miner
- lithium
- servux
- ferrite-core
- carpet
- elytra-trims
- fabric-language-kotlin
- c2me-fabric
- scalablelux
- no-chat-reports
nodeSelector:
kubernetes.io/hostname: kube

View File

@@ -0,0 +1,29 @@
resources:
requests:
cpu: 500m
memory: 512Mi
limits:
memory: 2Gi
cpu: 1
minecraftServer:
eula: "TRUE"
type: "VANILLA"
version: "1.7.10"
difficulty: hard
motd: "A Minecraft Server."
memory: 4G
rcon:
enabled: true
withGeneratedPassword: false
port: 25575
existingSecret: rcon-credentials
secretKey: rcon-password
nodeSelector:
kubernetes.io/hostname: rufus
persistence:
dataDir:
enabled: true
Size: 2Gi

View File

@@ -14,3 +14,6 @@ minecraftRouter:
- externalHostname: "mc-rocket-creative.privatedns.org"
host: "minecraft-creative"
port: 25565
- externalHostname: "mc-rocket-old.privatedns.org"
host: "minecraft-old"
port: 25565

View File

@@ -12,7 +12,7 @@ serviceDns:
type: LoadBalancer
mixedService: true
annotations:
metallb.universe.tf/loadBalancerIPs: "192.168.18.32"
metallb.universe.tf/loadBalancerIPs: "192.168.27.13"
resources:
requests:

View File

@@ -0,0 +1,33 @@
apiVersion: batch/v1
kind: Job
metadata:
name: minecraft-backup-manual
namespace: minecraft
spec:
template:
spec:
restartPolicy: Never
containers:
- name: backup
image: busybox
command:
- sh
- -c
- |
set -e
BACKUP_FILE=/backups/manual/minecraft-main-manual.tar.gz
echo "creating backup: ${BACKUP_FILE}"
tar -czf "${BACKUP_FILE}" -C /data .
volumeMounts:
- name: data
mountPath: /data
- name: backups
mountPath: /backups
volumes:
- name: data
persistentVolumeClaim:
claimName: minecraft-main-datadir
- name: backups
nfs:
server: 192.168.27.2
path: /backup/minecraft

View File

@@ -0,0 +1,18 @@
apiVersion: v1
kind: Pod
metadata:
name: headscale-migrate
namespace: networking
spec:
restartPolicy: Never
containers:
- name: migrate
image: nouchka/sqlite3
command: ["sleep", "infinity"]
volumeMounts:
- name: data
mountPath: /var/lib/headscale
volumes:
- name: data
persistentVolumeClaim:
claimName: headscale-data

View File

@@ -5,6 +5,7 @@ resources:
- ./metallb/pool.yaml
- ./traefik/config.yaml
- ./traefik/private-networks.yaml
- ./traefik/private-networks-tcp.yaml
- ./traefik/chains.yaml
- ./cert-manager/config.yaml
- ./routes.yaml
@@ -14,3 +15,6 @@ resources:
- ./media/radarr.yaml
- ./media/qbittorrent.yaml
- ./media/flaresolverr.yaml
- ./networking/headscale/config.yaml
- ./networking/headscale/headscale.yaml

View File

@@ -17,8 +17,6 @@ metadata:
namespace: media
labels:
app: qbittorrent
annotations:
kubectl.kubernetes.io/default-container: qbittorrent
spec:
replicas: 1
selector:
@@ -28,6 +26,8 @@ spec:
metadata:
labels:
app: qbittorrent
annotations:
kubectl.kubernetes.io/default-container: qbittorrent
spec:
containers:
- name: gluetun

View File

@@ -34,7 +34,7 @@ spec:
spec:
containers:
- name: sonarr
image: lscr.io/linuxserver/sonarr
image: lscr.io/linuxserver/sonarr:4.0.16
securityContext:
runAsUser: 0
runAsGroup: 0
@@ -43,9 +43,9 @@ spec:
name: http
env:
- name: PUID
value: "1000"
value: "0"
- name: PGID
value: "1000"
value: "0"
- name: TZ
value: "America/Vancouver"
volumeMounts:

View File

@@ -5,7 +5,7 @@ metadata:
namespace: metallb-system
spec:
addresses:
- 192.168.27.12-192.168.27.30
- 192.168.27.12-192.168.27.40
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement

View File

@@ -0,0 +1,50 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: headscale-config
namespace: networking
data:
acl.json: |
{
"tagOwners": {
"tag:personal": ["lucalise@"],
},
"acls": [
{"action": "accept", "src": ["tag:personal"], "dst": ["tag:personal:*"]},
{"action": "accept", "src": ["tag:personal"], "dst": ["autogroup:internet:*"]},
{"action": "accept", "src": ["tag:personal"], "dst": ["192.168.15.0/27:*", "192.168.27.0/24:*", "192.168.20.0/26:*"]}
]
}
config.yaml: |
server_url: https://mesh.lucalise.ca
listen_addr: 0.0.0.0:8080
metrics_listen_addr: 0.0.0.0:9090
noise:
private_key_path: /var/lib/headscale/noise_private.key
prefixes:
v4: 10.100.0.0/24
v6: fd7a:115c:a1e0::/48
database:
type: sqlite3
sqlite:
path: /var/lib/headscale/db.sqlite
policy:
path: /etc/headscale/acl.json
dns:
override_local_dns: false
base_domain: m.net
derp:
server:
enabled: false
urls:
- https://controlplane.tailscale.com/derpmap/default
auto_update_enabled: true
update_frequency: 24h
log:
level: info

View File

@@ -0,0 +1,88 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: headscale-data
namespace: networking
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 2Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: headscale
namespace: networking
labels:
app: headscale
spec:
replicas: 1
selector:
matchLabels:
app: headscale
template:
metadata:
labels:
app: headscale
spec:
containers:
- name: headscale
image: docker.io/headscale/headscale
command: ["headscale", "serve"]
ports:
- containerPort: 8080
name: http
- containerPort: 9090
resources:
requests:
cpu: 100m
memory: 256Mi
limits:
cpu: 512m
memory: 1Gi
livenessProbe:
httpGet:
path: /health
port: http
initialDelaySeconds: 10
periodSeconds: 30
readinessProbe:
httpGet:
path: /health
port: http
initialDelaySeconds: 5
periodSeconds: 10
volumeMounts:
- name: headscale-data
mountPath: /var/lib/headscale
- name: headscale-config
mountPath: /etc/headscale/config.yaml
subPath: config.yaml
- name: headscale-config
mountPath: /etc/headscale/acl.json
subPath: acl.json
volumes:
- name: headscale-data
persistentVolumeClaim:
claimName: headscale-data
- name: headscale-config
configMap:
name: headscale-config
---
apiVersion: v1
kind: Service
metadata:
name: headscale
namespace: networking
labels:
app: headscale
spec:
selector:
app: headscale
ports:
- port: 8080
targetPort: http
protocol: TCP
name: http

View File

@@ -130,6 +130,23 @@ spec:
- name: minecraft-router-mc-router
port: 25565
---
apiVersion: traefik.io/v1alpha1
kind: IngressRouteTCP
metadata:
name: minecraft-rcon
namespace: minecraft
spec:
entryPoints:
- rcon
routes:
- match: HostSNI(`*`)
middlewares:
- name: private-networks-tcp
namespace: kube-system
services:
- name: minecraft-main-rcon
port: 25575
---
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
@@ -261,3 +278,19 @@ spec:
group: traefik.io
kind: Middleware
name: private-networks
---
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: mesh
namespace: networking
spec:
parentRefs:
- name: traefik-gateway
namespace: kube-system
hostnames:
- mesh.lucalise.ca
rules:
- backendRefs:
- name: headscale
port: 8080

View File

@@ -36,6 +36,12 @@ spec:
default: true
exposedPort: 25565
protocol: TCP
rcon:
port: 25575
expose:
default: true
exposedPort: 25575
protocol: TCP
persistence:
enabled: true

View File

@@ -0,0 +1,12 @@
apiVersion: traefik.io/v1alpha1
kind: MiddlewareTCP
metadata:
name: private-networks-tcp
namespace: kube-system
spec:
ipAllowList:
sourceRange:
- 10.0.0.0/8
- 172.16.0.0/12
- 192.168.0.0/16
- 100.64.0.0/10

View File

@@ -1,8 +1,9 @@
use std::collections::BTreeSet;
use askama::Template;
use serde::{Deserialize, Serialize};
use crate::{Config, HelperError};
use crate::HelperError;
#[derive(Serialize, Deserialize, Default, Debug, Clone)]
pub struct Route {
@@ -21,17 +22,49 @@ impl Route {
fn hostname(&self) -> &str {
self.hostname.as_ref().unwrap_or(&self.name)
}
fn service(&self) -> &str {
self.service.as_ref().unwrap_or(&self.name)
}
}
#[derive(Serialize, Deserialize, Default, Debug, Clone)]
#[derive(Serialize, Deserialize, Default, Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
enum RouteKind {
#[default]
HTTP,
TCP,
}
pub fn generate_routes(config: &Config) -> Result<(), HelperError> {
let routes = config.routes.iter().enumerate().try_fold(
#[derive(Template)]
#[template(path = "httproute.yaml", escape = "none")]
struct HttpRoute<'a> {
name: &'a str,
namespace: &'a str,
hostname: &'a str,
service: &'a str,
port: i16,
private: bool,
}
#[derive(Template)]
#[template(path = "ingressroutetcp.yaml", escape = "none")]
struct TcpRoute<'a> {
name: &'a str,
namespace: &'a str,
entrypoint: &'a str,
service: &'a str,
port: i16,
private: bool,
}
#[derive(Template)]
#[template(path = "http_middleware_chain.yaml", escape = "none")]
struct MiddlewareChain<'a> {
namespace: &'a str,
}
pub fn generate_routes(routes: &Vec<Route>) -> Result<(), HelperError> {
let routes_content = routes.iter().enumerate().try_fold(
String::new(),
|mut acc, (i, r)| -> Result<_, HelperError> {
if i > 0 {
@@ -41,131 +74,71 @@ pub fn generate_routes(config: &Config) -> Result<(), HelperError> {
Ok(acc)
},
)?;
let chains = generate_chains(&config.routes);
std::fs::write("kustomize/routes.yaml", &routes)?;
let chains = generate_chains(routes)?;
std::fs::write("kustomize/routes.yaml", &routes_content)?;
std::fs::write("kustomize/traefik/chains.yaml", &chains)?;
println!("Wrote: {}", routes);
println!("Wrote: {}", routes_content);
Ok(())
}
fn generate_route(route: &Route) -> Result<String, HelperError> {
Ok(match route.kind {
RouteKind::HTTP => generate_http_route(route),
RouteKind::TCP => generate_tcp_route(route)?,
})
match route.kind {
RouteKind::HTTP => {
let template = HttpRoute {
name: &route.name,
namespace: &route.namespace,
hostname: route.hostname(),
service: route.service(),
port: route.port,
private: route.private,
};
Ok(template.render()?)
}
RouteKind::TCP => {
let entrypoint = route
.entrypoint
.as_ref()
.ok_or(HelperError::TCPEntryPoint(route.name.clone()))?;
let template = TcpRoute {
name: &route.name,
namespace: &route.namespace,
entrypoint,
service: route.service(),
port: route.port,
private: route.private,
};
Ok(template.render()?)
}
}
}
fn generate_chains(routes: &[Route]) -> String {
fn generate_chains(routes: &[Route]) -> Result<String, HelperError> {
let namespaces = routes
.iter()
.filter_map(|r| {
if !r.private {
return None;
}
Some(r.namespace.as_str())
})
.filter_map(|r| r.private.then_some((r.kind.clone(), &r.namespace)))
.collect::<BTreeSet<_>>();
namespaces
.iter()
.enumerate()
.fold(String::new(), |mut acc, (i, n)| {
.try_fold(String::new(), |mut acc, (i, (kind, namespace))| {
match kind {
RouteKind::HTTP => {}
_ => {
return Ok(acc);
}
}
if i > 0 {
acc.push_str("\n---\n");
}
acc.push_str(&format!(
r#"apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: private-networks
namespace: {}
spec:
chain:
middlewares:
- name: private-networks
namespace: kube-system"#,
n
));
acc
let rendered = match kind {
RouteKind::HTTP => Some(MiddlewareChain { namespace }.render()?),
_ => None,
};
if let Some(rendered) = rendered {
acc.push_str(&rendered);
}
Ok(acc)
})
}
fn generate_http_route(route: &Route) -> String {
let mut filters_section = String::new();
if route.private {
filters_section = format!(
r#"
filters:
- type: ExtensionRef
extensionRef:
group: traefik.io
kind: Middleware
name: private-networks"#
);
};
format!(
r#"apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: {}
namespace: {}
spec:
parentRefs:
- name: traefik-gateway
namespace: kube-system
hostnames:
- {}.lucalise.ca
rules:
- backendRefs:
- name: {}
port: {}{}"#,
route.name,
route.namespace,
route.hostname(),
route.service.as_ref().unwrap_or_else(|| &route.name),
route.port,
filters_section
)
.trim_end()
.to_string()
}
fn generate_tcp_route(route: &Route) -> Result<String, HelperError> {
let mut middlewares_section = String::new();
if route.private {
middlewares_section = format!(
r#"
middlewares:
- name: private-networks"#
);
}
Ok(format!(
r#"apiVersion: traefik.io/v1alpha1
kind: IngressRouteTCP
metadata:
name: {}
namespace: {}
spec:
entryPoints:
- {}
routes:
- match: HostSNI(`*`){}
services:
- name: {}
port: {}"#,
route.name,
route.namespace,
route
.entrypoint
.as_ref()
.ok_or(HelperError::TCPEntryPoint(route.name.clone()))?,
middlewares_section,
route.service.as_ref().unwrap_or_else(|| &route.name),
route.port
)
.trim_end()
.to_string())
}

nix/homelab/src/config.rs (new file, 87 lines)

@@ -0,0 +1,87 @@
#[cfg(not(feature = "file-config"))]
use std::{collections::HashSet, env};
use serde::{Deserialize, Serialize};
use crate::{PiHoleConfig, RouterConfig, commands::generate_routes::Route, error::Result};
#[derive(Serialize, Deserialize)]
pub struct Config {
pub routes: Option<Vec<Route>>,
pub pihole: Option<PiHoleConfig>,
pub router: Option<RouterConfig>,
}
#[cfg(not(feature = "file-config"))]
struct EnvCollector {
missing: Vec<&'static str>,
}
#[cfg(not(feature = "file-config"))]
impl EnvCollector {
fn new() -> Self {
Self {
missing: Vec::new(),
}
}
fn get(&mut self, key: &'static str) -> Option<String> {
match env::var(key) {
Ok(val) => Some(val),
Err(_) => {
self.missing.push(key);
None
}
}
}
fn finish(self) -> Result<()> {
if self.missing.is_empty() {
Ok(())
} else {
Err(crate::error::Error::MissingEnvVars(self.missing.join(", ")))
}
}
}
#[cfg(not(feature = "file-config"))]
pub fn parse_config() -> Result<Config> {
let mut env = EnvCollector::new();
let pihole_url = env.get("PIHOLE_URL");
let pihole_password = env.get("PIHOLE_PASSWORD_FILE");
let pihole_hosts = env.get("PIHOLE_EXTRA_HOSTS");
let router_host = env.get("ROUTER_HOST");
let router_user = env.get("ROUTER_USER");
let router_key = env.get("ROUTER_KEY_PATH");
let router_lease = env.get("ROUTER_LEASE_FILE");
env.finish()?;
Ok(Config {
routes: None,
pihole: Some(PiHoleConfig {
url: pihole_url.unwrap(),
password_file: pihole_password.unwrap(),
extra_hosts: Some(
pihole_hosts
.unwrap()
.split('\n')
.map(String::from)
.collect::<HashSet<String>>(),
),
}),
router: Some(RouterConfig {
host: router_host.unwrap(),
user: router_user.unwrap(),
key_path: router_key.unwrap().into(),
lease_file: router_lease.unwrap(),
}),
})
}
#[cfg(feature = "file-config")]
pub fn parse_config() -> Result<Config> {
let bytes = std::fs::read("./config.toml")?;
Ok(toml::from_slice::<Config>(&bytes)?)
}
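For reference, the file-config path above deserializes the [pihole] and [router] tables of config.toml into roughly the following (values copied from the config shown earlier; routes omitted here for brevity):
// Sketch: Config produced by the `file-config` feature (routes left out).
let _config = Config {
    routes: None,
    pihole: Some(PiHoleConfig {
        url: "https://pihole.lucalise.ca".into(),
        password_file: "/run/secrets/pihole_password".into(),
        extra_hosts: Some(std::collections::HashSet::from([
            "192.168.27.2 rufus".to_string(),
        ])),
    }),
    router: Some(RouterConfig {
        host: "192.168.15.1:22".into(),
        user: "luca".into(),
        key_path: "/home/luca/.ssh/id_ed25519".into(),
        lease_file: "/var/dhcpd/var/db/dhcpd.leases".into(),
    }),
};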

View File

@@ -10,6 +10,16 @@ pub enum Error {
IO(#[from] std::io::Error),
#[error("command return non 0 exit code: {0}")]
ExitCode(i32),
#[error("HTTP error: {0}")]
Http(#[from] reqwest::Error),
#[error("Pi-hole API error: {0}")]
PiHole(String),
#[cfg(not(feature = "file-config"))]
#[error("missing environment variables: {0}")]
MissingEnvVars(String),
#[cfg(feature = "file-config")]
#[error("error parsing toml: {0}")]
TomlParse(#[from] toml::de::Error),
}
pub type Result<T> = std::result::Result<T, Error>;

View File

@@ -1,16 +1,20 @@
mod commands;
mod config;
mod dns;
mod error;
mod lease_parser;
mod pihole;
mod transport;
use std::path::Path;
use std::path::{Path, PathBuf};
use std::{collections::HashSet, env};
use anyhow::Context;
use clap::{CommandFactory, Parser};
use serde::{Deserialize, Serialize};
use thiserror::Error;
use crate::config::parse_config;
use crate::{
commands::{
Commands,
@@ -18,6 +22,7 @@ use crate::{
},
dns::Router,
lease_parser::{BindingState, Lease},
pihole::PiHoleClient,
transport::SSHTransport,
};
@@ -32,46 +37,96 @@ struct Cli {
pub enum HelperError {
#[error("error reading file")]
ReadFile(#[from] std::io::Error),
#[error("error parsing config toml")]
TomlError(#[from] toml::de::Error),
#[error("entrypoint required for tcproute: {0:?}")]
TCPEntryPoint(String),
#[error("template rendering error: {0}")]
Template(#[from] askama::Error),
}
#[derive(Serialize, Deserialize)]
pub struct Config {
routes: Vec<Route>,
pub struct PiHoleConfig {
url: String,
password_file: String,
extra_hosts: Option<HashSet<String>>,
}
pub fn parse_config<T: AsRef<Path>>(path: T) -> anyhow::Result<Config> {
let bytes = std::fs::read(&path).context(format!(
"failed to read config file: {}",
path.as_ref().display()
))?;
Ok(toml::from_slice::<Config>(&bytes)?)
#[derive(Serialize, Deserialize)]
pub struct RouterConfig {
host: String,
user: String,
key_path: PathBuf,
lease_file: String,
}
fn main() -> anyhow::Result<()> {
#[tokio::main(flavor = "current_thread")]
async fn main() -> anyhow::Result<()> {
let cli = Cli::parse();
match &cli.command {
Some(Commands::GenerateRoutes {}) => {
let config = parse_config("./config.toml")?;
generate_routes(&config)?;
let config = parse_config()?;
let routes = config
.routes
.context("routes in config are required for generating route manifests")?;
generate_routes(&routes)?;
}
Some(Commands::SyncDNS {}) => {
let r = Router::new(SSHTransport::new("192.168.15.1:22")?);
let leases = r
.dhcp_leases("/var/dhcpd/var/db/dhcpd.leases")?
let config = parse_config()?;
let pihole_config = config
.pihole
.context("pihole configuration is necessary for syncing dns")?;
let router_config = config
.router
.context("router configuration is necessary for syncing dns")?;
let password = std::fs::read_to_string(&pihole_config.password_file)
.context(format!(
"failed to read pihole password from {}",
pihole_config.password_file
))?
.trim()
.to_string();
let leases = tokio::task::spawn_blocking(move || -> anyhow::Result<Vec<Lease>> {
let r = Router::new(SSHTransport::new(
&router_config.host,
&router_config.user,
&router_config.key_path,
)?);
let leases = r
.dhcp_leases(&router_config.lease_file)?
.into_iter()
.filter(|l| {
l.binding_state == BindingState::Active && l.client_hostname.is_some()
})
.collect();
Ok(leases)
})
.await??;
let mut desired = leases
.into_iter()
.filter(|l| {
if !(l.binding_state == BindingState::Active && l.client_hostname.is_some()) {
return false;
}
true
.map(|l| {
format!(
"{} {}",
l.ip,
l.client_hostname.expect("filtered for Some above")
)
})
.collect::<Vec<Lease>>();
println!("{:#?}", leases);
.collect::<HashSet<String>>();
if let Some(extra_hosts) = pihole_config.extra_hosts {
desired.extend(extra_hosts);
}
println!("Found {} active leases with hostnames", desired.len());
let client = PiHoleClient::new(&pihole_config.url, &password).await?;
let stats = client.sync_hosts(desired).await?;
println!(
"Sync complete: added {}, removed {}",
stats.added, stats.removed
);
}
None => Cli::command().print_long_help()?,
}

nix/homelab/src/pihole.rs (new file, 171 lines)

@@ -0,0 +1,171 @@
use std::collections::HashSet;
use reqwest::Client;
use serde::{Deserialize, Serialize};
use crate::error::{Error, Result};
pub struct PiHoleClient {
client: Client,
base_url: String,
sid: String,
}
#[derive(Debug)]
pub struct SyncStats {
pub added: usize,
pub removed: usize,
}
#[derive(Debug, Deserialize)]
struct AuthResponse {
session: Session,
error: Option<AuthError>,
}
#[derive(Debug, Deserialize)]
struct Session {
valid: bool,
sid: Option<String>,
}
#[derive(Debug, Deserialize)]
struct AuthError {
message: String,
}
#[derive(Debug, Serialize)]
struct AuthRequest {
password: String,
}
#[derive(Debug, Deserialize)]
struct ConfigResponse {
config: DnsConfig,
}
#[derive(Debug, Deserialize)]
struct DnsConfig {
dns: DnsHosts,
}
#[derive(Debug, Deserialize)]
struct DnsHosts {
hosts: Vec<String>,
}
impl PiHoleClient {
pub async fn new(base_url: &str, password: &str) -> Result<Self> {
let client = Client::new();
let url = format!("{}/api/auth", base_url.trim_end_matches('/'));
let response = client
.post(&url)
.json(&AuthRequest {
password: password.to_string(),
})
.send()
.await?;
let auth: AuthResponse = response.json().await?;
if !auth.session.valid {
return Err(Error::PiHole(format!(
"authentication failed: {}",
auth.error.unwrap().message
)));
}
let sid = auth.session.sid.ok_or_else(|| {
Error::PiHole("authentication succeeded but no session ID returned".to_string())
})?;
Ok(Self {
client,
base_url: base_url.trim_end_matches('/').to_string(),
sid,
})
}
pub async fn get_hosts(&self) -> Result<HashSet<String>> {
let url = format!("{}/api/config/dns/hosts", self.base_url);
let response = self
.client
.get(&url)
.header("X-FTL-SID", &self.sid)
.send()
.await?;
if let Err(e) = response.error_for_status_ref() {
return Err(Error::PiHole(format!("failed to get hosts: {}", e)));
}
let config: ConfigResponse = response.json().await?;
Ok(config.config.dns.hosts.into_iter().collect())
}
pub async fn add_host(&self, entry: &str) -> Result<()> {
let encoded = urlencoding::encode(entry);
let url = format!("{}/api/config/dns/hosts/{}", self.base_url, encoded);
let response = self
.client
.put(&url)
.header("X-FTL-SID", &self.sid)
.send()
.await?;
if let Err(e) = response.error_for_status_ref() {
let body = response.text().await.unwrap_or_default();
return Err(Error::PiHole(format!(
"failed to add host '{}': {} - {}",
entry, e, body
)));
}
Ok(())
}
pub async fn delete_host(&self, entry: &str) -> Result<()> {
let encoded = urlencoding::encode(entry);
let url = format!("{}/api/config/dns/hosts/{}", self.base_url, encoded);
let response = self
.client
.delete(&url)
.header("X-FTL-SID", &self.sid)
.send()
.await?;
if let Err(e) = response.error_for_status_ref() {
let body = response.text().await.unwrap_or_default();
return Err(Error::PiHole(format!(
"failed to delete host '{}': {} - {}",
entry, e, body
)));
}
Ok(())
}
pub async fn sync_hosts(&self, desired: HashSet<String>) -> Result<SyncStats> {
let current = self.get_hosts().await?;
let to_delete = current.difference(&desired).cloned().collect::<Vec<_>>();
let to_add = desired.difference(&current).cloned().collect::<Vec<_>>();
for entry in &to_delete {
self.delete_host(entry).await?;
}
for entry in &to_add {
self.add_host(entry).await?;
}
Ok(SyncStats {
added: to_add.len(),
removed: to_delete.len(),
})
}
}

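For orientation, a minimal usage sketch of the client defined above, assuming the PiHoleClient, SyncStats, and crate-local Result shown in this file are in scope; the base URL, password, and host entries are placeholders, not values from this repository.

    // Hypothetical driver for PiHoleClient::sync_hosts; every literal below is a placeholder.
    use std::collections::HashSet;

    async fn sync_example() -> Result<()> {
        // Authenticate against the Pi-hole API and keep the returned session id.
        let client = PiHoleClient::new("http://pihole.example.lan", "example-password").await?;

        // Desired custom DNS entries in "IP hostname" form, as sync_hosts expects.
        let desired: HashSet<String> = ["192.168.1.10 nas", "192.168.1.11 printer"]
            .into_iter()
            .map(str::to_string)
            .collect();

        // Adds missing entries, deletes stale ones, and reports the counts.
        let stats = client.sync_hosts(desired).await?;
        println!("added {}, removed {}", stats.added, stats.removed);
        Ok(())
    }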
View File

@@ -0,0 +1,10 @@
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: private-networks
namespace: {{ namespace }}
spec:
chain:
middlewares:
- name: private-networks
namespace: kube-system

View File

@@ -0,0 +1,23 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: {{ name }}
namespace: {{ namespace }}
spec:
parentRefs:
- name: traefik-gateway
namespace: kube-system
hostnames:
- {{ hostname }}.lucalise.ca
rules:
- backendRefs:
- name: {{ service }}
port: {{ port }}
{%- if private %}
filters:
- type: ExtensionRef
extensionRef:
group: traefik.io
kind: Middleware
name: private-networks
{%- endif %}

View File

@@ -0,0 +1,18 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRouteTCP
metadata:
name: {{ name }}
namespace: {{ namespace }}
spec:
entryPoints:
- {{ entrypoint }}
routes:
- match: HostSNI(`*`)
{%- if private %}
middlewares:
- name: private-networks-tcp
namespace: kube-system
{%- endif %}
services:
- name: {{ service }}
port: {{ port }}

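The manifests above are Jinja-style templates (note the {{ name }} placeholders and the {%- if private %} block), presumably filled in by generate_routes from the routes section of config.toml. As a hedged illustration only, and not this repository's actual implementation, such a template could be rendered with the minijinja crate roughly as follows; the function name, parameters, and crate choice are assumptions.

    // Illustrative only: rendering one route manifest from a Jinja-style template.
    use minijinja::{context, Environment};

    fn render_route(
        template_src: &str,
        name: &str,
        namespace: &str,
        hostname: &str,
        service: &str,
        port: u16,
        private: bool,
    ) -> Result<String, minijinja::Error> {
        // Register the template source under a name, then render it with the route fields.
        let mut env = Environment::new();
        env.add_template("route", template_src)?;
        env.get_template("route")?.render(context! {
            name => name,
            namespace => namespace,
            hostname => hostname,
            service => service,
            port => port,
            private => private,
        })
    }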
View File

@@ -9,7 +9,7 @@ pub struct SSHTransport {
}
impl SSHTransport {
pub fn new(host: &str) -> Result<Self> {
pub fn new(host: &str, user: &str, key_path: &Path) -> Result<Self> {
let stream = TcpStream::connect(host)?;
let mut s = Self {
@@ -17,12 +17,7 @@ impl SSHTransport {
};
s.session.set_tcp_stream(stream);
s.session.handshake()?;
s.session.userauth_pubkey_file(
"luca",
None,
Path::new("/home/luca/.ssh/id_ed25519"),
None,
)?;
s.session.userauth_pubkey_file(user, None, key_path, None)?;
Ok(s)
}
}

27
nix/iso/flake.lock generated Normal file
View File

@@ -0,0 +1,27 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1767325753,
"narHash": "sha256-yA/CuWyqm+AQo2ivGy6PlYrjZBQm7jfbe461+4HF2fo=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "64049ca74d63e971b627b5f3178d95642e61cedd",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-25.11",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

34
nix/iso/flake.nix Normal file
View File

@@ -0,0 +1,34 @@
{
description = "NixOS ISO";
inputs = {
nixpkgs.url = "github:nixos/nixpkgs?ref=nixos-25.11";
};
outputs =
{ nixpkgs, ... }:
{
nixosConfigurations = {
iso = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [
"${nixpkgs}/nixos/modules/installer/cd-dvd/installation-cd-minimal.nix"
../modules/keys.nix
(
{
config,
...
}:
{
users.users = {
nixos.openssh.authorizedKeys.keys = config.authorized_ssh;
root.openssh.authorizedKeys.keys = config.authorized_ssh;
};
services.openssh.enable = true;
}
)
];
};
};
};
}

View File

@@ -21,6 +21,7 @@
roboto
roboto-mono
open-sans
comic-relief
];
fonts.fontDir.enable = true;
commonPackages = with pkgs; [
@@ -50,7 +51,6 @@
gnumake
watchman
bat
rustup
emote
pkg-config
openssl
@@ -59,7 +59,6 @@
sops
yubikey-personalization
yubikey-manager
gnupg
(pass.withExtensions (exts: with exts; [ pass-import ]))
python3
jdt-language-server
@@ -78,25 +77,42 @@
helmfile
jless
fd
dig
just
];
programs.nix-ld.enable = lib.mkDefault true;
programs.zsh.enable = lib.mkDefault true;
services.openssh.enable = lib.mkDefault true;
hardware.enableAllFirmware = true;
sops.defaultSopsFile = ../../secrets/sops.yaml;
sops.age.sshKeyPaths = [ "/etc/ssh/id_ed25519" ];
programs.gnupg.agent = {
enable = true;
enableSSHSupport = true;
enableSSHSupport = false;
pinentryPackage = pkgs.pinentry-gtk2;
};
services.pcscd.enable = true;
services.udev.packages = with pkgs; [ yubikey-personalization ];
services.udev.packages = with pkgs; [
yubikey-personalization
yubikey-manager
];
programs.ssh.startAgent = true;
programs.neovim = lib.mkDefault {
enable = true;
defaultEditor = true;
vimAlias = true;
};
sops = {
defaultSopsFile = ../../secrets/secrets.yaml;
age.sshKeyPaths = [ "/home/luca/.ssh/id_ed25519" ];
secrets = {
"pihole_password" = {
owner = "luca";
};
"k3s_token" = {
owner = "luca";
};
};
};
};
}

View File

@@ -21,5 +21,7 @@
./dns.nix
./mounts.nix
./nfs-mesh.nix
./rust.nix
# ./networking/wireguard-mesh.nix
];
}

View File

@@ -3,6 +3,8 @@
lib,
config,
pkgs-before,
inputs,
meta,
...
}:
@@ -12,6 +14,15 @@
};
config = lib.mkIf config.desktop.enable {
i18n.inputMethod = {
enable = true;
type = "fcitx5";
fcitx5.addons = with pkgs; [
fcitx5-mozc
fcitx5-gtk
];
};
environment.systemPackages = with pkgs; [
vscode-fhs
pavucontrol
@@ -50,6 +61,8 @@
lm_sensors
fanctl
waypipe
inputs.quickshell.packages.${meta.architecture}.default
alacritty
];
boot.kernelModules = [
"iptables"
@@ -108,6 +121,14 @@
};
xdg.configFile = {
"hypr/hyprlock.conf".source = ../../custom/hyprlock/hyprlock.conf;
"fcitx5/config".text = ''
[Hotkey]
TriggerKeys=
EnumerateWithTriggerKeys=True
EnumerateForwardKeys=
EnumerateBackwardKeys=
EnumerateSkipFirst=False
'';
};
services.dunst = {
enable = true;
@@ -202,9 +223,15 @@
"$mod, k, movefocus, u"
"$mod, j, movefocus, d"
"$mod, Space, togglesplit"
"$mod SHIFT, h, movewindow, l"
"$mod SHIFT, l, movewindow, r"
"$mod SHIFT, v, exec, bash -c ~/dotfiles/scripts/copy.sh"
"$mod SHIFT, s, exec, bash -c ~/dotfiles/scripts/screenshot.sh"
"$mod, p, exec, bash -c ~/dotfiles/scripts/project.sh"
"$mod SHIFT, k, exec, bash -c ~/dotfiles/scripts/layout.sh"
"$mod SHIFT, j, exec, fcitx5-remote -t"
"$mod CTRL, h, focusmonitor, l"
"$mod CTRL, l, focusmonitor, r"
"$mod, 0, workspace, 10"
"$mod SHIFT, 0, movetoworkspacesilent, 10"
@@ -235,14 +262,14 @@
",XF86MonBrightnessDown, exec, bash -c 'brightnessctl s 5%- && perc=$(( \$(brightnessctl get) * 100 / \$(brightnessctl max) )) && notify-send \"Brightness\" -h int:value:\$perc -h string:synchronous:brightness -u low'"
];
general = {
gaps_in = 0;
gaps_in = 5;
gaps_out = 10;
};
dwindle = {
preserve_split = true;
};
decoration = {
rounding = 0;
rounding = 10;
blur = {
enabled = false;
};
@@ -252,16 +279,21 @@
"XCURSOR_SIZE,24"
"LIBVA_DRIVER_NAME,nvidia"
"__GLX_VENDOR_LIBRARY_NAME,nvidia"
# "GTK_IM_MODULE,fcitx"
# "QT_IM_MODULE,fcitx"
"XMODIFIERS,@im=fcitx"
];
exec-once = [
"status-bar"
# "status-bar"
"qs"
"wl-clip-persist --clipboard regular"
"fcitx5 -d"
];
monitor = [
"eDP-1, 1920x1080, 0x0, 1"
];
input = {
kb_layout = "us";
kb_layout = "us,jp";
touchpad = {
natural_scroll = true;
};

View File

@@ -17,13 +17,6 @@
enable = true;
dns = "systemd-resolved";
};
# networking.extraHosts = ''
# 75.157.238.86 traefik.lucalise.ca
# 75.157.238.86 media.lucalise.ca
# 75.157.238.86 git.lucalise.ca
# 75.157.238.86 storage.lucalise.ca
# 75.157.238.86 home-assistant.lucalise.ca
# '';
services.resolved = {
enable = true;
@@ -32,13 +25,12 @@
"1.0.0.1"
];
domains = [
"consul"
"service.consul"
"node.consul"
"~."
];
extraConfig = ''
[Resolve]
DNS=192.168.20.5:8600
DNS=192.168.27.13:53 1.1.1.1 1.0.0.1
ResolveUnicastSingleLabel=yes
'';
};

View File

@@ -0,0 +1,90 @@
{
pkgs,
lib,
config,
...
}:
let
meshHosts = {
kumatani = {
address = "kumatani";
publicKey = "pKkl30tba29FG86wuaC0KrpSHMr1tSOujikHFbx75BM=";
isRouter = false;
ip = "10.100.0.1";
};
usahara = {
address = "usahara";
publicKey = "4v7GyAIsKfwWjLMVB4eoosJDvLkIDHW0KsEYoQqSnh4=";
isRouter = false;
ip = "10.100.0.2";
};
tux = {
address = "tux";
publicKey = "Z17ci3Flk1eDAhJ8QZSUgtmlw6BVu4XqvpqLKLWTYWw=";
isRouter = false;
ip = "10.100.0.3";
};
oakbay-pfsense = {
endpoint = "oakbay.lucalise.ca:51822";
publicKey = "xOTPZBIC9m1BkkiLCOUTty3b7/NOvslteVQHzEFxqWQ=";
isRouter = true;
ip = "10.100.0.250";
routes = [
"10.100.0.0/24"
"192.168.15.0/27"
"192.168.20.0/26"
"192.168.27.0/24"
];
};
pearce-udm = {
endpoint = "pearce.kisame.ca:51823";
publicKey = "hDb2DzI+isaqXLdxwAF1hc5Nid8TK/M1SQ+zDpf9QxY=";
isRouter = true;
ip = "10.100.0.251";
routes = [
"192.168.18.0/27"
];
};
};
getEndpoint =
name: host:
if host.isRouter or false then "${host.endpoint}" else "${host.address}:${toString 51820}";
mkPeer = name: host: {
publicKey = host.publicKey;
allowedIPs = [ "${host.ip}/32" ] ++ (host.routes or [ ]);
endpoint = getEndpoint name host;
persistentKeepalive = 25;
dynamicEndpointRefreshSeconds = 300;
};
mkPeersFor =
selfName:
lib.mapAttrsToList mkPeer (
lib.filterAttrs (name: host: name != selfName && (host.isRouter or false)) meshHosts
);
selfConfig = meshHosts.${config.networking.hostName} or null;
in
{
config = lib.mkIf (selfConfig != null) {
networking.wireguard.interfaces = {
wg0 = {
privateKeyFile = "/etc/wireguard/private.key";
ips = [ "${selfConfig.ip}/32" ];
listenPort = 51820;
peers = mkPeersFor config.networking.hostName;
};
};
networking.firewall = {
allowedUDPPorts = [ 51820 ];
trustedInterfaces = [ "wg0" ];
};
systemd.tmpfiles.rules = [
"d /etc/wireguard 0700 root root -"
];
};
}

44
nix/modules/rust.nix Normal file
View File

@@ -0,0 +1,44 @@
{
pkgs,
lib,
config,
inputs,
...
}:
{
options.rust = {
enable = lib.mkEnableOption "enable rust" // {
default = true;
};
};
config = lib.mkIf config.rust.enable {
nixpkgs.overlays = [ inputs.fenix.overlays.default ];
environment.systemPackages = with pkgs; [
(pkgs.fenix.stable.withComponents [
"cargo"
"clippy"
"rust-src"
"rustc"
"rustfmt"
])
openssl
pkgconf
];
environment.variables = {
PKG_CONFIG_PATH =
with pkgs;
lib.makeSearchPath "/lib/pkgconfig" [
openssl.dev
];
LD_LIBRARY_PATH = "/run/current-system/sw/share/nix-ld/lib";
};
programs.nix-ld.libraries = with pkgs; [
openssl
zlib
brotli
unixODBC
glib
];
};
}

View File

@@ -16,6 +16,8 @@
oh-my-posh = import ./omp.nix;
eza = import ./eza.nix;
mise = import ./mise.nix;
bacon.enable = true;
jujutsu = import ./jj.nix;
};
xdg.mimeApps = import ./mime.nix;

14
nix/users/luca/jj.nix Normal file
View File

@@ -0,0 +1,14 @@
{
enable = true;
settings = {
user = {
email = "luca_lise@icloud.com";
name = "lucalise";
};
signing = {
behavior = "own";
backend = "ssh";
key = "~/.ssh/id_ed25519.pub";
};
};
}

View File

@@ -33,6 +33,7 @@ in
"rust"
"kubectl"
"helm"
"jj"
];
};
plugins = [

18
scripts/layout.sh Executable file
View File

@@ -0,0 +1,18 @@
#!/usr/bin/env bash
# Rofi-based keyboard layout switcher for Hyprland
layouts="🇨🇦 Canadian (CA)
🇯🇵 Japanese (JP)"
selected=$(echo "$layouts" | rofi -dmenu -p "Layout")
case "$selected" in
*"Canadian"*)
hyprctl switchxkblayout all 0
notify-send -h string:synchronous:keyboard "Keyboard" "🇨🇦 Canadian (CA)"
;;
*"Japanese"*)
hyprctl switchxkblayout all 1
notify-send -h string:synchronous:keyboard "Keyboard" "🇯🇵 Japanese (JP)"
;;
esac

17
secrets/secrets.yaml Normal file
View File

@@ -0,0 +1,17 @@
pihole_password: ENC[AES256_GCM,data:G3CZJBY=,iv:hlSzQjx58VvPHyGg1ACsGaU+IIB9AIHRpg7d+iv/yfM=,tag:Xx8voi71SPkt49dg4zMXKw==,type:str]
k3s_token: ENC[AES256_GCM,data:5/HQiwLReJl17Ga2N+TDSQ==,iv:6OUU4eWC1vKVkmTQqFi+gsz9Hhu8zvkkAKsM6YEWXP8=,tag:+92zg7XN8+gxIb+gbmPy3Q==,type:str]
sops:
age:
- recipient: age13rqgrxh0fm23n3krf6v7yrrlnhhvs8256cusxqfs2l5xz8rgavssdhte4r
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBmM1NaM1dxcVJ1allzWkNN
OGxiVU5TaUJLdXNRcGxvSWFLTGpnNFRNV2lVCnNpZ3JhdzRBQU95T25OaTEybXov
RENXSXRSbDYyN0JhYWE1eGpRRmlodm8KLS0tIFZYdFI4UG1kZ1dFaWdIMW05VFov
eUVXbXhnZnJuRlFRUUZDZTlYSTFPTGsKBLPIKq6inYtfvS0EsHIg6DAxflRI1fk4
cuuP0HbytzqbCON5pl2ArQtD53N7pn+meT3B26mCVoC+130NQMN8Xg==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2026-01-04T07:24:16Z"
mac: ENC[AES256_GCM,data:X/Giw8DMwwVOVUFkKCenzUdC3IPloJhSkZggPM+2HwVoVqadXNFCdFn+UOdPYJG8veSKVAP7QnerdIw/ZZ/3DKXZSi+pkM5g7Nfe9Dsw/ES398LC5oZjUyi9GRwpT9YzMHhQXNCH4fnt4wo+niSl0wGGjo3RYHL1iSaIaXoASqM=,iv:VtY53DWZb301pYspTGubxyaG69P2uhaGngnCcDQQvNw=,tag:Mk8MFrn3zAGNVlHTpQuxCA==,type:str]
unencrypted_suffix: _unencrypted
version: 3.11.0

View File

@@ -1,16 +0,0 @@
win_pw: ENC[AES256_GCM,data:TGsPs+6wFQ==,iv:7KTF9YuPGDRJE3zcZgt6WJVIKEOp1DkOckP6QY4c9Rk=,tag:kVzkKEuSoP3vEakQT46/aQ==,type:str]
sops:
age:
- recipient: age1qu9y0dn5a704dggwmpaaurxqrhxm0qn8czgv5phka56y48sw7u8qkyn637
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBReVNmSm82KzF1dHYrWlRj
ZnRLMU4yK2ZBQ0huRmNPL3NZV05lWXBaVG1vCm5vcFJOQ3hUeUMzUTI2TDd2TUJq
NGdVbEtncTdWczVjWWxWNFh2Z3g4Q28KLS0tIFBkUnlTdEZNZkszQi9yQTg5K0hl
RmQzN1Y2SUVlT1pGYXV0SW1vb0dHNDgKWchy7XFkxpGuhly4ZefRFZc6+oqcWJzI
HJqnVLiGI6jSKOXT7WV1d+g0Qt4zHGe9tquHxi4BNdxu81lNPVE3iQ==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2025-08-26T05:15:00Z"
mac: ENC[AES256_GCM,data:670OWObSti3BN4AigbDcRvwud/JH/lUzQeWnUXgaST//FIfX6fY293AN0GJ2+3+C+K4Jd80QYh0DThaagMeTBw9/uJTQ10sER7MeknlWzOxmBEBl0fbvHn/t5v6H1yZ4XtcNV8p3RSiSC93k3z2tI4ERLsDzqBGyzkHXZGww4hc=,iv:oq4DxQWzPb80XiCD2WYaRDkqHNeBNUDiKWEMzQSDD/w=,tag:jT3JFUcC8DmHCh4Y6L++Vg==,type:str]
unencrypted_suffix: _unencrypted
version: 3.10.2