Diffstat (limited to 'packages')
-rw-r--r--  packages/default.nix                                    10
-rw-r--r--  packages/dnsmasq-leases-html/README.md                  37
-rw-r--r--  packages/dnsmasq-leases-html/default.nix                36
-rwxr-xr-x  packages/dnsmasq-leases-html/dnsmasq-leases-html.py     37
-rw-r--r--  packages/dnsmasq-leases-html/templates/index.html       60
-rw-r--r--  packages/git-blame-stats/default.nix                    26
-rwxr-xr-x  packages/git-blame-stats/git-blame-stats.py             95
-rw-r--r--  packages/git-broom/default.nix                          26
-rwxr-xr-x  packages/git-broom/git-broom.py                         350
-rw-r--r--  packages/import-gh-to-gitea/README.org                  12
-rwxr-xr-x  packages/import-gh-to-gitea/archive-projects.py         49
-rwxr-xr-x  packages/import-gh-to-gitea/delete-gh-repositories.py   84
-rwxr-xr-x  packages/import-gh-to-gitea/import-gh-to-gitea.py       62
-rw-r--r--  packages/ipconverter/default.nix                        29
-rwxr-xr-x  packages/ipconverter/ipconverter.py                     32
-rw-r--r--  packages/music-organizer/README.org                     21
-rw-r--r--  packages/music-organizer/default.nix                    15
-rw-r--r--  packages/music-organizer/go.mod                         5
-rw-r--r--  packages/music-organizer/go.sum                         4
-rw-r--r--  packages/music-organizer/main.go                        271
-rw-r--r--  packages/numap/README.org                               47
-rw-r--r--  packages/numap/go.mod                                   3
-rw-r--r--  packages/numap/internal/hwids/hwids.go                  148
-rw-r--r--  packages/numap/internal/sysfs/parse.go                  21
-rw-r--r--  packages/numap/internal/sysfs/pci.go                    145
-rw-r--r--  packages/numap/numa.go                                  116
-rw-r--r--  packages/numap/numap.go                                 31
-rw-r--r--  packages/perf-flamegraph-pid/default.nix                25
-rwxr-xr-x  packages/perf-flamegraph-pid/perf-flamegraph-pid.sh     20
-rw-r--r--  packages/scheddomain/go.mod                             3
-rw-r--r--  packages/scheddomain/main.go                            153
-rw-r--r--  packages/schedlatency/go.mod                            3
-rw-r--r--  packages/schedlatency/main.go                           254
-rw-r--r--  packages/seqstat/default.nix                            25
-rwxr-xr-x  packages/seqstat/seqstat.py                             30
35 files changed, 2285 insertions, 0 deletions
diff --git a/packages/default.nix b/packages/default.nix
new file mode 100644
index 0000000..8e537c9
--- /dev/null
+++ b/packages/default.nix
@@ -0,0 +1,10 @@
+{ pkgs, ... }:
+
+pkgs.lib.makeScope pkgs.newScope (pkgs: {
+ # dnsmasq-to-html = pkgs.callPackage ./dnsmasq-leases-html { };
+ # git-blame-stats = pkgs.callPackage ./git-blame-stats { };
+ # git-broom = pkgs.callPackage ./git-broom { };
+ # ipconverter = pkgs.callPackage ./ipconverter { };
+ # perf-flamegraph-pid = pkgs.callPackage ./perf-flamegraph-pid { };
+ seqstat = pkgs.callPackage ./seqstat { };
+})
diff --git a/packages/dnsmasq-leases-html/README.md b/packages/dnsmasq-leases-html/README.md
new file mode 100644
index 0000000..2437deb
--- /dev/null
+++ b/packages/dnsmasq-leases-html/README.md
@@ -0,0 +1,37 @@
+Generates a static HTML page with a list of all the leases allocated by `dnsmasq`.
+
+A simple template written in the Jinja syntax is used.
+
+The file containing the leases is expected to be at `/var/lib/dnsmasq/dnsmasq.leases`, but this can be overridden by setting the environment variable `DNSMASQ_LEASES`.
+
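+For reference, each line in the leases file has the following format (expiry epoch, MAC address, IP address, hostname, client id); the values below are made up:
+
+``` text
+1672531200 aa:bb:cc:dd:ee:ff 192.168.6.23 myhost 01:aa:bb:cc:dd:ee:ff
+```
+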
+The output of the script is written to `/var/lib/dnsmasq/leases.html` by default, but the destination can be overridden by setting the environment variable `DNSMASQ_LEASES_OUT`.
+
+The script can be executed automatically by `dnsmasq` if the configuration for `dhcp-script` is set to the path of the script. It will then only be executed when a *new* lease is created or an *old* lease is deleted. To also execute the script when a lease is *updated*, you need to set `script-on-renewal`.
+
+A configuration looks like this:
+
+``` ini
+dhcp-script=${pkgs.tools.dnsmasq-to-html}/bin/dnsmasq-leases-html
+script-on-renewal
+```
+
+## nginx
+To serve the page with nginx, you can use the following configuration:
+
+``` nix
+services.nginx = {
+ enable = true;
+ virtualHosts."dnsmasq" = {
+ listen = [
+ {
+ addr = "192.168.6.1";
+ port = 8067;
+ }
+ ];
+ locations."/" = {
+ root = "/var/lib/dnsmasq";
+ index = "leases.html";
+ };
+ };
+};
+```
diff --git a/packages/dnsmasq-leases-html/default.nix b/packages/dnsmasq-leases-html/default.nix
new file mode 100644
index 0000000..478c4cc
--- /dev/null
+++ b/packages/dnsmasq-leases-html/default.nix
@@ -0,0 +1,36 @@
+{ lib, stdenvNoCC, pkgs }:
+
+stdenvNoCC.mkDerivation rec {
+ pname = "dnsmasq-leases-html";
+ src = ./dnsmasq-leases-html.py;
+ templates = ./templates;
+ version = "0.1.0";
+
+ buildInputs = [
+ (pkgs.python310.withPackages (ps: with ps; [
+ jinja2
+ ]))
+ ];
+
+ propagatedBuildInputs = [
+ (pkgs.python310.withPackages (ps: with ps; [
+ jinja2
+ ]))
+ ];
+
+ dontUnpack = true;
+ dontBuild = true;
+
+ installPhase = ''
+ mkdir -p $out/bin
+ cp $src $out/bin/${pname}
+ cp -r $templates $out/bin/templates
+ '';
+
+  meta = with lib; {
+    description = "CLI to generate an HTML page with dnsmasq leases.";
+ license = licenses.mit;
+ platforms = platforms.unix;
+ maintainers = [ ];
+ };
+}
diff --git a/packages/dnsmasq-leases-html/dnsmasq-leases-html.py b/packages/dnsmasq-leases-html/dnsmasq-leases-html.py
new file mode 100755
index 0000000..c1f03db
--- /dev/null
+++ b/packages/dnsmasq-leases-html/dnsmasq-leases-html.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+
+import datetime
+import ipaddress
+import os
+
+from jinja2 import Environment, FileSystemLoader
+
+
+outfile = os.getenv("DNSMASQ_LEASES_OUT", "/var/lib/dnsmasq/leases.html")
+leases_file = os.getenv("DNSMASQ_LEASES", "/var/lib/dnsmasq/dnsmasq.leases")
+
+leases = []
+
+with open(leases_file, "r") as f:
+ for line in f:
+ content = line.rstrip("\n").split(" ")
+ lease = dict()
+ if int(content[0]) == 0:
+ lease["expire"] = "never"
+ else:
+ lease["expire"] = datetime.datetime.fromtimestamp(int(content[0]))
+ lease["MAC"] = content[1]
+ lease["IP"] = ipaddress.ip_address(content[2])
+ lease["hostname"] = content[3]
+ leases.append(lease)
+
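+# ip_address objects compare numerically, so the table is sorted by
+# address rather than lexically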
+leases = sorted(leases, key=lambda d: d["IP"])
+
+dir_path = os.path.dirname(os.path.realpath(__file__))
+templates_dir = os.path.join(dir_path, "templates")
+environment = Environment(loader=FileSystemLoader(templates_dir))
+template = environment.get_template("index.html")
+
+content = template.render(leases=leases)
+with open(outfile, "w") as fh:
+ print(content, file=fh)
diff --git a/packages/dnsmasq-leases-html/templates/index.html b/packages/dnsmasq-leases-html/templates/index.html
new file mode 100644
index 0000000..913a0c9
--- /dev/null
+++ b/packages/dnsmasq-leases-html/templates/index.html
@@ -0,0 +1,60 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+ <meta charset="utf-8">
+ <title>Leases assigned by dnsmasq</title>
+ <style type="text/css">
+ body {
+ margin: auto;
+ width: 70%;
+ font-family: monospace;
+ font-size: 16px;
+ }
+ .center {
+ margin-left: auto;
+ margin-right: auto;
+ }
+ td, th {
+ padding-left: 1em;
+ padding-right: 1em;
+ padding-top: .5em;
+ padding-bottom: .5em;
+ }
+ td:first-child, th:first-child {
+ padding-left: .25em;
+ }
+ td:last-child, th:last-child {
+ padding-right: .25em;
+ }
+ th {
+ padding-top: 1em;
+ text-align: left;
+ }
+ tr:nth-child(even) {
+ background: #eee;
+ }
+ form {
+ display: inline;
+ }
+ </style>
+</head>
+
+<body>
+ <table>
+ <tr>
+ <th>IP address</th>
+ <th>MAC address</th>
+ <th>Hostname</th>
+ <th>Expire</th>
+ </tr>
+ {% for lease in leases %}
+ <tr>
+ <td>{{ lease.IP }}</td>
+ <td>{{ lease.MAC }}</td>
+ <td>{{ lease.hostname }}</td>
+ <td>{{ lease.expire }}</td>
+ </tr>
+ {% endfor %}
+ </table>
+</body>
+</html>
diff --git a/packages/git-blame-stats/default.nix b/packages/git-blame-stats/default.nix
new file mode 100644
index 0000000..aab7cfb
--- /dev/null
+++ b/packages/git-blame-stats/default.nix
@@ -0,0 +1,26 @@
+{ lib, python3, stdenvNoCC, pkgs }:
+
+stdenvNoCC.mkDerivation rec {
+ pname = "git-blame-stats";
+ src = ./git-blame-stats.py;
+ version = "0.1.1";
+
+ nativeBuildInputs = with pkgs; [ python3 ];
+ propagatedBuildInputs = with pkgs; [ python3 ];
+
+ dontUnpack = true;
+ dontBuild = true;
+
+ installPhase = ''
+ mkdir -p $out/bin
+ cp $src $out/bin/${pname}
+ '';
+
+
+  meta = with lib; {
+    description = "CLI to report git blame statistics per author.";
+ license = licenses.mit;
+ platforms = platforms.unix;
+ maintainers = [ ];
+ };
+}
diff --git a/packages/git-blame-stats/git-blame-stats.py b/packages/git-blame-stats/git-blame-stats.py
new file mode 100755
index 0000000..3cc4f4a
--- /dev/null
+++ b/packages/git-blame-stats/git-blame-stats.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python3
+
+import argparse
+import subprocess
+from typing import Any
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+ "rev", metavar="revision", type=str, help="the revision", default="HEAD", nargs="?"
+)
+args = parser.parse_args()
+
+authors: dict[str, Any] = dict()
+max_length_author = 0
+max_length_email = 0
+
+
+def get_files(rev):
+ """Returns a list of files for the repository, at the given path, for the given revision."""
+ tree = subprocess.run(
+ ["git", "ls-tree", "--name-only", "-r", rev],
+ capture_output=True,
+ check=True,
+ encoding="utf-8",
+ )
+ return tree.stdout.splitlines()
+
+
+def line_info(filename, rev):
+ """Generates a set of commit blocks using `git blame` for a file.
+
+ Each block corresponds to the information about a single line of code."""
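+
+    # a porcelain block looks roughly like this (one header per line,
+    # then the line content itself prefixed with a tab):
+    #   <sha> <orig-line> <final-line> [<num-lines>]
+    #   author <name>
+    #   author-mail <email>
+    #   ... more headers ...
+    #   \t<line content>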
+ blame = subprocess.run(
+ ["git", "blame", "-w", "--line-porcelain", rev, "--", filename],
+ capture_output=True,
+ encoding="utf-8",
+ check=True,
+ )
+ block = []
+ for line in blame.stdout.splitlines():
+ block.append(line)
+ if line.startswith("\t"):
+ yield block
+ block = []
+
+
+files = get_files(args.rev)
+
+for filename in files:
+ try:
+ for block in line_info(filename.rstrip(), args.rev):
+ author = ""
+ author_email = ""
+ commit = ""
+ skip = False
+ for i, val in enumerate(block):
+ if i == 0:
+ commit = val.split()[0]
+ continue
+ if val.startswith("author "):
+ author = " ".join(val.split()[1:])
+ continue
+ if val.startswith("author-mail"):
+ author_email = " ".join(val.split()[1:])
+ continue
+            if val == "\t":
+                # the line content itself is empty, don't count it
+                skip = True
+        if skip:
+            continue
+        if authors.get(author, None) is None:
+ authors[author] = {
+ "email": author_email,
+ "commits": set(),
+ "files": set(),
+ "lines": 0,
+ }
+ authors[author]["commits"].add(commit)
+ authors[author]["files"].add(filename)
+ authors[author]["lines"] += 1
+            if len(author) > max_length_author:
+                max_length_author = len(author)
+            if len(author_email) > max_length_email:
+                max_length_email = len(author_email)
+ except Exception:
+ continue
+
+for author, stats in authors.items():
+ email = stats["email"]
+ lines = stats["lines"]
+ commits = len(stats["commits"])
+ files = len(stats["files"])
+ print(
+ f"{author:{max_lenght_author}} {email:{max_lenght_email}} {lines:6} {commits:6} {files:6}"
+ )
diff --git a/packages/git-broom/default.nix b/packages/git-broom/default.nix
new file mode 100644
index 0000000..fea555f
--- /dev/null
+++ b/packages/git-broom/default.nix
@@ -0,0 +1,26 @@
+{ lib, python3, stdenvNoCC, pkgs }:
+
+stdenvNoCC.mkDerivation rec {
+ pname = "git-broom";
+ src = ./git-broom.py;
+ version = "0.1.0";
+
+ nativeBuildInputs = with pkgs; [ python3 ];
+ propagatedBuildInputs = with pkgs; [ python3 ];
+
+ dontUnpack = true;
+ dontBuild = true;
+
+ installPhase = ''
+ mkdir -p $out/bin
+ cp $src $out/bin/${pname}
+ '';
+
+
+ meta = with pkgs.lib; {
+ description = "CLI to delete local and remote git branches that have been merged.";
+ license = licenses.mit;
+ platforms = platforms.unix;
+ maintainers = [ ];
+ };
+}
diff --git a/packages/git-broom/git-broom.py b/packages/git-broom/git-broom.py
new file mode 100755
index 0000000..8721b3c
--- /dev/null
+++ b/packages/git-broom/git-broom.py
@@ -0,0 +1,350 @@
+#!/usr/bin/env python3
+
+import argparse
+import os
+import re
+import subprocess
+import sys
+from typing import List, Dict
+
+import logging
+
+logging.basicConfig(format="[%(asctime)s]%(levelname)s:%(message)s", level=logging.INFO)
+
+# regular expression to find the name of the main branch on the remote
+re_match_remote_branch = re.compile(r"ref: refs/heads/(?P<branch>\S+)\tHEAD")
+
+# never delete any branches or references with one of these names
+immortal_ref = ["main", "master", "HEAD"]
+
+# that's how my remotes are usually named, and in that order of preference.
+preferred_remotes = ["origin", "github", "work"]
+
+
+class GitConfig(object):
+ """Represent the configuration for the git repository."""
+
+ def __init__(self) -> None:
+ self.guess_remote()
+ self.guess_primary_branch()
+ self.remote_ref = f"{self.remote_name}/{self.primary_branch}"
+ self.me = os.getenv("USER")
+
+ def guess_remote(self) -> None:
+ """Guess the name and URL for the remote repository.
+
+ If the name of the remote is from the list of preferred remote, we
+ return the name and URL.
+
+ If we don't have a remote set, throw an exception.
+ If we don't find any remote, throw an exception.
+ """
+ candidates = subprocess.run(
+ ["git", "config", "--get-regexp", "remote\.[a-z0-9]+.url"],
+ capture_output=True,
+ check=True,
+ encoding="utf-8",
+ ).stdout.splitlines()
+
+ if len(candidates) == 0:
+ raise ValueError("No remote is defined.")
+
+ remotes = dict()
+
+ for candidate in candidates:
+ parts = candidate.split(" ")
+ remote = parts[0].split(".")[1]
+ url = parts[1]
+ remotes[remote] = url
+
+ for remote in preferred_remotes:
+ if remote in remotes:
+ self.remote_name = remote
+ self.remote_url = remotes[remote]
+ return
+
+ raise ValueError("can't find the preferred remote.")
+
+ def guess_primary_branch(self) -> None:
+ """Guess the primary branch on the remote.
+
+        If we can't figure out the default branch, raise an exception.
+ """
+ remote_head = subprocess.run(
+ ["git", "ls-remote", "--symref", self.remote_name, "HEAD"],
+ capture_output=True,
+ check=True,
+ encoding="utf-8",
+ ).stdout.splitlines()
+
+ for l in remote_head:
+ m = re_match_remote_branch.match(l)
+ if m:
+ self.primary_branch = m.group("branch")
+ return
+
+ raise ValueError(
+ f"can't find the name of the remote branch for {self.remote_name}"
+ )
+
+
+def is_git_repository() -> bool:
+ """Check if we are inside a git repository.
+
+    Return True if we are, False otherwise."""
+ res = subprocess.run(
+ ["git", "rev-parse", "--show-toplevel"], check=False, capture_output=True
+ )
+ return not res.returncode
+
+
+def fetch(remote: str):
+ """Fetch updates from the remote repository."""
+ subprocess.run(["git", "fetch", remote, "--prune"], capture_output=True, check=True)
+
+
+def ref_sha(ref: str) -> str:
+    """Get the sha for a ref."""
+    # use rev-parse rather than show-ref: it prints only the sha, so
+    # shas from different refs can be compared directly
+    res = subprocess.run(
+        ["git", "rev-parse", "--verify", ref],
+        capture_output=True,
+        check=True,
+        encoding="utf-8",
+    )
+    return res.stdout.rstrip()
+
+
+def get_branches(options: List[str]) -> List[str]:
+ """Get a list of branches."""
+ return subprocess.run(
+ ["git", "branch", "--format", "%(refname:short)"] + options,
+ capture_output=True,
+ check=True,
+ encoding="utf-8",
+ ).stdout.splitlines()
+
+
+def ref_tree(ref: str) -> str:
+ """Get the reference from a tree."""
+ return subprocess.run(
+ ["git", "rev-parse", f"{ref}^{{tree}}"],
+ check=True,
+ capture_output=True,
+ encoding="utf-8",
+ ).stdout.rstrip()
+
+
+def rebase_local_branches(config: GitConfig, local_rebase_tree_id: dict) -> None:
+ """Try to rebase the local branches that have been not been merged."""
+ for branch in get_branches(["--list", "--no-merged"]):
+ _rebase_local_branch(branch, config, local_rebase_tree_id)
+
+
+def _rebase_local_branch(
+ branch: str, config: GitConfig, local_rebase_tree_id: dict
+) -> None:
+ res = subprocess.run(
+ [
+ "git",
+ "merge-base",
+ "--is-ancestor",
+ config.remote_ref,
+ branch,
+ ],
+ check=False,
+ capture_output=True,
+ )
+ if res.returncode == 0:
+ logging.info(
+ f"local branch {branch} is already a descendant of {config.remote_ref}."
+ )
+ local_rebase_tree_id[branch] = ref_tree(branch)
+ return
+
+ logging.info(f"local branch {branch} will be rebased on {config.remote_ref}.")
+ subprocess.run(
+ ["git", "checkout", "--force", branch], check=True, capture_output=True
+ )
+ res = subprocess.run(
+ ["git", "rebase", config.remote_ref], check=True, capture_output=True
+ )
+ if res.returncode == 0:
+ logging.info(f"local branch {branch} has been rebased")
+ local_rebase_tree_id[branch] = ref_tree(branch)
+ else:
+ logging.error(f"failed to rebase local branch {branch}.")
+ subprocess.run(["git", "rebase", "--abort"], check=True)
+ subprocess.run(
+ ["git", "checkout", "--force", config.primary_branch], check=True
+ )
+ subprocess.run(["git", "reset", "--hard"], check=True)
+
+
+def rebase_remote_branches(
+ config: GitConfig, local_rebase_tree_id: dict, main_sha: str
+) -> None:
+ for branch in get_branches(
+ ["--list", "-r", f"{config.me}/*", "--no-merged", config.remote_ref]
+ ):
+ _rebase_remote_branches(branch, config, local_rebase_tree_id, main_sha)
+
+
+def _rebase_remote_branches(
+ branch: str, config: GitConfig, local_rebase_tree_id: dict, main_sha: str
+) -> None:
+    remote, head = branch.split("/", 1)
+ if head in immortal_ref:
+ return
+
+ res = subprocess.run(
+ ["git", "merge-base", "--is-ancestor", config.remote_ref, branch],
+ check=False,
+ capture_output=True,
+ )
+ if res.returncode == 0:
+ logging.info(
+ f"local branch {branch} is already a descendant of {config.remote_ref}."
+ )
+ return
+
+ logging.info(f"remote branch {branch} will be rebased on {config.remote_ref}.")
+
+ sha = ref_sha(branch)
+ subprocess.run(["git", "checkout", "--force", sha], capture_output=True, check=True)
+ res = subprocess.run(
+ ["git", "rebase", config.remote_ref],
+ capture_output=True,
+        check=False,
+ )
+ if res.returncode == 0:
+ new_sha = ref_sha("--head")
+ short_sha = new_sha[0:8]
+ logging.info(f"remote branch {branch} at {sha} rebased to {new_sha}.")
+ if new_sha == main_sha:
+ logging.info(f"remote branch {branch}, when rebased, is already merged!")
+ logging.info(f"would run `git push {remote} :{head}'")
+ elif new_sha == sha:
+ logging.info(f"remote branch {branch}, when rebased, is unchanged!")
+ elif ref_tree(new_sha) == local_rebase_tree_id.get(head, ""):
+ logging.info(f"remote branch {branch}, when rebased, same as local branch!")
+ logging.info(f"would run `git push --force-with-lease {remote} {head}'")
+ else:
+ logging.info(
+ f"remote branch {branch} has been rebased to create {short_sha}!"
+ )
+ logging.info(
+ f"would run `git push --force-with-lease {remote} {new_sha}:{head}'"
+ )
+ else:
+ logging.error(f"failed to rebase remote branch {branch}.")
+ subprocess.run(["git", "rebase", "--abort"], check=True)
+ subprocess.run(
+ ["git", "checkout", "--force", config.primary_branch], check=True
+ )
+ subprocess.run(["git", "reset", "--hard"], check=True)
+
+
+def destroy_remote_merged_branches(config: GitConfig, dry_run: bool) -> None:
+ """Destroy remote branches that have been merged."""
+ for branch in get_branches(
+ ["--list", "-r", f"{config.me}/*", "--merged", config.remote_ref]
+ ):
+        remote, head = branch.split("/", 1)
+ if head in immortal_ref:
+ continue
+ logging.info(f"remote branch {branch} has been merged")
+ if dry_run:
+ logging.info(f"would have run git push {remote} :{head}")
+ else:
+ subprocess.run(
+ ["git", "push", remote, f":{head}"], check=True, encoding="utf-8"
+ )
+
+
+def destroy_local_merged_branches(config: GitConfig, dry_run: bool) -> None:
+ """Destroy local branches that have been merged."""
+ for branch in get_branches(["--list", "--merged", config.remote_ref]):
+ if branch in immortal_ref:
+ continue
+
+ logging.info(f"local branch {branch} has been merged")
+ if dry_run:
+ logging.info(f"would have run git branch --delete --force {branch}")
+ else:
+ subprocess.run(
+ ["git", "branch", "--delete", "--force", branch],
+ check=True,
+ encoding="utf-8",
+ )
+
+
+def workdir_is_clean() -> bool:
+ """Check the git workdir is clean."""
+ res = subprocess.run(
+ ["git", "status", "--porcelain"],
+ check=True,
+ capture_output=True,
+ encoding="utf-8",
+ ).stdout.splitlines()
+ return not len(res)
+
+
+def main(dry_run: bool) -> bool:
+ if not is_git_repository():
+ logging.error("error: run this inside a git repository")
+ return False
+
+ if not workdir_is_clean():
+ logging.error("the git workdir is not clean, commit or stash your changes.")
+ return False
+
+ config = GitConfig()
+
+    # what's our current sha?
+ origin_main_sha = ref_sha(config.remote_ref)
+
+ # let's get everything up to date
+ fetch(config.remote_name)
+
+ # let's get the new sha
+ main_sha = ref_sha(config.remote_ref)
+
+ if origin_main_sha != main_sha:
+ logging.info(f"we started with {origin_main_sha} and now we have {main_sha}")
+
+ local_rebase_tree_id: Dict[str, str] = dict()
+
+    # try to rebase local branches that have not been merged
+ rebase_local_branches(config, local_rebase_tree_id)
+
+    # try to rebase remote branches that have not been merged
+ rebase_remote_branches(config, local_rebase_tree_id, main_sha)
+
+    # let's check out the primary branch now and see what's left to do
+ subprocess.run(
+ ["git", "checkout", "--force", config.primary_branch],
+ check=True,
+ capture_output=True,
+ )
+
+ # branches on the remote that have been merged can be destroyed.
+ destroy_remote_merged_branches(config, dry_run)
+
+ # local branches that have been merged can be destroyed.
+ destroy_local_merged_branches(config, dry_run)
+
+ # TODO: restore to the branch I was on before ?
+ return True
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="delete local and remote branches that have been merged."
+ )
+ parser.add_argument(
+ "--dry-run",
+ action=argparse.BooleanOptionalAction,
+ help="when set to True, do not execute the destructive actions",
+ default=True,
+ )
+ args = parser.parse_args()
+
+ if not main(args.dry_run):
+ sys.exit(1)
diff --git a/packages/import-gh-to-gitea/README.org b/packages/import-gh-to-gitea/README.org
new file mode 100644
index 0000000..2e26b88
--- /dev/null
+++ b/packages/import-gh-to-gitea/README.org
@@ -0,0 +1,12 @@
+#+TITLE: Import GitHub repositories to gitea
+
+Scripts to move my repositories from GitHub to my instance of [[https://git.fcuny.net][gitea]].
+
+* import repositories
+#+begin_src sh
+python3.10 import-gh-to-gitea.py -g (pass api/github/terraform|psub) -G (pass api/git.fcuny.net/gh-import|psub)
+#+end_src
+* archiving repositories
+#+begin_src sh
+python3.10 archive-projects.py -t (pass api/git.fcuny.net/gh-import|psub)
+#+end_src
diff --git a/packages/import-gh-to-gitea/archive-projects.py b/packages/import-gh-to-gitea/archive-projects.py
new file mode 100755
index 0000000..41bd898
--- /dev/null
+++ b/packages/import-gh-to-gitea/archive-projects.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python3
+
+import argparse
+
+import requests
+
+
+def main(api_token):
+ s = requests.Session()
+ s.headers.update({"Authorization": f"token {api_token}"})
+ s.headers.update({"Accept": "application/json"})
+ s.headers.update({"Content-Type": "application/json"})
+
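+    # the repositories endpoint is paginated: keep fetching until we
+    # get an empty page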
+ not_done = True
+ page = 1
+ while not_done:
+ url = f"https://git.fcuny.net/api/v1/user/repos?page={page}&limit=10"
+ res = s.get(
+ url,
+ timeout=5,
+ )
+ res.raise_for_status()
+
+ repos = res.json()
+ if len(repos) == 0:
+ not_done = False
+ else:
+ page = page + 1
+
+ for repo in repos:
+ if repo.get("owner").get("login") == "attic":
+ if repo.get("archived") is False:
+ name = repo.get("name")
+ data = {"archived": True}
+ res = s.patch(
+ f"https://git.fcuny.net/api/v1/repos/attic/{name}", json=data
+ )
+ res.raise_for_status()
+ print(f"set {name} to archived: {res.status_code}")
+
+
+if __name__ == "__main__":
+ argp = argparse.ArgumentParser()
+ argp.add_argument("-t", "--token-file", nargs=1, type=argparse.FileType("r"))
+
+ args = argp.parse_args()
+ api_token = args.token_file[0].readline().strip()
+
+ main(api_token)
diff --git a/packages/import-gh-to-gitea/delete-gh-repositories.py b/packages/import-gh-to-gitea/delete-gh-repositories.py
new file mode 100755
index 0000000..b87c0f6
--- /dev/null
+++ b/packages/import-gh-to-gitea/delete-gh-repositories.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python3.10
+
+import argparse
+
+import requests
+
+
+def main(gitea_api_token, gh_api_token):
+ gitea = requests.Session()
+ gitea.headers.update({"Authorization": f"token {gitea_api_token}"})
+ gitea.headers.update({"Accept": "application/json"})
+ gitea.headers.update({"Content-Type": "application/json"})
+
+ not_done = True
+ page = 1
+
+ gitea_repos = []
+ while not_done:
+ url = f"https://git.fcuny.net/api/v1/user/repos?page={page}&limit=10"
+ res = gitea.get(
+ url,
+ timeout=5,
+ )
+ res.raise_for_status()
+
+ repos = res.json()
+ if len(repos) == 0:
+ not_done = False
+ else:
+ page = page + 1
+
+ for repo in repos:
+ name = repo.get("name")
+ gitea_repos.append(name)
+
+ github = requests.Session()
+ github.headers.update({"Authorization": f"token {gh_api_token}"})
+ github.headers.update({"Accept": "application/vnd.github.v3+json"})
+
+ not_done = True
+ page = 1
+ github_repos = []
+ while not_done:
+ url = f"https://api.github.com/user/repos?page={page}&type=all"
+ res = github.get(
+ url,
+ timeout=5,
+ )
+ res.raise_for_status()
+ repos = res.json()
+ if len(repos) == 0:
+ not_done = False
+ else:
+ page = page + 1
+
+ for repo in repos:
+ name = repo.get("name")
+ if (
+ repo.get("owner").get("login") == "fcuny"
+ and repo.get("private") == True
+ ):
+ github_repos.append(name)
+
+ for repo in github_repos:
+ if repo in gitea_repos:
+ url = f"https://api.github.com/repos/fcuny/{repo}"
+ print(f"deleting {url}")
+ res = github.delete(
+ url,
+ timeout=5,
+ )
+ res.raise_for_status()
+
+
+if __name__ == "__main__":
+ argp = argparse.ArgumentParser()
+ argp.add_argument("-t", "--gt-file", nargs=1, type=argparse.FileType("r"))
+ argp.add_argument("-T", "--gh-file", nargs=1, type=argparse.FileType("r"))
+
+ args = argp.parse_args()
+ gitea_api_token = args.gt_file[0].readline().strip()
+ github_api_token = args.gh_file[0].readline().strip()
+
+ main(gitea_api_token, github_api_token)
diff --git a/packages/import-gh-to-gitea/import-gh-to-gitea.py b/packages/import-gh-to-gitea/import-gh-to-gitea.py
new file mode 100755
index 0000000..b59c8eb
--- /dev/null
+++ b/packages/import-gh-to-gitea/import-gh-to-gitea.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python3
+
+
+import argparse
+
+import requests
+
+
+def main(gh_api_token, gitea_api_token):
+ s = requests.Session()
+ s.headers.update({"Authorization": f"token {gh_api_token}"})
+ s.headers.update({"Accept": "application/vnd.github.v3+json"})
+
+ # hardcoded number of items per page, pagination is not handled.
+ res = s.get("https://api.github.com/user/repos?per_page=200&type=all", timeout=5)
+ res.raise_for_status()
+
+ repos = res.json()
+
+ gts = requests.Session()
+ gts.headers.update({"Accept": "application/json"})
+ gts.headers.update({"Content-Type": "application/json"})
+ gts.headers.update({"Authorization": f"token {gitea_api_token}"})
+ for repo in repos:
+ # archived projects go to the attic.
+ owner = ""
+ if repo.get("archived"):
+ owner = "attic"
+ else:
+ owner = "fcuny"
+
+ data = {
+ "auth_username": "fcuny",
+ "auth_token": gh_api_token,
+ "clone_addr": repo.get("html_url"),
+ "mirror": False,
+ "private": repo.get("private"),
+ "repo_name": repo.get("name"),
+ "repo_owner": owner,
+ "service": "git",
+ "description": repo.get("description"),
+ }
+ print(f"importing {data['repo_name']} from {data['clone_addr']}")
+ res = gts.post(
+ "https://git.fcuny.net/api/v1/repos/migrate",
+ json=data,
+ )
+ try:
+ res.raise_for_status()
+ except Exception as e:
+ print(f"failed for {data['repo_name']} with {e}")
+
+
+if __name__ == "__main__":
+ argp = argparse.ArgumentParser()
+ argp.add_argument("-g", "--gh-token-file", nargs=1, type=argparse.FileType("r"))
+ argp.add_argument("-G", "--gitea-token-file", nargs=1, type=argparse.FileType("r"))
+ args = argp.parse_args()
+
+ gh_api_token = args.gh_token_file[0].readline().strip()
+ gitea_api_token = args.gitea_token_file[0].readline().strip()
+ main(gh_api_token, gitea_api_token)
diff --git a/packages/ipconverter/default.nix b/packages/ipconverter/default.nix
new file mode 100644
index 0000000..4580396
--- /dev/null
+++ b/packages/ipconverter/default.nix
@@ -0,0 +1,29 @@
+{ lib, python3, stdenvNoCC, pkgs }:
+
+stdenvNoCC.mkDerivation rec {
+ pname = "ipconverter";
+ version = "0.1.0";
+
+ src = ./ipconverter.py;
+
+ buildInputs = with pkgs; [ python3 ];
+ propagatedBuildInputs = with pkgs; [ python3 ];
+
+ dontUnpack = true;
+ dontBuild = true;
+
+ installPhase = ''
+ mkdir -p $out/bin
+ cp $src $out/bin/${pname}
+ chmod a+x $out/bin/${pname}
+ ln -s $out/bin/${pname} $out/bin/ip2int
+ ln -s $out/bin/${pname} $out/bin/int2ip
+ '';
+
+ meta = with lib; {
+ description = "Helper script to convert an IP address to an integer.";
+ license = with licenses; [ mit ];
+ platforms = platforms.unix;
+ maintainers = with maintainers; [ fcuny ];
+ };
+}
diff --git a/packages/ipconverter/ipconverter.py b/packages/ipconverter/ipconverter.py
new file mode 100755
index 0000000..6b01d5d
--- /dev/null
+++ b/packages/ipconverter/ipconverter.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+
+import argparse
+import ipaddress
+import sys
+
+argp = argparse.ArgumentParser()
+argp.add_argument("infile", nargs="?", type=argparse.FileType("r"), default=sys.stdin)
+args = argp.parse_args()
+
+# read the input, filter out commented lines and remove new line characters
+string_ips = [
+ ip
+ for line in args.infile.readlines()
+ if (ip := line.strip()) and not ip.startswith("#")
+]
+
+# convert entries to int if the string is a numeric value
+ips = list(map(lambda n: int(n) if n.isnumeric() else n, string_ips))
+
+
+def conv(n):
+ """helper function to convert based on the name of the program"""
+ return int(n) if argp.prog == "ip2int" else str(n)
+
+
+for ip in ips:
+ try:
+ r = conv(ipaddress.ip_address(ip))
+ print(f"{ip:15} → {r:15}")
+ except Exception as e:
+ print(f"error: {e}", file=sys.stderr)
diff --git a/packages/music-organizer/README.org b/packages/music-organizer/README.org
new file mode 100644
index 0000000..a42a196
--- /dev/null
+++ b/packages/music-organizer/README.org
@@ -0,0 +1,21 @@
+#+TITLE: music organizer
+
+the tool takes a couple of arguments:
+- ~-dest~: where will the music be stored
+- a list of directories to scan
+
+all files that have tags that can be read will be processed and moved to the specified destination.
+
+files are organized like this: ={artist}/{album}/{track number} {track title}.{track format}=
+
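+for example (made-up names): ={Some Artist}/{Some Album}/03 Some Track.flac=
+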
+the tool ensures that files are not already present in the destination. if there's already a file with the same name, it checks that the md5 sums of the files are identical. if they are not, it logs a message.
+
+* build
+#+BEGIN_SRC sh
+go build
+#+END_SRC
+
+* install
+#+BEGIN_SRC sh
+go install
+#+END_SRC
diff --git a/packages/music-organizer/default.nix b/packages/music-organizer/default.nix
new file mode 100644
index 0000000..1242e34
--- /dev/null
+++ b/packages/music-organizer/default.nix
@@ -0,0 +1,15 @@
+{ pkgs, ... }:
+
+pkgs.buildGoModule rec {
+ name = "music-organizer";
+ src = ./.;
+ vendorSha256 = "sha256-pQpattmS9VmO3ZIQUFn66az8GSmB4IvYhTTCFn6SUmo=";
+ nativeBuildInputs = with pkgs; [ go ];
+
+ meta = with pkgs.lib; {
+ description = "CLI to organize my music in folders.";
+ license = licenses.mit;
+ platforms = platforms.linux;
+ maintainers = [ ];
+ };
+}
diff --git a/packages/music-organizer/go.mod b/packages/music-organizer/go.mod
new file mode 100644
index 0000000..ba9a1b8
--- /dev/null
+++ b/packages/music-organizer/go.mod
@@ -0,0 +1,5 @@
+module golang.fcuny.org/music-organizer
+
+go 1.17
+
+require github.com/dhowden/tag v0.0.0-20220617232555-e66a190c9f5b
diff --git a/packages/music-organizer/go.sum b/packages/music-organizer/go.sum
new file mode 100644
index 0000000..3383f0e
--- /dev/null
+++ b/packages/music-organizer/go.sum
@@ -0,0 +1,4 @@
+github.com/dhowden/itl v0.0.0-20170329215456-9fbe21093131/go.mod h1:eVWQJVQ67aMvYhpkDwaH2Goy2vo6v8JCMfGXfQ9sPtw=
+github.com/dhowden/plist v0.0.0-20141002110153-5db6e0d9931a/go.mod h1:sLjdR6uwx3L6/Py8F+QgAfeiuY87xuYGwCDqRFrvCzw=
+github.com/dhowden/tag v0.0.0-20220617232555-e66a190c9f5b h1:TG8R5ZZgd1Sj7iFWnkk5dNy94RG8fP8M4l24UYR8/HY=
+github.com/dhowden/tag v0.0.0-20220617232555-e66a190c9f5b/go.mod h1:Z3Lomva4pyMWYezjMAU5QWRh0p1VvO4199OHlFnyKkM=
diff --git a/packages/music-organizer/main.go b/packages/music-organizer/main.go
new file mode 100644
index 0000000..253afef
--- /dev/null
+++ b/packages/music-organizer/main.go
@@ -0,0 +1,271 @@
+package main
+
+import (
+ "archive/zip"
+ "crypto/md5"
+ "encoding/hex"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/dhowden/tag"
+)
+
+const (
+	// the max length for a track can only be 255 characters minus 3 for the
+	// track number (followed by a space), and 4 for the format. The limit of
+	// 255 comes from HFS+.
+	TrackTitleMaxLength = 255 - 3 - 4
+)
+
+var musicDest = flag.String("dest", fmt.Sprintf("%s/media/music", os.Getenv("HOME")), "where to store the music")
+
+// replace slashes with dashes
+func stripSlash(s string) string {
+ return strings.ReplaceAll(s, "/", "-")
+}
+
+// return the name of the artist, album and the title of the track
+// the title of the track has the following format:
+//
+// {track #} {track title}.{track format}
+func generatePath(m tag.Metadata) (string, string, string) {
+ var artist, album, title string
+ var track int
+
+ // if there's no artist, let's fallback to "Unknown Artists"
+ if len(m.Artist()) == 0 {
+ artist = "Unknown Artists"
+ } else {
+ artist = stripSlash(m.Artist())
+ }
+
+ // if there's no album name, let's fallback to "Unknown Album"
+ if len(m.Album()) == 0 {
+ album = "Unknown Album"
+ } else {
+ album = stripSlash(m.Album())
+ }
+
+ track, _ = m.Track()
+
+ // ok, there must be a better way
+ format := strings.ToLower(string(m.FileType()))
+
+	title = fmt.Sprintf("%02d %s.%s", track, stripSlash(m.Title()), format)
+	// truncate on runes rather than bytes so we can't split a
+	// multi-byte character, and use the actual limit instead of 255
+	if r := []rune(title); len(r) > TrackTitleMaxLength {
+		title = string(r[:TrackTitleMaxLength])
+	}
+
+ return artist, album, title
+}
+
+// create all the required directories. if we fail to create one, we die
+func makeParents(path string) error {
+ if err := os.MkdirAll(path, 0o777); err != nil {
+ return fmt.Errorf("failed to create %s: %v", path, err)
+ }
+ return nil
+}
+
+func md5sum(path string) (string, error) {
+ var sum string
+ f, err := os.Open(path)
+ if err != nil {
+ return sum, err
+ }
+
+ defer f.Close()
+
+ h := md5.New()
+ if _, err := io.Copy(h, f); err != nil {
+ return sum, err
+ }
+ sum = hex.EncodeToString(h.Sum(nil)[:16])
+ return sum, nil
+}
+
+func makeCopy(src, dst string) error {
+ f, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ t, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE, 0o666)
+ if err != nil {
+ return err
+ }
+ defer t.Close()
+
+ _, err = io.Copy(t, f)
+ if err != nil {
+ return err
+ }
+ log.Printf("copied %s → %s\n", src, dst)
+ return nil
+}
+
+// ensure the file is named correctly and is moved to the correct destination
+// before we can do that, we need to:
+// 1. check if the track already exists, if it does, does it have the same md5 ?
+// if they are similar, we skip them. if they are not, we log and don't do
+// anything
+// 2. we can move the file to the destination
+// 3. we can delete the original file
+func renameFile(originalPath string, artist, album, title string) error {
+ directories := filepath.Join(*musicDest, artist, album)
+ destination := filepath.Join(directories, title)
+
+ // check if the file is present
+ _, err := os.Stat(destination)
+ if err == nil {
+ var originalSum, destinationSum string
+ if originalSum, err = md5sum(originalPath); err != nil {
+ return err
+ }
+ if destinationSum, err = md5sum(destination); err != nil {
+ return err
+ }
+
+ if destinationSum != originalSum {
+ log.Printf("md5 sum are different: %s(%s) %s(%s)", originalPath, originalSum, destination, destinationSum)
+ }
+ return nil
+ }
+
+ if err := makeParents(directories); err != nil {
+ return err
+ }
+
+ if err := makeCopy(originalPath, destination); err != nil {
+ return err
+ }
+
+ // TODO delete original file
+ // os.Remove(originalPath)
+ return nil
+}
+
+// we try to open any files and read the metadata.
+// if the file has metadata we can read, we will try to move the file to the
+// correct destination
+func processFile(path string) error {
+ f, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+
+ defer f.Close()
+ m, err := tag.ReadFrom(f)
+ if err != nil {
+ // this is fine, this might not be a music file
+ log.Printf("SKIP failed to read tags from %s: %v", path, err)
+ return nil
+ }
+
+ var artist, album, title string
+ artist, album, title = generatePath(m)
+ if err := renameFile(path, artist, album, title); err != nil {
+ return fmt.Errorf("failed to move %s: %v", path, err)
+ }
+ return nil
+}
+
+func processPath(path string, f os.FileInfo, err error) error {
+	// the walk function already gives us the FileInfo and any error,
+	// there is no need to stat the path again
+	if err != nil {
+		return err
+	}
+	if !f.IsDir() {
+		return processFile(path)
+	}
+	return nil
+}
+
+// unzip takes two paths, a source and destination. The source is the
+// name of the archive and we will extract the content into the
+// destination directory. The destination directory has to already
+// exists, we are not going to create it here or delete it at the end.
+func unzip(src, dst string) error {
+ r, err := zip.OpenReader(src)
+ if err != nil {
+ return err
+ }
+
+ defer r.Close()
+
+	for _, f := range r.File {
+		fpath := filepath.Join(dst, f.Name)
+		// directory entries only need to be created, and files may be
+		// nested in directories that don't exist yet
+		if f.FileInfo().IsDir() {
+			if err := os.MkdirAll(fpath, 0o777); err != nil {
+				return err
+			}
+			continue
+		}
+		if err := os.MkdirAll(filepath.Dir(fpath), 0o777); err != nil {
+			return err
+		}
+		outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
+		if err != nil {
+			return err
+		}
+
+		rc, err := f.Open()
+		if err != nil {
+			return err
+		}
+
+ _, err = io.Copy(outFile, rc)
+ if err != nil {
+ log.Printf("failed to copy %s: %s", outFile.Name(), err)
+ }
+
+ outFile.Close()
+ rc.Close()
+ }
+ return nil
+}
+
+func main() {
+ flag.Parse()
+
+ if *musicDest == "" {
+ log.Fatal("-dest is required")
+ }
+
+ paths := make([]string, flag.NArg())
+
+ // For our temp directory, we use what ever the value of
+ // XDG_RUNTIME_DIR is. If the value is unset, we will default to
+ // the system default temp directory.
+ tmpDir := os.Getenv("XDG_RUNTIME_DIR")
+
+ for i, d := range flag.Args() {
+ if filepath.Ext(d) == ".zip" {
+ // If we have an extension and it's '.zip', we consider the
+ // path to be an archive. In this case we want to create a new
+ // temporary directory and extract the content of the archive
+ // in that path. The temporary directory is removed once we're
+ // done.
+ out, err := ioutil.TempDir(tmpDir, "music-organizer")
+ if err != nil {
+ log.Printf("failed to create a temp directory to extract %s: %v", d, err)
+ continue
+ }
+ defer os.RemoveAll(out)
+
+ if err := unzip(d, out); err != nil {
+ log.Printf("failed to extract %s: %v", d, err)
+ continue
+ }
+ paths[i] = out
+ } else {
+ paths[i] = d
+ }
+ }
+
+ for _, d := range paths {
+ // XXX deal with filenames that are too long
+ // scan the directory and try to find any file that we want to move
+ err := filepath.Walk(d, processPath)
+ if err != nil {
+ log.Fatalf("error while processing files: %v", err)
+ }
+ }
+}
diff --git a/packages/numap/README.org b/packages/numap/README.org
new file mode 100644
index 0000000..c7941b1
--- /dev/null
+++ b/packages/numap/README.org
@@ -0,0 +1,47 @@
+#+TITLE: numap
+
+Print the NUMA topology of a host.
+
+* Usage
+#+BEGIN_SRC sh
+./numap |jq .
+{
+ "node0": {
+ "name": "node0",
+ "path": "/sys/devices/system/node/node0",
+ "cpulist": "0-19,40-59",
+ "pci_devices": [
+ {
+ "vendor": "Mellanox Technologies",
+ "name": "MT27710 Family [ConnectX-4 Lx]"
+ },
+ {
+ "vendor": "Mellanox Technologies",
+ "name": "MT27710 Family [ConnectX-4 Lx]"
+ }
+ ]
+ },
+ "node1": {
+ "name": "node1",
+ "path": "/sys/devices/system/node/node1",
+ "cpulist": "20-39,60-79",
+ "pci_devices": [
+ {
+ "vendor": "Intel Corporation",
+ "name": "NVMe Datacenter SSD [3DNAND, Beta Rock Controller]"
+ }
+ ]
+ }
+}
+#+END_SRC
+
+The command scans the host to find the NUMA nodes and all the PCI devices, then maps the PCI devices back to their NUMA node.
+
+It also shows the list of CPUs attached to each node.
+
+* Limitations
+** Device class
+For now, only the following classes of hardware are handled:
+- NVMe
+- network
+- GPU
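+
+These correspond to the PCI class codes used in =numa.go=: =0x010802= (NVMe), =0x020000= (ethernet) and =0x030200= (3D controller).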
diff --git a/packages/numap/go.mod b/packages/numap/go.mod
new file mode 100644
index 0000000..92b1885
--- /dev/null
+++ b/packages/numap/go.mod
@@ -0,0 +1,3 @@
+module golang.fcuny.net/numap
+
+go 1.17
diff --git a/packages/numap/internal/hwids/hwids.go b/packages/numap/internal/hwids/hwids.go
new file mode 100644
index 0000000..6aa9d8a
--- /dev/null
+++ b/packages/numap/internal/hwids/hwids.go
@@ -0,0 +1,148 @@
+package hwids
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strings"
+)
+
+var pciPath = []string{
+ "/usr/share/hwdata/pci.ids",
+ "/usr/share/misc/pci.ids",
+}
+
+type PCIType int
+
+const (
+ PCIVendor PCIType = iota
+ PCIDevice
+ PCISubsystem
+)
+
+type PciDevices map[uint16][]PciDevice
+
+// PciDevice represents a PCI device
+type PciDevice struct {
+ Type PCIType
+ Vendor, Device uint16
+ SubVendor, SubDevice uint16
+ VendorName, DeviceName string
+ SubName string
+}
+
+// Load loads the hardware database for PCI devices and returns a map
+// of vendor -> list of devices.
+func Load() (PciDevices, error) {
+	// if the environment variable HWDATA is set, we add it to the
+	// list of paths we check for the hardware database.
+ extraPath := os.Getenv("HWDATA")
+ if extraPath != "" {
+ pciPath = append(pciPath, extraPath)
+ }
+
+ for _, f := range pciPath {
+ fh, err := os.Open(f)
+ if err != nil {
+ continue
+ }
+ defer fh.Close()
+ return parse(fh)
+ }
+ return PciDevices{}, fmt.Errorf("hwids: could not find a pci.ids file")
+}
+
+func parse(f *os.File) (PciDevices, error) {
+ devices := make(PciDevices)
+
+ s := bufio.NewScanner(f)
+
+ // this is to keep track of the current device. The format of the
+ // file is as follow:
+ // vendor vendor_name
+ // device device_name <-- single tab
+ // subvendor subdevice subsystem_name <-- two tabs
+ // the variable is to keep track of the current vendor / device
+ cur := PciDevice{}
+
+ for s.Scan() {
+ l := s.Text()
+ // skip empty lines or lines that are a comment
+ if len(l) == 0 || l[0] == '#' {
+ continue
+ }
+ // lines starting with a C are the classes definitions, and
+ // they are at the end of the file, which means we're done
+ // parsing the devices
+ if l[0] == 'C' {
+ break
+ }
+
+ parts := strings.SplitN(l, " ", 2)
+ if len(parts) != 2 {
+ return devices, fmt.Errorf("hwids: malformed PCI ID line (missing ID separator): %s", l)
+ }
+
+ ids, name := parts[0], parts[1]
+ if len(ids) < 2 || len(name) == 0 {
+ return devices, fmt.Errorf("hwids: malformed PCI ID line (empty ID or name): %s", l)
+ }
+
+ cur.Type = PCIVendor
+
+ if ids[0] == '\t' {
+ if ids[1] == '\t' {
+ cur.Type = PCISubsystem
+ } else {
+ cur.Type = PCIDevice
+ }
+ }
+
+ var err error
+ switch cur.Type {
+ case PCIVendor:
+ _, err = fmt.Sscanf(ids, "%x", &cur.Vendor)
+ cur.VendorName = name
+ case PCIDevice:
+ _, err = fmt.Sscanf(ids, "%x", &cur.Device)
+ cur.DeviceName = name
+ case PCISubsystem:
+ _, err = fmt.Sscanf(ids, "%x %x", &cur.SubVendor, &cur.SubDevice)
+ cur.SubName = name
+ }
+
+ if err != nil {
+ return devices, fmt.Errorf("hwids: malformed PCI ID line: %s: %v", l, err)
+ }
+
+ // This is to reset the state when we are moving to a
+ // different vendor or device
+ switch cur.Type {
+ case PCIVendor:
+ cur.Device = 0
+ cur.DeviceName = ""
+ fallthrough
+ case PCIDevice:
+ cur.SubVendor = 0
+ cur.SubDevice = 0
+ cur.SubName = ""
+ }
+
+		// append works on a nil slice, so there is no need to
+		// special-case the first device for a vendor
+		devices[cur.Vendor] = append(devices[cur.Vendor], cur)
+ }
+
+ if err := s.Err(); err != nil {
+ return devices, fmt.Errorf("hwids: failed to read PCI ID line: %v", err)
+ }
+
+ return devices, nil
+}
diff --git a/packages/numap/internal/sysfs/parse.go b/packages/numap/internal/sysfs/parse.go
new file mode 100644
index 0000000..d518653
--- /dev/null
+++ b/packages/numap/internal/sysfs/parse.go
@@ -0,0 +1,21 @@
+package sysfs
+
+import (
+ "io/ioutil"
+ "strconv"
+ "strings"
+)
+
+// ContentUint64 parses the content of a file in sysfs, and convert
+// from hex to uint64.
+func ContentUint64(path string) (uint64, error) {
+ content, err := ioutil.ReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ result, err := strconv.ParseUint(strings.TrimSpace(string(content)), 0, 64)
+ if err != nil {
+ return 0, err
+ }
+ return result, nil
+}
diff --git a/packages/numap/internal/sysfs/pci.go b/packages/numap/internal/sysfs/pci.go
new file mode 100644
index 0000000..9e714b1
--- /dev/null
+++ b/packages/numap/internal/sysfs/pci.go
@@ -0,0 +1,145 @@
+package sysfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+const (
+ sysFsPCIDevicesPath = "/sys/bus/pci/devices/"
+)
+
+type PCIDevice struct {
+ NumaNode int
+ ID string
+ Device, Vendor uint64
+ SubVendor, SubDevice uint64
+ Class uint64
+ MSIs []int
+}
+
+func ScanPCIDevices() []PCIDevice {
+ devices, err := ioutil.ReadDir(sysFsPCIDevicesPath)
+ if err != nil {
+ panic(err)
+ }
+
+ pciDevices := []PCIDevice{}
+
+ for _, device := range devices {
+ dpath := filepath.Join(sysFsPCIDevicesPath, device.Name())
+ pcid, err := NewPCIDevice(dpath, device.Name())
+ if err != nil {
+ panic(err)
+ }
+ pciDevices = append(pciDevices, pcid)
+ }
+ return pciDevices
+}
+
+func getPCIDeviceClass(path string) (uint64, error) {
+ return ContentUint64(filepath.Join(path, "class"))
+}
+
+func getPCIDeviceVendor(path string) (uint64, error) {
+ return ContentUint64(filepath.Join(path, "vendor"))
+}
+
+func getPCIDeviceId(path string) (uint64, error) {
+ return ContentUint64(filepath.Join(path, "device"))
+}
+
+func getPCIDeviceSubsystemDevice(path string) (uint64, error) {
+ return ContentUint64(filepath.Join(path, "subsystem_device"))
+}
+
+func getPCIDeviceSubsystemVendor(path string) (uint64, error) {
+ return ContentUint64(filepath.Join(path, "subsystem_vendor"))
+}
+
+func getPCIDeviceNumaNode(path string) int {
+ content, err := ioutil.ReadFile(filepath.Join(path, "numa_node"))
+ if err != nil {
+ panic(err)
+ }
+ nodeNum, err := strconv.Atoi(strings.TrimSpace(string(content)))
+ if err != nil {
+ panic(err)
+ }
+ return nodeNum
+}
+
+func getPCIDeviceMSIx(p string) []int {
+ g := fmt.Sprintf("%s/*", filepath.Join(p, "msi_irqs"))
+ files, err := filepath.Glob(g)
+ if err != nil {
+ panic(err)
+ }
+ if len(files) == 0 {
+ return []int{}
+ }
+
+ msix := []int{}
+
+ for _, f := range files {
+ content, err := ioutil.ReadFile(f)
+ if err != nil {
+ panic(err)
+ }
+ if strings.TrimSpace(string(content)) == "msix" {
+ base := path.Base(f)
+ v, err := strconv.Atoi(base)
+ if err != nil {
+ panic(err)
+ }
+ msix = append(msix, v)
+ }
+ }
+ return msix
+}
+
+func NewPCIDevice(path, name string) (PCIDevice, error) {
+ nodeNum := getPCIDeviceNumaNode(path)
+
+ device, err := getPCIDeviceId(path)
+ if err != nil {
+ return PCIDevice{}, err
+ }
+
+ vendor, err := getPCIDeviceVendor(path)
+ if err != nil {
+ return PCIDevice{}, err
+ }
+
+ subvendor, err := getPCIDeviceSubsystemVendor(path)
+ if err != nil {
+ return PCIDevice{}, err
+ }
+
+ subdevice, err := getPCIDeviceSubsystemDevice(path)
+ if err != nil {
+ return PCIDevice{}, err
+ }
+
+ deviceClass, err := getPCIDeviceClass(path)
+ if err != nil {
+ return PCIDevice{}, err
+ }
+
+ msix := getPCIDeviceMSIx(path)
+
+ return PCIDevice{
+ ID: name,
+ Device: device,
+ Class: deviceClass,
+ NumaNode: nodeNum,
+ Vendor: vendor,
+ SubVendor: subvendor,
+ SubDevice: subdevice,
+ MSIs: msix,
+ }, nil
+}
diff --git a/packages/numap/numa.go b/packages/numap/numa.go
new file mode 100644
index 0000000..402ea1d
--- /dev/null
+++ b/packages/numap/numa.go
@@ -0,0 +1,116 @@
+package main
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "golang.fcuny.net/numap/internal/hwids"
+ "golang.fcuny.net/numap/internal/sysfs"
+)
+
+const (
+	node_root = "/sys/devices/system/node/node*"
+	// PCI device classes we map to NUMA nodes, as found in the sysfs
+	// `class` attribute
+	CLASS_NVMe     = 67586  // 0x010802: mass storage controller, NVMe
+	CLASS_ETHERNET = 131072 // 0x020000: network controller, ethernet
+	CLASS_GPU      = 197120 // 0x030200: display controller, 3D
+)
+
+type node struct {
+ Name string `json:"name"`
+ Path string `json:"path"`
+ CpuList string `json:"cpulist"`
+ PCIDevices []PCIDevice `json:"pci_devices"`
+}
+
+type PCIDevice struct {
+ Vendor string `json:"vendor"`
+ Name string `json:"name"`
+}
+
+func findNodes(hwdb hwids.PciDevices) (map[string]node, error) {
+ nodes := make(map[string]node)
+
+ files, err := filepath.Glob(node_root)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to find NUMA nodes under %s: %+v", node_root, err)
+ }
+ if len(files) == 0 {
+ return nil, fmt.Errorf("Could not find NUMA node in %s", node_root)
+ }
+
+ for _, f := range files {
+ n, err := newNode(f)
+ if err != nil {
+ return make(map[string]node), err
+ }
+ nodes[n.Name] = n
+ }
+
+ r, err := mapPCIDevicesToNumaNode(hwdb)
+ if err != nil {
+ panic(err)
+ }
+ for k, v := range r {
+ nodeName := fmt.Sprintf("node%d", k)
+ n := nodes[nodeName]
+ n.PCIDevices = v
+ nodes[nodeName] = n
+ }
+ return nodes, nil
+}
+
+func mapPCIDevicesToNumaNode(hwdb hwids.PciDevices) (map[int][]PCIDevice, error) {
+ devices := sysfs.ScanPCIDevices()
+ r := map[int][]PCIDevice{}
+
+ for _, d := range devices {
+ if d.Class == CLASS_NVMe || d.Class == CLASS_ETHERNET || d.Class == CLASS_GPU {
+ _, ok := hwdb[uint16(d.Vendor)]
+ if ok {
+ desc := hwdb[uint16(d.Vendor)]
+ var vendor, name string
+ for _, m := range desc {
+ if uint64(m.Device) == d.Device && uint64(m.Vendor) == d.Vendor {
+ vendor = m.VendorName
+ name = m.DeviceName
+ break
+ }
+ }
+ pciDevice := PCIDevice{
+ Vendor: vendor,
+ Name: name,
+ }
+ r[d.NumaNode] = append(r[d.NumaNode], pciDevice)
+ }
+ }
+ }
+ return r, nil
+}
+
+func newNode(p string) (node, error) {
+ _, name := path.Split(p)
+
+ cpulist, err := cpuList(p)
+ if err != nil {
+ return node{}, err
+ }
+
+ return node{
+ Name: name,
+ Path: p,
+ CpuList: cpulist,
+ PCIDevices: []PCIDevice{},
+ }, nil
+}
+
+func cpuList(p string) (string, error) {
+ lpath := filepath.Join(p, "cpulist")
+ c, err := ioutil.ReadFile(lpath)
+ if err != nil {
+ return "", fmt.Errorf("Failed to open %s: %+v", lpath, err)
+ }
+ return strings.TrimRight(string(c), "\n"), nil
+}
diff --git a/packages/numap/numap.go b/packages/numap/numap.go
new file mode 100644
index 0000000..c65f1f0
--- /dev/null
+++ b/packages/numap/numap.go
@@ -0,0 +1,31 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+
+ "golang.fcuny.net/numap/internal/hwids"
+)
+
+func main() {
+ hwdb, err := hwids.Load()
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+
+ nodes, err := findNodes(hwdb)
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+
+ out, err := json.Marshal(nodes)
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+
+ fmt.Println(string(out))
+}
diff --git a/packages/perf-flamegraph-pid/default.nix b/packages/perf-flamegraph-pid/default.nix
new file mode 100644
index 0000000..0cd0a1b
--- /dev/null
+++ b/packages/perf-flamegraph-pid/default.nix
@@ -0,0 +1,25 @@
+{ lib, stdenvNoCC, pkgs }:
+
+stdenvNoCC.mkDerivation rec {
+ pname = "perf-flamegraph-pid";
+ src = ./perf-flamegraph-pid.sh;
+ version = "0.1.0";
+
+ nativeBuildInputs = with pkgs; [ flamegraph linuxPackages_latest.perf ];
+ propagatedBuildInputs = with pkgs; [ flamegraph linuxPackages_latest.perf ];
+
+ dontUnpack = true;
+ dontBuild = true;
+
+ installPhase = ''
+ mkdir -p $out/bin
+ cp $src $out/bin/${pname}
+ '';
+
+ meta = with lib; {
+ description = "Generate a process' flame graph.";
+ license = with licenses; [ mit ];
+ platforms = platforms.unix;
+ maintainers = with maintainers; [ fcuny ];
+ };
+}
diff --git a/packages/perf-flamegraph-pid/perf-flamegraph-pid.sh b/packages/perf-flamegraph-pid/perf-flamegraph-pid.sh
new file mode 100755
index 0000000..2ca3d16
--- /dev/null
+++ b/packages/perf-flamegraph-pid/perf-flamegraph-pid.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
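+
+# usage: perf-flamegraph-pid <command> [args...]
+# the arguments are passed straight to `perf record`, so passing
+# `-p <pid>` instead of a command should also work to profile an
+# already-running process.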
+
+OUT_DIR="${HOME}/workspace/debug/flamegraph"
+# capture the timestamp once so both output files share the same name
+TIMESTAMP="$(date +%y%m%d-%H%M%S)"
+OUT_DATA="${OUT_DIR}/${TIMESTAMP}.data"
+OUT_SVG="${OUT_DIR}/${TIMESTAMP}.svg"
+
+mkdir -p "${OUT_DIR}"
+
+# record the data with perf. We need to run this with sudo to get all
+# the privileges we need.
+sudo perf record -g --call-graph dwarf --freq max --output "${OUT_DATA}" "$@"
+
+# give me ownership of the file
+sudo chown "${USER}" "${OUT_DATA}"
+
+perf script --input "${OUT_DATA}" |
+ stackcollapse-perf.pl |
+ flamegraph.pl >"${OUT_SVG}"
diff --git a/packages/scheddomain/go.mod b/packages/scheddomain/go.mod
new file mode 100644
index 0000000..afbc83a
--- /dev/null
+++ b/packages/scheddomain/go.mod
@@ -0,0 +1,3 @@
+module golang.fcuny.net/scheddomain
+
+go 1.17
diff --git a/packages/scheddomain/main.go b/packages/scheddomain/main.go
new file mode 100644
index 0000000..1d0f5d3
--- /dev/null
+++ b/packages/scheddomain/main.go
@@ -0,0 +1,153 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+// https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux/+/v4.17/include/linux/sched/topology.h#20
+var SDFlags = map[string]uint64{
+ "SD_LOAD_BALANCE": 0x0001,
+ "SD_BALANCE_NEWIDLE": 0x0002,
+ "SD_BALANCE_EXEC": 0x0004,
+ "SD_BALANCE_FORK": 0x0008,
+ "SD_BALANCE_WAKE": 0x0010,
+ "SD_WAKE_AFFINE": 0x0020,
+ "SD_ASYM_CPUCAPACITY": 0x0040,
+ "SD_SHARE_CPUCAPACITY": 0x0080,
+ "SD_SHARE_POWERDOMAIN": 0x0100,
+ "SD_SHARE_PKG_RESOURCES": 0x0200,
+ "SD_SERIALIZE": 0x0400,
+ "SD_ASYM_PACKING": 0x0800,
+ "SD_PREFER_SIBLING": 0x1000,
+ "SD_OVERLAP": 0x2000,
+ "SD_NUMA": 0x4000,
+}
+
+type Scheduler map[string][]Domain
+
+type Domain struct {
+ Name string `json:"name"`
+ Type string `json:"type"`
+ Flags []string `json:"flags"`
+ Indexes map[string]string `json:"indexes"`
+}
+
+func main() {
+ cpus, err := CPUs()
+ if err != nil {
+ fmt.Fprint(os.Stderr, err)
+ os.Exit(1)
+ }
+
+ if len(cpus) == 0 {
+ fmt.Fprint(os.Stderr, "there is no scheduler domains\n")
+ os.Exit(1)
+ }
+
+ sched := Scheduler{}
+ for _, cpu := range cpus {
+ _, cpuID := path.Split(cpu)
+ domains, err := domains(cpu)
+ if err != nil {
+ fmt.Fprint(os.Stderr, err)
+ os.Exit(1)
+ }
+ sched[cpuID] = domains
+ }
+ out, err := json.Marshal(sched)
+ if err != nil {
+ fmt.Fprint(os.Stderr, err)
+ os.Exit(1)
+ }
+ fmt.Println(string(out))
+}
+
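+// domains builds the list of scheduler domains for a given CPU by
+// reading /proc/sys/kernel/sched_domain/cpu<N>/domain<M>/.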
+func domains(cpuPath string) ([]Domain, error) {
+ domainPath := fmt.Sprintf("%s/domain*", cpuPath)
+ availDomains, err := filepath.Glob(domainPath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get domains under %s: %v", cpuPath, err)
+ }
+
+ domains := []Domain{}
+
+ if len(availDomains) == 0 {
+ return domains, nil
+ }
+
+ for _, d := range availDomains {
+ _, dName := path.Split(d)
+ dType := getContent(d, "name")
+ flags, err := domainFlags(d)
+ if err != nil {
+ return nil, err
+ }
+ indexes := domainIndexes(d)
+
+ domain := Domain{
+ Name: dName,
+ Type: dType,
+ Flags: flags,
+ Indexes: indexes,
+ }
+ domains = append(domains, domain)
+ }
+ return domains, nil
+}
+
+func domainFlags(path string) ([]string, error) {
+ flagPath := fmt.Sprintf("%s/flags", path)
+
+ content, err := ioutil.ReadFile(flagPath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read %s: %v", flagPath, err)
+ }
+
+ flags, err := strconv.ParseUint(strings.TrimSpace(string(content)), 0, 64)
+ if err != nil {
+ return nil, fmt.Errorf("failed to convert flags %s: %v", flagPath, err)
+ }
+
+ supportedFlags := []string{}
+ for k, v := range SDFlags {
+ if flags&v > 0 {
+ supportedFlags = append(supportedFlags, k)
+ }
+ }
+ return supportedFlags, nil
+}
+
+func domainIndexes(path string) map[string]string {
+ indexes := map[string]string{
+ "busy": getContent(path, "busy_idx"),
+ "idle": getContent(path, "idle_idx"),
+ "new_idle": getContent(path, "newidle_idx"),
+ "wake": getContent(path, "wake_idx"),
+ "fork_exec": getContent(path, "forkexec_idx"),
+ }
+ return indexes
+}
+
+func getContent(path, fileName string) string {
+ domainName := fmt.Sprintf("%s/%s", path, fileName)
+ name, err := ioutil.ReadFile(domainName)
+ if err != nil {
+ return ""
+ }
+ return strings.TrimSpace(string(name))
+}
+
+func CPUs() ([]string, error) {
+ cpus, err := filepath.Glob("/proc/sys/kernel/sched_domain/cpu*")
+ if err != nil {
+ return nil, fmt.Errorf("failed to get a list of cpus: %v", err)
+ }
+ return cpus, nil
+}
diff --git a/packages/schedlatency/go.mod b/packages/schedlatency/go.mod
new file mode 100644
index 0000000..9a073ac
--- /dev/null
+++ b/packages/schedlatency/go.mod
@@ -0,0 +1,3 @@
+module golang.fcuny.net/schedlatency
+
+go 1.17
diff --git a/packages/schedlatency/main.go b/packages/schedlatency/main.go
new file mode 100644
index 0000000..7dd709e
--- /dev/null
+++ b/packages/schedlatency/main.go
@@ -0,0 +1,254 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// SchedStat is the JSON record printed for each sampling interval.
+type SchedStat struct {
+ Pid int `json:"pid"`
+ RunTicks int `json:"run_ticks"`
+ WaitTicks int `json:"wait_ticks"`
+ SlicesRan int `json:"ran_slices"`
+ AverageRun float64 `json:"avg_run"`
+ AverageWait float64 `json:"avg_wait"`
+}
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: %s <pid>\n", os.Args[0])
+}
+
+func main() {
+ if len(os.Args) == 1 {
+ usage()
+ os.Exit(1)
+ }
+
+ input := os.Args[1]
+ pid, err := strconv.Atoi(input)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to convert %s to a PID: %v\n", input, err)
+ os.Exit(1)
+ }
+
+ p := Proc{
+ PID: pid,
+ }
+ // counters from the previous sample, used to compute per-interval deltas
+ oran := 0
+ owait_ticks := 0
+ orun_ticks := 0
+ for {
+ stat, err := p.SchedStat()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to get schedstat for %d: %v\n", p.PID, err)
+ os.Exit(1)
+ }
+ diff := stat.SlicesRan - oran
+ var avgrun, avgwait float64
+
+ if diff > 0 {
+ // divide as floats: integer division would truncate the averages
+ avgrun = float64(stat.RunTicks-orun_ticks) / float64(diff)
+ avgwait = float64(stat.WaitTicks-owait_ticks) / float64(diff)
+ } else {
+ avgrun = 0
+ avgwait = 0
+ }
+
+ stat.AverageRun = avgrun
+ stat.AverageWait = avgwait
+
+ out, err := json.Marshal(SchedStat{
+ Pid: p.PID,
+ RunTicks: stat.RunTicks,
+ WaitTicks: stat.WaitTicks,
+ SlicesRan: stat.SlicesRan,
+ AverageRun: stat.AverageRun,
+ AverageWait: stat.AverageWait,
+ })
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ fmt.Println(string(out))
+ oran = stat.SlicesRan
+ orun_ticks = stat.RunTicks
+ owait_ticks = stat.WaitTicks
+ time.Sleep(5 * time.Second)
+ }
+}
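+
+// As an illustration, running `schedlatency <pid>` emits one JSON line
+// every 5 seconds, along the lines of (the values are made up):
+//
+//	{"pid":1234,"run_ticks":338708432,"wait_ticks":93398506,"ran_slices":1022,"avg_run":331417.2,"avg_wait":91387.9}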
+
+// This is the path that contains the scheduler statistics.
+// Note that they are not populated unless
+// /proc/sys/kernel/sched_schedstats is set to 1.
+const procSchedStat = "/proc/schedstat"
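+
+// For example, schedstats can be enabled at runtime (as root) with the
+// standard sysctl knob:
+//
+//	sysctl -w kernel.sched_schedstats=1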
+
+// idleness lists the CPU idle states, in the order in which the
+// per-domain load_balance() statistics are reported in /proc/schedstat.
+var idleness = []string{"idle", "busy", "newlyIdle"}
+
+type ProcSchedStat struct {
+ RunTicks int `json:"run_ticks"`
+ WaitTicks int `json:"wait_ticks"`
+ SlicesRan int `json:"ran_slices"`
+ AverageRun float64 `json:"avg_run"`
+ AverageWait float64 `json:"avg_wait"`
+}
+
+// SchedCPUStat contains the scheduler statistics for a CPU.
+type SchedCPUStat struct {
+ YieldCount uint64 `json:"yield_count"`
+ SchedulerCount uint64 `json:"sched_count"`
+ SchedulerGoIdle uint64 `json:"sched_go_idle"`
+ TryToWakeUp uint64 `json:"try_to_wake"`
+ TryToWakeUpLocal uint64 `json:"try_to_wake_local"`
+ Running uint64 `json:"running"`
+ Waiting uint64 `json:"waiting"`
+ Slices uint64 `json:"slices"`
+ Domains map[string]SchedDomain `json:"domains"`
+}
+
+// SchedLoadBalance contains the load_balance() statistics for a
+// domain in a given idle state.
+type SchedLoadBalance struct {
+ LBCount uint64 `json:"lb_count"`
+ LBBalanced uint64 `json:"lb_balanced"`
+ LBFailed uint64 `json:"lb_failed"`
+ LBImbalanced uint64 `json:"lb_imbalanced"`
+ LBGained uint64 `json:"lb_gained"`
+ LBHotGain uint64 `json:"lb_hot_gain"`
+ LBNoBusyQueue uint64 `json:"lb_no_busy_queue"`
+ LBNoBusyGroup uint64 `json:"lb_no_busy_group"`
+}
+
+// SchedDomain contains the statistics for a domain.
+type SchedDomain struct {
+ LoadBalancers map[string]SchedLoadBalance `json:"lbs"`
+ ActiveLoadBalanceCount uint64 `json:"active_lb_count"`
+ ActiveLoadBalanceFailed uint64 `json:"active_lb_failed"`
+ ActiveLoadBalancePushed uint64 `json:"active_lb_pushed"`
+ TryToWakeUpRemote uint64 `json:"try_to_wake_up_remote"`
+ TryToWakeUpMoveAffine uint64 `json:"try_to_wake_up_move_affine"`
+ TryToWakeUpMoveBalance uint64 `json:"try_to_wake_up_move_balance"`
+}
+
+// Proc provides information about a running process.
+type Proc struct {
+ // The process ID.
+ PID int
+}
+
+// SchedStat returns scheduler statistics for the process.
+// The information available is:
+// 1. time spent on the cpu
+// 2. time spent waiting on a runqueue
+// 3. # of timeslices run on this cpu
+func (p Proc) SchedStat() (ProcSchedStat, error) {
+ path := fmt.Sprintf("/proc/%d/schedstat", p.PID)
+ b, err := os.ReadFile(path)
+ if err != nil {
+ return ProcSchedStat{}, err
+ }
+ content := string(b)
+ stats := strings.Fields(content)
+ if len(stats) < 3 {
+ return ProcSchedStat{}, fmt.Errorf("unexpected format in %s: %q", path, content)
+ }
+
+ run_ticks, err := strconv.Atoi(stats[0])
+ if err != nil {
+ return ProcSchedStat{}, err
+ }
+
+ wait_ticks, err := strconv.Atoi(stats[1])
+ if err != nil {
+ return ProcSchedStat{}, err
+ }
+
+ nran, err := strconv.Atoi(stats[2])
+ if err != nil {
+ return ProcSchedStat{}, err
+ }
+
+ stat := ProcSchedStat{
+ RunTicks: run_ticks,
+ WaitTicks: wait_ticks,
+ SlicesRan: nran,
+ }
+ return stat, nil
+}
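+
+// As an illustration (the numbers are made up), /proc/<pid>/schedstat
+// holds three space-separated counters:
+//
+//	338708432 93398506 1022
+//
+// which map to RunTicks, WaitTicks and SlicesRan respectively.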
+
+// ReadSchedStat returns statistics from the scheduler.
+// Information about the statistics can be found at
+// https://www.kernel.org/doc/html/latest/scheduler/sched-stats.html.
+func ReadSchedStat() (map[string]SchedCPUStat, error) {
+ b, err := os.ReadFile(procSchedStat)
+ if err != nil {
+ return nil, fmt.Errorf("procfs: failed to open %s: %v", procSchedStat, err)
+ }
+ content := string(b)
+
+ cpus := map[string]SchedCPUStat{}
+
+ lines := strings.Split(content, "\n")
+
+ var currentCpu string
+
+ // The first two lines are the version of the stats and a
+ // timestamp, so skip both.
+ // TODO(fcuny): we should check which version is used, because the
+ // format changes between versions.
+ for _, line := range lines[2:] {
+ // The format is as follow:
+ // cpu<N> 1 2 3 4 5 6 7 8 9
+ // domain<N> <cpumask> 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36
+ if strings.HasPrefix(line, "cpu") {
+ // meaning of the fields: https://www.kernel.org/doc/html/latest/scheduler/sched-stats.html#cpu-statistics
+ fields := strings.Fields(line)
+ // fields[2] is a legacy counter that is always 0 on modern
+ // kernels, so it is skipped here.
+ cpuStat := SchedCPUStat{
+ YieldCount: convertField(fields[1]),
+ SchedulerCount: convertField(fields[3]),
+ SchedulerGoIdle: convertField(fields[4]),
+ TryToWakeUp: convertField(fields[5]),
+ TryToWakeUpLocal: convertField(fields[6]),
+ Running: convertField(fields[7]),
+ Waiting: convertField(fields[8]),
+ Slices: convertField(fields[9]),
+ Domains: map[string]SchedDomain{},
+ }
+ currentCpu = fields[0]
+ cpus[currentCpu] = cpuStat
+ } else if strings.HasPrefix(line, "domain") {
+ // meaning of the fields: https://www.kernel.org/doc/html/latest/scheduler/sched-stats.html#domain-statistics
+ fields := strings.Fields(line)
+ i := 2
+ lbs := map[string]SchedLoadBalance{}
+ for _, idle := range idleness {
+ lb := SchedLoadBalance{
+ LBCount: convertField(fields[i]),
+ LBBalanced: convertField(fields[i+1]),
+ LBFailed: convertField(fields[i+2]),
+ LBImbalanced: convertField(fields[i+3]),
+ LBGained: convertField(fields[i+4]),
+ LBHotGain: convertField(fields[i+5]),
+ LBNoBusyQueue: convertField(fields[i+6]),
+ LBNoBusyGroup: convertField(fields[i+7]),
+ }
+ i = i + 8
+ lbs[idle] = lb
+ }
+ domain := SchedDomain{
+ LoadBalancers: lbs,
+ ActiveLoadBalanceCount: convertField(fields[26]),
+ ActiveLoadBalanceFailed: convertField(fields[27]),
+ ActiveLoadBalancePushed: convertField(fields[28]),
+ TryToWakeUpRemote: convertField(fields[35]),
+ TryToWakeUpMoveAffine: convertField(fields[36]),
+ TryToWakeUpMoveBalance: convertField(fields[37]),
+ }
+ c := cpus[currentCpu]
+ c.Domains[fields[0]] = domain
+ cpus[currentCpu] = c
+ }
+ }
+ return cpus, nil
+}
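+
+// ReadSchedStat is not wired into main above; a minimal sketch of
+// using it, following the same JSON-on-stdout convention, would be:
+//
+//	stats, err := ReadSchedStat()
+//	if err != nil {
+//		// handle the error
+//	}
+//	out, _ := json.Marshal(stats)
+//	fmt.Println(string(out))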
+
+func convertField(field string) uint64 {
+ val, err := strconv.ParseUint(field, 10, 64)
+ if err != nil {
+ return 0
+ }
+ return val
+}
diff --git a/packages/seqstat/default.nix b/packages/seqstat/default.nix
new file mode 100644
index 0000000..96cbd40
--- /dev/null
+++ b/packages/seqstat/default.nix
@@ -0,0 +1,25 @@
+{ lib, python3, stdenvNoCC }:
+
+stdenvNoCC.mkDerivation rec {
+ pname = "seqstat";
+ src = ./seqstat.py;
+ version = "0.1.0";
+
+ buildInputs = [ python3 ];
+ propagatedBuildInputs = [ python3 ];
+
+ dontUnpack = true;
+ dontBuild = true;
+
+ installPhase = ''
+ mkdir -p $out/bin
+ cp $src $out/bin/${pname}
+ '';
+
+ meta = with lib; {
+ description = "Display a histogram for a given sequence of numbers.";
+ license = with licenses; [ mit ];
+ platforms = platforms.unix;
+ maintainers = with maintainers; [ fcuny ];
+ };
+}
diff --git a/packages/seqstat/seqstat.py b/packages/seqstat/seqstat.py
new file mode 100755
index 0000000..55b6ecc
--- /dev/null
+++ b/packages/seqstat/seqstat.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+
+import argparse
+
+ticks = ["▁", "▂", "▃", "▄", "▅", "▆", "▇", "█"]
+
+
+def histogram(sequence):
+ min_val = min(sequence)
+ max_val = max(sequence)
+
+ # Work in fixed-point steps of 1/256 (the << 8) so the integer
+ # offsets keep some fractional precision before the final
+ # truncation to a tick index.
+ scale = (int(max_val - min_val) << 8) / (len(ticks) - 1)
+ if scale < 1:
+ scale = 1
+
+ return [ticks[int((int(i - min_val) << 8) / scale)] for i in sequence]
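+
+# A quick sanity check (the values are chosen so the arithmetic is
+# exact): histogram([0, 1, 2, 3, 4, 5, 6, 7]) maps each value to a
+# distinct tick, so joining the result gives "▁▂▃▄▅▆▇█". The
+# equivalent invocation is `seqstat 0 1 2 3 4 5 6 7`.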
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "numbers",
+ metavar="N",
+ type=float,
+ nargs="+",
+ help="a number in the sequence to chart",
+ )
+ args = parser.parse_args()
+ h = histogram(args.numbers)
+ print("".join(h))