author    Ben Sima <ben@bsima.me>  2020-10-26 17:52:56 -0400
committer Ben Sima <ben@bsima.me>  2020-10-26 17:52:56 -0400
commit    19f5402bec9f6346463b83536cc22d7f4525bc18
tree      2076903999b67f26e1ada3a8717325710f6b2ee4
parent    e069bc069f998e3158c826e20f7d94575907ae46
Capitalize rest of files
Diffstat (limited to 'Biz')
-rw-r--r--  Biz/Bild/Repl.nix        |   1 -
-rw-r--r--  Biz/Bild/ShellHook.sh    |   6 +-
-rw-r--r--  Biz/Cloud/git.nix        |   2 +-
-rw-r--r--  Biz/fathom.nix           | 109 ---
-rw-r--r--  Biz/firefox.nix          |  12 ---
-rw-r--r--  Biz/idea/duree-pitch.org |  80 ---
-rw-r--r--  Biz/idea/flash.org       |  36 ---
-rw-r--r--  Biz/users.nix            |   8 +-
8 files changed, 7 insertions(+), 247 deletions(-)
diff --git a/Biz/Bild/Repl.nix b/Biz/Bild/Repl.nix
deleted file mode 100644
index 94edf3a..0000000
--- a/Biz/Bild/Repl.nix
+++ /dev/null
@@ -1 +0,0 @@
-{ nixpkgs = import ./.; }
diff --git a/Biz/Bild/ShellHook.sh b/Biz/Bild/ShellHook.sh
index 75a0842..89751d3 100644
--- a/Biz/Bild/ShellHook.sh
+++ b/Biz/Bild/ShellHook.sh
@@ -45,9 +45,7 @@ function hero() {
   fi
 }
 
-function lint() {
-  alias lint=$BIZ_ROOT/Biz/lint.py
-}
+alias lint=$BIZ_ROOT/Biz/Lint.py
 
 function pie() {
   runghc Biz.Pie $@
@@ -74,7 +72,7 @@ function push() {
 # TODO: convert to haskell
 function ship() {
   set -ex
-  $BIZ_ROOT/Biz/lint.py
+  $BIZ_ROOT/Biz/Lint.py
   stuff=(${1})
   if [[ ${#stuff[@]} -eq 0 ]]
   then
diff --git a/Biz/Cloud/git.nix b/Biz/Cloud/git.nix
index 370f52a..6104ced 100644
--- a/Biz/Cloud/git.nix
+++ b/Biz/Cloud/git.nix
@@ -11,7 +11,7 @@
       extraGitoliteRc = ''
         $RC{SITE_INFO} = 'a computer is a bicycle for the mind.';
       '';
-      adminPubkey = builtins.readFile ../keys/ben.pub;
+      adminPubkey = builtins.readFile ../Keys/Ben.pub;
     };
   };
 }
diff --git a/Biz/fathom.nix b/Biz/fathom.nix
deleted file mode 100644
index 40e8b0b..0000000
--- a/Biz/fathom.nix
+++ /dev/null
@@ -1,109 +0,0 @@
-{ options
-, lib
-, config
-, pkgs
-, modulesPath
-, stdenv
-}:
-
-with lib;
-
-let
-  cfg = config.services.fathom;
-  pkgs.fathom = stdenv.mkDerivation rec {
-    name = "fathom-v${version}";
-    version = "1.2.1";
-    src = builtins.fetchurl {
-      url = "https://github.com/usefathom/fathom/releases/download/v${version}/fathom_${version}_linux_amd64.tar.gz";
-      sha256 = "0sfpxh2xrvz992k0ynib57zzpcr0ikga60552i14m13wppw836nh";
-    };
-    sourceRoot = ".";
-    dontBuild = true;
-    installPhase = ''
-      mkdir -p $out/bin
-      cp fathom $out/bin
-      cp LICENSE $out
-      cp README.md $out
-    '';
-  };
-in {
-  options.services.fathom = {
-    enable = lib.mkEnableOption "the Fathom Analytics service";
-
-    port = mkOption {
-      type = types.string;
-      default = "3000";
-      description = ''
-        The port on which Fathom will listen for
-        incoming HTTP traffic.
-      '';
-    };
-
-    gzip = mkOption {
-      type = types.bool;
-      default = true;
-      description = "Whether or not to enable gzip compression.";
-    };
-
-    debug = mkOption {
-      type = types.bool;
-      default = false;
-      description = "Whether or not to enable debug mode.";
-    };
-
-    dataDir = mkOption {
-      type = types.path;
-      default = "/var/lib/fathom";
-      description = "Fathom data directory";
-    };
-  };
-
-  config = mkIf cfg.enable {
-    systemd.services.fathom = {
-      wantedBy = [ "multi-user.target" ];
-      after = [ "network.target" ];
-
-      environment = {
-        FATHOM_SERVER_ADDR = cfg.port;
-        FATHOM_GZIP = builtins.toString cfg.gzip;
-        FATHOM_DEBUG = builtins.toString cfg.debug;
-        FATHOM_DATABASE_DRIVER = "sqlite3";
-        FATHOM_DATABASE_NAME = "${cfg.dataDir}/fathom.db";
-        FATHOM_SECRET = "random-secret-string";
-      };
-      preStart = ''
-        echo "[fathom] creating ${cfg.dataDir}"
-        mkdir -p ${cfg.dataDir}
-        chown -R fathom:fathom ${cfg.dataDir}
-        echo "[fathom] creating ${cfg.dataDir}/.env"
-        env | grep "^FATHOM" > ${cfg.dataDir}/.env
-      '';
-      description = ''
-        Fathom Analytics
-      '';
-
-      serviceConfig = {
-        Type = "simple";
-        User = "fathom";
-        Group = "fathom";
-        ExecStart = "${pkgs.fathom}/bin/fathom server";
-        KillSignal = "INT";
-        WorkingDirectory = cfg.dataDir;
-        Restart = "on-failure";
-        RestartSec = "10";
-        PermissionsStartOnly = "true";
-      };
-    };
-
-    environment.systemPackages = [ pkgs.fathom ];
-
-    users = {
-      groups = { fathom = {}; };
-      users.fathom = {
-        description = "Fathom daemon user";
-        home = cfg.dataDir;
-        group = "fathom";
-      };
-    };
-  };
-}
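
For reference, a host that imported the module deleted above would have enabled it with a few lines of NixOS configuration. A minimal sketch, assuming the module file still lived at its old path ./fathom.nix; the port and dataDir values just restate the defaults declared in the module:

    # Hypothetical consumer of the removed module (not part of this commit).
    { ... }:
    {
      imports = [ ./fathom.nix ];
      services.fathom = {
        enable = true;
        port = "3000";               # a string, matching the option's types.string
        dataDir = "/var/lib/fathom"; # same as the module's default
      };
    }
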
diff --git a/Biz/firefox.nix b/Biz/firefox.nix
deleted file mode 100644
index 12316fb..0000000
--- a/Biz/firefox.nix
+++ /dev/null
@@ -1,12 +0,0 @@
-{ ... }:
-
-{
-  services = {
-    firefox.syncserver = {
-      enable = true;
-      allowNewUsers = true;
-      listen.port = 5001;
-      publicUri = "http://firefoxsync.simatime.com";
-    };
-  };
-}
diff --git a/Biz/idea/duree-pitch.org b/Biz/idea/duree-pitch.org
deleted file mode 100644
index d4d9d6f..0000000
--- a/Biz/idea/duree-pitch.org
+++ /dev/null
@@ -1,80 +0,0 @@
-#+TITLE: Duree: automated universal database
-#+SUBTITLE: seeking pre-seed funding
-#+AUTHOR: Ben Sima <ben@bsima.me>
-#+EMAIL: ben@bsima.me
-#+OPTIONS: H:1 num:nil toc:nil
-#+LATEX_CLASS: article
-#+LATEX_CLASS_OPTIONS:
-#+LATEX_HEADER:
-#+LATEX_HEADER_EXTRA:
-#+LATEX_COMPILER: pdflatex
-#+DATE: \today
-#+startup: beamer
-#+LaTeX_CLASS: beamer
-#+LaTeX_CLASS_OPTIONS: [presentation,smaller]
-Start with this:
- - https://news.ycombinator.com/item?id=14605
- - https://news.ycombinator.com/item?id=14754
-Then build AI layers on top.
-* Problem
-Developers spend too much time managing database schemas. Every database
-migration is a risk to the business because of the high possibility of data
-corruption. If the data is modeled incorrectly at the beginning, it requires a
-lot of work (months of developer time) to gut the system and re-architect it.
-* Solution
-- Using machine learning and AI, we automatically detect the schema of your data.
-- Data can be dumped into a NoSQL database without the developer thinking much
- about structure, then we infer the structure automatically.
-- We can also generate a library of queries and provide an auto-generated client
- in the chosen language of our users.
-* Existing solutions
-- Libraries like alembic and migra (Python) make data migrations easier, but
- don't help you make queries or properly model data.
-- ORMs help with queries but don't give you much insight into the deep structure
- of your data (you still have to do manual joins) and don't help you properly
- model data.
-- GraphQL is the closest competitor, but requires manually writing types and
- knowing about the deep structure of your data. We automate both.
-
-* Unsolved problems
-- Unsure whether to build this on top of existing noSQL databases, or to develop
- our own data store. Could re-use an existing [[https://en.wikipedia.org/wiki/Category:Database_engines][database engine]] to provide an
- end-to-end database solution.
-* Key metrics
-- How much time do developers spend dealing with database migrations? What does
- this cost the business? We can decrease this, decreasing costs.
-- How costly are failed data migrations and backups? We reduce this risk.
-* Unique value proposition
-We can automate the backend data mangling for 90% of software applications.
-* Unfair advantage
-- I have domain expertise, having worked on similar schemaless database problems
- before.
-- First-mover advantage in this space. Everyone else is focused on making
- database migrations easier, we want to make them obsolete.
-* Channels
-- Cold calling MongoDB et al. users.
-* Customer segments
-- *Early adopters:* users of MongoDB and GraphQL who want to spend time writing
- application code, not managing database schemas. The MVP would be to generate
- the GraphQL code from their MongoDB database automatically.
-- Will expand support to other databases one by one. The tech could be used on
- any database... or we expand by offering our own data store.
-* Cost structure
-** Fixed costs
- - Initial development will take about 3 months (~$30k)
- - Each new database support will take a month or two of development.
-** Variable costs
- - Initial analysis will be compute-heavy.
- - Following analyses can be computationally cheap by building off of the
- existing model.
- - Customer acquisition could be expensive, will likely hire a small sales
- team.
-* Revenue streams
-- $100 per month per database analyzed
- - our hosted service connects to their database directly
- - includes client libraries via GraphQL
- - may increase this if it turns out we save companies a lot more than $100/mo,
- which is likely
-- enterprise licenses available for on-prem
- - allows them to have complete control over their database access
- - necessary for HIPAA/PCI compliance
diff --git a/Biz/idea/flash.org b/Biz/idea/flash.org
deleted file mode 100644
index 1c392f0..0000000
--- a/Biz/idea/flash.org
+++ /dev/null
@@ -1,36 +0,0 @@
-#+title: Flash
-#+description: a system for quickly testing business ideas
-
-- Each marketing iteration for a product requires some gear. A "gear" pack is just a YAML
- file with all data for a single flash test. It will include ad content,
- pricing info, links to necessary images, and so on.
- - even better: store these in a database? Depends on how often we need to edit them...
-- Data gets marshalled into a bunch of templates, one for each sales pipeline in
- the /Traction/ book by Gabriel Weinberg (7 pipelines total)
-- Each sales pipeline will have a number of integrations, we'll need at least
- one for each pipeline before going to production. E.g.:
- - google adwords
- - facebook ads
- - email lists (sendgrid)
- - simple marketing website
- - producthunt
- - etc
-- Pipelines will need to capture metrics on a pre-set schedule.
- - The integrations above must also pull performance numbers from AdWords and the other APIs.
- - Will need some kind of scheduled job queue or robot background worker to handle this.
- - A simple dashboard might also be useful, not sure.
-- Metrics determine the performance of a pipeline. After the defined trial
- duration, some pipelines will be dropped. The high-performing pipelines we
- double-down on.
-- Metrics to watch:
- - conversion rate
- - usage time - minutes spent on site/app
- - money spent per customer
- - see baremetrics for more ideas
-- This can eventually be integrated into a larger product design platform (what Sam
- Altman calls a "product improvement engine" in his playbook - PIE?).
- - metric improvement can be plotted on a relative scale
- - "If you improve your product 5% every week, it will really compound." - Sam
- - PIE will differ from Flash in that Flash is only for the early stages of a
- product - sell it before you build it. PIE will operate on existing products
- to make them better.
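
The "gear" pack that the removed flash.org describes is essentially one small record of ad copy, pricing, and asset links per flash test. A hypothetical sketch of one, written as a Nix attrset for consistency with the rest of this repo even though the note proposes YAML; every field name here is invented for illustration, not taken from the source:

    # Hypothetical "gear" pack for a single flash test (all field names illustrative).
    {
      product   = "example-widget";
      headline  = "Widgets, but faster";
      adCopy    = "Stop hand-rolling widgets.";
      priceUsd  = 49;
      images    = [ "https://example.com/widget-hero.png" ];
      pipelines = [ "adwords" "facebook-ads" "sendgrid" ];  # from the pipeline list above
      trialDays = 14;
    }
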
diff --git a/Biz/users.nix b/Biz/users.nix
index b52043e..c7c4041 100644
--- a/Biz/users.nix
+++ b/Biz/users.nix
@@ -14,25 +14,25 @@
     deploy = {
       isNormalUser = true;
       home = "/home/deploy";
-      openssh.authorizedKeys.keyFiles = [ ./keys/deploy.pub ];
+      openssh.authorizedKeys.keyFiles = [ ./Keys/Deploy.pub ];
       extraGroups = [ "wheel" ];
     };
     #
     # humans
     #
-    root.openssh.authorizedKeys.keyFiles = [ ./keys/ben.pub ];
+    root.openssh.authorizedKeys.keyFiles = [ ./Keys/Ben.pub ];
     ben = {
       description = "Ben Sima";
       isNormalUser = true;
       home = "/home/ben";
-      openssh.authorizedKeys.keyFiles = [ ./keys/ben.pub ];
+      openssh.authorizedKeys.keyFiles = [ ./Keys/Ben.pub ];
       extraGroups = [ "wheel" "networkmanager" "docker" ];
     };
     nick = {
       description = "Nick Sima";
       isNormalUser = true;
       home = "/home/nick";
-      openssh.authorizedKeys.keyFiles = [ ./keys/nick.pub ];
+      openssh.authorizedKeys.keyFiles = [ ./Keys/Nick.pub ];
       extraGroups = [ "docker" ];
     };
   };