It is important to consider that in the future I might want to have bild do a module-by-module nix build of programs, but I'm not sure how that would affect my choice here.
--- Que/Apidocs.md | 3 + Que/Client.py | 186 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ Que/Host.hs | 2 +- Que/Index.md | 73 +++++++++++++++++++++ Que/Prod.nix | 35 ++++++++-- Que/Quescripts.md | 50 +++++++++++++++ Que/Site.hs | 2 +- Que/Site.nix | 2 + Que/Style.css | 136 +++++++++++++++++++++++++++++++++++++++ Que/Tutorial.md | 53 ++++++++++++++++ Que/apidocs.md | 3 - Que/client.py | 186 ------------------------------------------------------ Que/index.md | 73 --------------------- Que/quescripts.md | 50 --------------- Que/style.css | 136 --------------------------------------- Que/tutorial.md | 53 ---------------- 16 files changed, 535 insertions(+), 508 deletions(-) create mode 100644 Que/Apidocs.md create mode 100755 Que/Client.py create mode 100644 Que/Index.md create mode 100644 Que/Quescripts.md create mode 100644 Que/Style.css create mode 100644 Que/Tutorial.md delete mode 100644 Que/apidocs.md delete mode 100755 Que/client.py delete mode 100644 Que/index.md delete mode 100644 Que/quescripts.md delete mode 100644 Que/style.css delete mode 100644 Que/tutorial.md (limited to 'Que') diff --git a/Que/Apidocs.md b/Que/Apidocs.md new file mode 100644 index 0000000..f400889 --- /dev/null +++ b/Que/Apidocs.md @@ -0,0 +1,3 @@ +% que.run Api Docs + +coming soon diff --git a/Que/Client.py b/Que/Client.py new file mode 100755 index 0000000..1063eb8 --- /dev/null +++ b/Que/Client.py @@ -0,0 +1,186 @@ +#!/usr/bin/env python3 +""" +simple client for que.run +""" + +import argparse +import configparser +import functools +import http.client +import logging +import os +import subprocess +import sys +import time +import urllib.parse +import urllib.request as request + +MAX_TIMEOUT = 99999999 # basically never timeout + + +def auth(args): + "Returns the auth key for the given ns from ~/.config/que.conf" + logging.debug("auth") + namespace = args.target.split("/")[0] + if namespace == "pub": + return None + conf_file = os.path.expanduser("~/.config/que.conf") + if 
not os.path.exists(conf_file): + sys.exit("you need a ~/.config/que.conf") + cfg = configparser.ConfigParser() + cfg.read(conf_file) + return cfg[namespace]["key"] + + +def autodecode(bytestring): + """Attempt to decode bytes `bs` into common codecs, preferably utf-8. If + no decoding is available, just return the raw bytes. + + For all available codecs, see: + + + """ + logging.debug("autodecode") + codecs = ["utf-8", "ascii"] + for codec in codecs: + try: + return bytestring.decode(codec) + except UnicodeDecodeError: + pass + return bytestring + + +def retry(exception, tries=4, delay=3, backoff=2): + "Decorator for retrying an action." + + def decorator(func): + @functools.wraps(func) + def func_retry(*args, **kwargs): + mtries, mdelay = tries, delay + while mtries > 1: + try: + return func(*args, **kwargs) + except exception as ex: + logging.debug(ex) + logging.debug("retrying...") + time.sleep(mdelay) + mtries -= 1 + mdelay *= backoff + return func(*args, **kwargs) + + return func_retry + + return decorator + + +def send(args): + "Send a message to the que." + logging.debug("send") + key = auth(args) + data = args.infile + req = request.Request(f"{args.host}/{args.target}") + req.add_header("User-AgenT", "Que/Client") + if key: + req.add_header("Authorization", key) + if args.serve: + logging.debug("serve") + while not time.sleep(1): + request.urlopen(req, data=data, timeout=MAX_TIMEOUT) + + else: + request.urlopen(req, data=data, timeout=MAX_TIMEOUT) + + +def then(args, msg): + "Perform an action when passed `--then`." + if args.then: + logging.debug("then") + subprocess.run( + args.then.format(msg=msg, que=args.target), check=False, shell=True, + ) + + +@retry(http.client.IncompleteRead, tries=10, delay=5, backoff=1) +@retry(http.client.RemoteDisconnected, tries=10, delay=2, backoff=2) +def recv(args): + "Receive a message from the que." 
+ logging.debug("recv on: %s", args.target) + params = urllib.parse.urlencode({"poll": args.poll}) + req = request.Request(f"{args.host}/{args.target}?{params}") + req.add_header("User-Agent", "Que/Client") + key = auth(args) + if key: + req.add_header("Authorization", key) + with request.urlopen(req) as _req: + if args.poll: + logging.debug("poll") + while not time.sleep(1): + logging.debug("reading") + msg = autodecode(_req.readline()) + logging.debug("read") + print(msg, end="") + then(args, msg) + else: + msg = autodecode(_req.read()) + print(msg) + then(args, msg) + + +def get_args(): + "Command line parser" + cli = argparse.ArgumentParser(description=__doc__) + cli.add_argument("--debug", action="store_true", help="log to stderr") + cli.add_argument( + "--host", default="http://que.run", help="where que-server is running" + ) + cli.add_argument( + "--poll", default=False, action="store_true", help="stream data from the que" + ) + cli.add_argument( + "--then", + help=" ".join( + [ + "when polling, run this shell command after each response,", + "presumably for side effects," + r"replacing '{que}' with the target and '{msg}' with the body of the response", + ] + ), + ) + cli.add_argument( + "--serve", + default=False, + action="store_true", + help=" ".join( + [ + "when posting to the que, do so continuously in a loop.", + "this can be used for serving a webpage or other file continuously", + ] + ), + ) + cli.add_argument( + "target", help="namespace and path of the que, like 'ns/path/subpath'" + ) + cli.add_argument( + "infile", + nargs="?", + type=argparse.FileType("rb"), + help="data to put on the que. 
Use '-' for stdin, otherwise should be a readable file", + ) + return cli.parse_args() + + +if __name__ == "__main__": + ARGV = get_args() + if ARGV.debug: + logging.basicConfig( + format="%(asctime)s %(message)s", + level=logging.DEBUG, + datefmt="%Y.%m.%d..%H.%M.%S", + ) + try: + if ARGV.infile: + send(ARGV) + else: + recv(ARGV) + except KeyboardInterrupt: + sys.exit(0) diff --git a/Que/Host.hs b/Que/Host.hs index 3303709..b8e7a1a 100644 --- a/Que/Host.hs +++ b/Que/Host.hs @@ -11,7 +11,7 @@ -- - -- - sorta: and -- --- : exe que-server +-- : out que-server -- -- : dep async -- : dep envy diff --git a/Que/Index.md b/Que/Index.md new file mode 100644 index 0000000..a9db12e --- /dev/null +++ b/Que/Index.md @@ -0,0 +1,73 @@ +% que.run + +que.run is the concurrent, async runtime in the cloud + + - runtime concurrency anywhere you have a network connection + - multilanguage communicating sequential processes + - add Go-like channels to any language + - connect your microservices together with the simplest possible + plumbing + - async programming as easy as running two terminal commands + +HTTP routes on `que.run` are Golang-like channels with a namespace and a +path. For example: `https://que.run/pub/path/subpath`. + +## Quickstart + +There is a simple script `que` that acts as a client you can use to +interact with the `que.run` service. + +Download it to somewhere on your `$PATH` and make it executable: + + curl https://que.run/_/client > ~/bin/que + chmod +x ~/bin/que + que --help + +The client requires a recent version of Python 3. + +## Powerup + +que.run is free for limited use, but the real power of an asynchronous, +concurrent runtime in the cloud is unlocked with some extra power-user +features. 
+ +- Free + - security by obscurity + - all protocols and data formats supported + - bandwidth and message sizes limited + - concurrent connections limited + - request rate limited +- Power + - protect your data with private namespaces + - remove bandwidth and size limits + - private dashboard to see all of your active ques + - 99.999% uptime +- Pro + - add durability to your ques so messages are never lost + - powerful batch api + - incredible query api + - Linux FUSE filesystem integration +- Enterprise + - all of the Power & Pro features + - on-prem deployment + - advanced que performance monitoring + - SLA for support from que.run experts + +Email `ben@bsima.me` if you want to sign up for the Power, Pro, or +Enterprise packages. + +## Quescripts + +We are collecting a repository of scripts that make awesome use of que: + +- remote desktop notifications +- two-way communication with your phone +- ephemeral, serverless chat rooms +- collaborative jukebox + +See the scripts + +## Docs + +- [tutorial](/_/tutorial) +- [api docs](/_/apidocs) diff --git a/Que/Prod.nix b/Que/Prod.nix index 23c6f0a..b755d7c 100644 --- a/Que/Prod.nix +++ b/Que/Prod.nix @@ -1,5 +1,22 @@ -{ config, pkgs, lib, ... 
}: -{ +{ bild, lib }: + +# The production server for que.run + +bild.os { + imports = [ + ../Biz/OsBase.nix + ../Biz/Packages.nix + ../Biz/Users.nix + ./Host.nix + ./Site.nix + ]; + networking.hostName = "prod-que"; + networking.domain = "que.run"; + services.que-server = { + enable = true; + port = 80; + package = bild.ghc ./Host.hs; + }; boot.loader.grub.device = "/dev/vda"; fileSystems."/" = { device = "/dev/vda1"; fsType = "ext4"; }; swapDevices = [ @@ -30,7 +47,15 @@ }; }; }; - services.udev.extraRules = '' - ATTR{address}=="7a:92:a5:c6:db:c3", NAME="eth0" - ''; + services = { + que-website = { + enable = true; + namespace = "_"; + package = bild.ghc ./Site.hs; + }; + + udev.extraRules = '' + ATTR{address}=="7a:92:a5:c6:db:c3", NAME="eth0" + ''; + }; } diff --git a/Que/Quescripts.md b/Que/Quescripts.md new file mode 100644 index 0000000..77e7004 --- /dev/null +++ b/Que/Quescripts.md @@ -0,0 +1,50 @@ +% Quescripts + +## Remote desktop notifications + +Lets say we are running a job that takes a long time, maybe we are +compiling or running a large test suite. Instead of watching the +terminal until it completes, or flipping back to check on it every so +often, we can create a listener that displays a popup notification when +the job finishes. + +In one terminal run the listener: + + que pub/notify --then "notify-send '{que}' '{msg}'" + +In some other terminal run the job that takes forever: + + runtests ; echo "tests are done" | que pub/notify - + +When terminal 2 succeeds, terminal 1 will print "tests are done", then +call the `notify-send` command, which displays a notification toast in +Linux with title "`pub/notify`" and content "`tests are done`". + +Que paths are multi-producer and multi-consumer, so you can add as many +terminals as you want. + +On macOS you could use something like this (just watch your quotes): + + osascript -e "display notification \"{msg}\" with title \"{que}\"" + +in place of notify-send. 
+ +## Ephemeral, serverless chat rooms + +coming soon + +## Collaborative jukebox + +It's surprisingly easy to make a collaborative jukebox. + +First start up a music player: + + que --poll pub/music --then "playsong '{msg}'" + +where `playsong` is a script that plays a file from data streaming to +`stdin`. For example [vlc](https://www.videolan.org/vlc/) does this when +you run it like `vlc -`. + +Then, anyone can submit songs with: + + que pub/music song.mp3 diff --git a/Que/Site.hs b/Que/Site.hs index 794dd04..5d2dbb8 100644 --- a/Que/Site.hs +++ b/Que/Site.hs @@ -5,7 +5,7 @@ -- | spawns a few processes that serve the que.run website -- --- : exe que-website +-- : out que-website -- -- : dep async -- : dep config-ini diff --git a/Que/Site.nix b/Que/Site.nix index 685b3a6..ba2eeb2 100644 --- a/Que/Site.nix +++ b/Que/Site.nix @@ -5,6 +5,8 @@ , modulesPath }: + + let cfg = config.services.que-website; static = pkgs.stdenv.mkDerivation { diff --git a/Que/Style.css b/Que/Style.css new file mode 100644 index 0000000..f8d1ca4 --- /dev/null +++ b/Que/Style.css @@ -0,0 +1,136 @@ + + diff --git a/Que/Tutorial.md b/Que/Tutorial.md new file mode 100644 index 0000000..6542ad3 --- /dev/null +++ b/Que/Tutorial.md @@ -0,0 +1,53 @@ +% que.run Tutorial + +## Ques + +A que is a multi-consumer, multi-producer channel available anywhere you +have a network connection. If you are familiar with Go channels, they +are pretty much the same thing. Put some values in one end, and take +them out the other end at a different time, or in a different process. + +Ques are created dynamically for every HTTP request you make. Here we +use the `que` client to create a new que at the path `pub/new-que`: + + que pub/new-que + +The `que` client is useful, but you can use anything to make the HTTP +request, for example here's the same thing with curl: + + curl https://que.run/pub/new-que + +These requests will block until a value is placed on the other +end. Let's do that now. 
In a separate terminal: + + echo "hello world" | que pub/new-que - + +This tells the `que` client to read the value from `stdin` and then send +it to `example/new-que`. Or with curl: + + curl https://que.run/pub/new-que -d "hello world" + +This will succeed immediately and send the string "`hello world`" over +the channel, which will be received and printed by the listener in the +other terminal. + +You can have as many producers and consumers attached to a channel as +you want. + +## Namespaces + +Ques are organized into namespaces, identified by the first fragment of +the path. In the above commands we used `pub` as the namespace, which is +a special publically-writable namespace. The other special namespace is +`_` which is reserved for internal use only. You can't write to the `_` +namespace. + +To use other namespaces and add authentication/access controls, you can +[sign up for the Power package](/_/index). + +## Events + +Just reading and writing data isn't very exciting, so let's throw in +some events. We can very quickly put together a job processor. 
+ + que pub/new-que --then "./worker.sh '{msg}'" diff --git a/Que/apidocs.md b/Que/apidocs.md deleted file mode 100644 index f400889..0000000 --- a/Que/apidocs.md +++ /dev/null @@ -1,3 +0,0 @@ -% que.run Api Docs - -coming soon diff --git a/Que/client.py b/Que/client.py deleted file mode 100755 index 1063eb8..0000000 --- a/Que/client.py +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/env python3 -""" -simple client for que.run -""" - -import argparse -import configparser -import functools -import http.client -import logging -import os -import subprocess -import sys -import time -import urllib.parse -import urllib.request as request - -MAX_TIMEOUT = 99999999 # basically never timeout - - -def auth(args): - "Returns the auth key for the given ns from ~/.config/que.conf" - logging.debug("auth") - namespace = args.target.split("/")[0] - if namespace == "pub": - return None - conf_file = os.path.expanduser("~/.config/que.conf") - if not os.path.exists(conf_file): - sys.exit("you need a ~/.config/que.conf") - cfg = configparser.ConfigParser() - cfg.read(conf_file) - return cfg[namespace]["key"] - - -def autodecode(bytestring): - """Attempt to decode bytes `bs` into common codecs, preferably utf-8. If - no decoding is available, just return the raw bytes. - - For all available codecs, see: - - - """ - logging.debug("autodecode") - codecs = ["utf-8", "ascii"] - for codec in codecs: - try: - return bytestring.decode(codec) - except UnicodeDecodeError: - pass - return bytestring - - -def retry(exception, tries=4, delay=3, backoff=2): - "Decorator for retrying an action." 
- - def decorator(func): - @functools.wraps(func) - def func_retry(*args, **kwargs): - mtries, mdelay = tries, delay - while mtries > 1: - try: - return func(*args, **kwargs) - except exception as ex: - logging.debug(ex) - logging.debug("retrying...") - time.sleep(mdelay) - mtries -= 1 - mdelay *= backoff - return func(*args, **kwargs) - - return func_retry - - return decorator - - -def send(args): - "Send a message to the que." - logging.debug("send") - key = auth(args) - data = args.infile - req = request.Request(f"{args.host}/{args.target}") - req.add_header("User-AgenT", "Que/Client") - if key: - req.add_header("Authorization", key) - if args.serve: - logging.debug("serve") - while not time.sleep(1): - request.urlopen(req, data=data, timeout=MAX_TIMEOUT) - - else: - request.urlopen(req, data=data, timeout=MAX_TIMEOUT) - - -def then(args, msg): - "Perform an action when passed `--then`." - if args.then: - logging.debug("then") - subprocess.run( - args.then.format(msg=msg, que=args.target), check=False, shell=True, - ) - - -@retry(http.client.IncompleteRead, tries=10, delay=5, backoff=1) -@retry(http.client.RemoteDisconnected, tries=10, delay=2, backoff=2) -def recv(args): - "Receive a message from the que." 
- logging.debug("recv on: %s", args.target) - params = urllib.parse.urlencode({"poll": args.poll}) - req = request.Request(f"{args.host}/{args.target}?{params}") - req.add_header("User-Agent", "Que/Client") - key = auth(args) - if key: - req.add_header("Authorization", key) - with request.urlopen(req) as _req: - if args.poll: - logging.debug("poll") - while not time.sleep(1): - logging.debug("reading") - msg = autodecode(_req.readline()) - logging.debug("read") - print(msg, end="") - then(args, msg) - else: - msg = autodecode(_req.read()) - print(msg) - then(args, msg) - - -def get_args(): - "Command line parser" - cli = argparse.ArgumentParser(description=__doc__) - cli.add_argument("--debug", action="store_true", help="log to stderr") - cli.add_argument( - "--host", default="http://que.run", help="where que-server is running" - ) - cli.add_argument( - "--poll", default=False, action="store_true", help="stream data from the que" - ) - cli.add_argument( - "--then", - help=" ".join( - [ - "when polling, run this shell command after each response,", - "presumably for side effects," - r"replacing '{que}' with the target and '{msg}' with the body of the response", - ] - ), - ) - cli.add_argument( - "--serve", - default=False, - action="store_true", - help=" ".join( - [ - "when posting to the que, do so continuously in a loop.", - "this can be used for serving a webpage or other file continuously", - ] - ), - ) - cli.add_argument( - "target", help="namespace and path of the que, like 'ns/path/subpath'" - ) - cli.add_argument( - "infile", - nargs="?", - type=argparse.FileType("rb"), - help="data to put on the que. 
Use '-' for stdin, otherwise should be a readable file", - ) - return cli.parse_args() - - -if __name__ == "__main__": - ARGV = get_args() - if ARGV.debug: - logging.basicConfig( - format="%(asctime)s %(message)s", - level=logging.DEBUG, - datefmt="%Y.%m.%d..%H.%M.%S", - ) - try: - if ARGV.infile: - send(ARGV) - else: - recv(ARGV) - except KeyboardInterrupt: - sys.exit(0) diff --git a/Que/index.md b/Que/index.md deleted file mode 100644 index a9db12e..0000000 --- a/Que/index.md +++ /dev/null @@ -1,73 +0,0 @@ -% que.run - -que.run is the concurrent, async runtime in the cloud - - - runtime concurrency anywhere you have a network connection - - multilanguage communicating sequential processes - - add Go-like channels to any language - - connect your microservices together with the simplest possible - plumbing - - async programming as easy as running two terminal commands - -HTTP routes on `que.run` are Golang-like channels with a namespace and a -path. For example: `https://que.run/pub/path/subpath`. - -## Quickstart - -There is a simple script `que` that acts as a client you can use to -interact with the `que.run` service. - -Download it to somewhere on your `$PATH` and make it executable: - - curl https://que.run/_/client > ~/bin/que - chmod +x ~/bin/que - que --help - -The client requires a recent version of Python 3. - -## Powerup - -que.run is free for limited use, but the real power of an asynchronous, -concurrent runtime in the cloud is unlocked with some extra power-user -features. 
- -- Free - - security by obscurity - - all protocols and data formats supported - - bandwidth and message sizes limited - - concurrent connections limited - - request rate limited -- Power - - protect your data with private namespaces - - remove bandwidth and size limits - - private dashboard to see all of your active ques - - 99.999% uptime -- Pro - - add durability to your ques so messages are never lost - - powerful batch api - - incredible query api - - Linux FUSE filesystem integration -- Enterprise - - all of the Power & Pro features - - on-prem deployment - - advanced que performance monitoring - - SLA for support from que.run experts - -Email `ben@bsima.me` if you want to sign up for the Power, Pro, or -Enterprise packages. - -## Quescripts - -We are collecting a repository of scripts that make awesome use of que: - -- remote desktop notifications -- two-way communication with your phone -- ephemeral, serverless chat rooms -- collaborative jukebox - -See the scripts - -## Docs - -- [tutorial](/_/tutorial) -- [api docs](/_/apidocs) diff --git a/Que/quescripts.md b/Que/quescripts.md deleted file mode 100644 index 77e7004..0000000 --- a/Que/quescripts.md +++ /dev/null @@ -1,50 +0,0 @@ -% Quescripts - -## Remote desktop notifications - -Lets say we are running a job that takes a long time, maybe we are -compiling or running a large test suite. Instead of watching the -terminal until it completes, or flipping back to check on it every so -often, we can create a listener that displays a popup notification when -the job finishes. - -In one terminal run the listener: - - que pub/notify --then "notify-send '{que}' '{msg}'" - -In some other terminal run the job that takes forever: - - runtests ; echo "tests are done" | que pub/notify - - -When terminal 2 succeeds, terminal 1 will print "tests are done", then -call the `notify-send` command, which displays a notification toast in -Linux with title "`pub/notify`" and content "`tests are done`". 
- -Que paths are multi-producer and multi-consumer, so you can add as many -terminals as you want. - -On macOS you could use something like this (just watch your quotes): - - osascript -e "display notification \"{msg}\" with title \"{que}\"" - -in place of notify-send. - -## Ephemeral, serverless chat rooms - -coming soon - -## Collaborative jukebox - -It's surprisingly easy to make a collaborative jukebox. - -First start up a music player: - - que --poll pub/music --then "playsong '{msg}'" - -where `playsong` is a script that plays a file from data streaming to -`stdin`. For example [vlc](https://www.videolan.org/vlc/) does this when -you run it like `vlc -`. - -Then, anyone can submit songs with: - - que pub/music song.mp3 diff --git a/Que/style.css b/Que/style.css deleted file mode 100644 index f8d1ca4..0000000 --- a/Que/style.css +++ /dev/null @@ -1,136 +0,0 @@ - - diff --git a/Que/tutorial.md b/Que/tutorial.md deleted file mode 100644 index 6542ad3..0000000 --- a/Que/tutorial.md +++ /dev/null @@ -1,53 +0,0 @@ -% que.run Tutorial - -## Ques - -A que is a multi-consumer, multi-producer channel available anywhere you -have a network connection. If you are familiar with Go channels, they -are pretty much the same thing. Put some values in one end, and take -them out the other end at a different time, or in a different process. - -Ques are created dynamically for every HTTP request you make. Here we -use the `que` client to create a new que at the path `pub/new-que`: - - que pub/new-que - -The `que` client is useful, but you can use anything to make the HTTP -request, for example here's the same thing with curl: - - curl https://que.run/pub/new-que - -These requests will block until a value is placed on the other -end. Let's do that now. In a separate terminal: - - echo "hello world" | que pub/new-que - - -This tells the `que` client to read the value from `stdin` and then send -it to `example/new-que`. 
Or with curl: - - curl https://que.run/pub/new-que -d "hello world" - -This will succeed immediately and send the string "`hello world`" over -the channel, which will be received and printed by the listener in the -other terminal. - -You can have as many producers and consumers attached to a channel as -you want. - -## Namespaces - -Ques are organized into namespaces, identified by the first fragment of -the path. In the above commands we used `pub` as the namespace, which is -a special publically-writable namespace. The other special namespace is -`_` which is reserved for internal use only. You can't write to the `_` -namespace. - -To use other namespaces and add authentication/access controls, you can -[sign up for the Power package](/_/index). - -## Events - -Just reading and writing data isn't very exciting, so let's throw in -some events. We can very quickly put together a job processor. - - que pub/new-que --then "./worker.sh '{msg}'" -- cgit v1.2.3