To block the insufferable infinite-scroll "social" feed that LinkedIn renders when one logs in, add this rule to uBlock Origin's "My filters" list:
www.linkedin.com##main[aria-label="Main Feed"] .scaffold-finite-scroll
# Self-contained Nomad job file for deploying a Discourse instance. Uses Postgres, Redis, and Discourse Docker containers. SSL-enabled.
# Create a vars.hcl file and set values for the variable names below (one per line), e.g.: hostname=yoursite.com
# Then run `nomad run -var-file=vars.hcl discourse-forum.hcl`

# Global database configuration variables.
variable "hostname" {}
variable "developer_emails" {}
variable "db_name" {}
variable "db_username" {}
To block the insufferable infinite-scroll "social" feed that LinkedIn renders when one logs in, add this rule to uBlock Origin's "My filters" list:
www.linkedin.com##main[aria-label="Main Feed"] .scaffold-finite-scroll
| package main | |
| import ( | |
| "fmt" | |
| "time" | |
| ) | |
| func main() { | |
| fmt.Println(unixToTime(time.Now().UnixMilli())) | |
| } |
| package main | |
| import ( | |
| "fmt" | |
| ) | |
| var ( | |
| ones = []string{ | |
| "zero", "one", "two", "three", "four", | |
| "five", "six", "seven", "eight", "nine", |
#!/usr/bin/env bash
# Extract the user, password, host, and port components from a Postgres
# connection URL of the form:
#   postgres://USER:PASSWORD@HOST:PORT/DBNAME
DATABASE_URL="postgres://MyPostgresUser:MyPostgresPassword@192.168.0.1:5432/MyPostgresDB"

# Parse with a single anchored bash regex instead of four grep/cut
# pipelines. The old `grep -oP "postgres://.*:\K(.+?)@"` pattern used a
# greedy `.*`, which silently mis-extracted any password containing a
# ':' (it kept only the text after the last colon). Anchoring each
# component ([^:] for user, [^@] for password) fixes that, and no
# external processes are spawned. Components are exposed in BASH_REMATCH.
if [[ $DATABASE_URL =~ ^postgres://([^:/@]+):([^@]*)@([^:/@]+):([0-9]+)/ ]]; then
    DATABASE_USER=${BASH_REMATCH[1]}
    DATABASE_PASSWORD=${BASH_REMATCH[2]}
    DATABASE_HOST=${BASH_REMATCH[3]}
    DATABASE_PORT=${BASH_REMATCH[4]}
else
    # Keep best-effort semantics (variables stay unset), but say so
    # instead of failing silently like the original pipelines did.
    echo "warning: could not parse DATABASE_URL as a postgres:// URL" >&2
fi
| package main | |
| import ( | |
| "encoding/json" | |
| "strings" | |
| "testing" | |
| ) | |
| var nested map[string]interface{} |
| package cacheman | |
| import ( | |
| "errors" | |
| "fmt" | |
| "reflect" | |
| "strconv" | |
| "strings" | |
| ) |
-- Migration: reshape the media table and (re)create the settings store.

-- Remove the pixel-dimension columns from media.
ALTER TABLE media DROP COLUMN width, DROP COLUMN height;
-- NOTE(review): adding a NOT NULL column with no DEFAULT fails in
-- Postgres if media already contains rows; confirm the table is empty
-- at this point in the migration, or supply a DEFAULT.
ALTER TABLE media ADD COLUMN provider TEXT NOT NULL;

-- Rebuild the settings key/value table from scratch. This is
-- destructive: CASCADE drops any dependent objects as well.
DROP TABLE IF EXISTS settings CASCADE;
CREATE TABLE settings (
key TEXT NOT NULL UNIQUE,
value JSONB NOT NULL DEFAULT '{}',
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
-- NOTE(review): in Postgres the UNIQUE constraint on key already
-- creates a unique index, so idx_settings_key is likely redundant --
-- verify before keeping it.
DROP INDEX IF EXISTS idx_settings_key; CREATE INDEX idx_settings_key ON settings(key);
| INSERT INTO settings (key, value) VALUES |
| // program | |
| package main | |
| import ( | |
| "fmt" | |
| "log" | |
| "os" | |
| "os/signal" | |
| "syscall" | |
| "time" |
Unable to get cluster admin: kafka: controller is not available
If you get this error from kaf while targeting a Kafka cluster running
locally, check your hosts file before spending hours debugging
connectivity: make sure it contains an entry that resolves the system's
hostname to 127.0.0.1.