Refactor repository layout and convert build system to Mage.

This commit implements a massive refactor of the repository, and
moves the build system over to use Mage (magefile.org) which should
allow seamless building across multiple platforms.
This commit is contained in:
Will Rouesnel
2018-02-23 01:55:49 +11:00
parent 3e6cf08dc5
commit 989489096e
269 changed files with 35309 additions and 2017 deletions

View File

@@ -0,0 +1,135 @@
package main
import (
"database/sql"
"errors"
"fmt"
"math"
"strconv"
"strings"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
// querySettings queries the pg_settings view and emits one prometheus gauge
// per runtime variable of vartype bool, integer or real. Returns an error if
// the query or row iteration fails.
func querySettings(ch chan<- prometheus.Metric, db *sql.DB) error {
	log.Debugln("Querying pg_setting view")

	// pg_settings docs: https://www.postgresql.org/docs/current/static/view-pg-settings.html
	//
	// NOTE: If you add more vartypes here, you must update the supported
	// types in normaliseUnit() below
	query := "SELECT name, setting, COALESCE(unit, ''), short_desc, vartype FROM pg_settings WHERE vartype IN ('bool', 'integer', 'real');"

	rows, err := db.Query(query)
	if err != nil {
		// Sprintf rather than Sprintln: no trailing newline or stray spaces
		// in the error text.
		return errors.New(fmt.Sprintf("Error running query on database (%v): %v", namespace, err))
	}
	defer rows.Close() // nolint: errcheck

	for rows.Next() {
		s := &pgSetting{}

		err = rows.Scan(&s.name, &s.setting, &s.unit, &s.shortDesc, &s.vartype)
		if err != nil {
			return errors.New(fmt.Sprintf("Error retrieving rows (%v): %v", namespace, err))
		}

		ch <- s.metric()
	}

	// rows.Next() returning false can mask an iteration error; surface it
	// rather than silently reporting success.
	if err := rows.Err(); err != nil {
		return errors.New(fmt.Sprintf("Error iterating over pg_settings rows: %v", err))
	}

	return nil
}
// pgSetting represents a PostgreSQL runtime variable as returned by the
// pg_settings view.
type pgSetting struct {
	// All columns are scanned as text; numeric settings are parsed later
	// in normaliseUnit().
	name, setting, unit, shortDesc, vartype string
}
// metric renders the setting as a prometheus gauge, normalising numeric
// units where applicable. It panics on unexpected vartypes or units, since
// those indicate a programming error rather than a runtime condition.
func (s *pgSetting) metric() prometheus.Metric {
	subsystem := "settings"
	name := strings.Replace(s.name, ".", "_", -1)
	shortDesc := s.shortDesc

	var val float64
	switch s.vartype {
	case "bool":
		// Anything other than "on" maps to the zero value 0.
		if s.setting == "on" {
			val = 1
		}
	case "integer", "real":
		normVal, unit, err := s.normaliseUnit()
		if err != nil {
			// Panic, since we should recognise all units
			// and don't want to silently exclude metrics
			panic(err)
		}
		val = normVal

		if len(unit) > 0 {
			// Suffix the metric name with the normalised unit and note
			// the conversion in the help text.
			name = fmt.Sprintf("%s_%s", name, unit)
			shortDesc = fmt.Sprintf("%s [Units converted to %s.]", shortDesc, unit)
		}
	default:
		// Panic because we got a type we didn't ask for
		panic(fmt.Sprintf("Unsupported vartype %q", s.vartype))
	}

	desc := newDesc(subsystem, name, shortDesc)
	return prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, val)
}
// normaliseUnit parses the setting value and converts time units to seconds
// and size units to bytes. A value of -1 is passed through unmodified, as
// PostgreSQL uses it as a "disabled"/"unlimited" sentinel for some settings.
func (s *pgSetting) normaliseUnit() (val float64, unit string, err error) {
	val, err = strconv.ParseFloat(s.setting, 64)
	if err != nil {
		return val, unit, fmt.Errorf("Error converting setting %q value %q to float: %s", s.name, s.setting, err)
	}

	// Units defined in: https://www.postgresql.org/docs/current/static/config-setting.html
	switch s.unit {
	case "":
		// Dimensionless setting: report the raw value.
		return
	case "ms", "s", "min", "h", "d":
		unit = "seconds"
	case "kB", "MB", "GB", "TB", "8kB", "16kB", "16MB":
		unit = "bytes"
	default:
		err = fmt.Errorf("Unknown unit for runtime variable: %q", s.unit)
		return
	}

	// -1 is special, don't modify the value
	if val == -1 {
		return
	}

	// Scale factors for each recognised unit; every entry is exactly
	// representable in float64, so multiplying matches the original
	// case-by-case arithmetic bit for bit.
	scale := map[string]float64{
		"s":    1,
		"min":  60,
		"h":    60 * 60,
		"d":    60 * 60 * 24,
		"kB":   math.Pow(2, 10),
		"MB":   math.Pow(2, 20),
		"GB":   math.Pow(2, 30),
		"TB":   math.Pow(2, 40),
		"8kB":  math.Pow(2, 13),
		"16kB": math.Pow(2, 14),
		"16MB": math.Pow(2, 24),
	}

	if s.unit == "ms" {
		// Kept as a division (not *0.001) to preserve the exact
		// floating-point result of dividing by 1000.
		val /= 1000
	} else {
		val *= scale[s.unit]
	}

	return
}

View File

@@ -0,0 +1,223 @@
// +build !integration
package main
import (
dto "github.com/prometheus/client_model/go"
. "gopkg.in/check.v1"
)
// PgSettingSuite exercises pgSetting's unit normalisation and metric
// rendering via gocheck.
type PgSettingSuite struct{}

var _ = Suite(&PgSettingSuite{})
// fixtures drives both TestNormaliseUnit and TestMetric. Each entry pairs a
// pgSetting under test (p) with its expected normalisation result (n), the
// expected prometheus descriptor string (d) and the expected gauge value (v).
var fixtures = []fixture{
	{
		// Seconds pass through with no scaling.
		p: pgSetting{
			name:      "seconds_fixture_metric",
			setting:   "5",
			unit:      "s",
			shortDesc: "Foo foo foo",
			vartype:   "integer",
		},
		n: normalised{
			val:  5,
			unit: "seconds",
			err:  "",
		},
		d: "Desc{fqName: \"pg_settings_seconds_fixture_metric_seconds\", help: \"Foo foo foo [Units converted to seconds.]\", constLabels: {}, variableLabels: []}",
		v: 5,
	},
	{
		// Milliseconds are divided down to seconds.
		p: pgSetting{
			name:      "milliseconds_fixture_metric",
			setting:   "5000",
			unit:      "ms",
			shortDesc: "Foo foo foo",
			vartype:   "integer",
		},
		n: normalised{
			val:  5,
			unit: "seconds",
			err:  "",
		},
		d: "Desc{fqName: \"pg_settings_milliseconds_fixture_metric_seconds\", help: \"Foo foo foo [Units converted to seconds.]\", constLabels: {}, variableLabels: []}",
		v: 5,
	},
	{
		// 8kB pages: 17 * 2^13 bytes.
		p: pgSetting{
			name:      "eight_kb_fixture_metric",
			setting:   "17",
			unit:      "8kB",
			shortDesc: "Foo foo foo",
			vartype:   "integer",
		},
		n: normalised{
			val:  139264,
			unit: "bytes",
			err:  "",
		},
		d: "Desc{fqName: \"pg_settings_eight_kb_fixture_metric_bytes\", help: \"Foo foo foo [Units converted to bytes.]\", constLabels: {}, variableLabels: []}",
		v: 139264,
	},
	{
		// Real-typed value with a 16kB unit: 3.0 * 2^14 bytes.
		p: pgSetting{
			name:      "16_kb_real_fixture_metric",
			setting:   "3.0",
			unit:      "16kB",
			shortDesc: "Foo foo foo",
			vartype:   "real",
		},
		n: normalised{
			val:  49152,
			unit: "bytes",
			err:  "",
		},
		d: "Desc{fqName: \"pg_settings_16_kb_real_fixture_metric_bytes\", help: \"Foo foo foo [Units converted to bytes.]\", constLabels: {}, variableLabels: []}",
		v: 49152,
	},
	{
		// 16MB segments: 3.0 * 2^24 bytes.
		p: pgSetting{
			name:      "16_mb_real_fixture_metric",
			setting:   "3.0",
			unit:      "16MB",
			shortDesc: "Foo foo foo",
			vartype:   "real",
		},
		n: normalised{
			val:  5.0331648e+07,
			unit: "bytes",
			err:  "",
		},
		d: "Desc{fqName: \"pg_settings_16_mb_real_fixture_metric_bytes\", help: \"Foo foo foo [Units converted to bytes.]\", constLabels: {}, variableLabels: []}",
		v: 5.0331648e+07,
	},
	{
		// Boolean "on" maps to gauge value 1; name gets no unit suffix.
		p: pgSetting{
			name:      "bool_on_fixture_metric",
			setting:   "on",
			unit:      "",
			shortDesc: "Foo foo foo",
			vartype:   "bool",
		},
		n: normalised{
			val:  1,
			unit: "",
			err:  "",
		},
		d: "Desc{fqName: \"pg_settings_bool_on_fixture_metric\", help: \"Foo foo foo\", constLabels: {}, variableLabels: []}",
		v: 1,
	},
	{
		// Boolean "off" maps to gauge value 0.
		p: pgSetting{
			name:      "bool_off_fixture_metric",
			setting:   "off",
			unit:      "",
			shortDesc: "Foo foo foo",
			vartype:   "bool",
		},
		n: normalised{
			val:  0,
			unit: "",
			err:  "",
		},
		d: "Desc{fqName: \"pg_settings_bool_off_fixture_metric\", help: \"Foo foo foo\", constLabels: {}, variableLabels: []}",
		v: 0,
	},
	{
		// -1 is a sentinel value and must not be scaled, even with a unit.
		p: pgSetting{
			name:      "special_minus_one_value",
			setting:   "-1",
			unit:      "d",
			shortDesc: "foo foo foo",
			vartype:   "integer",
		},
		n: normalised{
			val:  -1,
			unit: "seconds",
			err:  "",
		},
		d: "Desc{fqName: \"pg_settings_special_minus_one_value_seconds\", help: \"foo foo foo [Units converted to seconds.]\", constLabels: {}, variableLabels: []}",
		v: -1,
	},
	{
		// Dotted setting names (RDS extensions) have dots mapped to
		// underscores in the metric name.
		p: pgSetting{
			name:      "rds.rds_superuser_reserved_connections",
			setting:   "2",
			unit:      "",
			shortDesc: "Sets the number of connection slots reserved for rds_superusers.",
			vartype:   "integer",
		},
		n: normalised{
			val:  2,
			unit: "",
			err:  "",
		},
		d: "Desc{fqName: \"pg_settings_rds_rds_superuser_reserved_connections\", help: \"Sets the number of connection slots reserved for rds_superusers.\", constLabels: {}, variableLabels: []}",
		v: 2,
	},
	{
		// Unknown unit: normaliseUnit reports an error and metric() panics,
		// so no d/v expectations are set. Must remain the LAST fixture —
		// TestMetric's recover ends the loop when this panic fires.
		p: pgSetting{
			name:      "unknown_unit",
			setting:   "10",
			unit:      "nonexistent",
			shortDesc: "foo foo foo",
			vartype:   "integer",
		},
		n: normalised{
			val:  10,
			unit: "",
			err:  `Unknown unit for runtime variable: "nonexistent"`,
		},
	},
}
// TestNormaliseUnit checks unit normalisation for every numeric fixture;
// bool fixtures are skipped since they carry no unit.
func (s *PgSettingSuite) TestNormaliseUnit(c *C) {
	for _, f := range fixtures {
		if f.p.vartype != "integer" && f.p.vartype != "real" {
			continue
		}

		val, unit, err := f.p.normaliseUnit()
		c.Check(val, Equals, f.n.val)
		c.Check(unit, Equals, f.n.unit)
		if err == nil {
			// No error produced: the fixture must expect none.
			c.Check("", Equals, f.n.err)
		} else {
			c.Check(err.Error(), Equals, f.n.err)
		}
	}
}
// TestMetric verifies the rendered descriptor string and gauge value for each
// fixture. The final fixture carries an unknown unit, which makes metric()
// panic; the deferred recover below swallows exactly that expected panic and
// re-panics on anything else.
func (s *PgSettingSuite) TestMetric(c *C) {
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if r.(error).Error() != `Unknown unit for runtime variable: "nonexistent"` {
			panic(r)
		}
	}()

	for _, f := range fixtures {
		m := f.p.metric()
		out := &dto.Metric{}
		m.Write(out) // nolint: errcheck
		c.Check(m.Desc().String(), Equals, f.d)
		c.Check(out.GetGauge().GetValue(), Equals, f.v)
	}
}
// normalised is the expected output of pgSetting.normaliseUnit for a fixture:
// the converted value, the normalised unit name, and the expected error text
// ("" when no error is expected).
type normalised struct {
	val  float64
	unit string
	err  string
}
// fixture binds a pgSetting under test to all of its expected outputs:
// n for normaliseUnit, d for the descriptor string and v for the gauge
// value produced by metric().
type fixture struct {
	p pgSetting
	n normalised
	d string
	v float64
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,124 @@
// These are specialized integration tests. We only build them when we're doing
// a lot of additional work to keep the external docker environment they require
// working.
// +build integration
package main
import (
"os"
"testing"
. "gopkg.in/check.v1"
"database/sql"
"fmt"
_ "github.com/lib/pq"
"github.com/prometheus/client_golang/prometheus"
)
// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { TestingT(t) }

// IntegrationSuite carries the exporter instance shared by all integration
// tests; it is initialised once in SetUpSuite.
type IntegrationSuite struct {
	e *Exporter
}

var _ = Suite(&IntegrationSuite{})
// SetUpSuite builds an Exporter from the DATA_SOURCE_NAME environment
// variable (which must be set) and registers it with the default prometheus
// registry so the HTTP scrape path is exercised too.
func (s *IntegrationSuite) SetUpSuite(c *C) {
	dsn := os.Getenv("DATA_SOURCE_NAME")
	c.Assert(dsn, Not(Equals), "")

	exporter := NewExporter(dsn, "")
	c.Assert(exporter, NotNil)
	// Assign the exporter to the suite
	s.e = exporter

	prometheus.MustRegister(exporter)
}
// TestAllNamespacesReturnResults walks the same steps as the exporter's
// scrape path (version check, pg_settings query, namespace queries) against
// a live database and prints any errors so failures are diagnosable.
// TODO: it would be nice if this didn't mostly just recreate the scrape function
func (s *IntegrationSuite) TestAllNamespacesReturnResults(c *C) {
	// Setup a dummy channel to consume metrics
	ch := make(chan prometheus.Metric, 100)
	go func() {
		for range ch {
		}
	}()

	// Open a database connection
	db, err := sql.Open("postgres", s.e.dsn)
	c.Assert(db, NotNil)
	c.Assert(err, IsNil)
	defer db.Close()

	// Do a version update
	err = s.e.checkMapVersions(ch, db)
	c.Assert(err, IsNil)

	err = querySettings(ch, db)
	if !c.Check(err, Equals, nil) {
		fmt.Println("## ERRORS FOUND")
		fmt.Println(err)
	}

	// This should never happen in our test cases.
	errMap := queryNamespaceMappings(ch, db, s.e.metricMap, s.e.queryOverrides)
	if !c.Check(len(errMap), Equals, 0) {
		fmt.Println("## NAMESPACE ERRORS FOUND")
		for namespace, err := range errMap {
			fmt.Println(namespace, ":", err)
		}
	}
}
// TestInvalidDsnDoesntCrash tests that specifying an invalid DSN doesn't crash
// the exporter. Related to https://github.com/wrouesnel/postgres_exporter/issues/93
// although not a replication of the scenario.
// Success here is simply the absence of a panic during scrape.
func (s *IntegrationSuite) TestInvalidDsnDoesntCrash(c *C) {
	// Setup a dummy channel to consume metrics
	ch := make(chan prometheus.Metric, 100)
	go func() {
		for range ch {
		}
	}()

	// Send a bad DSN
	exporter := NewExporter("invalid dsn", *queriesPath)
	c.Assert(exporter, NotNil)
	exporter.scrape(ch)

	// Send a DSN to a non-listening port.
	exporter = NewExporter("postgresql://nothing:nothing@127.0.0.1:1/nothing", *queriesPath)
	c.Assert(exporter, NotNil)
	exporter.scrape(ch)
}
// TestUnknownMetricParsingDoesntCrash deliberately deletes all the column maps out
// of an exporter to test that the default metric handling code can cope with unknown columns.
// As above, success is the absence of a panic during scrape.
func (s *IntegrationSuite) TestUnknownMetricParsingDoesntCrash(c *C) {
	// Setup a dummy channel to consume metrics
	ch := make(chan prometheus.Metric, 100)
	go func() {
		for range ch {
		}
	}()

	dsn := os.Getenv("DATA_SOURCE_NAME")
	c.Assert(dsn, Not(Equals), "")

	exporter := NewExporter(dsn, "")
	c.Assert(exporter, NotNil)

	// Convert the default maps into a list of empty maps.
	emptyMaps := make(map[string]map[string]ColumnMapping, 0)
	for k := range exporter.builtinMetricMaps {
		emptyMaps[k] = map[string]ColumnMapping{}
	}
	exporter.builtinMetricMaps = emptyMaps

	// scrape the exporter and make sure it works
	exporter.scrape(ch)
}

View File

@@ -0,0 +1,179 @@
// +build !integration
package main
import (
. "gopkg.in/check.v1"
"testing"
"github.com/blang/semver"
"os"
)
// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { TestingT(t) }

// FunctionalSuite groups the non-integration unit tests, which need no
// running database.
type FunctionalSuite struct {
}

var _ = Suite(&FunctionalSuite{})

// SetUpSuite is a no-op; it exists to satisfy gocheck's fixture interface.
func (s *FunctionalSuite) SetUpSuite(c *C) {
}
// TestSemanticVersionColumnDiscard checks that makeDescMap honours a column's
// supportedVersions range: a metric whose range excludes the server version
// is marked discard, and one whose range includes it is kept.
func (s *FunctionalSuite) TestSemanticVersionColumnDiscard(c *C) {
	testMetricMap := map[string]map[string]ColumnMapping{
		"test_namespace": {
			"metric_which_stays":    {COUNTER, "This metric should not be eliminated", nil, nil},
			"metric_which_discards": {COUNTER, "This metric should be forced to DISCARD", nil, nil},
		},
	}

	{
		// No metrics should be eliminated
		resultMap := makeDescMap(semver.MustParse("0.0.1"), testMetricMap)
		c.Check(
			resultMap["test_namespace"].columnMappings["metric_which_stays"].discard,
			Equals,
			false,
		)
		c.Check(
			resultMap["test_namespace"].columnMappings["metric_which_discards"].discard,
			Equals,
			false,
		)
	}

	{
		// Update the map so the discard metric should be eliminated
		discardableMetric := testMetricMap["test_namespace"]["metric_which_discards"]
		discardableMetric.supportedVersions = semver.MustParseRange(">0.0.1")
		testMetricMap["test_namespace"]["metric_which_discards"] = discardableMetric

		// Server version 0.0.1 does not satisfy >0.0.1, so the metric is discarded.
		resultMap := makeDescMap(semver.MustParse("0.0.1"), testMetricMap)
		c.Check(
			resultMap["test_namespace"].columnMappings["metric_which_stays"].discard,
			Equals,
			false,
		)
		c.Check(
			resultMap["test_namespace"].columnMappings["metric_which_discards"].discard,
			Equals,
			true,
		)
	}

	{
		// Update the map so the discard metric should be kept but has a version
		discardableMetric := testMetricMap["test_namespace"]["metric_which_discards"]
		discardableMetric.supportedVersions = semver.MustParseRange(">0.0.1")
		testMetricMap["test_namespace"]["metric_which_discards"] = discardableMetric

		// Server version 0.0.2 satisfies >0.0.1, so the metric is kept.
		resultMap := makeDescMap(semver.MustParse("0.0.2"), testMetricMap)
		c.Check(
			resultMap["test_namespace"].columnMappings["metric_which_stays"].discard,
			Equals,
			false,
		)
		c.Check(
			resultMap["test_namespace"].columnMappings["metric_which_discards"].discard,
			Equals,
			false,
		)
	}
}
// TestEnvironmentSettingWithSecretsFiles checks that the username and password
// are read from the files named by DATA_SOURCE_USER_FILE / DATA_SOURCE_PASS_FILE
// and combined with DATA_SOURCE_URI into a full DSN.
func (s *FunctionalSuite) TestEnvironmentSettingWithSecretsFiles(c *C) {
	err := os.Setenv("DATA_SOURCE_USER_FILE", "./tests/username_file")
	c.Assert(err, IsNil)
	defer UnsetEnvironment(c, "DATA_SOURCE_USER_FILE")

	err = os.Setenv("DATA_SOURCE_PASS_FILE", "./tests/userpass_file")
	c.Assert(err, IsNil)
	defer UnsetEnvironment(c, "DATA_SOURCE_PASS_FILE")

	err = os.Setenv("DATA_SOURCE_URI", "localhost:5432/?sslmode=disable")
	c.Assert(err, IsNil)
	defer UnsetEnvironment(c, "DATA_SOURCE_URI")

	// Expected credentials come from the fixture files under ./tests.
	var expected = "postgresql://custom_username:custom_password@localhost:5432/?sslmode=disable"

	dsn := getDataSource()
	if dsn != expected {
		c.Errorf("Expected Username to be read from file. Found=%v, expected=%v", dsn, expected)
	}
}
// TestEnvironmentSettingWithDns checks that a DSN supplied via the
// DATA_SOURCE_NAME environment variable is returned verbatim.
func (s *FunctionalSuite) TestEnvironmentSettingWithDns(c *C) {
	envDsn := "postgresql://user:password@localhost:5432/?sslmode=enabled"
	err := os.Setenv("DATA_SOURCE_NAME", envDsn)
	c.Assert(err, IsNil)
	defer UnsetEnvironment(c, "DATA_SOURCE_NAME")

	dsn := getDataSource()
	if dsn != envDsn {
		// Fixed failure message: this test reads the DSN from the
		// environment, not from a file.
		c.Errorf("Expected DSN to be read from environment. Found=%v, expected=%v", dsn, envDsn)
	}
}
// TestEnvironmentSettingWithDnsAndSecrets checks that DATA_SOURCE_NAME takes
// precedence even when the user-file and password environment variables are
// also set.
func (s *FunctionalSuite) TestEnvironmentSettingWithDnsAndSecrets(c *C) {
	envDsn := "postgresql://userDsn:passwordDsn@localhost:55432/?sslmode=disabled"
	err := os.Setenv("DATA_SOURCE_NAME", envDsn)
	c.Assert(err, IsNil)
	defer UnsetEnvironment(c, "DATA_SOURCE_NAME")

	err = os.Setenv("DATA_SOURCE_USER_FILE", "./tests/username_file")
	c.Assert(err, IsNil)
	defer UnsetEnvironment(c, "DATA_SOURCE_USER_FILE")

	err = os.Setenv("DATA_SOURCE_PASS", "envUserPass")
	c.Assert(err, IsNil)
	defer UnsetEnvironment(c, "DATA_SOURCE_PASS")

	dsn := getDataSource()
	if dsn != envDsn {
		// Fixed failure message: this test asserts DATA_SOURCE_NAME wins
		// over the file/password variables.
		c.Errorf("Expected DATA_SOURCE_NAME to take precedence. Found=%v, expected=%v", dsn, envDsn)
	}
}
// TestPostgresVersionParsing checks that server version strings reported by
// several PostgreSQL flavours parse into the expected semantic versions.
func (s *FunctionalSuite) TestPostgresVersionParsing(c *C) {
	cases := []struct {
		input    string
		expected string
	}{
		{
			input:    "PostgreSQL 10.1 on x86_64-pc-linux-gnu, compiled by gcc (Debian 6.3.0-18) 6.3.0 20170516, 64-bit",
			expected: "10.1.0",
		},
		{
			input:    "PostgreSQL 9.5.4, compiled by Visual C++ build 1800, 64-bit",
			expected: "9.5.4",
		},
		{
			input:    "EnterpriseDB 9.6.5.10 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16), 64-bit",
			expected: "9.6.5",
		},
	}

	for _, tc := range cases {
		ver, err := parseVersion(tc.input)
		c.Assert(err, IsNil)
		c.Assert(ver.String(), Equals, tc.expected)
	}
}
// UnsetEnvironment removes the named environment variable, failing the test
// if the underlying call errors. Used in defers to clean up after tests that
// set environment variables.
func UnsetEnvironment(c *C, d string) {
	err := os.Unsetenv(d)
	c.Assert(err, IsNil)
}

View File

@@ -0,0 +1,7 @@
FROM postgres:10

# MAINTAINER is deprecated; use a LABEL instead.
LABEL maintainer="Daniel Dent (https://www.danieldent.com)"

# Replication tuning knobs consumed by setup-replication.sh.
ENV PG_MAX_WAL_SENDERS=8
ENV PG_WAL_KEEP_SEGMENTS=8

# Install the replication-aware entrypoint and init hook.
COPY setup-replication.sh /docker-entrypoint-initdb.d/
COPY docker-entrypoint.sh /docker-entrypoint.sh
RUN chmod +x /docker-entrypoint-initdb.d/setup-replication.sh /docker-entrypoint.sh

View File

@@ -0,0 +1,7 @@
# p2 template: {{VERSION}} is substituted by the test driver before building.
FROM postgres:{{VERSION}}

# MAINTAINER is deprecated; use a LABEL instead.
LABEL maintainer="Daniel Dent (https://www.danieldent.com)"

# Replication tuning knobs consumed by setup-replication.sh.
ENV PG_MAX_WAL_SENDERS=8
ENV PG_WAL_KEEP_SEGMENTS=8

# Install the replication-aware entrypoint and init hook.
COPY setup-replication.sh /docker-entrypoint-initdb.d/
COPY docker-entrypoint.sh /docker-entrypoint.sh
RUN chmod +x /docker-entrypoint-initdb.d/setup-replication.sh /docker-entrypoint.sh

View File

@@ -0,0 +1,11 @@
# Replicated postgres cluster in docker.
Upstream is forked from https://github.com/DanielDent/docker-postgres-replication
My version lives at https://github.com/wrouesnel/docker-postgres-replication
This very simple docker-compose file lets us stand up a replicated PostgreSQL
cluster so we can test streaming replication.
# TODO:
Pull in p2 and template the Dockerfile so we can test multiple versions.

View File

@@ -0,0 +1,32 @@
# Two-node PostgreSQL cluster used by the replication smoke tests:
# pg-master accepts writes, pg-slave streams WAL from it.
version: '2'

services:
  pg-master:
    build: '.'
    image: 'danieldent/postgres-replication'
    restart: 'always'
    environment:
      POSTGRES_USER: 'postgres'
      POSTGRES_PASSWORD: 'postgres'
      PGDATA: '/var/lib/postgresql/data/pgdata'
    volumes:
      - '/var/lib/postgresql/data'
    expose:
      - '5432'

  pg-slave:
    build: '.'
    image: 'danieldent/postgres-replication'
    restart: 'always'
    environment:
      POSTGRES_USER: 'postgres'
      POSTGRES_PASSWORD: 'postgres'
      PGDATA: '/var/lib/postgresql/data/pgdata'
      # Presence of REPLICATE_FROM makes docker-entrypoint.sh initialise
      # this node as a streaming replica of pg-master.
      REPLICATE_FROM: 'pg-master'
    volumes:
      - '/var/lib/postgresql/data'
    expose:
      - '5432'
    links:
      - 'pg-master'

View File

@@ -0,0 +1,140 @@
#!/bin/bash
# Entrypoint for the replication test images. Based on the official postgres
# image's entrypoint, extended so that a container with REPLICATE_FROM set
# initialises itself as a streaming replica via pg_basebackup instead of
# running initdb.

# Backwards compatibility for old variable names (deprecated)
if [ "x$PGUSER" != "x" ]; then
    POSTGRES_USER=$PGUSER
fi
if [ "x$PGPASSWORD" != "x" ]; then
    POSTGRES_PASSWORD=$PGPASSWORD
fi

# Forwards-compatibility for old variable names (pg_basebackup uses them)
if [ "x$PGPASSWORD" = "x" ]; then
    export PGPASSWORD=$POSTGRES_PASSWORD
fi

# Based on official postgres package's entrypoint script (https://hub.docker.com/_/postgres/)
# Modified to be able to set up a slave. The docker-entrypoint-initdb.d hook provided is inadequate.

set -e

# A leading option ("-foo") means "run postgres with these flags".
if [ "${1:0:1}" = '-' ]; then
    set -- postgres "$@"
fi

if [ "$1" = 'postgres' ]; then
    mkdir -p "$PGDATA"
    chmod 700 "$PGDATA"
    chown -R postgres "$PGDATA"

    mkdir -p /run/postgresql
    chmod g+s /run/postgresql
    chown -R postgres /run/postgresql

    # look specifically for PG_VERSION, as it is expected in the DB dir
    if [ ! -s "$PGDATA/PG_VERSION" ]; then
        if [ "x$REPLICATE_FROM" == "x" ]; then
            # Master: initialise a fresh cluster.
            eval "gosu postgres initdb $POSTGRES_INITDB_ARGS"
        else
            # Replica: wait for the master, then base-backup from it.
            until ping -c 1 -W 1 ${REPLICATE_FROM}
            do
                echo "Waiting for master to ping..."
                sleep 1s
            done
            until gosu postgres pg_basebackup -h ${REPLICATE_FROM} -D ${PGDATA} -U ${POSTGRES_USER} -vP -w
            do
                echo "Waiting for master to connect..."
                sleep 1s
            done
        fi

        # check password first so we can output the warning before postgres
        # messes it up
        if [ ! -z "$POSTGRES_PASSWORD" ]; then
            pass="PASSWORD '$POSTGRES_PASSWORD'"
            authMethod=md5
        else
            # The - option suppresses leading tabs but *not* spaces. :)
            cat >&2 <<-'EOWARN'
****************************************************
WARNING: No password has been set for the database.
This will allow anyone with access to the
Postgres port to access your database. In
Docker's default configuration, this is
effectively any other container on the same
system.
Use "-e POSTGRES_PASSWORD=password" to set
it in "docker run".
****************************************************
EOWARN
            pass=
            authMethod=trust
        fi

        # Only the master performs user/database creation; replicas cloned
        # everything in the base backup.
        if [ "x$REPLICATE_FROM" == "x" ]; then
            { echo; echo "host replication all 0.0.0.0/0 $authMethod"; } | gosu postgres tee -a "$PGDATA/pg_hba.conf" > /dev/null
            { echo; echo "host all all 0.0.0.0/0 $authMethod"; } | gosu postgres tee -a "$PGDATA/pg_hba.conf" > /dev/null

            # internal start of server in order to allow set-up using psql-client
            # does not listen on external TCP/IP and waits until start finishes
            gosu postgres pg_ctl -D "$PGDATA" \
                -o "-c listen_addresses='localhost'" \
                -w start

            : ${POSTGRES_USER:=postgres}
            : ${POSTGRES_DB:=$POSTGRES_USER}
            export POSTGRES_USER POSTGRES_DB

            psql=( psql -v ON_ERROR_STOP=1 )

            if [ "$POSTGRES_DB" != 'postgres' ]; then
                "${psql[@]}" --username postgres <<-EOSQL
CREATE DATABASE "$POSTGRES_DB" ;
EOSQL
                echo
            fi

            if [ "$POSTGRES_USER" = 'postgres' ]; then
                op='ALTER'
            else
                op='CREATE'
            fi
            "${psql[@]}" --username postgres <<-EOSQL
$op USER "$POSTGRES_USER" WITH SUPERUSER $pass ;
EOSQL
            echo
        fi

        psql+=( --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" )

        echo
        # Run every init hook in /docker-entrypoint-initdb.d, dispatching on
        # file extension (shell scripts are sourced, SQL is piped to psql).
        for f in /docker-entrypoint-initdb.d/*; do
            case "$f" in
                *.sh)     echo "$0: running $f"; . "$f" ;;
                *.sql)    echo "$0: running $f"; "${psql[@]}" < "$f"; echo ;;
                *.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${psql[@]}"; echo ;;
                *)        echo "$0: ignoring $f" ;;
            esac
            echo
        done

        # Stop the temporary master instance; the real server starts below.
        if [ "x$REPLICATE_FROM" == "x" ]; then
            gosu postgres pg_ctl -D "$PGDATA" -m fast -w stop
        fi

        echo
        echo 'PostgreSQL init process complete; ready for start up.'
        echo
    fi

    # We need this health check so we know when it's started up.
    touch /tmp/.postgres_init_complete

    exec gosu postgres "$@"
fi

exec "$@"

View File

@@ -0,0 +1,22 @@
#!/bin/bash
# Init hook run from docker-entrypoint-initdb.d after the cluster exists.
# On the master (REPLICATE_FROM unset): enable WAL shipping in postgresql.conf.
# On a replica: write a recovery.conf pointing at the master.

if [ "x$REPLICATE_FROM" == "x" ]; then

# Master: allow streaming replication using the knobs baked into the image.
cat >> ${PGDATA}/postgresql.conf <<EOF
wal_level = hot_standby
max_wal_senders = $PG_MAX_WAL_SENDERS
wal_keep_segments = $PG_WAL_KEEP_SEGMENTS
hot_standby = on
EOF

else

# Replica: stream from the master; touching trigger_file promotes this node.
cat > ${PGDATA}/recovery.conf <<EOF
standby_mode = on
primary_conninfo = 'host=${REPLICATE_FROM} port=5432 user=${POSTGRES_USER} password=${POSTGRES_PASSWORD}'
trigger_file = '/tmp/touch_me_to_promote_to_me_master'
EOF

# recovery.conf contains the password, so restrict it to the postgres user.
chown postgres ${PGDATA}/recovery.conf
chmod 600 ${PGDATA}/recovery.conf

fi

View File

@@ -0,0 +1,169 @@
#!/bin/bash
# Basic integration tests with postgres. Requires docker to work.
# Usage: <script> <path-to-exporter-binary> <path-to-test-binary>

# Resolve the directory this script lives in, following symlinks.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
    DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
    SOURCE="$(readlink "$SOURCE")"
    [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

# Metric dumps are written into the directory the script was invoked from.
METRICS_DIR=$(pwd)

# Read the absolute path to the exporter
postgres_exporter="$1"
test_binary="$2"
export POSTGRES_PASSWORD=postgres
exporter_port=9187

echo "Exporter Binary: $postgres_exporter" 1>&2
echo "Test Binary: $test_binary" 1>&2

[ -z "$postgres_exporter" ] && echo "Missing exporter binary" && exit 1
[ -z "$test_binary" ] && echo "Missing test binary" && exit 1

cd $DIR

# PostgreSQL versions to smoke test against.
VERSIONS=( \
    9.1 \
    9.2 \
    9.3 \
    9.4 \
    9.5 \
    9.6 \
    10 \
)
# wait_for_postgres <ip> <port>
# Polls pg_isready until postgres answers, exiting the whole script if it
# doesn't within $TIMEOUT seconds. NOTE: TIMEOUT is read from the caller's
# scope (bash dynamic scoping) — smoketest_postgres declares it as a local.
wait_for_postgres(){
    local ip=$1
    local port=$2
    if [ -z $ip ]; then
        echo "No IP specified." 1>&2
        exit 1
    fi
    if [ -z $port ]; then
        echo "No port specified." 1>&2
        exit 1
    fi
    local wait_start=$(date +%s)
    echo "Waiting for postgres to start listening..."
    while ! pg_isready --host=$ip --port=$port &> /dev/null; do
        if [ $(( $(date +%s) - $wait_start )) -gt $TIMEOUT ]; then
            echo "Timed out waiting for postgres to start!" 1>&2
            exit 1
        fi
        sleep 1
    done
}
# wait_for_exporter
# Polls the exporter's listen port on localhost until it accepts connections,
# exiting the whole script after $TIMEOUT seconds (dynamically scoped, see
# wait_for_postgres above).
wait_for_exporter() {
    local wait_start=$(date +%s)
    echo "Waiting for exporter to start..."
    while ! nc -z localhost $exporter_port ; do
        if [ $(( $(date +%s) - $wait_start )) -gt $TIMEOUT ]; then
            echo "Timed out waiting for exporter!" 1>&2
            exit 1
        fi
        sleep 1
    done
}
# smoketest_postgres <version>
# Runs the test binary and the exporter against a standalone postgres
# container, then against a master/slave docker-compose pair, dumping the
# scraped metrics for each topology under $METRICS_DIR.
smoketest_postgres() {
    local version=$1
    local CONTAINER_NAME=postgres_exporter-test-smoke
    local TIMEOUT=30
    local IMAGE_NAME=postgres
    local CUR_IMAGE=$IMAGE_NAME:$version

    echo "#######################"
    echo "Standalone Postgres $version"
    echo "#######################"
    local docker_cmd="docker run -d -e POSTGRES_PASSWORD=$POSTGRES_PASSWORD $CUR_IMAGE"
    echo "Docker Cmd: $docker_cmd"

    CONTAINER_NAME=$($docker_cmd)
    standalone_ip=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $CONTAINER_NAME)
    # Ensure the container is logged and destroyed on any premature exit.
    trap "docker logs $CONTAINER_NAME ; docker kill $CONTAINER_NAME ; docker rm -v $CONTAINER_NAME; exit 1" EXIT INT TERM
    wait_for_postgres $standalone_ip 5432

    # Run the test binary.
    DATA_SOURCE_NAME="postgresql://postgres:$POSTGRES_PASSWORD@$standalone_ip:5432/?sslmode=disable" $test_binary || exit $?

    # Extract a raw metric list.
    DATA_SOURCE_NAME="postgresql://postgres:$POSTGRES_PASSWORD@$standalone_ip:5432/?sslmode=disable" $postgres_exporter --log.level=debug --web.listen-address=:$exporter_port &
    exporter_pid=$!
    trap "docker logs $CONTAINER_NAME ; docker kill $CONTAINER_NAME ; docker rm -v $CONTAINER_NAME; kill $exporter_pid; exit 1" EXIT INT TERM
    wait_for_exporter

    # Dump the metrics to a file.
    wget -q -O - http://localhost:$exporter_port/metrics 1> $METRICS_DIR/.metrics.single.$version.prom
    if [ "$?" != "0" ]; then
        # BUGFIX: previously referenced the never-set $DOCKER_IMAGE variable.
        echo "Failed on postgres $version ($CUR_IMAGE)" 1>&2
        kill $exporter_pid
        exit 1
    fi

    kill $exporter_pid
    docker kill $CONTAINER_NAME
    docker rm -v $CONTAINER_NAME
    trap - EXIT INT TERM

    echo "#######################"
    echo "Replicated Postgres $version"
    echo "#######################"
    old_pwd=$(pwd)
    cd docker-postgres-replication

    # Template the Dockerfile for this postgres version.
    VERSION=$version p2 -t Dockerfile.p2 -o Dockerfile
    if [ "$?" != "0" ]; then
        echo "Templating failed" 1>&2
        exit 1
    fi
    trap "docker-compose logs; docker-compose down ; docker-compose rm -v; exit 1" EXIT INT TERM
    local compose_cmd="POSTGRES_PASSWORD=$POSTGRES_PASSWORD docker-compose up -d --force-recreate --build"
    echo "Compose Cmd: $compose_cmd"
    eval $compose_cmd

    master_container=$(docker-compose ps -q pg-master)
    slave_container=$(docker-compose ps -q pg-slave)
    master_ip=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $master_container)
    slave_ip=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $slave_container)
    echo "Got master IP: $master_ip"
    wait_for_postgres $master_ip 5432
    wait_for_postgres $slave_ip 5432

    DATA_SOURCE_NAME="postgresql://postgres:$POSTGRES_PASSWORD@$master_ip:5432/?sslmode=disable" $test_binary || exit $?

    DATA_SOURCE_NAME="postgresql://postgres:$POSTGRES_PASSWORD@$master_ip:5432/?sslmode=disable" $postgres_exporter --log.level=debug --web.listen-address=:$exporter_port &
    exporter_pid=$!
    trap "docker-compose logs; docker-compose down ; docker-compose rm -v ; kill $exporter_pid; exit 1" EXIT INT TERM
    wait_for_exporter

    wget -q -O - http://localhost:$exporter_port/metrics 1> $METRICS_DIR/.metrics.replicated.$version.prom
    if [ "$?" != "0" ]; then
        # BUGFIX: previously referenced the never-set $DOCKER_IMAGE variable.
        echo "Failed on postgres $version ($CUR_IMAGE)" 1>&2
        exit 1
    fi

    kill $exporter_pid
    docker-compose down
    docker-compose rm -v
    trap - EXIT INT TERM

    cd $old_pwd
}
# Start pulling the docker images in advance
for version in ${VERSIONS[@]}; do
    docker pull postgres:$version > /dev/null &
done

# Smoke test each version in sequence (pulls above race ahead; docker pull
# is idempotent, so a still-running pull just delays the first run).
for version in ${VERSIONS[@]}; do
    echo "Testing postgres version $version"
    smoketest_postgres $version
done

View File

@@ -0,0 +1 @@
custom_username

View File

@@ -0,0 +1 @@
custom_password