changeset 4110:861760675497 request_hist_bns

Merged default
author Sascha Wilde <wilde@intevation.de>
date Tue, 30 Jul 2019 11:30:31 +0200
parents 3d2f02c16765 (current diff) 6ee5523967ec (diff)
children 692aba3e8b85
files pkg/imports/bn.go
diffstat 26 files changed, 735 insertions(+), 221 deletions(-)
--- a/.hgtags	Fri Jul 26 13:46:55 2019 +0200
+++ b/.hgtags	Tue Jul 30 11:30:31 2019 +0200
@@ -12,3 +12,4 @@
 b166cb97b98a40f33e977c96e65e79bf22e92fca v3.1
 d78af8354b95cea86744459f350edb16662dadd0 v3.1
 5396581cf20334cbc5e69280e5d9b192640d96b9 v4-preview20190717
+aececbc3d04798d905e65196ac0870d081776ca2 v4-preview20190726
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/TODOs-historization_ng.md	Tue Jul 30 11:30:31 2019 +0200
@@ -0,0 +1,51 @@
+# TODOs for HNG (Historization NextGen)
+
+## gauges-reference-water-levels
+
+Could be seen as a direct part of the gauge data and therefore kept
+with the full primary key reference...
+
+## Water level diagrams:
+
+One time range might reference different versions of one gauge:
+- How is that handled currently?
+- How to handle it in HNG?
+
+## Map display:
+
+Ensure only current gauges are shown in all situations
+- Gauges layer
+- Search
+- more?
+
+## Bottlenecks:
+
+Always associate the correct gauge when:
+- Uploading Sounding results (how is that currently handled?)
+- Displaying Sounding data
+- Creating/showing X-Cuts
+- Unclear: what constraints shall we check for on bottleneck imports?
+
+Cut the end of validity _after_ accepting the new one.
+
+## Sounding Results:
+
+- Maybe add a constraint to check that a ref gauge is available?
+  Maybe not necessary, as import isn't possible anyway: the
+  reference gauge is needed for the import process... (So the
+  constraint would never catch anything, as no data could be produced
+  without a ref gauge.) Only upside: we would trigger if there is an
+  error in the backend generating SR with invalid ref data.
+- Check during upload that matching bottleneck and reference gauge
+  data is available (and use it correctly)
+- Use correct reference gauge for X-Cuts
+- (Maybe later) use matching BN data for display
+
+## EVERYTHING ELSE
+
+Something not being mentioned here doesn't mean it is done...
+
+
+# DONE
+
+Stuff that is done will be moved here for reference:
--- a/client/src/components/importoverview/ImportOverview.vue	Fri Jul 26 13:46:55 2019 +0200
+++ b/client/src/components/importoverview/ImportOverview.vue	Tue Jul 30 11:30:31 2019 +0200
@@ -343,8 +343,10 @@
       this.$store
         .dispatch("imports/getImports", {
           filter: this.filters,
-          from: format(this.startDate, "YYYY-MM-DDTHH:mm:ss.SSS"),
-          to: format(this.endDate, "YYYY-MM-DDTHH:mm:ss.SSS"),
+          from: encodeURIComponent(
+            format(this.startDate, "YYYY-MM-DDTHH:mm:ssZ")
+          ),
+          to: encodeURIComponent(format(this.endDate, "YYYY-MM-DDTHH:mm:ssZ")),
           query: this.searchQuery
         })
         .then(() => {
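
A note on the encodeURIComponent() calls introduced above: the new timestamp format carries a numeric zone offset such as "+02:00", and an unescaped "+" inside a query string is decoded as a space by standard form parsing, which would corrupt the value before common.ParseTime on the backend ever sees it. The following Go snippet is only an illustration of that effect from the server's point of view; it is not part of this changeset:

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        ts := "2019-07-30T11:30:31+02:00"

        // What the client should put into the query string:
        fmt.Println(url.QueryEscape(ts)) // 2019-07-30T11%3A30%3A31%2B02%3A00

        // What the backend's form parsing would yield if the '+' were
        // sent unescaped: the offset sign silently turns into a space.
        v, _ := url.ParseQuery("from=" + ts)
        fmt.Println(v.Get("from")) // 2019-07-30T11:30:31 02:00
    }
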
--- a/client/src/store/fairwayavailability.js	Fri Jul 26 13:46:55 2019 +0200
+++ b/client/src/store/fairwayavailability.js	Tue Jul 30 11:30:31 2019 +0200
@@ -18,7 +18,9 @@
 import {
   format,
   subYears,
+  startOfDay,
   startOfMonth,
+  endOfDay,
   endOfMonth,
   startOfYear,
   endOfYear,
@@ -238,11 +240,13 @@
         } else if (type == TYPES.SECTION || type == TYPES.STRETCH) {
           additionalParams = `&depthbreaks=${depthLimit1},${depthLimit2}&widthbreaks=${widthLimit1},${widthLimit2}`;
         }
-        const start = encodeURIComponent("00:00:00+00:00");
-        const end = encodeURIComponent("23:59:59+00:00");
         const URL = `data/${endpoint}/fairway-depth/${encodeURIComponent(
           name
-        )}?from=${from}T${start}&to=${to}T${end}&mode=${frequency}&los=${LOS}${additionalParams}`;
+        )}?from=${encodeURIComponent(
+          format(startOfDay(from), "YYYY-MM-DDTHH:mm:ssZ")
+        )}&to=${encodeURIComponent(
+          format(endOfDay(to), "YYYY-MM-DDTHH:mm:ssZ")
+        )}&mode=${frequency}&los=${LOS}${additionalParams}`;
         HTTP.get(URL, {
           headers: { "X-Gemma-Auth": localStorage.getItem("token") }
         })
@@ -278,8 +282,6 @@
           ? feature.properties.name
           : feature.get("objnam");
         [from, to] = getIntervallBorders(from, to, frequency);
-        const start = encodeURIComponent("00:00:00+00:00");
-        const end = encodeURIComponent("23:59:59+00:00");
         let additionalParams = "";
         let endpoint = type || TYPES.BOTTLENECK;
         if (type === TYPES.BOTTLENECK) {
@@ -292,7 +294,11 @@
         }
         const URL = `data/${endpoint}/availability/${encodeURIComponent(
           name
-        )}?from=${from}T${start}&to=${to}T${end}&mode=${frequency}&los=${LOS}${additionalParams}`;
+        )}?from=${encodeURIComponent(
+          format(startOfDay(from), "YYYY-MM-DDTHH:mm:ssZ")
+        )}&to=${encodeURIComponent(
+          format(endOfDay(to), "YYYY-MM-DDTHH:mm:ssZ")
+        )}&mode=${frequency}&los=${LOS}${additionalParams}`;
         HTTP.get(URL, {
           headers: { "X-Gemma-Auth": localStorage.getItem("token") }
         })
--- a/client/src/store/gauges.js	Fri Jul 26 13:46:55 2019 +0200
+++ b/client/src/store/gauges.js	Tue Jul 30 11:30:31 2019 +0200
@@ -138,10 +138,13 @@
     loadWaterlevels({ state, commit }) {
       return new Promise((resolve, reject) => {
         HTTP.get(
-          `/data/waterlevels/${state.selectedGaugeISRS}?from=${format(
-            startOfDay(state.dateFrom),
-            "YYYY-MM-DDTHH:mm:ss.SSS"
-          )}&to=${format(endOfDay(state.dateTo), "YYYY-MM-DDTHH:mm:ss.SSS")}`,
+          `/data/waterlevels/${
+            state.selectedGaugeISRS
+          }?from=${encodeURIComponent(
+            format(startOfDay(state.dateFrom), "YYYY-MM-DDTHH:mm:ssZ")
+          )}&to=${encodeURIComponent(
+            format(endOfDay(state.dateTo), "YYYY-MM-DDTHH:mm:ssZ")
+          )}`,
           {
             headers: { "X-Gemma-Auth": localStorage.getItem("token") }
           }
--- a/pkg/common/time.go	Fri Jul 26 13:46:55 2019 +0200
+++ b/pkg/common/time.go	Tue Jul 30 11:30:31 2019 +0200
@@ -23,13 +23,22 @@
 	// time.RFC3339 equals "simplified ISO format as defined by ECMA-262"
 	//   https://tc39.github.io/ecma262/#sec-date-time-string-format
 	// and "SHOULD be used in new protocols on the Internet." (RFC section 5.6)
-	TimeFormat = time.RFC3339
-	DateFormat = "2006-01-02"
+	TimeFormat           = time.RFC3339
+	TimeFormatMicro      = "2006-01-02T15:04:05.999Z07:00"
+	TimeFormatMicroLocal = "2006-01-02T15:04:05.000"
+	DateFormat           = "2006-01-02"
 )
 
 // TimeParser is a list of time formats.
 type TimeParser []string
 
+var ParseTime = TimeParser{
+	TimeFormat,
+	TimeFormatMicro,
+	TimeFormatMicroLocal,
+	DateFormat,
+}.Parse
+
 var utc0 = time.Unix(0, 0)
 
 // Parse tries to parse a given string by the entries of the layout
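
The new common.ParseTime accepts several layouts, so the backend can handle the old millisecond local format, zone-aware RFC3339 stamps, and plain dates alike. The Parse method of TimeParser itself is not shown in this diff; the sketch below only assumes that it tries the layouts in order and reports the last error if none of them matches:

    package main

    import (
        "fmt"
        "time"
    )

    // TimeParser mirrors the type above: a list of layouts tried in order.
    // This Parse is a sketch; the real implementation lives in
    // pkg/common/time.go and is not shown in this diff.
    type TimeParser []string

    func (tp TimeParser) Parse(s string) (time.Time, error) {
        var err error
        for _, layout := range tp {
            var t time.Time
            if t, err = time.Parse(layout, s); err == nil {
                return t, nil
            }
        }
        return time.Time{}, err
    }

    func main() {
        parse := TimeParser{
            time.RFC3339,                    // TimeFormat
            "2006-01-02T15:04:05.999Z07:00", // TimeFormatMicro
            "2006-01-02T15:04:05.000",       // TimeFormatMicroLocal
            "2006-01-02",                    // DateFormat
        }.Parse

        for _, in := range []string{
            "2019-07-30T11:30:31+02:00", // RFC3339 with offset
            "2019-07-30T11:30:31.000",   // old local import format
            "2019-07-30",                // date only
        } {
            t, err := parse(in)
            fmt.Println(in, "->", t, err)
        }
    }
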
--- a/pkg/controllers/bottlenecks.go	Fri Jul 26 13:46:55 2019 +0200
+++ b/pkg/controllers/bottlenecks.go	Tue Jul 30 11:30:31 2019 +0200
@@ -48,7 +48,7 @@
   JOIN waterway.fairway_availability fa
     ON efa.fairway_availability_id = fa.id
   JOIN waterway.bottlenecks bn
-    ON fa.bottleneck_id = bn.id
+    ON fa.bottleneck_id = bn.bottleneck_id
   WHERE
     bn.validity @> current_timestamp AND
     bn.objnam = $1 AND
@@ -83,7 +83,7 @@
 FROM waterway.gauges_reference_water_levels grwl
   JOIN waterway.bottlenecks bns
     ON grwl.location = bns.gauge_location
-      AND grwl.validity = bns.gauge_validity
+      AND grwl.validity @> current_timestamp
 WHERE bns.validity @> current_timestamp
   AND bns.objnam = $1
   AND grwl.depth_reference like 'LDC%'
--- a/pkg/controllers/gauges.go	Fri Jul 26 13:46:55 2019 +0200
+++ b/pkg/controllers/gauges.go	Tue Jul 30 11:30:31 2019 +0200
@@ -588,7 +588,7 @@
 
 	var when time.Time
 	if w := req.FormValue("when"); w != "" {
-		if when, err = time.Parse(models.ImportTimeFormat, w); err != nil {
+		if when, err = common.ParseTime(w); err != nil {
 			err = JSONError{
 				Code:    http.StatusBadRequest,
 				Message: fmt.Sprintf("error: wrong time format: %v", err),
@@ -702,7 +702,7 @@
 	}
 
 	if from := req.FormValue("from"); from != "" {
-		fromTime, err := time.Parse(models.ImportTimeFormat, from)
+		fromTime, err := common.ParseTime(from)
 		if err != nil {
 			http.Error(
 				rw, fmt.Sprintf("error: Invalid from time: %v", err),
@@ -713,7 +713,7 @@
 	}
 
 	if to := req.FormValue("to"); to != "" {
-		toTime, err := time.Parse(models.ImportTimeFormat, to)
+		toTime, err := common.ParseTime(to)
 		if err != nil {
 			http.Error(
 				rw, fmt.Sprintf("error: Invalid from time: %v", err),
@@ -779,7 +779,7 @@
 			// Too late for an HTTP error code.
 			return
 		}
-		record[0] = measureDate.Format(models.ImportTimeFormat)
+		record[0] = measureDate.Format(common.TimeFormat)
 		record[1] = float64format(waterlevel)
 		record[2] = nullFloat64format(valueMin)
 		record[3] = nullFloat64format(valueMax)
--- a/pkg/controllers/importqueue.go	Fri Jul 26 13:46:55 2019 +0200
+++ b/pkg/controllers/importqueue.go	Tue Jul 30 11:30:31 2019 +0200
@@ -27,6 +27,7 @@
 	"github.com/gorilla/mux"
 
 	"gemma.intevation.de/gemma/pkg/auth"
+	"gemma.intevation.de/gemma/pkg/common"
 	"gemma.intevation.de/gemma/pkg/imports"
 	"gemma.intevation.de/gemma/pkg/models"
 )
@@ -136,7 +137,7 @@
 	}
 
 	if from := req.FormValue("from"); from != "" {
-		fromTime, err := time.Parse(models.ImportTimeFormat, from)
+		fromTime, err := common.ParseTime(from)
 		if err != nil {
 			return nil, nil, nil, err
 		}
@@ -147,7 +148,7 @@
 	}
 
 	if to := req.FormValue("to"); to != "" {
-		toTime, err := time.Parse(models.ImportTimeFormat, to)
+		toTime, err := common.ParseTime(to)
 		if err != nil {
 			return nil, nil, nil, err
 		}
@@ -224,7 +225,7 @@
 		log.Printf("warn: %v\n", err)
 		return nil
 	}
-	return &models.ImportTime{Time: when}
+	return &models.ImportTime{Time: when.UTC()}
 }
 
 func listImports(
@@ -287,7 +288,7 @@
 		if signer.Valid {
 			it.Signer = signer.String
 		}
-		it.Enqueued = models.ImportTime{Time: enqueued}
+		it.Enqueued = models.ImportTime{Time: enqueued.UTC()}
 		imports = append(imports, &it)
 	}
 
@@ -346,6 +347,7 @@
 	case err != nil:
 		return
 	}
+	enqueued = enqueued.UTC()
 
 	var sum interface{}
 	if summary.Valid {
@@ -367,9 +369,11 @@
 
 	for rows.Next() {
 		var entry models.ImportLogEntry
-		if err = rows.Scan(&entry.Time, &entry.Kind, &entry.Message); err != nil {
+		var t time.Time
+		if err = rows.Scan(&t, &entry.Kind, &entry.Message); err != nil {
 			return
 		}
+		entry.Time = models.ImportTime{t.UTC()}
 		entries = append(entries, &entry)
 	}
 
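
The .UTC() calls added above normalize timestamps scanned from the database before they are wrapped in models.ImportTime, so all import times are rendered uniformly in UTC regardless of the zone the database session returns them in. A small, self-contained Go illustration of that difference (the constant mirrors common.TimeFormatMicro; the snippet is not gemma code):

    package main

    import (
        "fmt"
        "time"
    )

    // Mirrors common.TimeFormatMicro from pkg/common/time.go.
    const timeFormatMicro = "2006-01-02T15:04:05.999Z07:00"

    func main() {
        // A timestamp as it might come back from the database session
        // in server-local time (CEST, UTC+2).
        cest := time.FixedZone("CEST", 2*60*60)
        enqueued := time.Date(2019, 7, 30, 11, 30, 31, 0, cest)

        fmt.Println(enqueued.Format(timeFormatMicro))       // 2019-07-30T11:30:31+02:00
        fmt.Println(enqueued.UTC().Format(timeFormatMicro)) // 2019-07-30T09:30:31Z
    }
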
--- a/pkg/controllers/surveys.go	Fri Jul 26 13:46:55 2019 +0200
+++ b/pkg/controllers/surveys.go	Tue Jul 30 11:30:31 2019 +0200
@@ -32,9 +32,9 @@
   g.objname AS gauge_objname,
   r.value AS waterlevel_value
 FROM waterway.bottlenecks AS b
+  JOIN waterway.sounding_results AS s ON b.bottleneck_id = s.bottleneck_id
   JOIN waterway.gauges AS g
-    ON b.gauge_location = g.location AND b.gauge_validity = g.validity
-  JOIN waterway.sounding_results AS s ON b.bottleneck_id = s.bottleneck_id
+    ON b.gauge_location = g.location AND s.date_info::timestamptz <@ g.validity
   LEFT JOIN waterway.gauges_reference_water_levels AS r
     ON s.depth_reference = r.depth_reference
       AND g.location = r.location AND g.validity = r.validity
--- a/pkg/imports/agm.go	Fri Jul 26 13:46:55 2019 +0200
+++ b/pkg/imports/agm.go	Tue Jul 30 11:30:31 2019 +0200
@@ -169,7 +169,6 @@
 	agmInsertSQL = `
 INSERT INTO waterway.gauge_measurements (
   location,
-  validity,
   measure_date,
   country_code,
   sender,
@@ -182,12 +181,6 @@
   staging_done
 ) VALUES (
   ($1::char(2), $2::char(3), $3::char(5), $4::char(5), $5::int),
-  COALESCE(
-    (SELECT validity FROM waterway.gauges
-       WHERE location
-            = ($1::char(2), $2::char(3), $3::char(5), $4::char(5), $5::int)
-         AND validity @> CAST($6 AS timestamp with time zone)),
-    tstzrange(NULL, NULL)),
   $6,
   $7,
   $8,
--- a/pkg/imports/bn.go	Fri Jul 26 13:46:55 2019 +0200
+++ b/pkg/imports/bn.go	Tue Jul 30 11:30:31 2019 +0200
@@ -56,7 +56,6 @@
   bottleneck_id,
   validity,
   gauge_location,
-  gauge_validity,
   objnam,
   nobjnm,
   stretch,
@@ -68,11 +67,10 @@
   limiting,
   date_info,
   source_organization
-) SELECT
+) VALUES (
   $1,
-  validity * $2, -- intersections with gauge validity ranges
-  location,
-  validity,
+  $2::tstzrange,
+  isrs_fromText($3),
   $4,
   $5,
   (SELECT r FROM r),
@@ -88,8 +86,7 @@
   $12,
   $13,
   $14
-  FROM waterway.gauges
-  WHERE location = isrs_fromText($3) AND validity && $2
+)
 RETURNING id
 `
 
@@ -103,7 +100,6 @@
   bottleneck_id,
   validity,
   gauge_location,
-  gauge_validity,
   objnam,
   nobjnm,
   stretch,
@@ -115,11 +111,10 @@
   limiting,
   date_info,
   source_organization
-) = ( SELECT
+) = (
   $2,
-  validity * $3, -- intersections with gauge validity ranges
-  location,
-  validity,
+  $3::tstzrange,
+  isrs_fromText($4),
   $5,
   $6,
   (SELECT r FROM r),
@@ -135,8 +130,7 @@
   $13,
   $14::timestamptz,
   $15
-  FROM waterway.gauges
-  WHERE location = isrs_fromText($4) AND validity && $3 )
+)
 WHERE id=$1
 RETURNING id
 `
@@ -152,7 +146,6 @@
   bottleneck_id,
   validity,
   gauge_location,
-  gauge_validity,
   objnam,
   nobjnm,
   stretch,
@@ -166,9 +159,8 @@
   staging_done
 ) = ( SELECT
   $1,
-  validity * $2, -- intersections with gauge validity ranges
-  location,
-  validity,
+  $2::tstzrange,
+  isrs_fromText($3),
   $4,
   $5,
   (SELECT r FROM r),
@@ -180,8 +172,6 @@
   $13::timestamptz,
   $14,
   true
-  FROM waterway.gauges
-  WHERE location = isrs_fromText($3) AND validity && $2
 )
 `
 
@@ -191,14 +181,16 @@
   bottleneck_id,
   validity,
   staging_done
-) = ( SELECT
+) = (
   $1,
-  validity * $2, -- intersections with gauge validity ranges
+  $2::tstzrange,
   true
-  FROM waterway.gauges
-  WHERE location = isrs_fromText($3) AND validity && $2
 )
 `
+	// FIXME: Is this still needed with the new simplified historization
+	// model?  My intuition is: no, it isn't and should be removed, but we
+	// should double-check before doing so... [sw]
+	//
 	// Alignment with gauge validity might have generated new entries
 	// for the same time range. Thus, remove the old ones
 	deleteObsoleteBNSQL = `
@@ -540,7 +532,6 @@
 	err = findMatchingBNStmt.QueryRowContext(ctx,
 		bn.Bottleneck_id,
 		&validity,
-		bn.Fk_g_fid,
 	).Scan(&existing_bn_id)
 	switch {
 	case err == sql.ErrNoRows:
--- a/pkg/imports/fa.go	Fri Jul 26 13:46:55 2019 +0200
+++ b/pkg/imports/fa.go	Tue Jul 30 11:30:31 2019 +0200
@@ -4,13 +4,14 @@
 // SPDX-License-Identifier: AGPL-3.0-or-later
 // License-Filename: LICENSES/AGPL-3.0.txt
 //
-// Copyright (C) 2018 by via donau
+// Copyright (C) 2018,2019 by via donau
 //   – Österreichische Wasserstraßen-Gesellschaft mbH
 // Software engineering by Intevation GmbH
 //
 // Author(s):
 //  * Raimund Renkert <raimund.renkert@intevation.de>
 //  * Sascha L. Teichmann <sascha.teichmann@intevation.de>
+//  * Sascha Wilde <wilde@intevation.de>
 
 package imports
 
@@ -67,10 +68,7 @@
   source_organization
 ) VALUES (
   $1,
-  -- Always associate fairway availability data to newest bottleneck
-  -- version to prevent problems in analysis over longer time periods
-  (SELECT id FROM waterway.bottlenecks WHERE bottleneck_id = $2
-     ORDER BY validity DESC FETCH FIRST ROW ONLY),
+  $2,
   $3,
   $4,
   $5,
@@ -309,6 +307,8 @@
 	var faID int64
 	feedback.Info("Found %d fairway availabilities", len(fas))
 	for _, faRes := range fas {
+		// FIXME: The following test is probably unnecessary as already
+		//   done by DB constraints...  [sw]
 		if !bnIds.contains(faRes.Bottleneck_id) {
 			feedback.Warn("Bottleneck %s not found in database.", faRes.Bottleneck_id)
 			continue
--- a/pkg/imports/gm.go	Fri Jul 26 13:46:55 2019 +0200
+++ b/pkg/imports/gm.go	Tue Jul 30 11:30:31 2019 +0200
@@ -62,7 +62,6 @@
 	insertGMSQL = `
 INSERT INTO waterway.gauge_measurements (
   location,
-  validity,
   measure_date,
   sender,
   language_code,
@@ -75,12 +74,6 @@
   staging_done
 ) VALUES (
   ($1, $2, $3, $4, $5),
-  COALESCE(
-    (SELECT validity FROM waterway.gauges
-       WHERE location
-            = ($1::char(2), $2::char(3), $3::char(5), $4::char(5), $5::int)
-         AND validity @> CAST($6 AS timestamp with time zone)),
-    tstzrange(NULL, NULL)),
   $6,
   $7,
   $8,
@@ -99,7 +92,6 @@
 	insertGPSQL = `
 INSERT INTO waterway.gauge_predictions (
   location,
-  validity,
   measure_date,
   sender,
   language_code,
@@ -112,12 +104,6 @@
   source_organization
 ) VALUES(
   ($1, $2, $3, $4, $5),
-  COALESCE(
-    (SELECT validity FROM waterway.gauges
-       WHERE location
-            = ($1::char(2), $2::char(3), $3::char(5), $4::char(5), $5::int)
-         AND validity @> CAST($6 AS timestamp with time zone)),
-    tstzrange(NULL, NULL)),
   $6,
   $7,
   $8,
--- a/pkg/imports/queue.go	Fri Jul 26 13:46:55 2019 +0200
+++ b/pkg/imports/queue.go	Tue Jul 30 11:30:31 2019 +0200
@@ -362,6 +362,7 @@
 	if due.IsZero() {
 		due = time.Now()
 	}
+	due = due.UTC()
 
 	var tl sql.NullInt64
 	if trysLeft != nil {
--- a/pkg/imports/sr.go	Fri Jul 26 13:46:55 2019 +0200
+++ b/pkg/imports/sr.go	Tue Jul 30 11:30:31 2019 +0200
@@ -116,13 +116,11 @@
 	insertHullSQL = `
 INSERT INTO waterway.sounding_results (
   bottleneck_id,
-  bottleneck_validity,
   date_info,
   depth_reference,
   area
 ) SELECT
   bottleneck_id,
-  validity,
   $2::date,
   $3,
   (SELECT
@@ -190,7 +188,7 @@
 FROM waterway.gauges_reference_water_levels grwl
   JOIN waterway.bottlenecks bns
     ON grwl.location = bns.gauge_location
-      AND grwl.validity = bns.gauge_validity
+      AND grwl.validity @> CAST($2 AS timestamptz)
 WHERE bns.objnam = $1
   AND bns.validity @> CAST($2 AS timestamptz)
   AND grwl.depth_reference like 'LDC%'
--- a/pkg/imports/wg.go	Fri Jul 26 13:46:55 2019 +0200
+++ b/pkg/imports/wg.go	Tue Jul 30 11:30:31 2019 +0200
@@ -67,7 +67,7 @@
 
 const (
 	eraseObsoleteGaugesSQL = `
-UPDATE waterway.gauges SET erased = true
+UPDATE waterway.gauges SET erased = true, validity = validity - '[now,)'
 WHERE NOT erased
   AND (location).country_code = ANY($1)
   AND isrs_astext(location) <> ALL($2)
--- a/pkg/models/import.go	Fri Jul 26 13:46:55 2019 +0200
+++ b/pkg/models/import.go	Tue Jul 30 11:30:31 2019 +0200
@@ -17,10 +17,10 @@
 	"encoding/json"
 	"errors"
 	"time"
+
+	"gemma.intevation.de/gemma/pkg/common"
 )
 
-const ImportTimeFormat = "2006-01-02T15:04:05.000"
-
 type (
 	ImportTime struct{ time.Time }
 
@@ -64,7 +64,7 @@
 }
 
 func (it ImportTime) MarshalJSON() ([]byte, error) {
-	return json.Marshal(it.Format(ImportTimeFormat))
+	return json.Marshal(it.Format(common.TimeFormatMicro))
 }
 
 func (it *ImportTime) Scan(x interface{}) error {
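
With this change ImportTime is marshaled in the zone-aware common.TimeFormatMicro layout instead of the old zone-less ImportTimeFormat, matching what common.ParseTime accepts on input. A minimal, self-contained sketch of the resulting wire format (the wrapper below only imitates models.ImportTime, it is not the real type):

    package main

    import (
        "encoding/json"
        "fmt"
        "time"
    )

    // importTime imitates models.ImportTime: a time.Time wrapper with an
    // explicit, zone-aware JSON representation.
    type importTime struct{ time.Time }

    // Mirrors common.TimeFormatMicro.
    const timeFormatMicro = "2006-01-02T15:04:05.999Z07:00"

    func (it importTime) MarshalJSON() ([]byte, error) {
        return json.Marshal(it.Format(timeFormatMicro))
    }

    func main() {
        // 123000000 ns = 123 ms fractional second.
        t := importTime{time.Date(2019, 7, 30, 9, 30, 31, 123000000, time.UTC)}
        b, _ := json.Marshal(t)
        fmt.Println(string(b)) // "2019-07-30T09:30:31.123Z"
    }
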
--- a/pkg/models/sr.go	Fri Jul 26 13:46:55 2019 +0200
+++ b/pkg/models/sr.go	Tue Jul 30 11:30:31 2019 +0200
@@ -41,7 +41,7 @@
 SELECT EXISTS(SELECT 1
   FROM waterway.bottlenecks bn
     JOIN waterway.gauges g
-      ON bn.gauge_location = g.location AND bn.gauge_validity = g.validity
+      ON bn.gauge_location = g.location AND $3::timestamptz <@ g.validity
     JOIN waterway.gauges_reference_water_levels rl
       ON g.location = rl.location AND g.validity = rl.validity
   WHERE bn.objnam = $1
@@ -85,7 +85,8 @@
 		err = conn.QueryRowContext(ctx,
 			checkDepthReferenceSQL,
 			m.Bottleneck,
-			m.DepthReference).Scan(&b)
+			m.DepthReference,
+			m.Date.Time).Scan(&b)
 		switch {
 		case !b:
 			errs = append(errs,
--- a/schema/gemma.sql	Fri Jul 26 13:46:55 2019 +0200
+++ b/schema/gemma.sql	Tue Jul 30 11:30:31 2019 +0200
@@ -72,122 +72,134 @@
 $$
 LANGUAGE plpgsql;
 
--- Trigger functions to be used as statement-level AFTER triggers,
--- associating time-based referencing objects to matching version
-CREATE OR REPLACE FUNCTION move_gauge_referencing() RETURNS trigger AS
+-- Trigger function to be used as a constraint trigger to enforce
+-- existence of a referenced gauge with intersecting validity.  The
+-- columns with the referenced gauge isrs code and the validity are
+-- given as arguments to the trigger function.
+CREATE OR REPLACE FUNCTION check_valid_gauge() RETURNS trigger AS
 $$
 DECLARE
-    new_bn int;
-    new_bns int[];
+    -- FIXME: I'm using text for the isrs code and cast it on demand.
+    -- If someone is able to get it to work with isrs or isrs_base as
+    -- type, feel free to show me how it's done...  ;-) [sw]
+    referenced_gauge text;
+    new_validity tstzrange;
 BEGIN
-    -- Avoid unnecessary execution ON UPDATE if validity did not change
-    IF OLD IS NULL OR NEW.validity <> OLD.validity THEN
-        UPDATE waterway.gauge_measurements
-        SET validity = NEW.validity
-        WHERE location = NEW.location
-            AND measure_date <@ NEW.validity;
-
-        -- build bottleneck validities from intersections with gauge validities
-        FOR new_bn IN
-            INSERT INTO waterway.bottlenecks (
-                    bottleneck_id,
-                    validity,
-                    gauge_location,
-                    gauge_validity,
-                    objnam,
-                    nobjnm,
-                    stretch,
-                    area,
-                    rb,
-                    lb,
-                    responsible_country,
-                    revisiting_time,
-                    limiting,
-                    date_info,
-                    source_organization,
-                    staging_done
-                ) SELECT
-                    b.bottleneck_id,
-                    -- Anticipate non-intersecting gauge validities:
-                    b.validity * CASE WHEN g.validity = NEW.validity
-                        THEN NEW.validity ELSE g.validity - NEW.validity END,
-                    b.gauge_location,
-                    g.validity,
-                    b.objnam,
-                    b.nobjnm,
-                    b.stretch,
-                    b.area,
-                    b.rb,
-                    b.lb,
-                    b.responsible_country,
-                    b.revisiting_time,
-                    b.limiting,
-                    b.date_info,
-                    b.source_organization,
-                    b.staging_done
-                FROM waterway.bottlenecks b JOIN waterway.gauges g
-                    ON b.gauge_location = g.location
-                WHERE b.gauge_location = NEW.location
-                    AND b.validity && NEW.validity
-                    -- Avoid duplicate intersection results:
-                    AND NOT (b.validity <@ NEW.validity
-                        AND g.validity <> NEW.validity)
-            ON CONFLICT (bottleneck_id, validity) DO UPDATE SET
-                -- Associate to new matching gauge version
-                gauge_validity = EXCLUDED.gauge_validity
-            RETURNING id
-        LOOP
-            new_bns = new_bns || new_bn;
-        END LOOP;
-        -- Delete bottleneck versions superseded by new intersections:
-        DELETE FROM waterway.bottlenecks
-        WHERE gauge_location = NEW.location
-            AND validity && NEW.validity
-            AND id <> ALL(new_bns);
+    EXECUTE format('SELECT $1.%I', TG_ARGV[0])
+        INTO referenced_gauge
+        USING NEW;
+    EXECUTE format('SELECT $1.%I', TG_ARGV[1])
+        INTO new_validity
+        USING NEW;
+    IF EXISTS ( SELECT * FROM waterway.gauges
+                  WHERE location = referenced_gauge::isrs
+                  AND validity && new_validity )
+    THEN
+        RETURN NEW;
+    ELSE
+        RAISE EXCEPTION
+            'new row for relation "%" violates constraint trigger "%"',
+                TG_TABLE_NAME, TG_NAME
+            USING
+                DETAIL = format('No matching gauge %s found.',
+                    isrs_AsText(referenced_gauge::isrs)),
+                ERRCODE = 23505,
+                SCHEMA = TG_TABLE_SCHEMA,
+                TABLE = TG_TABLE_NAME,
+                COLUMN = TG_ARGV[0],
+                CONSTRAINT = TG_NAME;
     END IF;
-    RETURN NULL; -- ignored
 END;
 $$
 LANGUAGE plpgsql;
 
-CREATE OR REPLACE FUNCTION move_bottleneck_referencing() RETURNS trigger AS
+-- The same for objects with a timestamp instead of a validity range.
+CREATE OR REPLACE FUNCTION check_valid_gauge_ts() RETURNS trigger AS
 $$
+DECLARE
+    -- FIXME: I'm using text for the isrs code and cast it on demand.
+    -- If someone is able to get it to work with isrs or isrs_base as
+    -- type, feel free to show me how it's done...  ;-) [sw]
+    referenced_gauge text;
+    new_tstz timestamptz;
 BEGIN
-    -- Avoid unnecessary execution ON UPDATE if validity did not change
-    IF OLD IS NULL OR NEW.validity <> OLD.validity THEN
-        UPDATE waterway.sounding_results
-        SET bottleneck_validity = NEW.validity
-        WHERE bottleneck_id = NEW.bottleneck_id
-          AND CAST(date_info AS timestamptz) <@ NEW.validity;
+    EXECUTE format('SELECT $1.%I', TG_ARGV[0])
+        INTO referenced_gauge
+        USING NEW;
+    EXECUTE format('SELECT $1.%I', TG_ARGV[1])
+        INTO new_tstz
+        USING NEW;
+    IF EXISTS ( SELECT * FROM waterway.gauges
+                  WHERE location = referenced_gauge::isrs
+                  AND validity @> new_tstz )
+    THEN
+        RETURN NEW;
+    ELSE
+        RAISE EXCEPTION
+            'new row for relation "%" violates constraint trigger "%"',
+                TG_TABLE_NAME, TG_NAME
+            USING
+                DETAIL = format('No matching gauge %s found.',
+                    isrs_AsText(referenced_gauge::isrs)),
+                ERRCODE = 23505,
+                SCHEMA = TG_TABLE_SCHEMA,
+                TABLE = TG_TABLE_NAME,
+                COLUMN = TG_ARGV[0],
+                CONSTRAINT = TG_NAME;
+    END IF;
+END;
+$$
+LANGUAGE plpgsql;
 
-        -- Always associate fairway availability data to newest bottleneck
-        -- version to prevent problems in analysis over longer time periods
-        WITH
-        bn AS (SELECT id, validity FROM waterway.bottlenecks
-            WHERE bottleneck_id = NEW.bottleneck_id),
-        latest AS (SELECT id FROM bn
-            -- Candidates are past new validity or just inserted/updated
-            WHERE NOT validity &< NEW.validity OR id = NEW.id
-            ORDER BY upper(validity) DESC FETCH FIRST ROW ONLY)
-        UPDATE waterway.fairway_availability
-        SET bottleneck_id = (SELECT id FROM latest)
-        WHERE bottleneck_id IN(SELECT id FROM bn EXCEPT SELECT id FROM latest);
+-- Trigger function to be used as a constraint trigger to enforce
+-- existence of a referenced bottleneck with validity at a given time.
+-- The columns with the referenced bottleneck id and the timestamp are
+-- given as arguments to the trigger function.
+CREATE OR REPLACE FUNCTION check_valid_bottleneck_ts() RETURNS trigger AS
+$$
+DECLARE
+    referenced_bottleneck_id text;
+    new_tstz timestamptz;
+BEGIN
+    EXECUTE format('SELECT $1.%I', TG_ARGV[0])
+        INTO referenced_bottleneck_id
+        USING NEW;
+    EXECUTE format('SELECT $1.%I', TG_ARGV[1])
+        INTO new_tstz
+        USING NEW;
+    IF EXISTS ( SELECT * FROM waterway.bottlenecks
+                  WHERE bottleneck_id = referenced_bottleneck_id
+                  AND validity @> new_tstz )
+    THEN
+        RETURN NEW;
+    ELSE
+        RAISE EXCEPTION
+            'new row for relation "%" violates constraint trigger "%"',
+                TG_TABLE_NAME, TG_NAME
+            USING
+                DETAIL = format('No matching bottleneck %s for %s found.',
+                    referenced_bottleneck_id, new_tstz),
+                ERRCODE = 23505,
+                SCHEMA = TG_TABLE_SCHEMA,
+                TABLE = TG_TABLE_NAME,
+                COLUMN = TG_ARGV[0],
+                CONSTRAINT = TG_NAME;
     END IF;
-    RETURN NULL; -- ignored
 END;
 $$
 LANGUAGE plpgsql;
 
 -- Constraint trigger: sounding Results must intersect with the area
--- of the bottleneck they belong to.
+-- of the bottleneck they belong to.  The letter prefix at the beginning
+-- of the name ensures it is fired after the other triggers.
 CREATE OR REPLACE FUNCTION check_sr_in_bn_area() RETURNS trigger
 LANGUAGE plpgsql
 AS $$
 BEGIN
     IF NOT st_intersects((SELECT area
                           FROM waterway.bottlenecks
-                          WHERE (bottleneck_id, validity)
-                              =(NEW.bottleneck_id, NEW.bottleneck_validity)),
+                          WHERE bottleneck_id = NEW.bottleneck_id
+                            AND validity @> NEW.date_info::timestamptz),
                          NEW.area)
     THEN
         RAISE EXCEPTION
@@ -444,9 +456,6 @@
     CREATE UNIQUE INDEX gauges_erased_unique_constraint
         ON gauges (location)
         WHERE NOT erased
-    -- Associate referencing objects to matching gauge version
-    CREATE TRIGGER move_referencing AFTER INSERT OR UPDATE OF validity
-        ON gauges FOR EACH ROW EXECUTE FUNCTION move_gauge_referencing()
 
     CREATE TABLE gauges_reference_water_levels (
         location isrs NOT NULL,
@@ -462,12 +471,7 @@
     CREATE TABLE gauge_measurements (
         id int PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY,
         location isrs NOT NULL,
-        validity tstzrange NOT NULL,
-        CONSTRAINT gauge_key
-            FOREIGN KEY (location, validity) REFERENCES gauges
-            ON UPDATE CASCADE,
         measure_date timestamp with time zone NOT NULL,
-        CHECK (measure_date <@ validity),
         country_code char(2) NOT NULL REFERENCES countries,
         sender varchar NOT NULL, -- "from" element from NtS response
         language_code varchar NOT NULL REFERENCES language_codes,
@@ -479,18 +483,16 @@
         staging_done boolean NOT NULL DEFAULT false,
         UNIQUE (measure_date, location, staging_done)
     )
+    CREATE CONSTRAINT TRIGGER waterway_gauge_measurements_reference_gauge
+        AFTER INSERT OR UPDATE OF location ON gauge_measurements
+        FOR EACH ROW EXECUTE FUNCTION check_valid_gauge_ts('location','measure_date')
     -- For fast retrieval of newest measurement per location:
     CREATE INDEX gauge_measurements_location_measure_date_desc
         ON waterway.gauge_measurements (location, measure_date DESC)
 
     CREATE TABLE gauge_predictions (
         location isrs NOT NULL,
-        validity tstzrange NOT NULL,
-        CONSTRAINT gauge_key
-            FOREIGN KEY (location, validity) REFERENCES gauges
-            ON UPDATE CASCADE,
         measure_date timestamp with time zone NOT NULL,
-        CHECK (measure_date >= lower(validity)),
         country_code char(2) NOT NULL REFERENCES countries,
         sender varchar NOT NULL, -- "from" element from NtS response
         language_code varchar NOT NULL REFERENCES language_codes,
@@ -503,6 +505,9 @@
         source_organization varchar NOT NULL, -- "originator" from NtS response
         PRIMARY KEY (measure_date, location, date_issue)
     )
+    CREATE CONSTRAINT TRIGGER waterway_gauge_predictions_reference_gauge
+        AFTER INSERT OR UPDATE OF location ON gauge_predictions
+        FOR EACH ROW EXECUTE FUNCTION check_valid_gauge_ts('location','measure_date')
 
     CREATE TABLE waterway_axis (
         id int PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY,
@@ -648,11 +653,6 @@
         EXCLUDE USING GiST (bottleneck_id WITH =, validity WITH &&)
             DEFERRABLE INITIALLY DEFERRED,
         gauge_location isrs NOT NULL,
-        gauge_validity tstzrange NOT NULL,
-        CHECK(validity <@ gauge_validity),
-        CONSTRAINT gauge_key
-            FOREIGN KEY (gauge_location, gauge_validity) REFERENCES gauges
-                ON UPDATE CASCADE,
         objnam varchar,
         nobjnm varchar,
         stretch isrsrange NOT NULL,
@@ -675,10 +675,9 @@
         source_organization varchar NOT NULL,
         staging_done boolean NOT NULL DEFAULT false
     )
-    -- Associate referencing objects to matching bottleneck version
-    CREATE TRIGGER move_referencing AFTER INSERT OR UPDATE OF validity
-        ON bottlenecks FOR EACH ROW
-        EXECUTE FUNCTION move_bottleneck_referencing()
+    CREATE CONSTRAINT TRIGGER waterway_bottlenecks_reference_gauge
+        AFTER INSERT OR UPDATE OF gauge_location ON bottlenecks
+        FOR EACH ROW EXECUTE FUNCTION check_valid_gauge('gauge_location','validity')
 
     CREATE TABLE bottlenecks_riverbed_materials (
         bottleneck_id int NOT NULL REFERENCES bottlenecks(id)
@@ -691,14 +690,7 @@
     CREATE TABLE sounding_results (
         id int PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY,
         bottleneck_id varchar NOT NULL,
-        bottleneck_validity tstzrange NOT NULL,
-        CONSTRAINT bottleneck_key
-            FOREIGN KEY (bottleneck_id, bottleneck_validity)
-                REFERENCES bottlenecks (bottleneck_id, validity)
-                ON UPDATE CASCADE,
         date_info date NOT NULL,
-        CHECK (tstzrange(date_info::timestamptz,
-            date_info::timestamptz + '1 d'::interval) && bottleneck_validity),
         UNIQUE (bottleneck_id, date_info),
         area geography(POLYGON, 4326) NOT NULL
             CHECK(ST_IsValid(CAST(area AS geometry))),
@@ -709,7 +701,11 @@
         octree_index bytea,
         staging_done boolean NOT NULL DEFAULT false
     )
-    CREATE CONSTRAINT TRIGGER sounding_results_in_bn_area
+    CREATE CONSTRAINT TRIGGER a_sounding_results_reference_bottleneck
+        AFTER INSERT OR UPDATE OF bottleneck_id ON sounding_results
+        FOR EACH ROW
+        EXECUTE FUNCTION check_valid_bottleneck_ts('bottleneck_id','date_info')
+    CREATE CONSTRAINT TRIGGER b_sounding_results_in_bn_area
         AFTER INSERT OR UPDATE ON sounding_results
         FOR EACH ROW EXECUTE FUNCTION check_sr_in_bn_area()
 
@@ -728,7 +724,7 @@
     CREATE TABLE fairway_availability (
         id int PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY,
         position_code char(2) REFERENCES position_codes,
-        bottleneck_id int NOT NULL REFERENCES bottlenecks(id),
+        bottleneck_id varchar NOT NULL,
         surdat date NOT NULL,
         UNIQUE (bottleneck_id, surdat),
         -- additional_data xml -- Currently not relevant for GEMMA
@@ -736,6 +732,18 @@
         date_info timestamp with time zone NOT NULL,
         source_organization varchar NOT NULL
     )
+    -- FIXME: From the DRC it is unclear what the exact semantics of
+    --   surdat and Date_Info are.  Currently we assume that
+    --   (fk_bn_fid,surdat) has to be unique, but that might be false.
+    --   Anyway, I will use date_info here to check for a matching
+    --   reference gauge at the bottleneck.  The reason for this
+    --   decision is purely practical (and might be semantically
+    --   disputable): the bottleneck data in the demo system is not old
+    --   enough to cover the surdat times...
+    CREATE CONSTRAINT TRIGGER fairway_availability_referenced_bottleneck
+        AFTER INSERT OR UPDATE OF bottleneck_id ON fairway_availability
+        FOR EACH ROW
+        EXECUTE FUNCTION check_valid_bottleneck_ts('bottleneck_id','date_info')
 
     CREATE TABLE fa_reference_values (
         fairway_availability_id int NOT NULL REFERENCES fairway_availability,
@@ -826,8 +834,8 @@
         id         int PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY,
         state      import_state NOT NULL DEFAULT 'queued',
         kind       varchar   NOT NULL,
-        enqueued   timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
-        due        timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+        enqueued   timestamp with time zone NOT NULL DEFAULT CURRENT_TIMESTAMP,
+        due        timestamp with time zone NOT NULL DEFAULT CURRENT_TIMESTAMP,
         retry_wait interval
             CHECK(retry_wait IS NULL
                 OR retry_wait >= interval '0 microseconds'),
@@ -850,7 +858,7 @@
     CREATE TABLE import_logs (
         import_id int NOT NULL REFERENCES imports(id)
             ON DELETE CASCADE,
-        time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+        time timestamp with time zone NOT NULL DEFAULT CURRENT_TIMESTAMP,
         kind log_type NOT NULL DEFAULT 'info',
         msg TEXT NOT NULL
     )
--- a/schema/geoserver_views.sql	Fri Jul 26 13:46:55 2019 +0200
+++ b/schema/geoserver_views.sql	Tue Jul 30 11:30:31 2019 +0200
@@ -121,12 +121,12 @@
         g.forecast_accuracy_1d
     FROM waterway.bottlenecks b
         LEFT JOIN waterway.gauges_base_view g
-            ON b.gauge_location = g.location AND b.gauge_validity = g.validity
+            ON b.gauge_location = g.location AND g.validity @> current_timestamp
         LEFT JOIN (SELECT DISTINCT ON (bottleneck_id)
                     bottleneck_id, date_info, critical
                 FROM waterway.fairway_availability
                 ORDER BY bottleneck_id, date_info DESC) AS fal
-            ON b.id = fal.bottleneck_id
+            ON b.bottleneck_id = fal.bottleneck_id
         LEFT JOIN (SELECT DISTINCT ON (bottleneck_id)
                     bottleneck_id, max(date_info) AS date_max
                 FROM waterway.sounding_results
@@ -218,4 +218,4 @@
             ON sd.subtrahend = srs.id
         JOIN waterway.bottlenecks bn
             ON srm.bottleneck_id = bn.bottleneck_id
-                AND srm.bottleneck_validity = bn.validity;
+                AND srm.date_info::timestamptz <@ bn.validity;
--- a/schema/update-db.sh	Fri Jul 26 13:46:55 2019 +0200
+++ b/schema/update-db.sh	Tue Jul 30 11:30:31 2019 +0200
@@ -98,13 +98,17 @@
 
 current_ver=$( get_version )
 
-for d in $BASEDIR/updates/* ; do
+for d in "$BASEDIR"/updates/* ; do
   new_ver=$( basename $d )
   if [ -d "$d" ] && [ "$new_ver" -gt $current_ver ] ; then
     echo "Running updates for $new_ver ..."
 
-    psql -1qv ON_ERROR_STOP= -p "$port" -d "$db" \
-      $(find "$d" -type f -printf ' -f %p') \
+    file_args=""
+    for f in "$d"/* ; do
+      file_args+=" -f $f"
+    done
+
+    psql -1qv ON_ERROR_STOP= -p "$port" -d "$db" $file_args \
       -c "INSERT INTO gemma_schema_version(version) VALUES ($new_ver)"
 
   fi
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/schema/updates/1010/01.timezones-imports.sql	Tue Jul 30 11:30:31 2019 +0200
@@ -0,0 +1,3 @@
+ALTER TABLE import.imports     ALTER COLUMN enqueued TYPE timestamp with time zone;
+ALTER TABLE import.imports     ALTER COLUMN due      TYPE timestamp with time zone;
+ALTER TABLE import.import_logs ALTER COLUMN time     TYPE timestamp with time zone;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/schema/updates/1100/01.remove_gauge_validity_refs.sql	Tue Jul 30 11:30:31 2019 +0200
@@ -0,0 +1,197 @@
+-- This is Free Software under GNU Affero General Public License v >= 3.0
+-- without warranty, see README.md and license for details.
+
+-- SPDX-License-Identifier: AGPL-3.0-or-later
+-- License-Filename: LICENSES/AGPL-3.0.txt
+
+-- Copyright (C) 2019 by via donau
+--   – Österreichische Wasserstraßen-Gesellschaft mbH
+-- Software engineering by Intevation GmbH
+
+-- Author(s):
+--  * Sascha Wilde <sascha.wilde@intevation.de>
+
+--
+-- CONSTRAINT FUNCTIONS
+--
+
+-- We still want to ensure that there is at least one valid gauge at
+-- any time within the referencing object's validity.  To ensure this
+-- we need a constraint trigger:
+CREATE OR REPLACE FUNCTION check_valid_gauge() RETURNS trigger AS
+$$
+DECLARE
+    -- FIXME: I'm using text for the isrs code and cast it on demand.
+    -- If someone is able to get it to work with isrs or isrs_base as
+    -- type, feel free to show me how it's done...  ;-) [sw]
+    referenced_gauge text;
+    new_validity tstzrange;
+BEGIN
+    EXECUTE format('SELECT $1.%I', TG_ARGV[0])
+        INTO referenced_gauge
+        USING NEW;
+    EXECUTE format('SELECT $1.%I', TG_ARGV[1])
+        INTO new_validity
+        USING NEW;
+    IF EXISTS ( SELECT * FROM waterway.gauges
+                  WHERE location = referenced_gauge::isrs
+                  AND validity && new_validity )
+    THEN
+        RETURN NEW;
+    ELSE
+        RAISE EXCEPTION
+            'new row for relation "%" violates constraint trigger "%"',
+                TG_TABLE_NAME, TG_NAME
+            USING
+                DETAIL = format('No matching gauge %s found.',
+                    isrs_AsText(referenced_gauge::isrs)),
+                ERRCODE = 23505,
+                SCHEMA = TG_TABLE_SCHEMA,
+                TABLE = TG_TABLE_NAME,
+                COLUMN = TG_ARGV[0],
+                CONSTRAINT = TG_NAME;
+    END IF;
+END;
+$$
+LANGUAGE plpgsql;
+
+-- The same for objects with a timestamp instead of a validity range.
+CREATE OR REPLACE FUNCTION check_valid_gauge_ts() RETURNS trigger AS
+$$
+DECLARE
+    -- FIXME: I'm using text for the isrs code and cast it on demand.
+    -- If someone is able to get it to work with isrs or isrs_base as
+    -- type, feel free to show me how it's done...  ;-) [sw]
+    referenced_gauge text;
+    new_tstz timestamptz;
+BEGIN
+    EXECUTE format('SELECT $1.%I', TG_ARGV[0])
+        INTO referenced_gauge
+        USING NEW;
+    EXECUTE format('SELECT $1.%I', TG_ARGV[1])
+        INTO new_tstz
+        USING NEW;
+    IF EXISTS ( SELECT * FROM waterway.gauges
+                  WHERE location = referenced_gauge::isrs
+                  AND validity @> new_tstz )
+    THEN
+        RETURN NEW;
+    ELSE
+        RAISE EXCEPTION
+            'new row for relation "%" violates constraint trigger "%"',
+                TG_TABLE_NAME, TG_NAME
+            USING
+                DETAIL = format('No matching gauge %s for %s found.',
+                    (isrs_AsText(referenced_gauge::isrs)), new_tstz),
+                ERRCODE = 23505,
+                SCHEMA = TG_TABLE_SCHEMA,
+                TABLE = TG_TABLE_NAME,
+                COLUMN = TG_ARGV[0],
+                CONSTRAINT = TG_NAME;
+    END IF;
+END;
+$$
+LANGUAGE plpgsql;
+
+
+--
+-- BOTTLENECKS
+--
+
+-- Dynamic version of bottlenecks_geoserver view:
+-- Instead of a static reference to a specific gauge data set via
+-- primary key (location, validity) we check for a currently
+-- valid gauge (for the currently valid bottleneck) at execution
+-- time.
+CREATE OR REPLACE VIEW waterway.bottlenecks_geoserver AS
+    SELECT
+        b.id,
+        b.bottleneck_id,
+        b.objnam,
+        b.nobjnm,
+        b.stretch,
+        b.area,
+        b.rb,
+        b.lb,
+        b.responsible_country,
+        b.revisiting_time,
+        b.limiting,
+        b.date_info,
+        b.source_organization,
+        g.location AS gauge_isrs_code,
+        g.objname AS gauge_objname,
+        g.reference_water_levels,
+        fal.date_info AS fa_date_info,
+        fal.critical AS fa_critical,
+        g.gm_measuredate,
+        g.gm_waterlevel,
+        g.gm_n_14d,
+        srl.date_max,
+        g.forecast_accuracy_3d,
+        g.forecast_accuracy_1d
+    FROM waterway.bottlenecks b
+        LEFT JOIN waterway.gauges_base_view g
+            ON b.gauge_location = g.location AND g.validity @> current_timestamp
+        LEFT JOIN (SELECT DISTINCT ON (bottleneck_id)
+                    bottleneck_id, date_info, critical
+                FROM waterway.fairway_availability
+                ORDER BY bottleneck_id, date_info DESC) AS fal
+            ON b.id = fal.bottleneck_id
+        LEFT JOIN (SELECT DISTINCT ON (bottleneck_id)
+                    bottleneck_id, max(date_info) AS date_max
+                FROM waterway.sounding_results
+                GROUP BY bottleneck_id
+                ORDER BY bottleneck_id DESC) AS srl
+            ON b.bottleneck_id = srl.bottleneck_id
+    WHERE b.validity @> current_timestamp;
+
+-- As we resolve the correct gauge data to use at runtime, we drop the
+-- hard reference to the gauge's validity:
+ALTER TABLE waterway.bottlenecks DROP IF EXISTS gauge_validity;
+
+DROP TRIGGER IF EXISTS waterway_bottlenecks_reference_gauge
+    ON waterway.bottlenecks;
+CREATE CONSTRAINT TRIGGER waterway_bottlenecks_reference_gauge
+    AFTER INSERT OR UPDATE OF gauge_location ON waterway.bottlenecks
+    FOR EACH ROW EXECUTE FUNCTION check_valid_gauge('gauge_location','validity');
+
+
+--
+-- GAUGE MEASUREMENTS
+--
+
+-- As we resolve the correct gauge data to use at runtime, we drop the
+-- hard reference to the gauge's validity:
+ALTER TABLE waterway.gauge_measurements DROP IF EXISTS validity;
+
+DROP TRIGGER IF EXISTS waterway_gauge_measurements_reference_gauge
+    ON waterway.gauge_measurements;
+CREATE CONSTRAINT TRIGGER waterway_gauge_measurements_reference_gauge
+    AFTER INSERT OR UPDATE OF location ON waterway.gauge_measurements
+    FOR EACH ROW EXECUTE FUNCTION check_valid_gauge_ts('location','measure_date');
+
+ALTER TABLE waterway.gauge_predictions DROP IF EXISTS validity;
+
+DROP TRIGGER IF EXISTS waterway_gauge_predictions_reference_gauge
+    ON waterway.gauge_predictions;
+CREATE CONSTRAINT TRIGGER waterway_gauge_predictions_reference_gauge
+    AFTER INSERT OR UPDATE OF location ON waterway.gauge_predictions
+    FOR EACH ROW EXECUTE FUNCTION check_valid_gauge_ts('location','measure_date');
+
+--
+-- DROP NO LONGER NEEDED TRIGGER FUNCTION
+--
+
+-- This used to update foreign key references.  As these references no
+-- longer exist we don't need this magic any more...
+DROP TRIGGER IF EXISTS move_referencing ON waterway.gauges;
+DROP FUNCTION IF EXISTS move_gauge_referencing();
+
+
+--
+-- ADDITIONAL NOTES
+--
+-- waterway.gauges_reference_water_levels still has a hard foreign key
+-- reference to waterway.gauges.  As this data actually is part of the
+-- gauges data and the separation into two tables is exclusively due to
+-- schema modeling we leave it alone!
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/schema/updates/1100/02.remove_bottleneck_validity_refs.sql	Tue Jul 30 11:30:31 2019 +0200
@@ -0,0 +1,256 @@
+-- This is Free Software under GNU Affero General Public License v >= 3.0
+-- without warranty, see README.md and license for details.
+
+-- SPDX-License-Identifier: AGPL-3.0-or-later
+-- License-Filename: LICENSES/AGPL-3.0.txt
+
+-- Copyright (C) 2019 by via donau
+--   – Österreichische Wasserstraßen-Gesellschaft mbH
+-- Software engineering by Intevation GmbH
+
+-- Author(s):
+--  * Sascha Wilde <sascha.wilde@intevation.de>
+
+--
+-- CONSTRAINT FUNCTIONS
+--
+
+-- We still want to ensure that there is at least one valid bottleneck
+-- at any time within the referencing object's validity.  To ensure
+-- this we need a constraint trigger:
+CREATE OR REPLACE FUNCTION check_valid_bottleneck_ts() RETURNS trigger AS
+$$
+DECLARE
+    referenced_bottleneck_id text;
+    new_tstz timestamptz;
+BEGIN
+    EXECUTE format('SELECT $1.%I', TG_ARGV[0])
+        INTO referenced_bottleneck_id
+        USING NEW;
+    EXECUTE format('SELECT $1.%I', TG_ARGV[1])
+        INTO new_tstz
+        USING NEW;
+    IF EXISTS ( SELECT * FROM waterway.bottlenecks
+                  WHERE bottleneck_id = referenced_bottleneck_id
+                  AND validity @> new_tstz )
+    THEN
+        RETURN NEW;
+    ELSE
+        RAISE EXCEPTION
+            'new row for relation "%" violates constraint trigger "%"',
+                TG_TABLE_NAME, TG_NAME
+            USING
+                DETAIL = format('No matching bottleneck %s for %s found.',
+                    referenced_bottleneck_id, new_tstz),
+                ERRCODE = 23505,
+                SCHEMA = TG_TABLE_SCHEMA,
+                TABLE = TG_TABLE_NAME,
+                COLUMN = TG_ARGV[0],
+                CONSTRAINT = TG_NAME;
+    END IF;
+END;
+$$
+LANGUAGE plpgsql;
+
+-- Redefine constraint trigger: sounding results must intersect with
+-- the area of the bottleneck they belong to.  The bottleneck is
+-- determined dynamically via date_info.
+CREATE OR REPLACE FUNCTION check_sr_in_bn_area() RETURNS trigger
+LANGUAGE plpgsql
+AS $$
+BEGIN
+    IF NOT st_intersects((SELECT area
+                          FROM waterway.bottlenecks
+                          WHERE bottleneck_id = NEW.bottleneck_id
+                            AND validity @> NEW.date_info::timestamptz),
+                         NEW.area)
+    THEN
+        RAISE EXCEPTION
+            'new row for relation "%" violates constraint trigger "%"',
+                TG_TABLE_NAME, TG_NAME
+            USING
+                DETAIL = 'Failing row area has no intersection with bottleneck.',
+                ERRCODE = 23514,
+                SCHEMA = TG_TABLE_SCHEMA,
+                TABLE = TG_TABLE_NAME,
+                CONSTRAINT = TG_NAME;
+    END IF;
+    RETURN NEW;
+END;
+$$;
+
+
+--
+-- SOUNDING RESULTS
+--
+
+-- Dynamic version of sounding_differences geoserver view: Instead of
+-- a static reference to a specific bottleneck data set via primary
+-- key (id, validity) we check for a bottleneck valid at the time of
+-- the survey at execution time.
+CREATE OR REPLACE VIEW waterway.sounding_differences AS
+    SELECT
+        sd.id           AS id,
+        bn.objnam       AS objnam,
+        srm.date_info   AS minuend,
+        srs.date_info   AS subtrahend,
+        sdcl.height     AS height,
+        CAST(sdcl.lines AS geometry(multilinestring, 4326)) AS lines
+    FROM caching.sounding_differences sd
+        JOIN caching.sounding_differences_contour_lines sdcl
+            ON sd.id = sdcl.sounding_differences_id
+        JOIN waterway.sounding_results srm
+            ON sd.minuend = srm.id
+        JOIN waterway.sounding_results srs
+            ON sd.subtrahend = srs.id
+        JOIN waterway.bottlenecks bn
+            ON srm.bottleneck_id = bn.bottleneck_id
+                AND srm.date_info::timestamptz <@ bn.validity;
+
+-- As we resolve the correct bottleneck data to use at runtime, we drop
+-- the hard reference to the bottleneck's validity:
+ALTER TABLE waterway.sounding_results DROP IF EXISTS bottleneck_validity;
+
+-- Note: we now use prefixed names to ensure correct execution order
+-- for the triggers...
+DROP TRIGGER IF EXISTS a_sounding_results_reference_bottleneck
+    ON waterway.sounding_results;
+CREATE CONSTRAINT TRIGGER a_sounding_results_reference_bottleneck
+    AFTER INSERT OR UPDATE OF bottleneck_id ON waterway.sounding_results
+    FOR EACH ROW
+    EXECUTE FUNCTION check_valid_bottleneck_ts('bottleneck_id','date_info');
+
+DROP TRIGGER IF EXISTS sounding_results_in_bn_area
+    ON waterway.sounding_results;
+DROP TRIGGER IF EXISTS b_sounding_results_in_bn_area
+    ON waterway.sounding_results;
+CREATE CONSTRAINT TRIGGER b_sounding_results_in_bn_area
+    AFTER INSERT OR UPDATE ON waterway.sounding_results
+    FOR EACH ROW EXECUTE FUNCTION check_sr_in_bn_area();
+
+
+--
+-- FAIRWAY AVAILABILITY
+--
+
+-- The current implementation hard-wires fairway_availability data
+-- to the internal id of a bottleneck.  Let's use the
+-- bottleneck_id (official unique id) instead and do the lookup of a
+-- matching bottleneck for a specific date on demand.
+
+-- Dear reader: The gemma schema update scripts are intended to be run
+--   only once, which is implemented via schema versioning and the
+--   update-db.sh script.  Nonetheless it is very helpful to be able
+--   to run scripts more than once during development and testing,
+--   without harm being done.  That's the reason for the following
+--   code being the way it is:
+--
+-- This migration can only be done once; thanks to psql magic we are
+-- able to guard it appropriately.
+
+SELECT data_type='integer' AS old_fwa_bnid
+    FROM information_schema.columns
+    WHERE table_schema = 'waterway'
+        AND table_name='fairway_availability'
+        AND column_name='bottleneck_id';
+\gset
+\if :old_fwa_bnid
+  \qecho 'Migrating bottleneck_id column in fairway_availability to text id.'
+  -- We temporarily keep the old id field for the migration of existing data:
+  ALTER TABLE waterway.fairway_availability
+      RENAME COLUMN bottleneck_id TO old_bnid;
+  ALTER TABLE waterway.fairway_availability
+      ADD COLUMN bottleneck_id varchar;
+
+  -- Migrate existing data: Let's be ultra permissive and do the
+  -- migration _before_ adding the constraint trigger.
+  UPDATE waterway.fairway_availability AS fwa
+      SET bottleneck_id = b.bottleneck_id
+      FROM waterway.bottlenecks b
+      WHERE b.id = fwa.old_bnid;
+  ALTER TABLE waterway.fairway_availability
+      ALTER COLUMN bottleneck_id SET NOT NULL;
+
+  -- Set constraint trigger to make sure a matching BN exists:
+  --
+  -- FIXME: From the DRC it is unclear what the exact semantics of
+  --   surdat and Date_Info are.  Currently we assume that
+  --   (fk_bn_fid,surdat) has to be unique, but that might be false.
+  --   Anyway, I will use date_info here to check for a matching
+  --   reference gauge at the bottleneck.  The reason for this
+  --   decision is purely practical (and might be semantically
+  --   disputable): the bottleneck data in the demo system is not old
+  --   enough to cover the surdat times...
+  CREATE CONSTRAINT TRIGGER fairway_availability_referenced_bottleneck
+      AFTER INSERT OR UPDATE OF bottleneck_id ON waterway.fairway_availability
+      FOR EACH ROW
+      EXECUTE FUNCTION check_valid_bottleneck_ts('bottleneck_id','date_info');
+  -- Set NOT NULL constraint for new column
+  ALTER TABLE waterway.fairway_availability
+      ALTER COLUMN bottleneck_id SET NOT NULL;
+
+  -- The change also affects the geoserver bottlenecks view, which
+  -- joins in fairway_availability.  We leave the rather fuzzy match
+  -- (not using any validity time match), as it is unclear what the
+  -- validity period of fwa data is.
+  CREATE OR REPLACE VIEW waterway.bottlenecks_geoserver AS
+      SELECT
+          b.id,
+          b.bottleneck_id,
+          b.objnam,
+          b.nobjnm,
+          b.stretch,
+          b.area,
+          b.rb,
+          b.lb,
+          b.responsible_country,
+          b.revisiting_time,
+          b.limiting,
+          b.date_info,
+          b.source_organization,
+          g.location AS gauge_isrs_code,
+          g.objname AS gauge_objname,
+          g.reference_water_levels,
+          fal.date_info AS fa_date_info,
+          fal.critical AS fa_critical,
+          g.gm_measuredate,
+          g.gm_waterlevel,
+          g.gm_n_14d,
+          srl.date_max,
+          g.forecast_accuracy_3d,
+          g.forecast_accuracy_1d
+      FROM waterway.bottlenecks b
+          LEFT JOIN waterway.gauges_base_view g
+              ON b.gauge_location = g.location AND g.validity @> current_timestamp
+          LEFT JOIN (SELECT DISTINCT ON (bottleneck_id)
+                      bottleneck_id, date_info, critical
+                  FROM waterway.fairway_availability
+                  ORDER BY bottleneck_id, date_info DESC) AS fal
+              ON b.bottleneck_id = fal.bottleneck_id
+          LEFT JOIN (SELECT DISTINCT ON (bottleneck_id)
+                      bottleneck_id, max(date_info) AS date_max
+                  FROM waterway.sounding_results
+                  GROUP BY bottleneck_id
+                  ORDER BY bottleneck_id DESC) AS srl
+              ON b.bottleneck_id = srl.bottleneck_id
+      WHERE b.validity @> current_timestamp;
+
+  -- Finally drop the old column
+  ALTER TABLE waterway.fairway_availability
+      DROP COLUMN old_bnid;
+  ALTER TABLE waterway.fairway_availability
+      ADD CONSTRAINT fairway_availability_bottleneck_id_surdat_key
+      UNIQUE (bottleneck_id, surdat);
+\else
+  \qecho 'NOTICE: bottleneck_id column in fairway_availability already migrated.'
+\endif
+
+
+--
+-- DROP NO LONGER NEEDED TRIGGER FUNCTION
+--
+
+-- This used to update foreign key references.  As these references no
+-- longer exist we don't need this magic any more...
+DROP TRIGGER IF EXISTS move_referencing ON waterway.bottlenecks;
+DROP FUNCTION IF EXISTS move_bottleneck_referencing();
--- a/schema/version.sql	Fri Jul 26 13:46:55 2019 +0200
+++ b/schema/version.sql	Tue Jul 30 11:30:31 2019 +0200
@@ -1,1 +1,1 @@
-INSERT INTO gemma_schema_version(version) VALUES (1009);
+INSERT INTO gemma_schema_version(version) VALUES (1100);