summaryrefslogtreecommitdiff
path: root/postgres
diff options
context:
space:
mode:
authorBen Burwell <ben@benburwell.com>2020-06-02 21:36:52 -0400
committerBen Burwell <ben@benburwell.com>2020-06-02 21:36:52 -0400
commitc1dcb9245f288ddef7a213b7d2fd8a8ebe5d3ab3 (patch)
tree562ca59d6402ec8c7fb3ad0935b6ad32355fbae1 /postgres
parentb005938c00c62465dac06693703ab3f592d15369 (diff)
Reorganize files
Diffstat (limited to 'postgres')
-rw-r--r--postgres/postgres.go148
1 file changed, 148 insertions, 0 deletions
diff --git a/postgres/postgres.go b/postgres/postgres.go
new file mode 100644
index 0000000..3a5221e
--- /dev/null
+++ b/postgres/postgres.go
@@ -0,0 +1,148 @@
+package postgres
+
import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)
+
// DB wraps an *sqlx.DB connection pool and adds helpers for listing
// schemata, reading pg_stat_statements, and running EXPLAIN ANALYZE.
// All sqlx.DB methods are promoted via embedding.
type DB struct {
	*sqlx.DB
}
+
+func Open(conn string) (*DB, error) {
+ db, err := sqlx.Open("postgres", conn)
+ if err != nil {
+ return nil, err
+ }
+ return &DB{db}, nil
+}
+
// Schema is one row of information_schema.schemata.
type Schema struct {
	// SchemaName is the schema's name (information_schema.schemata.schema_name).
	SchemaName string `db:"schema_name"`
}
+
+func (db *DB) GetSchemata() ([]*Schema, error) {
+ var schemata []*Schema
+ if err := db.Select(&schemata, "SELECT schema_name FROM information_schema.schemata"); err != nil {
+ return nil, err
+ }
+ return schemata, nil
+}
+
// PgStatStatement is one row of the pg_stat_statements view: aggregate
// execution statistics for a normalized query.
//
// NOTE(review): the timing columns are double precision and the calls
// column is bigint in PostgreSQL; float32/int here may lose precision or
// overflow on 32-bit builds — consider float64/int64, but that would
// change the exported field types. TODO confirm against callers.
type PgStatStatement struct {
	UserID            int     `db:"userid"`
	DBID              int     `db:"dbid"`
	QueryID           int64   `db:"queryid"`
	Query             string  `db:"query"`
	Calls             int     `db:"calls"`
	TotalTime         float32 `db:"total_time"`
	MinTime           float32 `db:"min_time"`
	MaxTime           float32 `db:"max_time"`
	MeanTime          float32 `db:"mean_time"`
	StddevTime        float32 `db:"stddev_time"`
	Rows              int64   `db:"rows"`
	SharedBlksHit     int64   `db:"shared_blks_hit"`
	SharedBlksRead    int64   `db:"shared_blks_read"`
	SharedBlksDirtied int64   `db:"shared_blks_dirtied"`
	SharedBlksWritten int64   `db:"shared_blks_written"`
	LocalBlksHit      int64   `db:"local_blks_hit"`
	LocalBlksRead     int64   `db:"local_blks_read"`
	LocalBlksDirtied  int64   `db:"local_blks_dirtied"`
	LocalBlksWritten  int64   `db:"local_blks_written"`
	TempBlksRead      int64   `db:"temp_blks_read"`
	TempBlksWritten   int64   `db:"temp_blks_written"`
	BlkReadTime       float32 `db:"blk_read_time"`
	BlkWriteTime      float32 `db:"blk_write_time"`
}
+
+func (db *DB) GetPgStatStatements(schema string) ([]*PgStatStatement, error) {
+ var stmts []*PgStatStatement
+ if err := db.Select(&stmts, fmt.Sprintf("SELECT * FROM %s.pg_stat_statements ORDER BY total_time DESC", schema)); err != nil {
+ return nil, err
+ }
+ return stmts, nil
+}
+
// Explain is one element of the JSON array produced by
// EXPLAIN (ANALYZE, ..., FORMAT JSON): a plan tree plus overall timings.
type Explain struct {
	// Plan is the root node of the execution plan tree.
	Plan *Plan `json:"Plan"`
	// PlanningTime is the planner time reported by EXPLAIN (milliseconds,
	// per PostgreSQL convention — TODO confirm units against server docs).
	PlanningTime float32 `json:"Planning Time"`
	// Triggers []string `json:"Triggers"`
	// ExecutionTime is the executor time reported by EXPLAIN.
	ExecutionTime float32 `json:"Execution Time"`
}
+
// Plan is a single node of an EXPLAIN (FORMAT JSON) plan tree. Field
// names mirror the JSON keys PostgreSQL emits (note the spaces in the
// tags). Pointer-typed fields are node-type-specific and nil when the
// key is absent for a given node; Plans holds the child nodes.
type Plan struct {
	NodeType                  string   `json:"Node Type"`
	StartupCost               float32  `json:"Startup Cost"`
	TotalCost                 float32  `json:"Total Cost"`
	PlanRows                  int      `json:"Plan Rows"`
	PlanWidth                 int      `json:"Plan Width"`
	ActualStartupTime         float32  `json:"Actual Startup Time"`
	ActualTotalTime           float32  `json:"Actual Total Time"`
	ActualRows                int      `json:"Actual Rows"`
	ActualLoops               int      `json:"Actual Loops"`
	Output                    []string `json:"Output"`
	SharedHitBlocks           int      `json:"Shared Hit Blocks"`
	SharedReadBlocks          int      `json:"Shared Read Blocks"`
	SharedDirtiedBlocks       int      `json:"Shared Dirtied Blocks"`
	SharedWrittenBlocks       int      `json:"Shared Written Blocks"`
	LocalHitBlocks            int      `json:"Local Hit Blocks"`
	LocalReadBlocks           int      `json:"Local Read Blocks"`
	LocalDirtiedBlocks        int      `json:"Local Dirtied Blocks"`
	LocalWrittenBlocks        int      `json:"Local Written Blocks"`
	TempReadBlocks            int      `json:"Temp Read Blocks"`
	TempWrittenBlocks         int      `json:"Temp Written Blocks"`
	IOReadTime                float32  `json:"I/O Read Time"`
	IOWriteTime               float32  `json:"I/O Write Time"`
	ParentRelationship        *string  `json:"Parent Relationship"`
	SortKey                   []string `json:"Sort Key"`
	SortMethod                *string  `json:"Sort Method"`
	SortSpaceUsed             *int     `json:"Sort Space Used"`
	SortSpaceType             *string  `json:"Sort Space Type"`
	JoinType                  *string  `json:"Join Type"`
	Strategy                  *string  `json:"Strategy"`
	RelationName              *string  `json:"Relation Name"`
	Schema                    *string  `json:"Schema"`
	Alias                     *string  `json:"Alias"`
	RecheckCond               *string  `json:"Recheck Cond"`
	RowsRemovedByIndexRecheck *int     `json:"Rows Removed by Index Recheck"`
	ExactHeapBlocks           *int     `json:"Exact Heap Blocks"`
	LossyHeapBlocks           *int     `json:"Lossy Heap Blocks"`
	IndexName                 *string  `json:"Index Name"`
	IndexCond                 *string  `json:"Index Cond"`
	ScanDirection             *string  `json:"Scan Direction"`
	ParallelAware             *bool    `json:"Parallel Aware"`
	FunctionName              *string  `json:"Function Name"`
	Plans                     []*Plan  `json:"Plans"`
}
+
+func (db *DB) Analyze(query string) ([]*Explain, error) {
+ tx, err := db.Beginx()
+ if err != nil {
+ return nil, fmt.Errorf("could not begin transaction: %v", err)
+ }
+ defer tx.Rollback()
+
+ type qp struct {
+ Plan string `db:"QUERY PLAN"`
+ }
+
+ var plan qp
+ if err := tx.Get(&plan, fmt.Sprintf("EXPLAIN (ANALYZE, COSTS, BUFFERS, VERBOSE, FORMAT JSON) %s", query)); err != nil {
+ return nil, fmt.Errorf("could not analyze query: %v", err)
+ }
+
+ if err := tx.Rollback(); err != nil {
+ return nil, fmt.Errorf("could not roll back transaction: %v", err)
+ }
+
+ var exp []*Explain
+ if err := json.Unmarshal([]byte(plan.Plan), &exp); err != nil {
+ // if err := json.Unmarshal([]byte(testPlan), &a); err != nil {
+ return nil, fmt.Errorf("could not unmarshal plan: %v", err)
+ }
+
+ return exp, nil
+}