commit ccf12a95b1c30c22dd3d4323e59061d1bfb01cfc Author: renaldybrada Date: Tue Jan 27 11:41:51 2026 +0700 initiate repo diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..2eea525 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +.env \ No newline at end of file diff --git a/cmd/api/main.go b/cmd/api/main.go new file mode 100644 index 0000000..ed58692 --- /dev/null +++ b/cmd/api/main.go @@ -0,0 +1,60 @@ +package main + +import ( + "antrian-operasi/internal/server" + "context" + "fmt" + "log" + "net/http" + "os/signal" + "syscall" + "time" + + "github.com/joho/godotenv" +) + +func gracefulShutdown(apiServer *http.Server, done chan bool) { + ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer stop() + + <-ctx.Done() + + log.Println("Shutting down gracefully, press Ctrl+C again to force") + stop() + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if err := apiServer.Shutdown(ctx); err != nil { + log.Printf("Server forced to shutdown with error: %v", err) + } + + log.Println("Server exiting") + + done <- true +} + +func main() { + log.Println("Starting API Service...") + + if err := godotenv.Load(); err != nil { + log.Printf("Warning: .env file not found or could not be loaded: %v", err) + log.Println("Continuing with system environment variables...") + } + + server := server.NewServer() + + done := make(chan bool, 1) + + // Run graceful shutdown in a separate goroutine + go gracefulShutdown(server, done) + + log.Printf("Server starting on port %s", server.Addr) + err := server.ListenAndServe() + if err != nil && err != http.ErrServerClosed { + panic(fmt.Sprintf("http server error: %s", err)) + } + + // Wait for the graceful shutdown to complete + <-done + log.Println("Graceful shutdown complete.") +} diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..b8afaeb --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,23 @@ +version: 
"3.9" + +services: + postgres: + image: postgres:16 + container_name: antrian_operasi + restart: unless-stopped + environment: + POSTGRES_USER: rssa + POSTGRES_PASSWORD: supersecret + POSTGRES_DB: antrian_operasi + ports: + - "5432:5432" + volumes: + - pgdata:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U rssa -d antrian_operasi"] + interval: 5s + timeout: 5s + retries: 5 + +volumes: + pgdata: diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..11cc2e7 --- /dev/null +++ b/go.mod @@ -0,0 +1,56 @@ +module antrian-operasi + +go 1.25.3 + +require ( + github.com/Masterminds/squirrel v1.5.4 // indirect + github.com/bytedance/sonic v1.14.0 // indirect + github.com/bytedance/sonic/loader v0.3.0 // indirect + github.com/cloudwego/base64x v0.1.6 // indirect + github.com/gabriel-vasile/mimetype v1.4.8 // indirect + github.com/gin-contrib/sse v1.1.0 // indirect + github.com/gin-gonic/gin v1.11.0 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.27.0 // indirect + github.com/goccy/go-json v0.10.2 // indirect + github.com/goccy/go-yaml v1.18.0 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/pgx/v5 v5.8.0 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect + github.com/jmoiron/sqlx v1.4.0 // indirect + github.com/joho/godotenv v1.5.1 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.16.7 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect + github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect + github.com/leodido/go-urn v1.4.0 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/mattn/go-isatty 
v0.0.20 // indirect + github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/montanaflynn/stats v0.7.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect + github.com/quic-go/qpack v0.5.1 // indirect + github.com/quic-go/quic-go v0.54.0 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/ugorji/go/codec v1.3.0 // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/scram v1.1.2 // indirect + github.com/xdg-go/stringprep v1.0.4 // indirect + github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect + go.mongodb.org/mongo-driver v1.17.7 // indirect + go.uber.org/mock v0.5.0 // indirect + golang.org/x/arch v0.20.0 // indirect + golang.org/x/crypto v0.41.0 // indirect + golang.org/x/mod v0.27.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/text v0.29.0 // indirect + golang.org/x/tools v0.36.0 // indirect + google.golang.org/protobuf v1.36.9 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..57e3410 --- /dev/null +++ b/go.sum @@ -0,0 +1,156 @@ +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= +github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= +github.com/bytedance/sonic v1.14.0 h1:/OfKt8HFw0kh2rj8N0F6C/qPGRESq0BbaNZgcNXXzQQ= +github.com/bytedance/sonic v1.14.0/go.mod h1:WoEbx8WTcFJfzCe0hbmyTGrfjt8PzNEBdxlNUO24NhA= +github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA= +github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= +github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M= +github.com/cloudwego/base64x 
v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM= +github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= +github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w= +github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM= +github.com/gin-gonic/gin v1.11.0 h1:OW/6PLjyusp2PPXtyxKHU0RbX6I/l28FTdDlae5ueWk= +github.com/gin-gonic/gin v1.11.0/go.mod h1:+iq/FyxlGzII0KHiBGjuNn4UNENUlKbGlNmc+W50Dls= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4= +github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= +github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo= +github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= +github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 
h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= +github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= +github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= +github.com/quic-go/quic-go v0.54.0 h1:6s1YB9QotYI6Ospeiguknbp2Znb/jZYjZLRXn9kMQBg= +github.com/quic-go/quic-go v0.54.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY= +github.com/stretchr/objx 
v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/ugorji/go/codec v1.3.0 h1:Qd2W2sQawAfG8XSvzwhBeoGq71zXOC/Q1E9y/wUcsUA= +github.com/ugorji/go/codec v1.3.0/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 
+go.mongodb.org/mongo-driver v1.17.7 h1:a9w+U3Vt67eYzcfq3k/OAv284/uUUkL0uP75VE5rCOU= +go.mongodb.org/mongo-driver v1.17.7/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= +go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= +go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= +golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c= +golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= +golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= +golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= +golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= +golang.org/x/net v0.43.0 
h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text 
v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= +golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= +golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= +golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= +golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/config/config.go b/internal/config/config.go new file mode 100644 index 0000000..218c9ca --- /dev/null +++ b/internal/config/config.go @@ -0,0 +1,111 @@ +package config + +import ( + "log" + "os" + "strings" + + "github.com/joho/godotenv" +) + +func LoadConfig() *Config { + errLoadEnv := godotenv.Load() + if errLoadEnv != nil { + log.Println("error loading .env") + } + + config := &Config{ + Server: ServerConfig{ + Port: getEnvAsInt("PORT", 8080), + Mode: 
getEnv("GIN_MODE", "debug"), + }, + Databases: make(map[string]DatabaseConfig), + } + + config.loadCustomDatabaseConfigs() + + return config +} + +func (c *Config) loadCustomDatabaseConfigs() { + envVars := os.Environ() + dbConfigs := make(map[string]map[string]string) + + // Parse database configurations from environment variables + for _, envVar := range envVars { + parts := strings.SplitN(envVar, "=", 2) + if len(parts) != 2 { + continue + } + + key := parts[0] + value := parts[1] + + // Parse specific database configurations + if strings.HasSuffix(key, "_CONNECTION") || strings.HasSuffix(key, "_HOST") || + strings.HasSuffix(key, "_DATABASE") || strings.HasSuffix(key, "_USERNAME") || + strings.HasSuffix(key, "_PASSWORD") || strings.HasSuffix(key, "_PORT") || + strings.HasSuffix(key, "_NAME") { + + segments := strings.Split(key, "_") + if len(segments) >= 2 { + dbName := strings.ToLower(strings.Join(segments[:len(segments)-1], "_")) + property := strings.ToLower(segments[len(segments)-1]) + + if dbConfigs[dbName] == nil { + dbConfigs[dbName] = make(map[string]string) + } + dbConfigs[dbName][property] = value + } + } + } + + // Create DatabaseConfig from parsed configurations for additional databases + for name, config := range dbConfigs { + // Skip empty configurations or system configurations + if name == "" || strings.Contains(name, "chrome_crashpad_pipe") || name == "primary" { + continue + } + + dbType := getEnvFromMap(config, "connection", getEnvFromMap(config, "type", "postgres")) + + // Skip if username is empty and it's not a system config + username := getEnvFromMap(config, "username", "") + if username == "" && !strings.HasPrefix(name, "chrome") { + continue + } + + dbConfig := DatabaseConfig{ + Name: name, + Type: dbType, + Host: getEnvFromMap(config, "host", "localhost"), + Port: getEnvAsIntFromMap(config, "port", getDefaultPort(dbType)), + Username: username, + Password: getEnvFromMap(config, "password", ""), + Database: getEnvFromMap(config, 
"database", getEnvFromMap(config, "name", name)), + Schema: getEnvFromMap(config, "schema", getDefaultSchema(dbType)), + SSLMode: getEnvFromMap(config, "sslmode", getDefaultSSLMode(dbType)), + Path: getEnvFromMap(config, "path", ""), + Options: getEnvFromMap(config, "options", ""), + MaxOpenConns: getEnvAsIntFromMap(config, "max_open_conns", getDefaultMaxOpenConns(dbType)), + MaxIdleConns: getEnvAsIntFromMap(config, "max_idle_conns", getDefaultMaxIdleConns(dbType)), + ConnMaxLifetime: parseDuration(getEnvFromMap(config, "conn_max_lifetime", getDefaultConnMaxLifetime(dbType))), + // Security settings + RequireSSL: getEnvAsBoolFromMap(config, "require_ssl", false), + SSLRootCert: getEnvFromMap(config, "ssl_root_cert", ""), + SSLCert: getEnvFromMap(config, "ssl_cert", ""), + SSLKey: getEnvFromMap(config, "ssl_key", ""), + Timeout: parseDuration(getEnvFromMap(config, "timeout", "30s")), + ConnectTimeout: parseDuration(getEnvFromMap(config, "connect_timeout", "10s")), + ReadTimeout: parseDuration(getEnvFromMap(config, "read_timeout", "30s")), + WriteTimeout: parseDuration(getEnvFromMap(config, "write_timeout", "30s")), + StatementTimeout: parseDuration(getEnvFromMap(config, "statement_timeout", "360s")), + // Connection pool settings + MaxLifetime: parseDuration(getEnvFromMap(config, "max_lifetime", "1h")), + MaxIdleTime: parseDuration(getEnvFromMap(config, "max_idle_time", "5m")), + HealthCheckPeriod: parseDuration(getEnvFromMap(config, "health_check_period", "1m")), + } + + c.Databases[name] = dbConfig + } +} diff --git a/internal/config/helper.go b/internal/config/helper.go new file mode 100644 index 0000000..b676e82 --- /dev/null +++ b/internal/config/helper.go @@ -0,0 +1,157 @@ +package config + +import ( + "os" + "strconv" + "time" +) + +func getEnv(key, defaultValue string) string { + if value := os.Getenv(key); value != "" { + return value + } + return defaultValue +} + +func getEnvAsInt(key string, defaultValue int) int { + valueStr := getEnv(key, "") + if 
value, err := strconv.Atoi(valueStr); err == nil { + return value + } + return defaultValue +} + +func getEnvFromMap(config map[string]string, key, defaultValue string) string { + if value, exists := config[key]; exists { + return value + } + return defaultValue +} + +func getEnvAsIntFromMap(config map[string]string, key string, defaultValue int) int { + if value, exists := config[key]; exists { + if intValue, err := strconv.Atoi(value); err == nil { + return intValue + } + } + return defaultValue +} + +func getEnvAsBoolFromMap(config map[string]string, key string, defaultValue bool) bool { + if value, exists := config[key]; exists { + if boolValue, err := strconv.ParseBool(value); err == nil { + return boolValue + } + } + return defaultValue +} + +// Helper functions for getting default values based on database type +func getDefaultPort(dbType string) int { + switch dbType { + case "postgres": + return 5432 + case "mysql": + return 3306 + case "sqlserver": + return 1433 + case "mongodb": + return 27017 + case "sqlite": + return 0 // SQLite doesn't use port + default: + return 5432 + } +} + +func getDefaultSchema(dbType string) string { + switch dbType { + case "postgres": + return "public" + case "mysql": + return "" + case "sqlserver": + return "dbo" + case "mongodb": + return "" + case "sqlite": + return "" + default: + return "public" + } +} + +func getDefaultSSLMode(dbType string) string { + switch dbType { + case "postgres": + return "disable" + case "mysql": + return "false" + case "sqlserver": + return "false" + case "mongodb": + return "false" + case "sqlite": + return "" + default: + return "disable" + } +} + +func getDefaultMaxOpenConns(dbType string) int { + switch dbType { + case "postgres": + return 25 + case "mysql": + return 25 + case "sqlserver": + return 25 + case "mongodb": + return 100 + case "sqlite": + return 1 // SQLite only supports one writer at a time + default: + return 25 + } +} + +func getDefaultMaxIdleConns(dbType string) int { + 
switch dbType { + case "postgres": + return 25 + case "mysql": + return 25 + case "sqlserver": + return 25 + case "mongodb": + return 10 + case "sqlite": + return 1 // SQLite only supports one writer at a time + default: + return 25 + } +} + +func getDefaultConnMaxLifetime(dbType string) string { + switch dbType { + case "postgres": + return "5m" + case "mysql": + return "5m" + case "sqlserver": + return "5m" + case "mongodb": + return "30m" + case "sqlite": + return "5m" + default: + return "5m" + } +} + +func parseDuration(durationStr string) time.Duration { + if duration, err := time.ParseDuration(durationStr); err == nil { + return duration + } + return 5 * time.Minute +} diff --git a/internal/config/struct.go b/internal/config/struct.go new file mode 100644 index 0000000..6d68ea7 --- /dev/null +++ b/internal/config/struct.go @@ -0,0 +1,45 @@ +package config + +import "time" + +type Config struct { + Server ServerConfig + Databases map[string]DatabaseConfig + ReadReplicas map[string][]DatabaseConfig +} + +type ServerConfig struct { + Port int + Mode string +} + +type DatabaseConfig struct { + Name string + Type string // postgres, mysql, sqlserver, sqlite, mongodb + Host string + Port int + Username string + Password string + Database string + Schema string + SSLMode string + Path string // For SQLite + Options string // Additional connection options + MaxOpenConns int // Max open connections + MaxIdleConns int // Max idle connections + ConnMaxLifetime time.Duration // Connection max lifetime + // Security settings + RequireSSL bool // Require SSL connection + SSLRootCert string // Path to SSL root certificate + SSLCert string // Path to SSL client certificate + SSLKey string // Path to SSL client key + Timeout time.Duration // Connection timeout + ConnectTimeout time.Duration // Connect timeout + ReadTimeout time.Duration // Read timeout + WriteTimeout time.Duration // Write timeout + StatementTimeout time.Duration // Statement timeout for PostgreSQL + // 
Connection pool settings + MaxLifetime time.Duration // Maximum amount of time a connection may be reused + MaxIdleTime time.Duration // Maximum amount of time a connection may be idle + HealthCheckPeriod time.Duration // Health check period +} diff --git a/internal/database/database.go b/internal/database/database.go new file mode 100644 index 0000000..aca5337 --- /dev/null +++ b/internal/database/database.go @@ -0,0 +1,187 @@ +package database + +import ( + "antrian-operasi/internal/config" + "context" + "database/sql" + "fmt" + "log" + "sync" + + "github.com/jmoiron/sqlx" + "github.com/lib/pq" + "go.mongodb.org/mongo-driver/mongo" +) + +type DatabaseType string + +const ( + Postgres DatabaseType = "postgres" + MySQL DatabaseType = "mysql" + SQLServer DatabaseType = "sqlserver" + SQLite DatabaseType = "sqlite" + MongoDB DatabaseType = "mongodb" +) + +type Service interface { + Health() map[string]map[string]string + GetDB(name string) (*sql.DB, error) + GetSQLXDB(name string) (*sqlx.DB, error) // Tambahkan metode ini + GetMongoClient(name string) (*mongo.Client, error) + GetReadDB(name string) (*sql.DB, error) + Close() error + ListDBs() []string + GetDBType(name string) (DatabaseType, error) + ListenForChanges(ctx context.Context, dbName string, channels []string, callback func(string, string)) error + NotifyChange(dbName, channel, payload string) error + GetPrimaryDB(name string) (*sql.DB, error) + ExecuteQuery(ctx context.Context, dbName string, query string, args ...interface{}) (*sql.Rows, error) + ExecuteQueryRow(ctx context.Context, dbName string, query string, args ...interface{}) *sql.Row + Exec(ctx context.Context, dbName string, query string, args ...interface{}) (sql.Result, error) +} + +type service struct { + sqlDatabases map[string]*sql.DB + sqlxDatabases map[string]*sqlx.DB // Tambahkan map untuk sqlx.DB + mongoClients map[string]*mongo.Client + readReplicas map[string][]*sql.DB + configs map[string]config.DatabaseConfig + readConfigs 
map[string][]config.DatabaseConfig + mu sync.RWMutex + readBalancer map[string]int + listeners map[string]*pq.Listener + listenersMu sync.RWMutex +} + +var ( + dbManager *service + once sync.Once +) + +func (s *service) loadFromConfig(cfg *config.Config) { + s.mu.Lock() + defer s.mu.Unlock() + + // Load primary databases + for name, dbConfig := range cfg.Databases { + fmt.Printf("db name : %s", name) + s.configs[name] = dbConfig + } + + // Load read replicas + for name, replicaConfigs := range cfg.ReadReplicas { + s.readConfigs[name] = replicaConfigs + } +} + +func (s *service) addDatabase(name string, config config.DatabaseConfig) error { + s.mu.Lock() + defer s.mu.Unlock() + + var db *sql.DB + var err error + + dbType := DatabaseType(config.Type) + log.Printf("Database type: %s", dbType) + + switch dbType { + case Postgres: + db, err = s.openPostgresConnection(config) + case MySQL: + db, err = s.openMySQLConnection(config) + case SQLServer: + db, err = s.openSQLServerConnection(config) + case SQLite: + db, err = s.openSQLiteConnection(config) + case MongoDB: + return s.addMongoDB(name, config) + default: + return fmt.Errorf("unsupported database type: %s", config.Type) + } + + if err != nil { + log.Printf("❌ Error connecting to database %s: %v", name, err) + return err + } + + log.Printf("✅ Successfully connected to database: %s", name) + return s.configureSQLDB(name, db, config) +} + +func (s *service) addReadReplica(name string, index int, config config.DatabaseConfig) error { + s.mu.Lock() + defer s.mu.Unlock() + + var db *sql.DB + var err error + + dbType := DatabaseType(config.Type) + + switch dbType { + case Postgres: + db, err = s.openPostgresConnection(config) + case MySQL: + db, err = s.openMySQLConnection(config) + case SQLServer: + db, err = s.openSQLServerConnection(config) + case SQLite: + db, err = s.openSQLiteConnection(config) + default: + return fmt.Errorf("unsupported database type for read replica: %s", config.Type) + } + + if err != nil { + 
return err + } + + if s.readReplicas[name] == nil { + s.readReplicas[name] = make([]*sql.DB, 0) + } + + // Ensure we have enough slots + for len(s.readReplicas[name]) <= index { + s.readReplicas[name] = append(s.readReplicas[name], nil) + } + + s.readReplicas[name][index] = db + log.Printf("Successfully connected to read replica %s[%d]", name, index) + + return nil +} + +func New(cfg *config.Config) Service { + once.Do(func() { + dbManager = &service{ + sqlDatabases: make(map[string]*sql.DB), + sqlxDatabases: make(map[string]*sqlx.DB), // Inisialisasi map sqlx + mongoClients: make(map[string]*mongo.Client), + readReplicas: make(map[string][]*sql.DB), + configs: make(map[string]config.DatabaseConfig), + readConfigs: make(map[string][]config.DatabaseConfig), + readBalancer: make(map[string]int), + listeners: make(map[string]*pq.Listener), + } + + log.Println("Initializing database service...") + dbManager.loadFromConfig(cfg) + fmt.Printf("%#v\n", dbManager.configs) + + // Initialize all databases + for name, dbConfig := range dbManager.configs { + if err := dbManager.addDatabase(name, dbConfig); err != nil { + log.Printf("Failed to connect to database %s: %v", name, err) + } + } + + // Initialize read replicas + for name, replicaConfigs := range dbManager.readConfigs { + for i, replicaConfig := range replicaConfigs { + if err := dbManager.addReadReplica(name, i, replicaConfig); err != nil { + log.Printf("Failed to connect to read replica %s[%d]: %v", name, i, err) + } + } + } + }) + + return dbManager +} diff --git a/internal/database/mongodb.go b/internal/database/mongodb.go new file mode 100644 index 0000000..5dd45cc --- /dev/null +++ b/internal/database/mongodb.go @@ -0,0 +1,56 @@ +package database + +import ( + "antrian-operasi/internal/config" + "context" + "crypto/tls" + "fmt" + "log" + + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +func (s *service) addMongoDB(name string, config config.DatabaseConfig) error { + ctx, 
cancel := context.WithTimeout(context.Background(), config.Timeout) + defer cancel() + + // Build MongoDB URI with authentication and TLS options + uri := fmt.Sprintf("mongodb://%s:%s@%s:%d/%s", + config.Username, + config.Password, + config.Host, + config.Port, + config.Database, + ) + + // Configure client options with security settings + clientOptions := options.Client().ApplyURI(uri) + + // Set TLS configuration if needed + if config.RequireSSL { + clientOptions.SetTLSConfig(&tls.Config{ + InsecureSkipVerify: config.SSLMode == "require", + MinVersion: tls.VersionTLS12, + }) + } + + // Set connection timeout + clientOptions.SetConnectTimeout(config.ConnectTimeout) + clientOptions.SetServerSelectionTimeout(config.Timeout) + + client, err := mongo.Connect(ctx, clientOptions) + if err != nil { + return fmt.Errorf("failed to connect to MongoDB: %w", err) + } + + // Ping to verify connection + if err := client.Ping(ctx, nil); err != nil { + return fmt.Errorf("failed to ping MongoDB: %w", err) + } + + s.mongoClients[name] = client + log.Printf("Successfully connected to MongoDB: %s", name) + + return nil +} diff --git a/internal/database/mysql.go b/internal/database/mysql.go new file mode 100644 index 0000000..244732a --- /dev/null +++ b/internal/database/mysql.go @@ -0,0 +1,43 @@ +package database + +import ( + "antrian-operasi/internal/config" + "database/sql" + "fmt" +) + +func (s *service) openMySQLConnection(config config.DatabaseConfig) (*sql.DB, error) { + // Build connection string with security parameters + connStr := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=true&timeout=%s&readTimeout=%s&writeTimeout=%s", + config.Username, + config.Password, + config.Host, + config.Port, + config.Database, + config.Timeout, + config.ReadTimeout, + config.WriteTimeout, + ) + + // Add SSL configuration if required + if config.RequireSSL { + connStr += "&tls=true" + if config.SSLRootCert != "" { + connStr += "&ssl-ca=" + config.SSLRootCert + } + if config.SSLCert != "" { + 
connStr += "&ssl-cert=" + config.SSLCert + } + if config.SSLKey != "" { + connStr += "&ssl-key=" + config.SSLKey + } + } + + // Open connection + db, err := sql.Open("mysql", connStr) + if err != nil { + return nil, fmt.Errorf("failed to open MySQL connection: %w", err) + } + + return db, nil +} diff --git a/internal/database/postgres.go b/internal/database/postgres.go new file mode 100644 index 0000000..2896a7e --- /dev/null +++ b/internal/database/postgres.go @@ -0,0 +1,44 @@ +package database + +import ( + "antrian-operasi/internal/config" + "database/sql" + "fmt" + + _ "github.com/jackc/pgx/v5/stdlib" +) + +func (s *service) openPostgresConnection(config config.DatabaseConfig) (*sql.DB, error) { + // Build connection string with security parameters + // Convert timeout durations to seconds for pgx + connectTimeoutSec := int(config.ConnectTimeout.Seconds()) + statementTimeoutSec := int(config.StatementTimeout.Seconds()) + + connStr := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=%d statement_timeout=%d", + config.Host, + config.Port, + config.Username, + config.Password, + config.Database, + config.SSLMode, + connectTimeoutSec, + statementTimeoutSec, + ) + + if config.Schema != "" { + connStr += " search_path=" + config.Schema + } + + // Add SSL configuration if required + if config.RequireSSL { + connStr += " sslcert=" + config.SSLCert + " sslkey=" + config.SSLKey + " sslrootcert=" + config.SSLRootCert + } + + // Open connection using standard database/sql interface + db, err := sql.Open("pgx", connStr) + if err != nil { + return nil, fmt.Errorf("failed to open PostgreSQL connection: %w", err) + } + + return db, nil +} diff --git a/internal/database/service.go b/internal/database/service.go new file mode 100644 index 0000000..3ff9176 --- /dev/null +++ b/internal/database/service.go @@ -0,0 +1,498 @@ +package database + +import ( + "antrian-operasi/internal/config" + "context" + "database/sql" + "fmt" + "log" + "strconv" 
+ "time" + + "github.com/jmoiron/sqlx" + "github.com/lib/pq" + "go.mongodb.org/mongo-driver/mongo" +) + +func (s *service) configureSQLDB(name string, db *sql.DB, config config.DatabaseConfig) error { + // Set connection pool limits + db.SetMaxOpenConns(config.MaxOpenConns) + db.SetMaxIdleConns(config.MaxIdleConns) + db.SetConnMaxLifetime(config.ConnMaxLifetime) + db.SetConnMaxIdleTime(config.MaxIdleTime) + + ctx, cancel := context.WithTimeout(context.Background(), config.Timeout) + defer cancel() + + if err := db.PingContext(ctx); err != nil { + db.Close() + return fmt.Errorf("failed to ping database: %w", err) + } + + s.sqlDatabases[name] = db + + // PERUBAHAN: Tambahkan pembuatan sqlx.DB dari sql.DB yang sudah ada + dbType := DatabaseType(config.Type) + var driverName string + + switch dbType { + case Postgres: + driverName = "pgx" + case MySQL: + driverName = "mysql" + case SQLServer: + driverName = "sqlserver" + case SQLite: + driverName = "sqlite3" + default: + return fmt.Errorf("unsupported database type for sqlx: %s", config.Type) + } + + // Buat sqlx.DB dari sql.DB yang sudah ada + sqlxDB := sqlx.NewDb(db, driverName) + s.sqlxDatabases[name] = sqlxDB + + log.Printf("Successfully connected to SQL database: %s", name) + + return nil +} + +// Health checks the health of all database connections by pinging each database. 
+func (s *service) Health() map[string]map[string]string { + s.mu.RLock() + defer s.mu.RUnlock() + + result := make(map[string]map[string]string) + + // Check SQL databases + for name, db := range s.sqlDatabases { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + stats := make(map[string]string) + + err := db.PingContext(ctx) + if err != nil { + stats["status"] = "down" + stats["error"] = fmt.Sprintf("db down: %v", err) + stats["type"] = "sql" + stats["role"] = "primary" + result[name] = stats + continue + } + + stats["status"] = "up" + stats["message"] = "It's healthy" + stats["type"] = "sql" + stats["role"] = "primary" + + dbStats := db.Stats() + stats["open_connections"] = strconv.Itoa(dbStats.OpenConnections) + stats["in_use"] = strconv.Itoa(dbStats.InUse) + stats["idle"] = strconv.Itoa(dbStats.Idle) + stats["wait_count"] = strconv.FormatInt(dbStats.WaitCount, 10) + stats["wait_duration"] = dbStats.WaitDuration.String() + stats["max_idle_closed"] = strconv.FormatInt(dbStats.MaxIdleClosed, 10) + stats["max_lifetime_closed"] = strconv.FormatInt(dbStats.MaxLifetimeClosed, 10) + + if dbStats.OpenConnections > 40 { + stats["message"] = "The database is experiencing heavy load." + } + + if dbStats.WaitCount > 1000 { + stats["message"] = "The database has a high number of wait events, indicating potential bottlenecks." + } + + if dbStats.MaxIdleClosed > int64(dbStats.OpenConnections)/2 { + stats["message"] = "Many idle connections are being closed, consider revising the connection pool settings." + } + + if dbStats.MaxLifetimeClosed > int64(dbStats.OpenConnections)/2 { + stats["message"] = "Many connections are being closed due to max lifetime, consider increasing max lifetime or revising the connection usage pattern." 
+ } + + result[name] = stats + } + + // Check read replicas + for name, replicas := range s.readReplicas { + for i, db := range replicas { + if db == nil { + continue + } + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + replicaName := fmt.Sprintf("%s_replica_%d", name, i) + stats := make(map[string]string) + + err := db.PingContext(ctx) + if err != nil { + stats["status"] = "down" + stats["error"] = fmt.Sprintf("read replica down: %v", err) + stats["type"] = "sql" + stats["role"] = "replica" + result[replicaName] = stats + continue + } + + stats["status"] = "up" + stats["message"] = "Read replica healthy" + stats["type"] = "sql" + stats["role"] = "replica" + + dbStats := db.Stats() + stats["open_connections"] = strconv.Itoa(dbStats.OpenConnections) + stats["in_use"] = strconv.Itoa(dbStats.InUse) + stats["idle"] = strconv.Itoa(dbStats.Idle) + + result[replicaName] = stats + } + } + + // Check MongoDB connections + for name, client := range s.mongoClients { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + stats := make(map[string]string) + + err := client.Ping(ctx, nil) + if err != nil { + stats["status"] = "down" + stats["error"] = fmt.Sprintf("mongodb down: %v", err) + stats["type"] = "mongodb" + result[name] = stats + continue + } + + stats["status"] = "up" + stats["message"] = "It's healthy" + stats["type"] = "mongodb" + + result[name] = stats + } + + return result +} + +// GetDB returns a specific SQL database connection by name +func (s *service) GetDB(name string) (*sql.DB, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + db, exists := s.sqlDatabases[name] + if !exists { + return nil, fmt.Errorf("database %s not found", name) + } + + return db, nil +} + +// PERUBAHAN: Tambahkan metode GetSQLXDB +// GetSQLXDB returns a specific SQLX database connection by name +func (s *service) GetSQLXDB(name string) (*sqlx.DB, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + db, 
exists := s.sqlxDatabases[name] + if !exists { + return nil, fmt.Errorf("database %s not found", name) + } + + return db, nil +} + +// GetReadDB returns a read replica connection using round-robin load balancing +func (s *service) GetReadDB(name string) (*sql.DB, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + replicas, exists := s.readReplicas[name] + if !exists || len(replicas) == 0 { + // Fallback to primary if no replicas available + return s.GetDB(name) + } + + // Round-robin load balancing + s.readBalancer[name] = (s.readBalancer[name] + 1) % len(replicas) + selected := replicas[s.readBalancer[name]] + + if selected == nil { + // Fallback to primary if replica is nil + return s.GetDB(name) + } + + return selected, nil +} + +// GetMongoClient returns a specific MongoDB client by name +func (s *service) GetMongoClient(name string) (*mongo.Client, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + client, exists := s.mongoClients[name] + if !exists { + return nil, fmt.Errorf("MongoDB client %s not found", name) + } + + return client, nil +} + +// ListDBs returns list of available database names +func (s *service) ListDBs() []string { + s.mu.RLock() + defer s.mu.RUnlock() + + names := make([]string, 0, len(s.sqlDatabases)+len(s.mongoClients)) + + for name := range s.sqlDatabases { + names = append(names, name) + } + + for name := range s.mongoClients { + names = append(names, name) + } + + return names +} + +// GetDBType returns the type of a specific database +func (s *service) GetDBType(name string) (DatabaseType, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + config, exists := s.configs[name] + if !exists { + return "", fmt.Errorf("database %s not found", name) + } + + return DatabaseType(config.Type), nil +} + +// Close closes all database connections +func (s *service) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + + var errs []error + + // Close listeners first + for name, listener := range s.listeners { + if err := listener.Close(); err != 
nil { + errs = append(errs, fmt.Errorf("failed to close listener for %s: %w", name, err)) + } + } + + for name, db := range s.sqlDatabases { + if err := db.Close(); err != nil { + errs = append(errs, fmt.Errorf("failed to close database %s: %w", name, err)) + } else { + log.Printf("Disconnected from SQL database: %s", name) + } + } + + for name, replicas := range s.readReplicas { + for i, db := range replicas { + if db != nil { + if err := db.Close(); err != nil { + errs = append(errs, fmt.Errorf("failed to close read replica %s[%d]: %w", name, i, err)) + } else { + log.Printf("Disconnected from read replica: %s[%d]", name, i) + } + } + } + } + + for name, client := range s.mongoClients { + if err := client.Disconnect(context.Background()); err != nil { + errs = append(errs, fmt.Errorf("failed to disconnect MongoDB client %s: %w", name, err)) + } else { + log.Printf("Disconnected from MongoDB: %s", name) + } + } + + s.sqlDatabases = make(map[string]*sql.DB) + s.sqlxDatabases = make(map[string]*sqlx.DB) // Reset map sqlx + s.mongoClients = make(map[string]*mongo.Client) + s.readReplicas = make(map[string][]*sql.DB) + s.configs = make(map[string]config.DatabaseConfig) + s.readConfigs = make(map[string][]config.DatabaseConfig) + s.listeners = make(map[string]*pq.Listener) + + if len(errs) > 0 { + return fmt.Errorf("errors closing databases: %v", errs) + } + + return nil +} + +// GetPrimaryDB returns primary database connection +func (s *service) GetPrimaryDB(name string) (*sql.DB, error) { + return s.GetDB(name) +} + +// ExecuteQuery executes a query with parameters and returns rows +func (s *service) ExecuteQuery(ctx context.Context, dbName string, query string, args ...interface{}) (*sql.Rows, error) { + db, err := s.GetDB(dbName) + if err != nil { + return nil, fmt.Errorf("failed to get database %s: %w", dbName, err) + } + + // Use parameterized queries to prevent SQL injection + rows, err := db.QueryContext(ctx, query, args...) 
+ if err != nil { + return nil, fmt.Errorf("failed to execute query: %w", err) + } + + return rows, nil +} + +// ExecuteQueryRow executes a query with parameters and returns a single row +func (s *service) ExecuteQueryRow(ctx context.Context, dbName string, query string, args ...interface{}) *sql.Row { + db, err := s.GetDB(dbName) + if err != nil { + // Return an empty row with error + row := &sql.Row{} + return row + } + + // Use parameterized queries to prevent SQL injection + return db.QueryRowContext(ctx, query, args...) +} + +// Exec executes a query with parameters and returns the result +func (s *service) Exec(ctx context.Context, dbName string, query string, args ...interface{}) (sql.Result, error) { + db, err := s.GetDB(dbName) + if err != nil { + return nil, fmt.Errorf("failed to get database %s: %w", dbName, err) + } + + // Use parameterized queries to prevent SQL injection + result, err := db.ExecContext(ctx, query, args...) + if err != nil { + return nil, fmt.Errorf("failed to execute query: %w", err) + } + + return result, nil +} + +// ListenForChanges implements PostgreSQL LISTEN/NOTIFY for real-time updates +func (s *service) ListenForChanges(ctx context.Context, dbName string, channels []string, callback func(string, string)) error { + s.mu.RLock() + config, exists := s.configs[dbName] + s.mu.RUnlock() + + if !exists { + return fmt.Errorf("database %s not found", dbName) + } + + // Only support PostgreSQL for LISTEN/NOTIFY + if DatabaseType(config.Type) != Postgres { + return fmt.Errorf("LISTEN/NOTIFY only supported for PostgreSQL databases") + } + + // Create connection string for listener + // Convert timeout to seconds for pq + connectTimeoutSec := int(config.ConnectTimeout.Seconds()) + + connStr := fmt.Sprintf("postgres://%s:%s@%s:%d/%s?sslmode=%s&connect_timeout=%d", + config.Username, + config.Password, + config.Host, + config.Port, + config.Database, + config.SSLMode, + connectTimeoutSec, + ) + + // Create listener + listener := 
pq.NewListener( + connStr, + 10*time.Second, + time.Minute, + func(ev pq.ListenerEventType, err error) { + if err != nil { + log.Printf("Database listener (%s) error: %v", dbName, err) + } + }, + ) + + // Store listener for cleanup + s.listenersMu.Lock() + s.listeners[dbName] = listener + s.listenersMu.Unlock() + + // Listen to specified channels + for _, channel := range channels { + err := listener.Listen(channel) + if err != nil { + listener.Close() + return fmt.Errorf("failed to listen to channel %s: %w", channel, err) + } + log.Printf("Listening to database channel: %s on %s", channel, dbName) + } + + // Start listening loop + go func() { + defer func() { + listener.Close() + s.listenersMu.Lock() + delete(s.listeners, dbName) + s.listenersMu.Unlock() + log.Printf("Database listener for %s stopped", dbName) + }() + + for { + select { + case n := <-listener.Notify: + if n != nil { + callback(n.Channel, n.Extra) + } + case <-ctx.Done(): + return + case <-time.After(90 * time.Second): + // Send ping to keep connection alive + go func() { + if err := listener.Ping(); err != nil { + log.Printf("Listener ping failed for %s: %v", dbName, err) + } + }() + } + } + }() + + return nil +} + +// NotifyChange sends a notification to a PostgreSQL channel +func (s *service) NotifyChange(dbName, channel, payload string) error { + db, err := s.GetDB(dbName) + if err != nil { + return fmt.Errorf("failed to get database %s: %w", dbName, err) + } + + // Check if it's PostgreSQL + s.mu.RLock() + config, exists := s.configs[dbName] + s.mu.RUnlock() + + if !exists { + return fmt.Errorf("database %s configuration not found", dbName) + } + + if DatabaseType(config.Type) != Postgres { + return fmt.Errorf("NOTIFY only supported for PostgreSQL databases") + } + + // Execute NOTIFY with parameterized query to prevent SQL injection + query := "SELECT pg_notify($1, $2)" + _, err = db.Exec(query, channel, payload) + if err != nil { + return fmt.Errorf("failed to send notification: %w", err) + 
} + + log.Printf("Sent notification to channel %s on %s: %s", channel, dbName, payload) + return nil +} diff --git a/internal/database/sqlite.go b/internal/database/sqlite.go new file mode 100644 index 0000000..e496052 --- /dev/null +++ b/internal/database/sqlite.go @@ -0,0 +1,23 @@ +package database + +import ( + "antrian-operasi/internal/config" + "database/sql" + "fmt" +) + +func (s *service) openSQLiteConnection(config config.DatabaseConfig) (*sql.DB, error) { + // Open connection + db, err := sql.Open("sqlite3", config.Path) + if err != nil { + return nil, fmt.Errorf("failed to open SQLite connection: %w", err) + } + + // Enable foreign key constraints and WAL mode for better security and performance + _, err = db.Exec("PRAGMA foreign_keys = ON; PRAGMA journal_mode = WAL;") + if err != nil { + return nil, fmt.Errorf("failed to configure SQLite: %w", err) + } + + return db, nil +} diff --git a/internal/database/sqlserver.go b/internal/database/sqlserver.go new file mode 100644 index 0000000..49ba584 --- /dev/null +++ b/internal/database/sqlserver.go @@ -0,0 +1,40 @@ +package database + +import ( + "antrian-operasi/internal/config" + "database/sql" + "fmt" +) + +func (s *service) openSQLServerConnection(config config.DatabaseConfig) (*sql.DB, error) { + // Build connection string with security parameters + // Convert timeout to seconds for SQL Server + connectTimeoutSec := int(config.ConnectTimeout.Seconds()) + + connStr := fmt.Sprintf("sqlserver://%s:%s@%s:%d?database=%s&connection timeout=%d", + config.Username, + config.Password, + config.Host, + config.Port, + config.Database, + connectTimeoutSec, + ) + + // Add SSL configuration if required + if config.RequireSSL { + connStr += "&encrypt=true" + if config.SSLRootCert != "" { + connStr += "&trustServerCertificate=false" + } else { + connStr += "&trustServerCertificate=true" + } + } + + // Open connection + db, err := sql.Open("sqlserver", connStr) + if err != nil { + return nil, fmt.Errorf("failed to open 
SQL Server connection: %w", err) + } + + return db, nil +} diff --git a/internal/domain/reference/kategori/handler.go b/internal/domain/reference/kategori/handler.go new file mode 100644 index 0000000..ebccf84 --- /dev/null +++ b/internal/domain/reference/kategori/handler.go @@ -0,0 +1,24 @@ +package kategori + +import ( + "net/http" + + "github.com/gin-gonic/gin" +) + +type KategoriHandler struct { + repo IKategoriRepository +} + +func NewKategoriHandler(repo IKategoriRepository) KategoriHandler { + return KategoriHandler{repo} +} + +func (h KategoriHandler) ListKategoriOperasi(c *gin.Context) { + list, err := h.repo.SearchableListKategori(c) + if err != nil { + c.JSON(http.StatusInternalServerError, err) + } + + c.JSON(http.StatusOK, list) +} diff --git a/internal/domain/reference/kategori/model.go b/internal/domain/reference/kategori/model.go new file mode 100644 index 0000000..6c53093 --- /dev/null +++ b/internal/domain/reference/kategori/model.go @@ -0,0 +1,14 @@ +package kategori + +import ( + "database/sql" + "time" +) + +type KategoriOperasiModel struct { + ID int `json:"id" db:"id"` + Status string `json:"status" db:"status" validate:"oneof=draft published"` + Kategori string `json:"kategori" db:"kategori"` + DateCreated time.Time `json:"date_created" db:"date_created"` + DateUpdated sql.NullTime `json:"date_updated" db:"date_updated"` +} diff --git a/internal/domain/reference/kategori/repository.go b/internal/domain/reference/kategori/repository.go new file mode 100644 index 0000000..f691dad --- /dev/null +++ b/internal/domain/reference/kategori/repository.go @@ -0,0 +1,77 @@ +package kategori + +import ( + "antrian-operasi/internal/database" + "log" + + queryUtils "antrian-operasi/internal/utils/query" + + "github.com/gin-gonic/gin" +) + +const DB_NAME = "db_antrian" + +type IKategoriRepository interface { + SearchableListKategori(c *gin.Context) ([]KategoriOperasiModel, error) +} + +type kategoriRepo struct { + queryBuilder *queryUtils.QueryBuilder + db 
database.Service +} + +func NewRepository(dbService database.Service) IKategoriRepository { + queryBuilder := queryUtils.NewQueryBuilder(queryUtils.DBTypePostgreSQL). + SetAllowedColumns([]string{ + "id", + "status", + "date_created", + "date_updated", + "Kategori", + }) + queryBuilder.SetSecurityOptions(false, 100) + + return kategoriRepo{ + queryBuilder: queryBuilder, + db: dbService, + } +} + +func (r kategoriRepo) SearchableListKategori(c *gin.Context) ([]KategoriOperasiModel, error) { + var result []KategoriOperasiModel + search := c.Query("search") + + query := queryUtils.DynamicQuery{ + From: "daftar_kategori_operasi", + Fields: []queryUtils.SelectField{ + {Expression: "id"}, + {Expression: "status"}, + {Expression: "date_created"}, + {Expression: "date_updated"}, + {Expression: "Kategori", Alias: "kategori"}, + }, + Sort: []queryUtils.SortField{ + {Column: "date_created", Order: "DESC"}, + }, + } + + if search != "" { + searchFilters := []queryUtils.DynamicFilter{ + {Column: "Kategori", Operator: queryUtils.OpILike, Value: "%" + search + "%"}, + } + query.Filters = append(query.Filters, queryUtils.FilterGroup{Filters: searchFilters, LogicOp: "OR"}) + } + + dbconn, err := r.db.GetSQLXDB(DB_NAME) + if err != nil { + log.Fatalf("unable to connect db %s", err) + } + + err = r.queryBuilder.ExecuteQuery( + c, dbconn, query, &result) + if err != nil { + log.Fatalf("unable to execute query %s", err) + } + + return result, nil +} diff --git a/internal/domain/reference/kategori/routes.go b/internal/domain/reference/kategori/routes.go new file mode 100644 index 0000000..186094e --- /dev/null +++ b/internal/domain/reference/kategori/routes.go @@ -0,0 +1,17 @@ +package kategori + +import ( + "antrian-operasi/internal/database" + + "github.com/gin-gonic/gin" +) + +func RegisterRoutes(r *gin.RouterGroup, dbService database.Service) { + ketegoriRepo := NewRepository(dbService) + kategoriHandler := NewKategoriHandler(ketegoriRepo) + + kategori := r.Group("/kategori") + { + 
kategori.GET("", kategoriHandler.ListKategoriOperasi) + } +} diff --git a/internal/domain/reference/spesialis/handler.go b/internal/domain/reference/spesialis/handler.go new file mode 100644 index 0000000..b191d5a --- /dev/null +++ b/internal/domain/reference/spesialis/handler.go @@ -0,0 +1,33 @@ +package spesialis + +import ( + "net/http" + + "github.com/gin-gonic/gin" +) + +type SpesialisHandler struct { + repo ISpesialisRepository +} + +func NewSpesialisHandler(repo ISpesialisRepository) SpesialisHandler { + return SpesialisHandler{repo} +} + +func (h SpesialisHandler) ListSpesialis(c *gin.Context) { + list, err := h.repo.SearchableListSpesialis(c) + if err != nil { + c.JSON(http.StatusInternalServerError, err) + } + + c.JSON(http.StatusOK, list) +} + +func (h SpesialisHandler) ListSubSpesialis(c *gin.Context) { + list, err := h.repo.SearchableListSubSpesialis(c) + if err != nil { + c.JSON(http.StatusInternalServerError, err) + } + + c.JSON(http.StatusOK, list) +} diff --git a/internal/domain/reference/spesialis/model.go b/internal/domain/reference/spesialis/model.go new file mode 100644 index 0000000..97ffd85 --- /dev/null +++ b/internal/domain/reference/spesialis/model.go @@ -0,0 +1,16 @@ +package spesialis + +type SpesialisModel struct { + ID int `json:"id" db:"id"` + Kode string `json:"kode" db:"Kode"` + Spesialis string `json:"spesialis" db:"Spesialis"` +} + +type SubSpesialisModel struct { + ID int `json:"id" db:"id"` + KodeSpesialis string `json:"kode_spesialis" db:"kode_spesialis"` + Spesialis string `json:"spesialis" db:"spesialis"` + KodeSubSpesialis string `json:"kode" db:"Kode"` + SubSpesialis string `json:"sub_spesialis" db:"Subspesialis"` + IDSpesialis int `json:"id_spesialis" db:"id_spesialis"` +} diff --git a/internal/domain/reference/spesialis/repository.go b/internal/domain/reference/spesialis/repository.go new file mode 100644 index 0000000..5b1c647 --- /dev/null +++ b/internal/domain/reference/spesialis/repository.go @@ -0,0 +1,128 @@ 
+package spesialis + +import ( + "antrian-operasi/internal/database" + "log" + + queryUtils "antrian-operasi/internal/utils/query" + + "github.com/gin-gonic/gin" +) + +const DB_NAME = "db_antrian" + +type ISpesialisRepository interface { + SearchableListSpesialis(c *gin.Context) ([]SpesialisModel, error) + SearchableListSubSpesialis(c *gin.Context) ([]SubSpesialisModel, error) +} + +type spesialisRepo struct { + queryBuilder *queryUtils.QueryBuilder + db database.Service +} + +func NewRepository(dbService database.Service) ISpesialisRepository { + queryBuilder := queryUtils.NewQueryBuilder(queryUtils.DBTypePostgreSQL). + SetAllowedColumns([]string{ + "id", + "Kode", + "Spesialis", + "Subspesialis", + "FK_daftar_spesialis_ID", + }) + queryBuilder.SetSecurityOptions(false, 100) + + return spesialisRepo{ + queryBuilder: queryBuilder, + db: dbService, + } +} + +func (r spesialisRepo) SearchableListSpesialis(c *gin.Context) ([]SpesialisModel, error) { + var result []SpesialisModel + search := c.Query("search") + + query := queryUtils.DynamicQuery{ + From: "daftar_spesialis", + Fields: []queryUtils.SelectField{ + {Expression: "id"}, + {Expression: "Kode"}, + {Expression: "Spesialis"}, + }, + } + + if search != "" { + searchFilters := []queryUtils.DynamicFilter{ + {Column: "Kode", Operator: queryUtils.OpILike, Value: "%" + search + "%"}, + {Column: "Spesialis", Operator: queryUtils.OpILike, Value: "%" + search + "%"}, + } + query.Filters = append(query.Filters, queryUtils.FilterGroup{Filters: searchFilters, LogicOp: "OR"}) + } + + dbconn, err := r.db.GetSQLXDB(DB_NAME) + if err != nil { + log.Fatalf("unable to connect db %s", err) + } + + err = r.queryBuilder.ExecuteQuery( + c, dbconn, query, &result) + if err != nil { + log.Fatalf("unable to execute query %s", err) + } + + return result, nil +} + +func (r spesialisRepo) SearchableListSubSpesialis(c *gin.Context) ([]SubSpesialisModel, error) { + var result []SubSpesialisModel + search := c.Query("search") + + query := 
queryUtils.DynamicQuery{ + From: "daftar_subspesialis", + Aliases: "dss", + Fields: []queryUtils.SelectField{ + {Expression: "dss.id", Alias: "id"}, + {Expression: "dss.Kode", Alias: "Kode"}, + {Expression: "Subspesialis", Alias: "Subspesialis"}, + {Expression: "ds.Kode", Alias: "kode_spesialis"}, + {Expression: "ds.Spesialis", Alias: "spesialis"}, + {Expression: "ds.id", Alias: "id_spesialis"}, + }, + } + + query.Joins = []queryUtils.Join{ + { + Type: "LEFT", + Table: "daftar_spesialis", + Alias: "ds", + OnConditions: queryUtils.FilterGroup{ + Filters: []queryUtils.DynamicFilter{ + { + Column: "FK_daftar_spesialis_ID", Operator: queryUtils.OpEqual, Value: "ds.id", + }, + }, + }, + }, + } + + if search != "" { + searchFilters := []queryUtils.DynamicFilter{ + {Column: "dss.Kode", Operator: queryUtils.OpILike, Value: "%" + search + "%"}, + {Column: "dss.Subspesialis", Operator: queryUtils.OpILike, Value: "%" + search + "%"}, + } + query.Filters = append(query.Filters, queryUtils.FilterGroup{Filters: searchFilters, LogicOp: "OR"}) + } + + dbconn, err := r.db.GetSQLXDB(DB_NAME) + if err != nil { + log.Fatalf("unable to connect db %s", err) + } + + err = r.queryBuilder.ExecuteQuery( + c, dbconn, query, &result) + if err != nil { + log.Fatalf("unable to execute query %s", err) + } + + return result, nil +} diff --git a/internal/domain/reference/spesialis/routes.go b/internal/domain/reference/spesialis/routes.go new file mode 100644 index 0000000..138e1c8 --- /dev/null +++ b/internal/domain/reference/spesialis/routes.go @@ -0,0 +1,15 @@ +package spesialis + +import ( + "antrian-operasi/internal/database" + + "github.com/gin-gonic/gin" +) + +func RegisterRoutes(r *gin.RouterGroup, dbService database.Service) { + spesialisRepo := NewRepository(dbService) + spesialisHandler := NewSpesialisHandler(spesialisRepo) + + r.GET("/spesialis", spesialisHandler.ListSpesialis) + r.GET("/sub-spesialis", spesialisHandler.ListSubSpesialis) +} diff --git a/internal/routes/routes.go 
b/internal/routes/routes.go new file mode 100644 index 0000000..5ec6e24 --- /dev/null +++ b/internal/routes/routes.go @@ -0,0 +1,38 @@ +package routes + +import ( + "antrian-operasi/internal/config" + "antrian-operasi/internal/database" + "antrian-operasi/internal/domain/reference/kategori" + "antrian-operasi/internal/domain/reference/spesialis" + "net/http" + + "github.com/gin-gonic/gin" +) + +func RegisterRoutes(cfg *config.Config, dbService database.Service) *gin.Engine { + gin.SetMode(cfg.Server.Mode) + router := gin.New() + + router.GET("/ping", func(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{ + "message": "pong", + }) + }) + + // router.GET("databases", func(c *gin.Context) { + // c.JSON(200, gin.H{ + // "databases": dbService.ListDBs(), + // "health": dbService.Health(), + // "timestamp": time.Now().Unix(), + // }) + // }) + + api := router.Group("/api") + { + kategori.RegisterRoutes(api, dbService) + spesialis.RegisterRoutes(api, dbService) + } + + return router +} diff --git a/internal/server/server.go b/internal/server/server.go new file mode 100644 index 0000000..02be3c7 --- /dev/null +++ b/internal/server/server.go @@ -0,0 +1,41 @@ +package server + +import ( + "antrian-operasi/internal/database" + "antrian-operasi/internal/routes" + "fmt" + "net/http" + "time" + + "antrian-operasi/internal/config" +) + +var dbService database.Service + +type Server struct { + port int + db database.Service +} + +func NewServer() *http.Server { + cfg := config.LoadConfig() + + if dbService == nil { + dbService = database.New(cfg) + } + + NewServer := &Server{ + port: cfg.Server.Port, + db: dbService, + } + + server := &http.Server{ + Addr: fmt.Sprintf(":%d", NewServer.port), + Handler: routes.RegisterRoutes(cfg, dbService), + IdleTimeout: time.Minute, + ReadTimeout: 10 * time.Second, + WriteTimeout: 30 * time.Second, + } + + return server +} diff --git a/internal/utils/filters/dynamic_filter.go b/internal/utils/filters/dynamic_filter.go new file mode 100644 index 
// FilterOperator names one comparison supported by the dynamic filter DSL.
type FilterOperator string

// Supported operators. Values follow a Directus-style "_op" naming scheme.
const (
	OpEqual            FilterOperator = "_eq"
	OpNotEqual         FilterOperator = "_neq"
	OpLike             FilterOperator = "_like"
	OpILike            FilterOperator = "_ilike"
	OpIn               FilterOperator = "_in"
	OpNotIn            FilterOperator = "_nin"
	OpGreaterThan      FilterOperator = "_gt"
	OpGreaterThanEqual FilterOperator = "_gte"
	OpLessThan         FilterOperator = "_lt"
	OpLessThanEqual    FilterOperator = "_lte"
	OpBetween          FilterOperator = "_between"
	OpNotBetween       FilterOperator = "_nbetween"
	OpNull             FilterOperator = "_null"
	OpNotNull          FilterOperator = "_nnull"
	OpContains         FilterOperator = "_contains"
	OpNotContains      FilterOperator = "_ncontains"
	OpStartsWith       FilterOperator = "_starts_with"
	OpEndsWith         FilterOperator = "_ends_with"
)

// DynamicFilter is one column/operator/value condition. LogicOp ("AND" or
// "OR") describes how it combines with its neighbors inside a group.
type DynamicFilter struct {
	Column   string         `json:"column"`
	Operator FilterOperator `json:"operator"`
	Value    interface{}    `json:"value"`
	LogicOp  string         `json:"logic_op,omitempty"` // AND, OR
}

// FilterGroup is a parenthesized set of filters joined by LogicOp.
type FilterGroup struct {
	Filters []DynamicFilter `json:"filters"`
	LogicOp string          `json:"logic_op"` // AND, OR
}

// DynamicQuery is the full client-supplied query description: projection,
// filtering, ordering, grouping, and pagination.
type DynamicQuery struct {
	Fields  []string      `json:"fields,omitempty"`
	Filters []FilterGroup `json:"filters,omitempty"`
	Sort    []SortField   `json:"sort,omitempty"`
	Limit   int           `json:"limit"`
	Offset  int           `json:"offset"`
	GroupBy []string      `json:"group_by,omitempty"`
	Having  []FilterGroup `json:"having,omitempty"`
}

// SortField orders results by one column, ascending or descending.
type SortField struct {
	Column string `json:"column"`
	Order  string `json:"order"` // ASC, DESC
}
QueryBuilder struct { + tableName string + columnMapping map[string]string // Maps API field names to DB column names + allowedColumns map[string]bool // Security: only allow specified columns + paramCounter int + mu *sync.RWMutex +} + +// NewQueryBuilder creates a new query builder instance +func NewQueryBuilder(tableName string) *QueryBuilder { + return &QueryBuilder{ + tableName: tableName, + columnMapping: make(map[string]string), + allowedColumns: make(map[string]bool), + paramCounter: 0, + } +} + +// SetColumnMapping sets the mapping between API field names and database column names +func (qb *QueryBuilder) SetColumnMapping(mapping map[string]string) *QueryBuilder { + qb.columnMapping = mapping + return qb +} + +// SetAllowedColumns sets the list of allowed columns for security +func (qb *QueryBuilder) SetAllowedColumns(columns []string) *QueryBuilder { + qb.allowedColumns = make(map[string]bool) + for _, col := range columns { + qb.allowedColumns[col] = true + } + return qb +} + +// BuildQuery builds the complete SQL query +func (qb *QueryBuilder) BuildQuery(query DynamicQuery) (string, []interface{}, error) { + qb.paramCounter = 0 + + // Build SELECT clause + selectClause := qb.buildSelectClause(query.Fields) + + // Build FROM clause + fromClause := fmt.Sprintf("FROM %s", qb.tableName) + + // Build WHERE clause + whereClause, whereArgs, err := qb.buildWhereClause(query.Filters) + if err != nil { + return "", nil, err + } + + // Build ORDER BY clause + orderClause := qb.buildOrderClause(query.Sort) + + // Build GROUP BY clause + groupClause := qb.buildGroupByClause(query.GroupBy) + + // Build HAVING clause + havingClause, havingArgs, err := qb.buildHavingClause(query.Having) + if err != nil { + return "", nil, err + } + + // Combine all parts + sqlParts := []string{selectClause, fromClause} + args := []interface{}{} + + if whereClause != "" { + sqlParts = append(sqlParts, "WHERE "+whereClause) + args = append(args, whereArgs...) 
+ } + + if groupClause != "" { + sqlParts = append(sqlParts, groupClause) + } + + if havingClause != "" { + sqlParts = append(sqlParts, "HAVING "+havingClause) + args = append(args, havingArgs...) + } + + if orderClause != "" { + sqlParts = append(sqlParts, orderClause) + } + + // Add pagination + if query.Limit > 0 { + qb.paramCounter++ + sqlParts = append(sqlParts, fmt.Sprintf("LIMIT $%d", qb.paramCounter)) + args = append(args, query.Limit) + } + + if query.Offset > 0 { + qb.paramCounter++ + sqlParts = append(sqlParts, fmt.Sprintf("OFFSET $%d", qb.paramCounter)) + args = append(args, query.Offset) + } + + sql := strings.Join(sqlParts, " ") + return sql, args, nil +} + +// buildSelectClause builds the SELECT part of the query +func (qb *QueryBuilder) buildSelectClause(fields []string) string { + if len(fields) == 0 || (len(fields) == 1 && fields[0] == "*") { + return "SELECT *" + } + + var selectedFields []string + for _, field := range fields { + if field == "*.*" || field == "*" { + selectedFields = append(selectedFields, "*") + continue + } + + // Check if it's an expression (contains spaces, parentheses, etc.) 
+ if strings.Contains(field, " ") || strings.Contains(field, "(") || strings.Contains(field, ")") { + // Expression, add as is + selectedFields = append(selectedFields, field) + continue + } + + // Security check: only allow specified columns (check original field name) + if len(qb.allowedColumns) > 0 && !qb.allowedColumns[field] { + continue + } + + // Map field name if mapping exists + if mappedCol, exists := qb.columnMapping[field]; exists { + field = mappedCol + } + + selectedFields = append(selectedFields, fmt.Sprintf(`"%s"`, field)) + } + + if len(selectedFields) == 0 { + return "SELECT *" + } + + return "SELECT " + strings.Join(selectedFields, ", ") +} + +// buildWhereClause builds the WHERE part of the query +func (qb *QueryBuilder) buildWhereClause(filterGroups []FilterGroup) (string, []interface{}, error) { + if len(filterGroups) == 0 { + return "", nil, nil + } + + var conditions []string + var args []interface{} + + for i, group := range filterGroups { + groupCondition, groupArgs, err := qb.buildFilterGroup(group) + if err != nil { + return "", nil, err + } + + if groupCondition != "" { + if i > 0 { + logicOp := "AND" + if group.LogicOp != "" { + logicOp = strings.ToUpper(group.LogicOp) + } + conditions = append(conditions, logicOp) + } + + conditions = append(conditions, groupCondition) + args = append(args, groupArgs...) 
+ } + } + + return strings.Join(conditions, " "), args, nil +} + +// buildFilterGroup builds conditions for a filter group +func (qb *QueryBuilder) buildFilterGroup(group FilterGroup) (string, []interface{}, error) { + if len(group.Filters) == 0 { + return "", nil, nil + } + + var conditions []string + var args []interface{} + + for i, filter := range group.Filters { + condition, filterArgs, err := qb.buildFilterCondition(filter) + if err != nil { + return "", nil, err + } + + if condition != "" { + if i > 0 { + logicOp := "AND" + if filter.LogicOp != "" { + logicOp = strings.ToUpper(filter.LogicOp) + } else if group.LogicOp != "" { + logicOp = strings.ToUpper(group.LogicOp) + } + conditions = append(conditions, logicOp) + } + + conditions = append(conditions, condition) + args = append(args, filterArgs...) + } + } + + return strings.Join(conditions, " "), args, nil +} + +// buildFilterCondition builds a single filter condition +func (qb *QueryBuilder) buildFilterCondition(filter DynamicFilter) (string, []interface{}, error) { + // Security check (check original field name) + if len(qb.allowedColumns) > 0 && !qb.allowedColumns[filter.Column] { + return "", nil, nil + } + + // Map column name if mapping exists + column := filter.Column + if mappedCol, exists := qb.columnMapping[column]; exists { + column = mappedCol + } + + // Wrap column name in quotes for PostgreSQL + column = fmt.Sprintf(`"%s"`, column) + + switch filter.Operator { + case OpEqual: + if filter.Value == nil { + return "", nil, nil + } + qb.paramCounter++ + return fmt.Sprintf("%s = $%d", column, qb.paramCounter), []interface{}{filter.Value}, nil + + case OpNotEqual: + if filter.Value == nil { + return "", nil, nil + } + qb.paramCounter++ + return fmt.Sprintf("%s != $%d", column, qb.paramCounter), []interface{}{filter.Value}, nil + + case OpLike: + if filter.Value == nil { + return "", nil, nil + } + qb.paramCounter++ + return fmt.Sprintf("%s LIKE $%d", column, qb.paramCounter), 
[]interface{}{filter.Value}, nil + + case OpILike: + if filter.Value == nil { + return "", nil, nil + } + qb.paramCounter++ + return fmt.Sprintf("%s ILIKE $%d", column, qb.paramCounter), []interface{}{filter.Value}, nil + + case OpIn: + values := qb.parseArrayValue(filter.Value) + if len(values) == 0 { + return "", nil, nil + } + + var placeholders []string + var args []interface{} + for _, val := range values { + qb.paramCounter++ + placeholders = append(placeholders, fmt.Sprintf("$%d", qb.paramCounter)) + args = append(args, val) + } + + return fmt.Sprintf("%s IN (%s)", column, strings.Join(placeholders, ", ")), args, nil + + case OpNotIn: + values := qb.parseArrayValue(filter.Value) + if len(values) == 0 { + return "", nil, nil + } + + var placeholders []string + var args []interface{} + for _, val := range values { + qb.paramCounter++ + placeholders = append(placeholders, fmt.Sprintf("$%d", qb.paramCounter)) + args = append(args, val) + } + + return fmt.Sprintf("%s NOT IN (%s)", column, strings.Join(placeholders, ", ")), args, nil + + case OpGreaterThan: + if filter.Value == nil { + return "", nil, nil + } + qb.paramCounter++ + return fmt.Sprintf("%s > $%d", column, qb.paramCounter), []interface{}{filter.Value}, nil + + case OpGreaterThanEqual: + if filter.Value == nil { + return "", nil, nil + } + qb.paramCounter++ + return fmt.Sprintf("%s >= $%d", column, qb.paramCounter), []interface{}{filter.Value}, nil + + case OpLessThan: + if filter.Value == nil { + return "", nil, nil + } + qb.paramCounter++ + return fmt.Sprintf("%s < $%d", column, qb.paramCounter), []interface{}{filter.Value}, nil + + case OpLessThanEqual: + if filter.Value == nil { + return "", nil, nil + } + qb.paramCounter++ + return fmt.Sprintf("%s <= $%d", column, qb.paramCounter), []interface{}{filter.Value}, nil + + case OpBetween: + if filter.Value == nil { + return "", nil, nil + } + values := qb.parseArrayValue(filter.Value) + if len(values) != 2 { + return "", nil, fmt.Errorf("between 
operator requires exactly 2 values") + } + qb.paramCounter++ + param1 := qb.paramCounter + qb.paramCounter++ + param2 := qb.paramCounter + return fmt.Sprintf("%s BETWEEN $%d AND $%d", column, param1, param2), []interface{}{values[0], values[1]}, nil + + case OpNotBetween: + if filter.Value == nil { + return "", nil, nil + } + values := qb.parseArrayValue(filter.Value) + if len(values) != 2 { + return "", nil, fmt.Errorf("not between operator requires exactly 2 values") + } + qb.paramCounter++ + param1 := qb.paramCounter + qb.paramCounter++ + param2 := qb.paramCounter + return fmt.Sprintf("%s NOT BETWEEN $%d AND $%d", column, param1, param2), []interface{}{values[0], values[1]}, nil + + case OpNull: + return fmt.Sprintf("%s IS NULL", column), nil, nil + + case OpNotNull: + return fmt.Sprintf("%s IS NOT NULL", column), nil, nil + + case OpContains: + if filter.Value == nil { + return "", nil, nil + } + qb.paramCounter++ + value := fmt.Sprintf("%%%v%%", filter.Value) + return fmt.Sprintf("%s ILIKE $%d", column, qb.paramCounter), []interface{}{value}, nil + + case OpNotContains: + if filter.Value == nil { + return "", nil, nil + } + qb.paramCounter++ + value := fmt.Sprintf("%%%v%%", filter.Value) + return fmt.Sprintf("%s NOT ILIKE $%d", column, qb.paramCounter), []interface{}{value}, nil + + case OpStartsWith: + if filter.Value == nil { + return "", nil, nil + } + qb.paramCounter++ + value := fmt.Sprintf("%v%%", filter.Value) + return fmt.Sprintf("%s ILIKE $%d", column, qb.paramCounter), []interface{}{value}, nil + + case OpEndsWith: + if filter.Value == nil { + return "", nil, nil + } + qb.paramCounter++ + value := fmt.Sprintf("%%%v", filter.Value) + return fmt.Sprintf("%s ILIKE $%d", column, qb.paramCounter), []interface{}{value}, nil + + default: + return "", nil, fmt.Errorf("unsupported operator: %s", filter.Operator) + } +} + +// parseArrayValue parses array values from various formats +func (qb *QueryBuilder) parseArrayValue(value interface{}) []interface{} { + 
if value == nil { + return nil + } + + // If it's already a slice + if reflect.TypeOf(value).Kind() == reflect.Slice { + v := reflect.ValueOf(value) + result := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + result[i] = v.Index(i).Interface() + } + return result + } + + // If it's a string, try to split by comma + if str, ok := value.(string); ok { + if strings.Contains(str, ",") { + parts := strings.Split(str, ",") + result := make([]interface{}, len(parts)) + for i, part := range parts { + result[i] = strings.TrimSpace(part) + } + return result + } + return []interface{}{str} + } + + return []interface{}{value} +} + +// buildOrderClause builds the ORDER BY clause +func (qb *QueryBuilder) buildOrderClause(sortFields []SortField) string { + if len(sortFields) == 0 { + return "" + } + + var orderParts []string + for _, sort := range sortFields { + column := sort.Column + + // Security check (check original field name) + if len(qb.allowedColumns) > 0 && !qb.allowedColumns[column] { + continue + } + + if mappedCol, exists := qb.columnMapping[column]; exists { + column = mappedCol + } + + order := "ASC" + if sort.Order != "" { + order = strings.ToUpper(sort.Order) + } + + orderParts = append(orderParts, fmt.Sprintf(`"%s" %s`, column, order)) + } + + if len(orderParts) == 0 { + return "" + } + + return "ORDER BY " + strings.Join(orderParts, ", ") +} + +// buildGroupByClause builds the GROUP BY clause +func (qb *QueryBuilder) buildGroupByClause(groupFields []string) string { + if len(groupFields) == 0 { + return "" + } + + var groupParts []string + for _, field := range groupFields { + column := field + if mappedCol, exists := qb.columnMapping[column]; exists { + column = mappedCol + } + + // Security check + if len(qb.allowedColumns) > 0 && !qb.allowedColumns[column] { + continue + } + + groupParts = append(groupParts, fmt.Sprintf(`"%s"`, column)) + } + + if len(groupParts) == 0 { + return "" + } + + return "GROUP BY " + strings.Join(groupParts, ", ") 
+} + +// buildHavingClause builds the HAVING clause +func (qb *QueryBuilder) buildHavingClause(havingGroups []FilterGroup) (string, []interface{}, error) { + if len(havingGroups) == 0 { + return "", nil, nil + } + + return qb.buildWhereClause(havingGroups) +} + +// BuildCountQuery builds a count query +func (qb *QueryBuilder) BuildCountQuery(query DynamicQuery) (string, []interface{}, error) { + qb.paramCounter = 0 + + // Build FROM clause + fromClause := fmt.Sprintf("FROM %s", qb.tableName) + + // Build WHERE clause + whereClause, whereArgs, err := qb.buildWhereClause(query.Filters) + if err != nil { + return "", nil, err + } + + // Build GROUP BY clause + groupClause := qb.buildGroupByClause(query.GroupBy) + + // Build HAVING clause + havingClause, havingArgs, err := qb.buildHavingClause(query.Having) + if err != nil { + return "", nil, err + } + + // Combine parts + sqlParts := []string{"SELECT COUNT(*)", fromClause} + args := []interface{}{} + + if whereClause != "" { + sqlParts = append(sqlParts, "WHERE "+whereClause) + args = append(args, whereArgs...) + } + + if groupClause != "" { + sqlParts = append(sqlParts, groupClause) + } + + if havingClause != "" { + sqlParts = append(sqlParts, "HAVING "+havingClause) + args = append(args, havingArgs...) 
+ } + + sql := strings.Join(sqlParts, " ") + return sql, args, nil +} diff --git a/internal/utils/filters/query_parser.go b/internal/utils/filters/query_parser.go new file mode 100644 index 0000000..6b6f07e --- /dev/null +++ b/internal/utils/filters/query_parser.go @@ -0,0 +1,241 @@ +package utils + +import ( + "net/url" + "strconv" + "strings" + "time" +) + +// QueryParser parses HTTP query parameters into DynamicQuery +type QueryParser struct { + defaultLimit int + maxLimit int +} + +// NewQueryParser creates a new query parser +func NewQueryParser() *QueryParser { + return &QueryParser{ + defaultLimit: 10, + maxLimit: 100, + } +} + +// SetLimits sets default and maximum limits +func (qp *QueryParser) SetLimits(defaultLimit, maxLimit int) *QueryParser { + qp.defaultLimit = defaultLimit + qp.maxLimit = maxLimit + return qp +} + +// ParseQuery parses URL query parameters into DynamicQuery +func (qp *QueryParser) ParseQuery(values url.Values) (DynamicQuery, error) { + query := DynamicQuery{ + Limit: qp.defaultLimit, + Offset: 0, + } + + // Parse fields + if fields := values.Get("fields"); fields != "" { + if fields == "*.*" || fields == "*" { + query.Fields = []string{"*"} + } else { + query.Fields = strings.Split(fields, ",") + for i, field := range query.Fields { + query.Fields[i] = strings.TrimSpace(field) + } + } + } + + // Parse pagination + if limit := values.Get("limit"); limit != "" { + if l, err := strconv.Atoi(limit); err == nil { + if l > 0 && l <= qp.maxLimit { + query.Limit = l + } + } + } + + if offset := values.Get("offset"); offset != "" { + if o, err := strconv.Atoi(offset); err == nil && o >= 0 { + query.Offset = o + } + } + + // Parse filters + filters, err := qp.parseFilters(values) + if err != nil { + return query, err + } + query.Filters = filters + + // Parse sorting + sorts, err := qp.parseSorting(values) + if err != nil { + return query, err + } + query.Sort = sorts + + // Parse group by + if groupBy := values.Get("group"); groupBy != "" { + 
query.GroupBy = strings.Split(groupBy, ",") + for i, field := range query.GroupBy { + query.GroupBy[i] = strings.TrimSpace(field) + } + } + + return query, nil +} + +// parseFilters parses filter parameters +// Supports format: filter[column][operator]=value +func (qp *QueryParser) parseFilters(values url.Values) ([]FilterGroup, error) { + filterMap := make(map[string]map[string]string) + + // Group filters by column + for key, vals := range values { + if strings.HasPrefix(key, "filter[") && strings.HasSuffix(key, "]") { + // Parse filter[column][operator] format + parts := strings.Split(key[7:len(key)-1], "][") + if len(parts) == 2 { + column := parts[0] + operator := parts[1] + + if filterMap[column] == nil { + filterMap[column] = make(map[string]string) + } + + if len(vals) > 0 { + filterMap[column][operator] = vals[0] + } + } + } + } + + if len(filterMap) == 0 { + return nil, nil + } + + // Convert to FilterGroup + var filters []DynamicFilter + + for column, operators := range filterMap { + for opStr, value := range operators { + operator := FilterOperator(opStr) + + // Parse value based on operator + var parsedValue interface{} + switch operator { + case OpIn, OpNotIn: + if value != "" { + parsedValue = strings.Split(value, ",") + } + case OpBetween, OpNotBetween: + if value != "" { + parts := strings.Split(value, ",") + if len(parts) == 2 { + parsedValue = []interface{}{strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1])} + } + } + case OpNull, OpNotNull: + parsedValue = nil + default: + parsedValue = value + } + + filters = append(filters, DynamicFilter{ + Column: column, + Operator: operator, + Value: parsedValue, + }) + } + } + + if len(filters) == 0 { + return nil, nil + } + + return []FilterGroup{{ + Filters: filters, + LogicOp: "AND", + }}, nil +} + +// parseSorting parses sort parameters +// Supports format: sort=column1,-column2 (- for DESC) +func (qp *QueryParser) parseSorting(values url.Values) ([]SortField, error) { + sortParam := 
values.Get("sort") + if sortParam == "" { + return nil, nil + } + + var sorts []SortField + fields := strings.Split(sortParam, ",") + + for _, field := range fields { + field = strings.TrimSpace(field) + if field == "" { + continue + } + + order := "ASC" + column := field + + if strings.HasPrefix(field, "-") { + order = "DESC" + column = field[1:] + } else if strings.HasPrefix(field, "+") { + column = field[1:] + } + + sorts = append(sorts, SortField{ + Column: column, + Order: order, + }) + } + + return sorts, nil +} + +// ParseAdvancedFilters parses complex filter structures +// Supports nested filters and logic operators +func (qp *QueryParser) ParseAdvancedFilters(filterParam string) ([]FilterGroup, error) { + // This would be for more complex JSON-based filters + // Implementation depends on your specific needs + return nil, nil +} + +// Helper function to parse date values +func parseDate(value string) (interface{}, error) { + // Try different date formats + formats := []string{ + "2006-01-02", + "2006-01-02T15:04:05Z", + "2006-01-02T15:04:05.000Z", + "2006-01-02 15:04:05", + } + + for _, format := range formats { + if t, err := time.Parse(format, value); err == nil { + return t, nil + } + } + + return value, nil +} + +// Helper function to parse numeric values +func parseNumeric(value string) interface{} { + // Try integer first + if i, err := strconv.Atoi(value); err == nil { + return i + } + + // Try float + if f, err := strconv.ParseFloat(value, 64); err == nil { + return f + } + + // Return as string + return value +} diff --git a/internal/utils/query/builder.go b/internal/utils/query/builder.go new file mode 100644 index 0000000..c0305a3 --- /dev/null +++ b/internal/utils/query/builder.go @@ -0,0 +1,2729 @@ +package utils + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "log" + "net/url" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "github.com/Masterminds/squirrel" + "github.com/jmoiron/sqlx" + 
"go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +// DBType represents the type of database +type DBType string + +const ( + DBTypePostgreSQL DBType = "postgres" + DBTypeMySQL DBType = "mysql" + DBTypeSQLite DBType = "sqlite" + DBTypeSQLServer DBType = "sqlserver" + DBTypeMongoDB DBType = "mongodb" +) + +// FilterOperator represents supported filter operators +type FilterOperator string + +const ( + OpEqual FilterOperator = "_eq" + OpNotEqual FilterOperator = "_neq" + OpLike FilterOperator = "_like" + OpILike FilterOperator = "_ilike" + OpNotLike FilterOperator = "_nlike" + OpNotILike FilterOperator = "_nilike" + OpIn FilterOperator = "_in" + OpNotIn FilterOperator = "_nin" + OpGreaterThan FilterOperator = "_gt" + OpGreaterThanEqual FilterOperator = "_gte" + OpLessThan FilterOperator = "_lt" + OpLessThanEqual FilterOperator = "_lte" + OpBetween FilterOperator = "_between" + OpNotBetween FilterOperator = "_nbetween" + OpNull FilterOperator = "_null" + OpNotNull FilterOperator = "_nnull" + OpContains FilterOperator = "_contains" + OpNotContains FilterOperator = "_ncontains" + OpStartsWith FilterOperator = "_starts_with" + OpEndsWith FilterOperator = "_ends_with" + OpJsonContains FilterOperator = "_json_contains" + OpJsonNotContains FilterOperator = "_json_ncontains" + OpJsonExists FilterOperator = "_json_exists" + OpJsonNotExists FilterOperator = "_json_nexists" + OpJsonEqual FilterOperator = "_json_eq" + OpJsonNotEqual FilterOperator = "_json_neq" + OpArrayContains FilterOperator = "_array_contains" + OpArrayNotContains FilterOperator = "_array_ncontains" + OpArrayLength FilterOperator = "_array_length" +) + +// DynamicFilter represents a single filter condition +type DynamicFilter struct { + Column string `json:"column"` + Operator FilterOperator `json:"operator"` + Value interface{} `json:"value"` + // Additional options for complex filters + Options map[string]interface{} 
`json:"options,omitempty"` +} + +// FilterGroup represents a group of filters with a logical operator (AND/OR) +type FilterGroup struct { + Filters []DynamicFilter `json:"filters"` + LogicOp string `json:"logic_op"` // AND, OR +} + +// SelectField represents a field in the SELECT clause, supporting expressions and aliases +type SelectField struct { + Expression string `json:"expression"` // e.g., "TMLogBarang.Nama", "COUNT(*)" + Alias string `json:"alias"` // e.g., "obat_nama", "total_count" + // Window function support + WindowFunction *WindowFunction `json:"window_function,omitempty"` +} + +// WindowFunction represents a window function with its configuration +type WindowFunction struct { + Function string `json:"function"` // e.g., "ROW_NUMBER", "RANK", "DENSE_RANK", "LEAD", "LAG" + Over string `json:"over"` // PARTITION BY expression + OrderBy string `json:"order_by"` // ORDER BY expression + Frame string `json:"frame"` // ROWS/RANGE clause + Alias string `json:"alias"` // Alias for the window function +} + +// Join represents a JOIN clause +type Join struct { + Type string `json:"type"` // "INNER", "LEFT", "RIGHT", "FULL" + Table string `json:"table"` // Table name to join + Alias string `json:"alias"` // Table alias + OnConditions FilterGroup `json:"on_conditions"` // Conditions for the ON clause + // LATERAL JOIN support + Lateral bool `json:"lateral,omitempty"` +} + +// Union represents a UNION clause +type Union struct { + Type string `json:"type"` // "UNION", "UNION ALL" + Query DynamicQuery `json:"query"` // The subquery to union with +} + +// CTE (Common Table Expression) represents a WITH clause +type CTE struct { + Name string `json:"name"` // CTE alias name + Query DynamicQuery `json:"query"` // The query defining the CTE + // Recursive CTE support + Recursive bool `json:"recursive,omitempty"` +} + +// DynamicQuery represents the complete query structure +type DynamicQuery struct { + Fields []SelectField `json:"fields,omitempty"` + From string 
`json:"from"` // Main table name + Aliases string `json:"aliases"` // Main table alias + Joins []Join `json:"joins,omitempty"` + Filters []FilterGroup `json:"filters,omitempty"` + GroupBy []string `json:"group_by,omitempty"` + Having []FilterGroup `json:"having,omitempty"` + Unions []Union `json:"unions,omitempty"` + CTEs []CTE `json:"ctes,omitempty"` + Sort []SortField `json:"sort,omitempty"` + Limit int `json:"limit"` + Offset int `json:"offset"` + // Window function support + WindowFunctions []WindowFunction `json:"window_functions,omitempty"` + // JSON operations + JsonOperations []JsonOperation `json:"json_operations,omitempty"` +} + +// JsonOperation represents a JSON operation +type JsonOperation struct { + Type string `json:"type"` // "extract", "exists", "contains", etc. + Column string `json:"column"` // JSON column + Path string `json:"path"` // JSON path + Value interface{} `json:"value,omitempty"` // Value for comparison + Alias string `json:"alias,omitempty"` // Alias for the result +} + +// SortField represents sorting configuration +type SortField struct { + Column string `json:"column"` + Order string `json:"order"` // ASC, DESC +} + +// UpdateData represents data for UPDATE operations +type UpdateData struct { + Columns []string `json:"columns"` + Values []interface{} `json:"values"` + // JSON update support + JsonUpdates map[string]JsonUpdate `json:"json_updates,omitempty"` +} + +// JsonUpdate represents a JSON update operation +type JsonUpdate struct { + Path string `json:"path"` // JSON path + Value interface{} `json:"value"` // New value +} + +// InsertData represents data for INSERT operations +type InsertData struct { + Columns []string `json:"columns"` + Values []interface{} `json:"values"` + // JSON insert support + JsonValues map[string]interface{} `json:"json_values,omitempty"` +} + +// QueryBuilder builds SQL queries from dynamic filters using squirrel +type QueryBuilder struct { + dbType DBType + sqlBuilder 
squirrel.StatementBuilderType + allowedColumns map[string]bool // Security: only allow specified columns + allowedTables map[string]bool // Security: only allow specified tables + // Security settings + enableSecurityChecks bool + maxAllowedRows int + // SQL injection prevention patterns + dangerousPatterns []*regexp.Regexp + // Query logging + enableQueryLogging bool + // Connection timeout settings + queryTimeout time.Duration +} + +// NewQueryBuilder creates a new query builder instance for a specific database type +func NewQueryBuilder(dbType DBType) *QueryBuilder { + var placeholderFormat squirrel.PlaceholderFormat + + switch dbType { + case DBTypePostgreSQL: + placeholderFormat = squirrel.Dollar + case DBTypeMySQL, DBTypeSQLite: + placeholderFormat = squirrel.Question + case DBTypeSQLServer: + placeholderFormat = squirrel.AtP + default: + placeholderFormat = squirrel.Question + } + + // Initialize dangerous patterns for SQL injection prevention + dangerousPatterns := []*regexp.Regexp{ + regexp.MustCompile(`(?i)(union|select|insert|update|delete|drop|alter|create|exec|execute)\s`), + regexp.MustCompile(`(?i)(--|\/\*|\*\/)`), + regexp.MustCompile(`(?i)(or|and)\s+1\s*=\s*1`), + regexp.MustCompile(`(?i)(or|and)\s+true`), + regexp.MustCompile(`(?i)(xp_|sp_)\w+`), // SQL Server extended procedures + regexp.MustCompile(`(?i)(waitfor\s+delay)`), // SQL Server time-based attack + regexp.MustCompile(`(?i)(benchmark|sleep)\s*\(`), // MySQL time-based attack + regexp.MustCompile(`(?i)(pg_sleep)\s*\(`), // PostgreSQL time-based attack + regexp.MustCompile(`(?i)(load_file|into\s+outfile)`), // File operations + regexp.MustCompile(`(?i)(information_schema|sysobjects|syscolumns)`), // System tables + } + + return &QueryBuilder{ + dbType: dbType, + sqlBuilder: squirrel.StatementBuilder.PlaceholderFormat(placeholderFormat), + allowedColumns: make(map[string]bool), + allowedTables: make(map[string]bool), + enableSecurityChecks: true, + maxAllowedRows: 10000, + 
dangerousPatterns: dangerousPatterns, + enableQueryLogging: true, + queryTimeout: 30 * time.Second, + } +} + +// SetSecurityOptions configures security settings +func (qb *QueryBuilder) SetSecurityOptions(enableChecks bool, maxRows int) *QueryBuilder { + qb.enableSecurityChecks = enableChecks + qb.maxAllowedRows = maxRows + return qb +} + +// SetAllowedColumns sets the list of allowed columns for security +func (qb *QueryBuilder) SetAllowedColumns(columns []string) *QueryBuilder { + qb.allowedColumns = make(map[string]bool) + for _, col := range columns { + qb.allowedColumns[col] = true + } + return qb +} + +// SetAllowedTables sets the list of allowed tables for security +func (qb *QueryBuilder) SetAllowedTables(tables []string) *QueryBuilder { + qb.allowedTables = make(map[string]bool) + for _, table := range tables { + qb.allowedTables[table] = true + } + return qb +} + +// SetQueryLogging enables or disables query logging +func (qb *QueryBuilder) SetQueryLogging(enable bool) *QueryBuilder { + qb.enableQueryLogging = enable + return qb +} + +// SetQueryTimeout sets the default query timeout +func (qb *QueryBuilder) SetQueryTimeout(timeout time.Duration) *QueryBuilder { + qb.queryTimeout = timeout + return qb +} + +// BuildQuery builds the complete SQL SELECT query with support for CTEs, JOINs, and UNIONs +func (qb *QueryBuilder) BuildQuery(query DynamicQuery) (string, []interface{}, error) { + var allArgs []interface{} + var queryParts []string + + // Security check for limit + if qb.enableSecurityChecks && query.Limit > qb.maxAllowedRows { + return "", nil, fmt.Errorf("requested limit %d exceeds maximum allowed %d", query.Limit, qb.maxAllowedRows) + } + + // Security check for table name + if qb.enableSecurityChecks && len(qb.allowedTables) > 0 && !qb.allowedTables[query.From] { + return "", nil, fmt.Errorf("disallowed table: %s", query.From) + } + + // 1. 
Build CTEs (WITH clause) + if len(query.CTEs) > 0 { + cteClause, cteArgs, err := qb.buildCTEClause(query.CTEs) + if err != nil { + return "", nil, err + } + queryParts = append(queryParts, cteClause) + allArgs = append(allArgs, cteArgs...) + } + + // 2. Build Main Query using Squirrel's From and Join methods + fromClause := qb.buildFromClause(query.From, query.Aliases) + selectFields := qb.buildSelectFields(query.Fields) + + // Start building the main query + var mainQuery squirrel.SelectBuilder + if len(query.WindowFunctions) > 0 || len(query.JsonOperations) > 0 { + // We need to add window functions and JSON operations after initial select + mainQuery = qb.sqlBuilder.Select(selectFields...).From(fromClause) + } else { + mainQuery = qb.sqlBuilder.Select(selectFields...).From(fromClause) + } + + // Add JOINs using Squirrel's Join method + if len(query.Joins) > 0 { + for _, join := range query.Joins { + // Security check for joined table + if qb.enableSecurityChecks && len(qb.allowedTables) > 0 && !qb.allowedTables[join.Table] { + return "", nil, fmt.Errorf("disallowed table in join: %s", join.Table) + } + + joinType, tableWithAlias, onClause, joinArgs, err := qb.buildSingleJoinClause(join) + if err != nil { + return "", nil, err + } + joinStr := tableWithAlias + " ON " + onClause + switch strings.ToUpper(joinType) { + case "LEFT": + if join.Lateral { + mainQuery = mainQuery.LeftJoin("LATERAL "+joinStr, joinArgs...) + } else { + mainQuery = mainQuery.LeftJoin(joinStr, joinArgs...) + } + case "RIGHT": + mainQuery = mainQuery.RightJoin(joinStr, joinArgs...) + case "FULL": + mainQuery = mainQuery.Join("FULL JOIN "+joinStr, joinArgs...) + default: + if join.Lateral { + mainQuery = mainQuery.Join("LATERAL "+joinStr, joinArgs...) + } else { + mainQuery = mainQuery.Join(joinStr, joinArgs...) + } + } + } + } + + // 4. 
Apply WHERE conditions + if len(query.Filters) > 0 { + whereClause, whereArgs, err := qb.BuildWhereClause(query.Filters) + if err != nil { + return "", nil, err + } + mainQuery = mainQuery.Where(whereClause, whereArgs...) + } + + // 5. Apply GROUP BY + if len(query.GroupBy) > 0 { + mainQuery = mainQuery.GroupBy(qb.buildGroupByColumns(query.GroupBy)...) + } + + // 6. Apply HAVING conditions + if len(query.Having) > 0 { + havingClause, havingArgs, err := qb.BuildWhereClause(query.Having) + if err != nil { + return "", nil, err + } + mainQuery = mainQuery.Having(havingClause, havingArgs...) + } + + // 7. Apply ORDER BY + if len(query.Sort) > 0 { + for _, sort := range query.Sort { + column := qb.validateAndEscapeColumn(sort.Column) + if column == "" { + continue + } + order := "ASC" + if strings.ToUpper(sort.Order) == "DESC" { + order = "DESC" + } + mainQuery = mainQuery.OrderBy(fmt.Sprintf("%s %s", column, order)) + } + } + + // 8. Apply window functions and JSON operations by modifying the SELECT clause + if len(query.WindowFunctions) > 0 || len(query.JsonOperations) > 0 { + // We need to rebuild the SELECT clause with window functions and JSON operations + var finalSelectFields []string + finalSelectFields = append(finalSelectFields, selectFields...) + + // Add window functions + for _, wf := range query.WindowFunctions { + windowFunc, err := qb.buildWindowFunction(wf) + if err != nil { + return "", nil, err + } + finalSelectFields = append(finalSelectFields, windowFunc) + } + + // Add JSON operations + for _, jo := range query.JsonOperations { + jsonExpr, jsonArgs, err := qb.buildJsonOperation(jo) + if err != nil { + return "", nil, err + } + if jo.Alias != "" { + jsonExpr += " AS " + qb.escapeIdentifier(jo.Alias) + } + finalSelectFields = append(finalSelectFields, jsonExpr) + allArgs = append(allArgs, jsonArgs...) 
+ } + + // Rebuild the query with the complete SELECT clause + mainQuery = qb.sqlBuilder.Select(finalSelectFields...).From(fromClause) + + // Re-apply all the other clauses + if len(query.Joins) > 0 { + for _, join := range query.Joins { + // Security check for joined table + if qb.enableSecurityChecks && len(qb.allowedTables) > 0 && !qb.allowedTables[join.Table] { + return "", nil, fmt.Errorf("disallowed table in join: %s", join.Table) + } + + joinType, tableWithAlias, onClause, joinArgs, err := qb.buildSingleJoinClause(join) + if err != nil { + return "", nil, err + } + joinStr := tableWithAlias + " ON " + onClause + switch strings.ToUpper(joinType) { + case "LEFT": + if join.Lateral { + mainQuery = mainQuery.LeftJoin("LATERAL "+joinStr, joinArgs...) + } else { + mainQuery = mainQuery.LeftJoin(joinStr, joinArgs...) + } + case "RIGHT": + mainQuery = mainQuery.RightJoin(joinStr, joinArgs...) + case "FULL": + mainQuery = mainQuery.Join("FULL JOIN "+joinStr, joinArgs...) + default: + if join.Lateral { + mainQuery = mainQuery.Join("LATERAL "+joinStr, joinArgs...) + } else { + mainQuery = mainQuery.Join(joinStr, joinArgs...) + } + } + } + } + + if len(query.Filters) > 0 { + whereClause, whereArgs, err := qb.BuildWhereClause(query.Filters) + if err != nil { + return "", nil, err + } + mainQuery = mainQuery.Where(whereClause, whereArgs...) + } + + if len(query.GroupBy) > 0 { + mainQuery = mainQuery.GroupBy(qb.buildGroupByColumns(query.GroupBy)...) + } + + if len(query.Having) > 0 { + havingClause, havingArgs, err := qb.BuildWhereClause(query.Having) + if err != nil { + return "", nil, err + } + mainQuery = mainQuery.Having(havingClause, havingArgs...) 
+ } + + if len(query.Sort) > 0 { + for _, sort := range query.Sort { + column := qb.validateAndEscapeColumn(sort.Column) + if column == "" { + continue + } + order := "ASC" + if strings.ToUpper(sort.Order) == "DESC" { + order = "DESC" + } + mainQuery = mainQuery.OrderBy(fmt.Sprintf("%s %s", column, order)) + } + } + } + + // 9. Apply pagination with dialect-specific syntax + if query.Limit > 0 { + if qb.dbType == DBTypeSQLServer { + // SQL Server requires ORDER BY for OFFSET FETCH + if len(query.Sort) == 0 { + mainQuery = mainQuery.OrderBy("(SELECT 1)") + } + mainQuery = mainQuery.Suffix(fmt.Sprintf("OFFSET %d ROWS FETCH NEXT %d ROWS ONLY", query.Offset, query.Limit)) + } else { + mainQuery = mainQuery.Limit(uint64(query.Limit)) + if query.Offset > 0 { + mainQuery = mainQuery.Offset(uint64(query.Offset)) + } + } + } else if query.Offset > 0 && qb.dbType != DBTypeSQLServer { + mainQuery = mainQuery.Offset(uint64(query.Offset)) + } + + // Build final main query SQL + sql, args, err := mainQuery.ToSql() + if err != nil { + return "", nil, fmt.Errorf("failed to build main query: %w", err) + } + queryParts = append(queryParts, sql) + allArgs = append(allArgs, args...) + + // 10. Apply UNIONs + if len(query.Unions) > 0 { + unionClause, unionArgs, err := qb.buildUnionClause(query.Unions) + if err != nil { + return "", nil, err + } + queryParts = append(queryParts, unionClause) + allArgs = append(allArgs, unionArgs...) 
	}

	// Assemble the final statement from the parts collected above
	// (optional CTE clause, the main SELECT, optional UNION parts).
	finalSQL := strings.Join(queryParts, " ")

	// Security check for dangerous patterns in user input values
	if qb.enableSecurityChecks {
		if err := qb.checkForSqlInjectionInArgs(allArgs); err != nil {
			return "", nil, err
		}
	}

	// Security check for dangerous patterns in the final SQL
	if qb.enableSecurityChecks {
		// NOTE(review): this logs every built query unconditionally, even when
		// enableQueryLogging is false — confirm this is intended.
		log.Println(finalSQL)
		if err := qb.checkForSqlInjectionInSQL(finalSQL); err != nil {
			return "", nil, err
		}
	}

	if qb.enableQueryLogging {
		fmt.Printf("[DEBUG BuilderQuery] Final SQL query: %s\n", finalSQL)
		fmt.Printf("[DEBUG] Query args: %v\n", allArgs)
	}
	return finalSQL, allArgs, nil
}

// buildWindowFunction builds a window function expression such as
// "ROW_NUMBER() OVER (PARTITION BY x ORDER BY y) AS rn".
// Only the function name is validated (via isValidFunctionName); the function
// is always emitted with an empty argument list, so argument-taking window
// functions (e.g. LAG(col)) are not expressible here.
// NOTE(review): wf.Over, wf.OrderBy and wf.Frame are interpolated into the SQL
// without escaping or validation — confirm these are never user-controlled.
func (qb *QueryBuilder) buildWindowFunction(wf WindowFunction) (string, error) {
	if !qb.isValidFunctionName(wf.Function) {
		return "", fmt.Errorf("invalid window function name: %s", wf.Function)
	}

	windowExpr := fmt.Sprintf("%s() OVER (", wf.Function)

	if wf.Over != "" {
		windowExpr += fmt.Sprintf("PARTITION BY %s ", wf.Over)
	}

	if wf.OrderBy != "" {
		windowExpr += fmt.Sprintf("ORDER BY %s ", wf.OrderBy)
	}

	if wf.Frame != "" {
		windowExpr += wf.Frame
	}

	windowExpr += ")"

	if wf.Alias != "" {
		windowExpr += " AS " + qb.escapeIdentifier(wf.Alias)
	}

	return windowExpr, nil
}

// buildJsonOperation builds a JSON operation expression ("extract", "exists"
// or "contains") for the configured database dialect. It returns the SQL
// fragment plus any bound arguments.
func (qb *QueryBuilder) buildJsonOperation(jo JsonOperation) (string, []interface{}, error) {
	column := qb.validateAndEscapeColumn(jo.Column)
	if column == "" {
		return "", nil, fmt.Errorf("invalid or disallowed column: %s", jo.Column)
	}

	// An empty path defaults to the JSON document root.
	path := jo.Path
	if path == "" {
		path = "$"
	}

	var expr string
	var args []interface{}

	switch strings.ToLower(jo.Type) {
	case "extract":
		switch qb.dbType {
		case DBTypePostgreSQL:
			expr = fmt.Sprintf("%s->>%s", column, qb.escapeJsonPath(path))
		case DBTypeMySQL:
			// NOTE(review): path is interpolated into a quoted literal without
			// escaping — confirm path is never user-controlled.
			expr = fmt.Sprintf("JSON_EXTRACT(%s, '%s')", column, path)
		case
DBTypeSQLServer: + expr = fmt.Sprintf("JSON_VALUE(%s, '%s')", column, qb.escapeSqlServerJsonPath(path)) + case DBTypeSQLite: + expr = fmt.Sprintf("json_extract(%s, '%s')", column, path) + default: + return "", nil, fmt.Errorf("JSON operations not supported for database type: %s", qb.dbType) + } + case "exists": + switch qb.dbType { + case DBTypePostgreSQL: + expr = fmt.Sprintf("jsonb_path_exists(%s, '%s')", column, path) + case DBTypeMySQL: + expr = fmt.Sprintf("JSON_CONTAINS_PATH(%s, 'one', '%s')", column, path) + case DBTypeSQLServer: + expr = fmt.Sprintf("JSON_VALUE(%s, '%s') IS NOT NULL", column, qb.escapeSqlServerJsonPath(path)) + case DBTypeSQLite: + expr = fmt.Sprintf("json_extract(%s, '%s') IS NOT NULL", column, path) + default: + return "", nil, fmt.Errorf("JSON operations not supported for database type: %s", qb.dbType) + } + case "contains": + switch qb.dbType { + case DBTypePostgreSQL: + expr = fmt.Sprintf("%s @> %s", column, "?") + args = append(args, jo.Value) + case DBTypeMySQL: + expr = fmt.Sprintf("JSON_CONTAINS(%s, ?, '%s')", column, path) + args = append(args, jo.Value) + case DBTypeSQLServer: + expr = fmt.Sprintf("JSON_VALUE(%s, '%s') = ?", column, qb.escapeSqlServerJsonPath(path)) + args = append(args, jo.Value) + case DBTypeSQLite: + expr = fmt.Sprintf("json_extract(%s, '%s') = ?", column, path) + args = append(args, jo.Value) + default: + return "", nil, fmt.Errorf("JSON operations not supported for database type: %s", qb.dbType) + } + default: + return "", nil, fmt.Errorf("unsupported JSON operation type: %s", jo.Type) + } + + return expr, args, nil +} + +// escapeJsonPath escapes a JSON path for PostgreSQL +func (qb *QueryBuilder) escapeJsonPath(path string) string { + // Simple implementation - in a real scenario, you'd need more sophisticated escaping + return "'" + strings.ReplaceAll(path, "'", "''") + "'" +} + +// escapeSqlServerJsonPath escapes a JSON path for SQL Server +func (qb *QueryBuilder) escapeSqlServerJsonPath(path string) 
string { + // Convert JSONPath to SQL Server format + // $.path.to.property -> '$.path.to.property' + if !strings.HasPrefix(path, "$") { + path = "$." + path + } + return strings.ReplaceAll(path, ".", ".") +} + +// buildCTEClause builds the WITH clause for Common Table Expressions +func (qb *QueryBuilder) buildCTEClause(ctes []CTE) (string, []interface{}, error) { + var cteParts []string + var allArgs []interface{} + + hasRecursive := false + for _, cte := range ctes { + if cte.Recursive { + hasRecursive = true + break + } + } + + withClause := "WITH" + if hasRecursive { + withClause = "WITH RECURSIVE" + } + + for _, cte := range ctes { + subQuery, args, err := qb.BuildQuery(cte.Query) + if err != nil { + return "", nil, fmt.Errorf("failed to build CTE '%s': %w", cte.Name, err) + } + cteParts = append(cteParts, fmt.Sprintf("%s AS (%s)", qb.escapeIdentifier(cte.Name), subQuery)) + allArgs = append(allArgs, args...) + } + + return fmt.Sprintf("%s %s", withClause, strings.Join(cteParts, ", ")), allArgs, nil +} + +// buildFromClause builds the FROM clause with optional alias +func (qb *QueryBuilder) buildFromClause(table, alias string) string { + fromClause := qb.escapeIdentifier(table) + if alias != "" { + fromClause += " " + qb.escapeIdentifier(alias) + } + return fromClause +} + +// buildSingleJoinClause builds a single JOIN clause components +func (qb *QueryBuilder) buildSingleJoinClause(join Join) (string, string, string, []interface{}, error) { + joinType := strings.ToUpper(join.Type) + if joinType == "" { + joinType = "INNER" + } + + table := qb.escapeIdentifier(join.Table) + if join.Alias != "" { + table += " " + qb.escapeIdentifier(join.Alias) + } + + onClause, onArgs, err := qb.BuildWhereClause([]FilterGroup{join.OnConditions}) + if err != nil { + return "", "", "", nil, fmt.Errorf("failed to build ON clause for join on table %s: %w", join.Table, err) + } + + return joinType, table, onClause, onArgs, nil +} + +// buildUnionClause builds the UNION clause +func 
(qb *QueryBuilder) buildUnionClause(unions []Union) (string, []interface{}, error) {
	var unionParts []string
	var allArgs []interface{}

	for _, union := range unions {
		// Each union arm is a full DynamicQuery, built recursively.
		subQuery, args, err := qb.BuildQuery(union.Query)
		if err != nil {
			return "", nil, fmt.Errorf("failed to build subquery for UNION: %w", err)
		}
		// Default to plain UNION when no type (e.g. "UNION ALL") is given.
		unionType := strings.ToUpper(union.Type)
		if unionType == "" {
			unionType = "UNION"
		}
		unionParts = append(unionParts, fmt.Sprintf("%s %s", unionType, subQuery))
		allArgs = append(allArgs, args...)
	}

	return strings.Join(unionParts, " "), allArgs, nil
}

// buildSelectFields builds the SELECT fields from SelectField structs.
// Fields with an empty expression, an invalid window function, or an
// expression that fails escaping are silently dropped; if nothing survives
// (or fields is empty) it falls back to SELECT *.
func (qb *QueryBuilder) buildSelectFields(fields []SelectField) []string {
	if len(fields) == 0 {
		return []string{"*"}
	}

	var selectedFields []string
	for _, field := range fields {
		expr := field.Expression
		if expr == "" {
			continue
		}

		var finalExpr string

		// Handle window functions
		if field.WindowFunction != nil {
			windowFunc, err := qb.buildWindowFunction(*field.WindowFunction)
			if err != nil {
				// Invalid window function: drop the field rather than fail.
				continue
			}
			finalExpr = windowFunc
		} else {
			// Use the more robust escaping logic for expressions
			finalExpr = qb.escapeSelectExpression(expr)
		}

		if finalExpr == "" {
			continue
		}

		if field.Alias != "" {
			selectedFields = append(selectedFields, fmt.Sprintf("%s AS %s", finalExpr, qb.escapeIdentifier(field.Alias)))
		} else {
			selectedFields = append(selectedFields, finalExpr)
		}
	}

	if len(selectedFields) == 0 {
		return []string{"*"}
	}

	return selectedFields
}

// escapeSelectExpression escapes a SELECT-list expression: function calls are
// validated and passed through unescaped, qualified names are escaped per
// part, and bare names are escaped as identifiers. Returns "" for an
// expression that fails validation.
func (qb *QueryBuilder) escapeSelectExpression(expr string) string {
	// If it's a function call (contains parentheses), validate and return as-is.
	if strings.Contains(expr, "(") {
		if qb.isValidExpression(expr) {
			return expr
		}
		return "" // Invalid expression
	}

	// If it's a qualified column name (table.column), escape each part.
	if strings.Contains(expr, ".") {
		if qb.isValidExpression(expr) {
			parts := strings.Split(expr, ".")
			var escapedParts []string
			for _, part := range parts {
				escapedParts = append(escapedParts, qb.escapeIdentifier(part))
			}
			return strings.Join(escapedParts, ".")
		}
		return "" // Invalid expression
	}

	return qb.escapeIdentifier(expr)
}

// BuildWhereClause builds WHERE/HAVING conditions from FilterGroups.
// Groups are parenthesized and joined by a logical operator; note that the
// connector placed before group i comes from group i's own LogicOp
// (defaulting to AND).
func (qb *QueryBuilder) BuildWhereClause(filterGroups []FilterGroup) (string, []interface{}, error) {
	if len(filterGroups) == 0 {
		return "", nil, nil
	}

	var conditions []string
	var allArgs []interface{}

	for i, group := range filterGroups {
		if len(group.Filters) == 0 {
			continue
		}

		groupCondition, groupArgs, err := qb.buildFilterGroup(group)
		if err != nil {
			return "", nil, err
		}

		if groupCondition != "" {
			if i > 0 {
				logicOp := "AND"
				if group.LogicOp != "" {
					logicOp = strings.ToUpper(group.LogicOp)
				}
				conditions = append(conditions, logicOp)
			}
			conditions = append(conditions, fmt.Sprintf("(%s)", groupCondition))
			allArgs = append(allArgs, groupArgs...)
		}
	}

	return strings.Join(conditions, " "), allArgs, nil
}

// buildFilterGroup builds conditions for a single filter group.
// All filters within the group are joined by the group's LogicOp
// (defaulting to AND).
func (qb *QueryBuilder) buildFilterGroup(group FilterGroup) (string, []interface{}, error) {
	var conditions []string
	var args []interface{}
	logicOp := "AND"
	if group.LogicOp != "" {
		logicOp = strings.ToUpper(group.LogicOp)
	}

	for i, filter := range group.Filters {
		condition, filterArgs, err := qb.buildFilterCondition(filter)
		if err != nil {
			return "", nil, err
		}

		if condition != "" {
			if i > 0 {
				conditions = append(conditions, logicOp)
			}
			conditions = append(conditions, condition)
			args = append(args, filterArgs...)
+ } + } + + return strings.Join(conditions, " "), args, nil +} + +// buildFilterCondition builds a single filter condition with dialect-specific logic +func (qb *QueryBuilder) buildFilterCondition(filter DynamicFilter) (string, []interface{}, error) { + column := qb.validateAndEscapeColumn(filter.Column) + if column == "" { + return "", nil, fmt.Errorf("invalid or disallowed column: %s", filter.Column) + } + + // Handle column-to-column comparison + if valStr, ok := filter.Value.(string); ok && strings.Contains(valStr, ".") && qb.isValidExpression(valStr) && len(strings.Split(valStr, ".")) == 2 { + escapedVal := qb.escapeColumnReference(valStr) + switch filter.Operator { + case OpEqual: + return fmt.Sprintf("%s = %s", column, escapedVal), nil, nil + case OpNotEqual: + return fmt.Sprintf("%s <> %s", column, escapedVal), nil, nil + case OpGreaterThan: + return fmt.Sprintf("%s > %s", column, escapedVal), nil, nil + case OpLessThan: + return fmt.Sprintf("%s < %s", column, escapedVal), nil, nil + } + } + + // Handle JSON operations + switch filter.Operator { + case OpJsonContains, OpJsonNotContains, OpJsonExists, OpJsonNotExists, OpJsonEqual, OpJsonNotEqual: + return qb.buildJsonFilterCondition(filter) + case OpArrayContains, OpArrayNotContains, OpArrayLength: + return qb.buildArrayFilterCondition(filter) + } + + // Handle standard operators + switch filter.Operator { + case OpEqual: + if filter.Value == nil { + return fmt.Sprintf("%s IS NULL", column), nil, nil + } + return fmt.Sprintf("%s = ?", column), []interface{}{filter.Value}, nil + case OpNotEqual: + if filter.Value == nil { + return fmt.Sprintf("%s IS NOT NULL", column), nil, nil + } + return fmt.Sprintf("%s <> ?", column), []interface{}{filter.Value}, nil + case OpLike: + if filter.Value == nil { + return "", nil, nil + } + return fmt.Sprintf("%s LIKE ?", column), []interface{}{filter.Value}, nil + case OpILike: + if filter.Value == nil { + return "", nil, nil + } + switch qb.dbType { + case DBTypePostgreSQL, 
DBTypeSQLite: + return fmt.Sprintf("%s ILIKE ?", column), []interface{}{filter.Value}, nil + case DBTypeMySQL, DBTypeSQLServer: + return fmt.Sprintf("LOWER(%s) LIKE LOWER(?)", column), []interface{}{filter.Value}, nil + default: + return fmt.Sprintf("%s LIKE ?", column), []interface{}{filter.Value}, nil + } + case OpIn, OpNotIn: + values := qb.parseArrayValue(filter.Value) + if len(values) == 0 { + return "1=0", nil, nil + } + op := "IN" + if filter.Operator == OpNotIn { + op = "NOT IN" + } + placeholders := squirrel.Placeholders(len(values)) + return fmt.Sprintf("%s %s (%s)", column, op, placeholders), values, nil + case OpGreaterThan, OpGreaterThanEqual, OpLessThan, OpLessThanEqual: + if filter.Value == nil { + return "", nil, nil + } + op := strings.TrimPrefix(string(filter.Operator), "_") + return fmt.Sprintf("%s %s ?", column, op), []interface{}{filter.Value}, nil + case OpBetween, OpNotBetween: + values := qb.parseArrayValue(filter.Value) + if len(values) != 2 { + return "", nil, fmt.Errorf("between operator requires exactly 2 values") + } + op := "BETWEEN" + if filter.Operator == OpNotBetween { + op = "NOT BETWEEN" + } + return fmt.Sprintf("%s %s ? 
AND ?", column, op), []interface{}{values[0], values[1]}, nil + case OpNull: + return fmt.Sprintf("%s IS NULL", column), nil, nil + case OpNotNull: + return fmt.Sprintf("%s IS NOT NULL", column), nil, nil + case OpContains, OpNotContains, OpStartsWith, OpEndsWith: + if filter.Value == nil { + return "", nil, nil + } + var value string + switch filter.Operator { + case OpContains, OpNotContains: + value = fmt.Sprintf("%%%v%%", filter.Value) + case OpStartsWith: + value = fmt.Sprintf("%v%%", filter.Value) + case OpEndsWith: + value = fmt.Sprintf("%%%v", filter.Value) + } + + switch qb.dbType { + case DBTypePostgreSQL, DBTypeSQLite: + op := "ILIKE" + if strings.Contains(string(filter.Operator), "Not") { + op = "NOT ILIKE" + } + return fmt.Sprintf("%s %s ?", column, op), []interface{}{value}, nil + case DBTypeMySQL, DBTypeSQLServer: + op := "LIKE" + if strings.Contains(string(filter.Operator), "Not") { + op = "NOT LIKE" + } + return fmt.Sprintf("LOWER(%s) %s LOWER(?)", column, op), []interface{}{value}, nil + default: + op := "LIKE" + if strings.Contains(string(filter.Operator), "Not") { + op = "NOT LIKE" + } + return fmt.Sprintf("%s %s ?", column, op), []interface{}{value}, nil + } + default: + return "", nil, fmt.Errorf("unsupported operator: %s", filter.Operator) + } +} + +// buildJsonFilterCondition builds a JSON filter condition +func (qb *QueryBuilder) buildJsonFilterCondition(filter DynamicFilter) (string, []interface{}, error) { + column := qb.validateAndEscapeColumn(filter.Column) + if column == "" { + return "", nil, fmt.Errorf("invalid or disallowed column: %s", filter.Column) + } + + path := "$" + if pathOption, ok := filter.Options["path"].(string); ok && pathOption != "" { + path = pathOption + } + + var expr string + var args []interface{} + + switch filter.Operator { + case OpJsonContains: + switch qb.dbType { + case DBTypePostgreSQL: + expr = fmt.Sprintf("%s @> ?", column) + args = append(args, filter.Value) + case DBTypeMySQL: + expr = 
			fmt.Sprintf("JSON_CONTAINS(%s, ?, '%s')", column, path)
			args = append(args, filter.Value)
		case DBTypeSQLServer:
			expr = fmt.Sprintf("JSON_VALUE(%s, '%s') = ?", column, qb.escapeSqlServerJsonPath(path))
			args = append(args, filter.Value)
		case DBTypeSQLite:
			// NOTE(review): for MySQL/SQLite the path is interpolated into a
			// quoted literal without escaping — confirm path is never
			// user-controlled.
			expr = fmt.Sprintf("json_extract(%s, '%s') = ?", column, path)
			args = append(args, filter.Value)
		default:
			return "", nil, fmt.Errorf("JSON operations not supported for database type: %s", qb.dbType)
		}
	case OpJsonNotContains:
		switch qb.dbType {
		case DBTypePostgreSQL:
			expr = fmt.Sprintf("NOT (%s @> ?)", column)
			args = append(args, filter.Value)
		case DBTypeMySQL:
			expr = fmt.Sprintf("NOT JSON_CONTAINS(%s, ?, '%s')", column, path)
			args = append(args, filter.Value)
		case DBTypeSQLServer:
			expr = fmt.Sprintf("JSON_VALUE(%s, '%s') <> ?", column, qb.escapeSqlServerJsonPath(path))
			args = append(args, filter.Value)
		case DBTypeSQLite:
			expr = fmt.Sprintf("json_extract(%s, '%s') <> ?", column, path)
			args = append(args, filter.Value)
		default:
			return "", nil, fmt.Errorf("JSON operations not supported for database type: %s", qb.dbType)
		}
	case OpJsonExists:
		switch qb.dbType {
		case DBTypePostgreSQL:
			expr = fmt.Sprintf("jsonb_path_exists(%s, '%s')", column, path)
		case DBTypeMySQL:
			expr = fmt.Sprintf("JSON_CONTAINS_PATH(%s, 'one', '%s')", column, path)
		case DBTypeSQLServer:
			expr = fmt.Sprintf("JSON_VALUE(%s, '%s') IS NOT NULL", column, qb.escapeSqlServerJsonPath(path))
		case DBTypeSQLite:
			expr = fmt.Sprintf("json_extract(%s, '%s') IS NOT NULL", column, path)
		default:
			return "", nil, fmt.Errorf("JSON operations not supported for database type: %s", qb.dbType)
		}
	case OpJsonNotExists:
		switch qb.dbType {
		case DBTypePostgreSQL:
			expr = fmt.Sprintf("NOT jsonb_path_exists(%s, '%s')", column, path)
		case DBTypeMySQL:
			expr = fmt.Sprintf("NOT JSON_CONTAINS_PATH(%s, 'one', '%s')", column, path)
		case DBTypeSQLServer:
			expr = fmt.Sprintf("JSON_VALUE(%s, '%s') IS NULL", column, qb.escapeSqlServerJsonPath(path))
		case DBTypeSQLite:
			expr = fmt.Sprintf("json_extract(%s, '%s') IS NULL", column, path)
		default:
			return "", nil, fmt.Errorf("JSON operations not supported for database type: %s", qb.dbType)
		}
	case OpJsonEqual:
		switch qb.dbType {
		case DBTypePostgreSQL:
			expr = fmt.Sprintf("%s->>%s = ?", column, qb.escapeJsonPath(path))
			args = append(args, filter.Value)
		case DBTypeMySQL:
			expr = fmt.Sprintf("JSON_EXTRACT(%s, '%s') = ?", column, path)
			args = append(args, filter.Value)
		case DBTypeSQLServer:
			expr = fmt.Sprintf("JSON_VALUE(%s, '%s') = ?", column, qb.escapeSqlServerJsonPath(path))
			args = append(args, filter.Value)
		case DBTypeSQLite:
			expr = fmt.Sprintf("json_extract(%s, '%s') = ?", column, path)
			args = append(args, filter.Value)
		default:
			return "", nil, fmt.Errorf("JSON operations not supported for database type: %s", qb.dbType)
		}
	case OpJsonNotEqual:
		switch qb.dbType {
		case DBTypePostgreSQL:
			expr = fmt.Sprintf("%s->>%s <> ?", column, qb.escapeJsonPath(path))
			args = append(args, filter.Value)
		case DBTypeMySQL:
			expr = fmt.Sprintf("JSON_EXTRACT(%s, '%s') <> ?", column, path)
			args = append(args, filter.Value)
		case DBTypeSQLServer:
			expr = fmt.Sprintf("JSON_VALUE(%s, '%s') <> ?", column, qb.escapeSqlServerJsonPath(path))
			args = append(args, filter.Value)
		case DBTypeSQLite:
			expr = fmt.Sprintf("json_extract(%s, '%s') <> ?", column, path)
			args = append(args, filter.Value)
		default:
			return "", nil, fmt.Errorf("JSON operations not supported for database type: %s", qb.dbType)
		}
	default:
		return "", nil, fmt.Errorf("unsupported JSON operator: %s", filter.Operator)
	}

	return expr, args, nil
}

// buildArrayFilterCondition builds an array filter condition
// (contains / not-contains / length) for the configured dialect.
// Non-PostgreSQL dialects treat the column as a JSON array.
func (qb *QueryBuilder) buildArrayFilterCondition(filter DynamicFilter) (string, []interface{}, error) {
	column := qb.validateAndEscapeColumn(filter.Column)
	if column == "" {
		return "", nil, fmt.Errorf("invalid or disallowed column: %s", filter.Column)
	}

	var expr string
	var args []interface{}

	switch filter.Operator {
	case OpArrayContains:
		switch qb.dbType {
		case DBTypePostgreSQL:
			expr = fmt.Sprintf("? = ANY(%s)", column)
			args = append(args, filter.Value)
		case DBTypeMySQL:
			expr = fmt.Sprintf("JSON_CONTAINS(%s, JSON_QUOTE(?))", column)
			args = append(args, filter.Value)
		case DBTypeSQLServer:
			expr = fmt.Sprintf("? IN (SELECT value FROM OPENJSON(%s))", column)
			args = append(args, filter.Value)
		case DBTypeSQLite:
			expr = fmt.Sprintf("EXISTS (SELECT 1 FROM json_each(%s) WHERE json_each.value = ?)", column)
			args = append(args, filter.Value)
		default:
			return "", nil, fmt.Errorf("Array operations not supported for database type: %s", qb.dbType)
		}
	case OpArrayNotContains:
		switch qb.dbType {
		case DBTypePostgreSQL:
			expr = fmt.Sprintf("? <> ALL(%s)", column)
			args = append(args, filter.Value)
		case DBTypeMySQL:
			expr = fmt.Sprintf("NOT JSON_CONTAINS(%s, JSON_QUOTE(?))", column)
			args = append(args, filter.Value)
		case DBTypeSQLServer:
			expr = fmt.Sprintf("? NOT IN (SELECT value FROM OPENJSON(%s))", column)
			args = append(args, filter.Value)
		case DBTypeSQLite:
			expr = fmt.Sprintf("NOT EXISTS (SELECT 1 FROM json_each(%s) WHERE json_each.value = ?)", column)
			args = append(args, filter.Value)
		default:
			return "", nil, fmt.Errorf("Array operations not supported for database type: %s", qb.dbType)
		}
	case OpArrayLength:
		// Requires filter.Options["length"] as an int in every dialect.
		switch qb.dbType {
		case DBTypePostgreSQL:
			if lengthOption, ok := filter.Options["length"].(int); ok {
				expr = fmt.Sprintf("array_length(%s, 1) = ?", column)
				args = append(args, lengthOption)
			} else {
				return "", nil, fmt.Errorf("array_length operator requires 'length' option")
			}
		case DBTypeMySQL:
			if lengthOption, ok := filter.Options["length"].(int); ok {
				expr = fmt.Sprintf("JSON_LENGTH(%s) = ?", column)
				args = append(args, lengthOption)
			} else {
				return "", nil, fmt.Errorf("array_length operator requires 'length' option")
			}
		case DBTypeSQLServer:
			if lengthOption, ok := filter.Options["length"].(int); ok {
				expr = fmt.Sprintf("(SELECT COUNT(*) FROM OPENJSON(%s)) = ?", column)
				args = append(args, lengthOption)
			} else {
				return "", nil, fmt.Errorf("array_length operator requires 'length' option")
			}
		case DBTypeSQLite:
			if lengthOption, ok := filter.Options["length"].(int); ok {
				expr = fmt.Sprintf("json_array_length(%s) = ?", column)
				args = append(args, lengthOption)
			} else {
				return "", nil, fmt.Errorf("array_length operator requires 'length' option")
			}
		default:
			return "", nil, fmt.Errorf("Array operations not supported for database type: %s", qb.dbType)
		}
	default:
		return "", nil, fmt.Errorf("unsupported array operator: %s", filter.Operator)
	}

	return expr, args, nil
}

// =============================================================================
// SECTION 6: EXECUTION METHODS (NEW)
// Methods for executing queries directly, with performance logging.
+// ============================================================================= + +func (qb *QueryBuilder) ExecuteQuery(ctx context.Context, db *sqlx.DB, query DynamicQuery, dest interface{}) error { + // sql, args, err := qb.BuildQuery(query) + // if err != nil { + // return err + // } + // start := time.Now() + // err = db.SelectContext(ctx, dest, sql, args...) + // fmt.Printf("[DEBUG] Query executed in %v\n", time.Since(start)) + // return err + sql, args, err := qb.BuildQuery(query) + if err != nil { + return err + } + + // Set timeout if not already in context + if _, hasDeadline := ctx.Deadline(); !hasDeadline && qb.queryTimeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, qb.queryTimeout) + defer cancel() + } + + start := time.Now() + + // Check if dest is a pointer to a slice of maps + destValue := reflect.ValueOf(dest) + if destValue.Kind() != reflect.Ptr || destValue.IsNil() { + return fmt.Errorf("dest must be a non-nil pointer") + } + + destElem := destValue.Elem() + if destElem.Kind() == reflect.Slice { + sliceType := destElem.Type().Elem() + if sliceType.Kind() == reflect.Map && + sliceType.Key().Kind() == reflect.String && + sliceType.Elem().Kind() == reflect.Interface { + + // Handle slice of map[string]interface{} + rows, err := db.QueryxContext(ctx, sql, args...) + if err != nil { + return err + } + defer rows.Close() + + for rows.Next() { + row := make(map[string]interface{}) + if err := rows.MapScan(row); err != nil { + return err + } + destElem.Set(reflect.Append(destElem, reflect.ValueOf(row))) + } + + fmt.Printf("[DEBUG] Query executed in %v\n", time.Since(start)) + return nil + } + } + + // Default case: use SelectContext + err = db.SelectContext(ctx, dest, sql, args...) 
+ if qb.enableQueryLogging { + fmt.Printf("[DEBUG] Query executed in %v\n", time.Since(start)) + } + return err +} + +func (qb *QueryBuilder) ExecuteQueryRow(ctx context.Context, db *sqlx.DB, query DynamicQuery, dest interface{}) error { + sql, args, err := qb.BuildQuery(query) + if err != nil { + return err + } + + // Set timeout if not already in context + if _, hasDeadline := ctx.Deadline(); !hasDeadline && qb.queryTimeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, qb.queryTimeout) + defer cancel() + } + + start := time.Now() + err = db.GetContext(ctx, dest, sql, args...) + if qb.enableQueryLogging { + fmt.Printf("[DEBUG] QueryRow executed in %v\n", time.Since(start)) + } + return err +} + +func (qb *QueryBuilder) ExecuteCount(ctx context.Context, db *sqlx.DB, query DynamicQuery) (int64, error) { + sql, args, err := qb.BuildCountQuery(query) + if err != nil { + return 0, err + } + + // Set timeout if not already in context + if _, hasDeadline := ctx.Deadline(); !hasDeadline && qb.queryTimeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, qb.queryTimeout) + defer cancel() + } + + var count int64 + start := time.Now() + err = db.GetContext(ctx, &count, sql, args...) + if qb.enableQueryLogging { + fmt.Printf("[DEBUG] Count query executed in %v\n", time.Since(start)) + } + return count, err +} + +func (qb *QueryBuilder) ExecuteInsert(ctx context.Context, db *sqlx.DB, table string, data InsertData, returningColumns ...string) (sql.Result, error) { + // Security check for table name + if qb.enableSecurityChecks && len(qb.allowedTables) > 0 && !qb.allowedTables[table] { + return nil, fmt.Errorf("disallowed table: %s", table) + } + + sql, args, err := qb.BuildInsertQuery(table, data, returningColumns...) 
	if err != nil {
		return nil, err
	}

	// Set timeout if not already in context
	if _, hasDeadline := ctx.Deadline(); !hasDeadline && qb.queryTimeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, qb.queryTimeout)
		defer cancel()
	}

	start := time.Now()
	result, err := db.ExecContext(ctx, sql, args...)
	if qb.enableQueryLogging {
		fmt.Printf("[DEBUG] Insert query executed in %v\n", time.Since(start))
	}
	return result, err
}

// ExecuteUpdate builds and executes an UPDATE on table, restricted by the
// given filter groups. The table must pass the allowed-tables security check
// when enabled.
// NOTE(review): returningColumns are forwarded to the query builder, but the
// statement runs through ExecContext, which cannot return RETURNING rows —
// confirm whether callers expect those values back.
func (qb *QueryBuilder) ExecuteUpdate(ctx context.Context, db *sqlx.DB, table string, updateData UpdateData, filters []FilterGroup, returningColumns ...string) (sql.Result, error) {
	// Security check for table name
	if qb.enableSecurityChecks && len(qb.allowedTables) > 0 && !qb.allowedTables[table] {
		return nil, fmt.Errorf("disallowed table: %s", table)
	}

	// The local "sql" shadows the database/sql package inside this body.
	sql, args, err := qb.BuildUpdateQuery(table, updateData, filters, returningColumns...)
	if err != nil {
		return nil, err
	}

	// Set timeout if not already in context
	if _, hasDeadline := ctx.Deadline(); !hasDeadline && qb.queryTimeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, qb.queryTimeout)
		defer cancel()
	}

	start := time.Now()
	result, err := db.ExecContext(ctx, sql, args...)
	if qb.enableQueryLogging {
		fmt.Printf("[DEBUG] Update query executed in %v\n", time.Since(start))
	}
	return result, err
}

// ExecuteDelete builds and executes a DELETE on table, restricted by the
// given filter groups. The table must pass the allowed-tables security check
// when enabled.
func (qb *QueryBuilder) ExecuteDelete(ctx context.Context, db *sqlx.DB, table string, filters []FilterGroup, returningColumns ...string) (sql.Result, error) {
	// Security check for table name
	if qb.enableSecurityChecks && len(qb.allowedTables) > 0 && !qb.allowedTables[table] {
		return nil, fmt.Errorf("disallowed table: %s", table)
	}

	sql, args, err := qb.BuildDeleteQuery(table, filters, returningColumns...)
	if err != nil {
		return nil, err
	}

	// Set timeout if not already in context
	if _, hasDeadline := ctx.Deadline(); !hasDeadline && qb.queryTimeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, qb.queryTimeout)
		defer cancel()
	}

	start := time.Now()
	result, err := db.ExecContext(ctx, sql, args...)
	if qb.enableQueryLogging {
		fmt.Printf("[DEBUG] Delete query executed in %v\n", time.Since(start))
	}
	return result, err
}

// ExecuteUpsert builds and executes an upsert (insert-or-update on conflict)
// on table. conflictColumns identify the conflict target and updateColumns
// the columns to refresh on conflict. The table must pass the allowed-tables
// security check when enabled.
func (qb *QueryBuilder) ExecuteUpsert(ctx context.Context, db *sqlx.DB, table string, insertData InsertData, conflictColumns []string, updateColumns []string, returningColumns ...string) (sql.Result, error) {
	// Security check for table name
	if qb.enableSecurityChecks && len(qb.allowedTables) > 0 && !qb.allowedTables[table] {
		return nil, fmt.Errorf("disallowed table: %s", table)
	}

	sql, args, err := qb.BuildUpsertQuery(table, insertData, conflictColumns, updateColumns, returningColumns...)
	if err != nil {
		return nil, err
	}

	// Set timeout if not already in context
	if _, hasDeadline := ctx.Deadline(); !hasDeadline && qb.queryTimeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, qb.queryTimeout)
		defer cancel()
	}

	start := time.Now()
	result, err := db.ExecContext(ctx, sql, args...)
+ if qb.enableQueryLogging { + fmt.Printf("[DEBUG] Upsert query executed in %v\n", time.Since(start)) + } + return result, err +} + +// --- Helper and Validation Methods --- + +func (qb *QueryBuilder) buildGroupByColumns(fields []string) []string { + var groupCols []string + for _, field := range fields { + col := qb.validateAndEscapeColumn(field) + if col != "" { + groupCols = append(groupCols, col) + } + } + return groupCols +} + +func (qb *QueryBuilder) parseArrayValue(value interface{}) []interface{} { + if value == nil { + return nil + } + if reflect.TypeOf(value).Kind() == reflect.Slice { + v := reflect.ValueOf(value) + result := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + result[i] = v.Index(i).Interface() + } + return result + } + if str, ok := value.(string); ok { + if strings.Contains(str, ",") { + parts := strings.Split(str, ",") + result := make([]interface{}, len(parts)) + for i, part := range parts { + result[i] = strings.TrimSpace(part) + } + return result + } + return []interface{}{str} + } + return []interface{}{value} +} + +func (qb *QueryBuilder) validateAndEscapeColumn(field string) string { + if field == "" { + return "" + } + // Allow complex expressions like functions + if strings.Contains(field, "(") { + if qb.isValidExpression(field) { + return field // Don't escape complex expressions, assume they are safe + } + return "" + } + // Handle dotted column names like "table.column" + if strings.Contains(field, ".") { + if qb.isValidExpression(field) { + // Split on dot and escape each part + parts := strings.Split(field, ".") + var escapedParts []string + for _, part := range parts { + escapedParts = append(escapedParts, qb.escapeIdentifier(part)) + } + return strings.Join(escapedParts, ".") + } + return "" + } + // Simple column name + if qb.allowedColumns != nil && !qb.allowedColumns[field] { + return "" + } + return qb.escapeIdentifier(field) +} + +func (qb *QueryBuilder) isValidExpression(expr string) bool { + // This is 
a simplified check. A more robust solution might use a proper SQL parser library. + // For now, we allow alphanumeric, underscore, dots, parentheses, and common operators. + // For SQL Server, allow brackets [] and spaces for column names. + allowedChars := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_.,() *-/[]" + for _, r := range expr { + if !strings.ContainsRune(allowedChars, r) { + return false + } + } + // Check for dangerous keywords + dangerousPatterns := []string{"--", "/*", "*/", "union", "select", "insert", "update", "delete", "drop", "alter", "create", "exec", "execute"} + lowerExpr := strings.ToLower(expr) + for _, pattern := range dangerousPatterns { + if strings.Contains(lowerExpr, pattern) { + return false + } + } + return true +} + +func (qb *QueryBuilder) isValidFunctionName(name string) bool { + // Check if the function name is a valid SQL function + validFunctions := map[string]bool{ + // Aggregate functions + "count": true, "sum": true, "avg": true, "min": true, "max": true, + // Window functions + "row_number": true, "rank": true, "dense_rank": true, "ntile": true, + "lag": true, "lead": true, "first_value": true, "last_value": true, + // JSON functions + "json_extract": true, "json_contains": true, "json_search": true, + "json_array": true, "json_object": true, "json_merge": true, + // Other functions + "concat": true, "substring": true, "upper": true, "lower": true, + "trim": true, "coalesce": true, "nullif": true, "isnull": true, + } + + return validFunctions[strings.ToLower(name)] +} + +func (qb *QueryBuilder) escapeColumnReference(col string) string { + parts := strings.Split(col, ".") + var escaped []string + for _, p := range parts { + if strings.HasPrefix(p, "[") && strings.HasSuffix(p, "]") { + escaped = append(escaped, p) + } else { + escaped = append(escaped, qb.escapeIdentifier(p)) + } + } + return strings.Join(escaped, ".") +} + +func (qb *QueryBuilder) escapeIdentifier(col string) string { + switch qb.dbType { 
+ case DBTypePostgreSQL, DBTypeSQLite: + return fmt.Sprintf("\"%s\"", strings.ReplaceAll(col, "\"", "\"\"")) + case DBTypeMySQL: + return fmt.Sprintf("`%s`", strings.ReplaceAll(col, "`", "``")) + case DBTypeSQLServer: + return fmt.Sprintf("[%s]", strings.ReplaceAll(col, "]", "]]")) + default: + return col + } +} + +// checkForSqlInjectionInArgs checks for potential SQL injection patterns in query arguments +func (qb *QueryBuilder) checkForSqlInjectionInArgs(args []interface{}) error { + if !qb.enableSecurityChecks { + return nil + } + + for _, arg := range args { + if str, ok := arg.(string); ok { + lowerStr := strings.ToLower(str) + for _, pattern := range qb.dangerousPatterns { + if pattern.MatchString(lowerStr) { + return fmt.Errorf("potential SQL injection detected in query argument: pattern %s matched", pattern.String()) + } + } + } + } + return nil +} + +// checkForSqlInjectionInSQL checks for potential SQL injection patterns in the final SQL +func (qb *QueryBuilder) checkForSqlInjectionInSQL(sql string) error { + if !qb.enableSecurityChecks { + return nil + } + + lowerSQL := strings.ToLower(sql) + for _, pattern := range qb.dangerousPatterns { + if pattern.MatchString(lowerSQL) { + return fmt.Errorf("potential SQL injection detected in SQL: pattern %s matched", pattern.String()) + } + } + return nil +} + +// --- Other Query Builders (Insert, Update, Delete, Upsert, Count) --- + +// BuildCountQuery builds a count query +func (qb *QueryBuilder) BuildCountQuery(query DynamicQuery) (string, []interface{}, error) { + // For a count query, we don't need fields, joins, or unions. + // We only need FROM, WHERE, GROUP BY, HAVING. 
+ countQuery := DynamicQuery{ + From: query.From, + Aliases: query.Aliases, + Filters: query.Filters, + GroupBy: query.GroupBy, + Having: query.Having, + // Joins are important for count with filters on joined tables + Joins: query.Joins, + } + + // Build the base query for the count using Squirrel's From and Join methods + fromClause := qb.buildFromClause(countQuery.From, countQuery.Aliases) + baseQuery := qb.sqlBuilder.Select("COUNT(*)").From(fromClause) + + // Add JOINs using Squirrel's Join method + if len(countQuery.Joins) > 0 { + for _, join := range countQuery.Joins { + // Security check for joined table + if qb.enableSecurityChecks && len(qb.allowedTables) > 0 && !qb.allowedTables[join.Table] { + return "", nil, fmt.Errorf("disallowed table in join: %s", join.Table) + } + + joinType, tableWithAlias, onClause, joinArgs, err := qb.buildSingleJoinClause(join) + if err != nil { + return "", nil, err + } + joinStr := tableWithAlias + " ON " + onClause + switch strings.ToUpper(joinType) { + case "LEFT": + baseQuery = baseQuery.LeftJoin(joinStr, joinArgs...) + case "RIGHT": + baseQuery = baseQuery.RightJoin(joinStr, joinArgs...) + case "FULL": + baseQuery = baseQuery.Join("FULL JOIN "+joinStr, joinArgs...) + default: + baseQuery = baseQuery.Join(joinStr, joinArgs...) + } + } + } + + if len(countQuery.Filters) > 0 { + whereClause, whereArgs, err := qb.BuildWhereClause(countQuery.Filters) + if err != nil { + return "", nil, err + } + baseQuery = baseQuery.Where(whereClause, whereArgs...) + } + + if len(countQuery.GroupBy) > 0 { + baseQuery = baseQuery.GroupBy(qb.buildGroupByColumns(countQuery.GroupBy)...) + } + + if len(countQuery.Having) > 0 { + havingClause, havingArgs, err := qb.BuildWhereClause(countQuery.Having) + if err != nil { + return "", nil, err + } + baseQuery = baseQuery.Having(havingClause, havingArgs...) 
+ } + + sql, args, err := baseQuery.ToSql() + if err != nil { + return "", nil, fmt.Errorf("failed to build COUNT query: %w", err) + } + + if qb.enableQueryLogging { + fmt.Printf("[DEBUG] COUNT SQL query: %s\n", sql) + fmt.Printf("[DEBUG] COUNT query args: %v\n", args) + } + return sql, args, nil +} + +// BuildInsertQuery builds an INSERT query +func (qb *QueryBuilder) BuildInsertQuery(table string, data InsertData, returningColumns ...string) (string, []interface{}, error) { + // Validate columns + for _, col := range data.Columns { + if qb.allowedColumns != nil && !qb.allowedColumns[col] { + return "", nil, fmt.Errorf("disallowed column: %s", col) + } + } + + // Start with basic insert + insert := qb.sqlBuilder.Insert(table).Columns(data.Columns...).Values(data.Values...) + + // Handle JSON values - we need to modify the insert statement + if len(data.JsonValues) > 0 { + // Create a new insert builder with all columns including JSON columns + allColumns := make([]string, len(data.Columns)) + copy(allColumns, data.Columns) + + allValues := make([]interface{}, len(data.Values)) + copy(allValues, data.Values) + + for col, val := range data.JsonValues { + allColumns = append(allColumns, col) + jsonVal, err := json.Marshal(val) + if err != nil { + return "", nil, fmt.Errorf("failed to marshal JSON value for column %s: %w", col, err) + } + allValues = append(allValues, jsonVal) + } + + insert = qb.sqlBuilder.Insert(table).Columns(allColumns...).Values(allValues...) 
+ } + + if len(returningColumns) > 0 { + if qb.dbType == DBTypePostgreSQL { + insert = insert.Suffix("RETURNING " + strings.Join(returningColumns, ", ")) + } else { + return "", nil, fmt.Errorf("RETURNING not supported for database type: %s", qb.dbType) + } + } + + sql, args, err := insert.ToSql() + if err != nil { + return "", nil, fmt.Errorf("failed to build INSERT query: %w", err) + } + + return sql, args, nil +} + +// BuildUpdateQuery builds an UPDATE query +func (qb *QueryBuilder) BuildUpdateQuery(table string, updateData UpdateData, filters []FilterGroup, returningColumns ...string) (string, []interface{}, error) { + // Validate columns + for _, col := range updateData.Columns { + if qb.allowedColumns != nil && !qb.allowedColumns[col] { + return "", nil, fmt.Errorf("disallowed column: %s", col) + } + } + + // Start with basic update + update := qb.sqlBuilder.Update(table).SetMap(qb.buildSetMap(updateData)) + + // Handle JSON updates - we need to modify the update statement + if len(updateData.JsonUpdates) > 0 { + // Create a new set map including JSON updates + setMap := qb.buildSetMap(updateData) + + for col, jsonUpdate := range updateData.JsonUpdates { + switch qb.dbType { + case DBTypePostgreSQL: + jsonVal, err := json.Marshal(jsonUpdate.Value) + if err != nil { + return "", nil, fmt.Errorf("failed to marshal JSON value for column %s: %w", col, err) + } + // Use jsonb_set function for updating specific paths + setMap[col] = squirrel.Expr(fmt.Sprintf("jsonb_set(%s, '%s', ?)", qb.escapeIdentifier(col), jsonUpdate.Path), jsonVal) + case DBTypeMySQL: + jsonVal, err := json.Marshal(jsonUpdate.Value) + if err != nil { + return "", nil, fmt.Errorf("failed to marshal JSON value for column %s: %w", col, err) + } + // Use JSON_SET function for updating specific paths + setMap[col] = squirrel.Expr(fmt.Sprintf("JSON_SET(%s, '%s', ?)", qb.escapeIdentifier(col), jsonUpdate.Path), jsonVal) + case DBTypeSQLServer: + jsonVal, err := json.Marshal(jsonUpdate.Value) + if err 
!= nil { + return "", nil, fmt.Errorf("failed to marshal JSON value for column %s: %w", col, err) + } + // Use JSON_MODIFY function for updating specific paths + setMap[col] = squirrel.Expr(fmt.Sprintf("JSON_MODIFY(%s, '%s', ?)", qb.escapeIdentifier(col), jsonUpdate.Path), jsonVal) + case DBTypeSQLite: + jsonVal, err := json.Marshal(jsonUpdate.Value) + if err != nil { + return "", nil, fmt.Errorf("failed to marshal JSON value for column %s: %w", col, err) + } + // SQLite doesn't have a built-in JSON_SET function, so we need to use json_patch + setMap[col] = squirrel.Expr(fmt.Sprintf("json_patch(%s, ?)", qb.escapeIdentifier(col)), jsonVal) + } + } + + update = qb.sqlBuilder.Update(table).SetMap(setMap) + } + + if len(filters) > 0 { + whereClause, whereArgs, err := qb.BuildWhereClause(filters) + if err != nil { + return "", nil, err + } + update = update.Where(whereClause, whereArgs...) + } + + if len(returningColumns) > 0 { + if qb.dbType == DBTypePostgreSQL { + update = update.Suffix("RETURNING " + strings.Join(returningColumns, ", ")) + } else { + return "", nil, fmt.Errorf("RETURNING not supported for database type: %s", qb.dbType) + } + } + + sql, args, err := update.ToSql() + if err != nil { + return "", nil, fmt.Errorf("failed to build UPDATE query: %w", err) + } + + return sql, args, nil +} + +// buildSetMap builds a map for SetMap from UpdateData +func (qb *QueryBuilder) buildSetMap(updateData UpdateData) map[string]interface{} { + setMap := make(map[string]interface{}) + for i, col := range updateData.Columns { + setMap[col] = updateData.Values[i] + } + return setMap +} + +// BuildDeleteQuery builds a DELETE query +func (qb *QueryBuilder) BuildDeleteQuery(table string, filters []FilterGroup, returningColumns ...string) (string, []interface{}, error) { + delete := qb.sqlBuilder.Delete(table) + + if len(filters) > 0 { + whereClause, whereArgs, err := qb.BuildWhereClause(filters) + if err != nil { + return "", nil, err + } + delete = delete.Where(whereClause, 
whereArgs...)
	}

	if len(returningColumns) > 0 {
		if qb.dbType == DBTypePostgreSQL {
			delete = delete.Suffix("RETURNING " + strings.Join(returningColumns, ", "))
		} else {
			return "", nil, fmt.Errorf("RETURNING not supported for database type: %s", qb.dbType)
		}
	}

	sql, args, err := delete.ToSql()
	if err != nil {
		return "", nil, fmt.Errorf("failed to build DELETE query: %w", err)
	}

	return sql, args, nil
}

// BuildUpsertQuery builds an UPSERT query: INSERT ... ON CONFLICT DO UPDATE
// for PostgreSQL, INSERT ... ON DUPLICATE KEY UPDATE for MySQL. Other
// dialects are rejected. RETURNING is honored on PostgreSQL only.
func (qb *QueryBuilder) BuildUpsertQuery(table string, insertData InsertData, conflictColumns []string, updateColumns []string, returningColumns ...string) (string, []interface{}, error) {
	// Validate every referenced column against the allowlist, including the
	// conflict columns, which are spliced into the SQL text (they previously
	// bypassed the allowlist entirely).
	for _, cols := range [][]string{insertData.Columns, updateColumns, conflictColumns} {
		for _, col := range cols {
			if qb.allowedColumns != nil && !qb.allowedColumns[col] {
				return "", nil, fmt.Errorf("disallowed column: %s", col)
			}
		}
	}

	// Merge plain values and JSON values into one column/value list. This
	// logic was previously duplicated verbatim in both dialect branches.
	allColumns := make([]string, len(insertData.Columns), len(insertData.Columns)+len(insertData.JsonValues))
	copy(allColumns, insertData.Columns)
	allValues := make([]interface{}, len(insertData.Values), len(insertData.Values)+len(insertData.JsonValues))
	copy(allValues, insertData.Values)
	for col, val := range insertData.JsonValues {
		jsonVal, err := json.Marshal(val)
		if err != nil {
			return "", nil, fmt.Errorf("failed to marshal JSON value for column %s: %w", col, err)
		}
		allColumns = append(allColumns, col)
		allValues = append(allValues, jsonVal)
	}

	insert := qb.sqlBuilder.Insert(table).Columns(allColumns...).Values(allValues...)

	switch qb.dbType {
	case DBTypePostgreSQL:
		if len(conflictColumns) > 0 {
			// Escape the conflict target identifiers, matching how the
			// update columns are escaped.
			escaped := make([]string, len(conflictColumns))
			for i, col := range conflictColumns {
				escaped[i] = qb.escapeIdentifier(col)
			}
			set := make([]string, 0, len(updateColumns))
			for _, col := range updateColumns {
				set = append(set, fmt.Sprintf("%s = EXCLUDED.%s", qb.escapeIdentifier(col), qb.escapeIdentifier(col)))
			}
			insert = insert.Suffix(fmt.Sprintf("ON CONFLICT (%s) DO UPDATE SET %s", strings.Join(escaped, ", "), strings.Join(set, ", ")))
		}
		if len(returningColumns) > 0 {
			insert = insert.Suffix("RETURNING " + strings.Join(returningColumns, ", "))
		}
	case DBTypeMySQL:
		if len(updateColumns) > 0 {
			// NOTE(review): VALUES() inside ON DUPLICATE KEY UPDATE is
			// deprecated as of MySQL 8.0.20 but still functional; migrating
			// to the row-alias syntax would need a server-version check.
			set := make([]string, 0, len(updateColumns))
			for _, col := range updateColumns {
				set = append(set, fmt.Sprintf("%s = VALUES(%s)", qb.escapeIdentifier(col), qb.escapeIdentifier(col)))
			}
			insert = insert.Suffix("ON DUPLICATE KEY UPDATE " + strings.Join(set, ", "))
		}
	default:
		return "", nil, fmt.Errorf("UPSERT not supported for database type: %s", qb.dbType)
	}

	sql, args, err := insert.ToSql()
	if err != nil {
		return "", nil, fmt.Errorf("failed to build UPSERT query: %w", err)
	}
	return sql, args, nil
}

// --- QueryParser (for parsing URL query strings) ---

// QueryParser translates URL query parameters (fields, filter[...], sort,
// limit, offset) into a DynamicQuery.
type QueryParser struct {
	defaultLimit int // page size applied when the request omits "limit"
	maxLimit     int // upper bound; larger requested limits are ignored
}

// NewQueryParser returns a parser with a default page size of 10, capped at 100.
func NewQueryParser() *QueryParser {
	return &QueryParser{defaultLimit: 10, maxLimit: 100}
}

// SetLimits overrides the default and maximum page sizes.
func (qp *QueryParser) SetLimits(defaultLimit, maxLimit int) *QueryParser {
	qp.defaultLimit = defaultLimit
	qp.maxLimit = maxLimit
	return qp
}

// ParseQuery parses URL query parameters into a DynamicQuery struct.
+func (qp *QueryParser) ParseQuery(values url.Values, defaultTable string) (DynamicQuery, error) { + query := DynamicQuery{ + From: defaultTable, + Limit: qp.defaultLimit, + Offset: 0, + } + + // Parse fields + if fields := values.Get("fields"); fields != "" { + if fields == "*" { + query.Fields = []SelectField{{Expression: "*"}} + } else { + fieldList := strings.Split(fields, ",") + for _, field := range fieldList { + query.Fields = append(query.Fields, SelectField{Expression: strings.TrimSpace(field)}) + } + } + } else { + query.Fields = []SelectField{{Expression: "*"}} + } + + // Parse pagination + if limit := values.Get("limit"); limit != "" { + if l, err := strconv.Atoi(limit); err == nil && l > 0 && l <= qp.maxLimit { + query.Limit = l + } + } + if offset := values.Get("offset"); offset != "" { + if o, err := strconv.Atoi(offset); err == nil && o >= 0 { + query.Offset = o + } + } + + // Parse filters + filters, err := qp.parseFilters(values) + if err != nil { + return query, err + } + query.Filters = filters + + // Parse sorting + sorts, err := qp.parseSorting(values) + if err != nil { + return query, err + } + query.Sort = sorts + + return query, nil +} + +func (qp *QueryParser) parseFilters(values url.Values) ([]FilterGroup, error) { + filterMap := make(map[string]map[string]string) + for key, vals := range values { + if strings.HasPrefix(key, "filter[") && strings.HasSuffix(key, "]") { + parts := strings.Split(key[7:len(key)-1], "][") + if len(parts) == 2 { + column, operator := parts[0], parts[1] + if filterMap[column] == nil { + filterMap[column] = make(map[string]string) + } + if len(vals) > 0 { + filterMap[column][operator] = vals[0] + } + } + } + } + if len(filterMap) == 0 { + return nil, nil + } + var filters []DynamicFilter + for column, operators := range filterMap { + for opStr, value := range operators { + operator := FilterOperator(opStr) + var parsedValue interface{} + switch operator { + case OpIn, OpNotIn: + if value != "" { + parsedValue = 
strings.Split(value, ",") + } + case OpBetween, OpNotBetween: + if value != "" { + parts := strings.Split(value, ",") + if len(parts) == 2 { + parsedValue = []interface{}{strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1])} + } + } + case OpNull, OpNotNull: + parsedValue = nil + default: + parsedValue = value + } + filters = append(filters, DynamicFilter{Column: column, Operator: operator, Value: parsedValue}) + } + } + if len(filters) == 0 { + return nil, nil + } + return []FilterGroup{{Filters: filters, LogicOp: "AND"}}, nil +} + +func (qp *QueryParser) parseSorting(values url.Values) ([]SortField, error) { + sortParam := values.Get("sort") + if sortParam == "" { + return nil, nil + } + var sorts []SortField + fields := strings.Split(sortParam, ",") + for _, field := range fields { + field = strings.TrimSpace(field) + if field == "" { + continue + } + order, column := "ASC", field + if strings.HasPrefix(field, "-") { + order = "DESC" + column = field[1:] + } else if strings.HasPrefix(field, "+") { + column = field[1:] + } + sorts = append(sorts, SortField{Column: column, Order: order}) + } + return sorts, nil +} + +// ParseQueryWithDefaultFields parses URL query parameters into a DynamicQuery struct with default fields. 
+func (qp *QueryParser) ParseQueryWithDefaultFields(values url.Values, defaultTable string, defaultFields []string) (DynamicQuery, error) { + query, err := qp.ParseQuery(values, defaultTable) + if err != nil { + return query, err + } + + // If no fields specified, use default fields + if len(query.Fields) == 0 || (len(query.Fields) == 1 && query.Fields[0].Expression == "*") { + query.Fields = make([]SelectField, len(defaultFields)) + for i, field := range defaultFields { + query.Fields[i] = SelectField{Expression: field} + } + } + + return query, nil +} + +// ============================================================================= +// MONGODB QUERY BUILDER +// ============================================================================= + +// MongoQueryBuilder builds MongoDB queries from dynamic filters +type MongoQueryBuilder struct { + allowedFields map[string]bool // Security: only allow specified fields + allowedCollections map[string]bool // Security: only allow specified collections + // Security settings + enableSecurityChecks bool + maxAllowedDocs int + // Query logging + enableQueryLogging bool + // Connection timeout settings + queryTimeout time.Duration +} + +// NewMongoQueryBuilder creates a new MongoDB query builder instance +func NewMongoQueryBuilder() *MongoQueryBuilder { + return &MongoQueryBuilder{ + allowedFields: make(map[string]bool), + allowedCollections: make(map[string]bool), + enableSecurityChecks: true, + maxAllowedDocs: 10000, + enableQueryLogging: true, + queryTimeout: 30 * time.Second, + } +} + +// SetSecurityOptions configures security settings +func (mqb *MongoQueryBuilder) SetSecurityOptions(enableChecks bool, maxDocs int) *MongoQueryBuilder { + mqb.enableSecurityChecks = enableChecks + mqb.maxAllowedDocs = maxDocs + return mqb +} + +// SetAllowedFields sets the list of allowed fields for security +func (mqb *MongoQueryBuilder) SetAllowedFields(fields []string) *MongoQueryBuilder { + mqb.allowedFields = make(map[string]bool) + 
for _, field := range fields { + mqb.allowedFields[field] = true + } + return mqb +} + +// SetAllowedCollections sets the list of allowed collections for security +func (mqb *MongoQueryBuilder) SetAllowedCollections(collections []string) *MongoQueryBuilder { + mqb.allowedCollections = make(map[string]bool) + for _, collection := range collections { + mqb.allowedCollections[collection] = true + } + return mqb +} + +// SetQueryLogging enables or disables query logging +func (mqb *MongoQueryBuilder) SetQueryLogging(enable bool) *MongoQueryBuilder { + mqb.enableQueryLogging = enable + return mqb +} + +// SetQueryTimeout sets the default query timeout +func (mqb *MongoQueryBuilder) SetQueryTimeout(timeout time.Duration) *MongoQueryBuilder { + mqb.queryTimeout = timeout + return mqb +} + +// BuildFindQuery builds a MongoDB find query from DynamicQuery +func (mqb *MongoQueryBuilder) BuildFindQuery(query DynamicQuery) (bson.M, *options.FindOptions, error) { + filter := bson.M{} + findOptions := options.Find() + + // Security check for limit + if mqb.enableSecurityChecks && query.Limit > mqb.maxAllowedDocs { + return nil, nil, fmt.Errorf("requested limit %d exceeds maximum allowed %d", query.Limit, mqb.maxAllowedDocs) + } + + // Security check for collection name + if mqb.enableSecurityChecks && len(mqb.allowedCollections) > 0 && !mqb.allowedCollections[query.From] { + return nil, nil, fmt.Errorf("disallowed collection: %s", query.From) + } + + // Build filter from DynamicQuery filters + if len(query.Filters) > 0 { + mongoFilter, err := mqb.buildFilter(query.Filters) + if err != nil { + return nil, nil, err + } + filter = mongoFilter + } + + // Set projection from fields + if len(query.Fields) > 0 { + projection := bson.M{} + for _, field := range query.Fields { + if field.Expression == "*" { + // Include all fields + continue + } + fieldName := field.Expression + if field.Alias != "" { + fieldName = field.Alias + } + if mqb.allowedFields != nil && 
!mqb.allowedFields[fieldName] { + return nil, nil, fmt.Errorf("disallowed field: %s", fieldName) + } + projection[fieldName] = 1 + } + if len(projection) > 0 { + findOptions.SetProjection(projection) + } + } + + // Set sort + if len(query.Sort) > 0 { + sort := bson.D{} + for _, sortField := range query.Sort { + fieldName := sortField.Column + if mqb.allowedFields != nil && !mqb.allowedFields[fieldName] { + return nil, nil, fmt.Errorf("disallowed field: %s", fieldName) + } + order := 1 // ASC + if strings.ToUpper(sortField.Order) == "DESC" { + order = -1 // DESC + } + sort = append(sort, bson.E{Key: fieldName, Value: order}) + } + findOptions.SetSort(sort) + } + + // Set limit and offset + if query.Limit > 0 { + findOptions.SetLimit(int64(query.Limit)) + } + if query.Offset > 0 { + findOptions.SetSkip(int64(query.Offset)) + } + + return filter, findOptions, nil +} + +// BuildAggregateQuery builds a MongoDB aggregation pipeline from DynamicQuery +func (mqb *MongoQueryBuilder) BuildAggregateQuery(query DynamicQuery) ([]bson.D, error) { + pipeline := []bson.D{} + + // Security check for collection name + if mqb.enableSecurityChecks && len(mqb.allowedCollections) > 0 && !mqb.allowedCollections[query.From] { + return nil, fmt.Errorf("disallowed collection: %s", query.From) + } + + // Handle CTEs as stages in the pipeline + if len(query.CTEs) > 0 { + for _, cte := range query.CTEs { + // Security check for CTE collection + if mqb.enableSecurityChecks && len(mqb.allowedCollections) > 0 && !mqb.allowedCollections[cte.Query.From] { + return nil, fmt.Errorf("disallowed collection in CTE: %s", cte.Query.From) + } + + subPipeline, err := mqb.BuildAggregateQuery(cte.Query) + if err != nil { + return nil, fmt.Errorf("failed to build CTE '%s': %w", cte.Name, err) + } + // Add $lookup stage for joins + if len(cte.Query.Joins) > 0 { + for _, join := range cte.Query.Joins { + // Security check for joined collection + if mqb.enableSecurityChecks && len(mqb.allowedCollections) > 0 && 
!mqb.allowedCollections[join.Table] { + return nil, fmt.Errorf("disallowed collection in join: %s", join.Table) + } + + lookupStage := bson.D{ + {Key: "$lookup", Value: bson.D{ + {Key: "from", Value: join.Table}, + {Key: "localField", Value: join.Alias}, + {Key: "foreignField", Value: "_id"}, + {Key: "as", Value: join.Alias}, + }}, + } + pipeline = append(pipeline, lookupStage) + } + } + // Add the sub-pipeline + pipeline = append(pipeline, subPipeline...) + } + } + + // Match stage for filters + if len(query.Filters) > 0 { + filter, err := mqb.buildFilter(query.Filters) + if err != nil { + return nil, err + } + pipeline = append(pipeline, bson.D{{Key: "$match", Value: filter}}) + } + + // Group stage for GROUP BY + if len(query.GroupBy) > 0 { + groupID := bson.D{} + for _, field := range query.GroupBy { + if mqb.allowedFields != nil && !mqb.allowedFields[field] { + return nil, fmt.Errorf("disallowed field: %s", field) + } + groupID = append(groupID, bson.E{Key: field, Value: "$" + field}) + } + + groupStage := bson.D{ + {Key: "$group", Value: bson.D{ + {Key: "_id", Value: groupID}, + }}, + } + + // Add any aggregations from fields + for _, field := range query.Fields { + if strings.Contains(field.Expression, "(") && strings.Contains(field.Expression, ")") { + // This is an aggregation function + funcName := strings.Split(field.Expression, "(")[0] + funcField := strings.TrimSuffix(strings.Split(field.Expression, "(")[1], ")") + + if mqb.allowedFields != nil && !mqb.allowedFields[funcField] { + return nil, fmt.Errorf("disallowed field: %s", funcField) + } + + switch strings.ToLower(funcName) { + case "count": + groupStage = append(groupStage, bson.E{ + Key: field.Alias, Value: bson.D{{Key: "$sum", Value: 1}}, + }) + case "sum": + groupStage = append(groupStage, bson.E{ + Key: field.Alias, Value: bson.D{{Key: "$sum", Value: "$" + funcField}}, + }) + case "avg": + groupStage = append(groupStage, bson.E{ + Key: field.Alias, Value: bson.D{{Key: "$avg", Value: "$" + 
funcField}}, + }) + case "min": + groupStage = append(groupStage, bson.E{ + Key: field.Alias, Value: bson.D{{Key: "$min", Value: "$" + funcField}}, + }) + case "max": + groupStage = append(groupStage, bson.E{ + Key: field.Alias, Value: bson.D{{Key: "$max", Value: "$" + funcField}}, + }) + } + } + } + + pipeline = append(pipeline, groupStage) + } + + // Sort stage + if len(query.Sort) > 0 { + sort := bson.D{} + for _, sortField := range query.Sort { + fieldName := sortField.Column + if mqb.allowedFields != nil && !mqb.allowedFields[fieldName] { + return nil, fmt.Errorf("disallowed field: %s", fieldName) + } + order := 1 // ASC + if strings.ToUpper(sortField.Order) == "DESC" { + order = -1 // DESC + } + sort = append(sort, bson.E{Key: fieldName, Value: order}) + } + pipeline = append(pipeline, bson.D{{Key: "$sort", Value: sort}}) + } + + // Skip and limit stages + if query.Offset > 0 { + pipeline = append(pipeline, bson.D{{Key: "$skip", Value: query.Offset}}) + } + if query.Limit > 0 { + pipeline = append(pipeline, bson.D{{Key: "$limit", Value: query.Limit}}) + } + + return pipeline, nil +} + +// buildFilter builds a MongoDB filter from FilterGroups +func (mqb *MongoQueryBuilder) buildFilter(filterGroups []FilterGroup) (bson.M, error) { + if len(filterGroups) == 0 { + return bson.M{}, nil + } + + var result bson.M + var err error + + for i, group := range filterGroups { + if len(group.Filters) == 0 { + continue + } + + groupFilter, err := mqb.buildFilterGroup(group) + if err != nil { + return nil, err + } + + if i == 0 { + result = groupFilter + } else { + logicOp := "$and" + if group.LogicOp != "" { + switch strings.ToUpper(group.LogicOp) { + case "OR": + logicOp = "$or" + } + } + result = bson.M{logicOp: []bson.M{result, groupFilter}} + } + } + + return result, err +} + +// buildFilterGroup builds a filter for a single filter group +func (mqb *MongoQueryBuilder) buildFilterGroup(group FilterGroup) (bson.M, error) { + var filters []bson.M + logicOp := "$and" + if 
group.LogicOp != "" { + switch strings.ToUpper(group.LogicOp) { + case "OR": + logicOp = "$or" + } + } + + for _, filter := range group.Filters { + fieldFilter, err := mqb.buildFilterCondition(filter) + if err != nil { + return nil, err + } + filters = append(filters, fieldFilter) + } + + if len(filters) == 1 { + return filters[0], nil + } + return bson.M{logicOp: filters}, nil +} + +// buildFilterCondition builds a single filter condition for MongoDB +func (mqb *MongoQueryBuilder) buildFilterCondition(filter DynamicFilter) (bson.M, error) { + field := filter.Column + if mqb.allowedFields != nil && !mqb.allowedFields[field] { + return nil, fmt.Errorf("disallowed field: %s", field) + } + + switch filter.Operator { + case OpEqual: + return bson.M{field: filter.Value}, nil + case OpNotEqual: + return bson.M{field: bson.M{"$ne": filter.Value}}, nil + case OpIn: + values := mqb.parseArrayValue(filter.Value) + return bson.M{field: bson.M{"$in": values}}, nil + case OpNotIn: + values := mqb.parseArrayValue(filter.Value) + return bson.M{field: bson.M{"$nin": values}}, nil + case OpGreaterThan: + return bson.M{field: bson.M{"$gt": filter.Value}}, nil + case OpGreaterThanEqual: + return bson.M{field: bson.M{"$gte": filter.Value}}, nil + case OpLessThan: + return bson.M{field: bson.M{"$lt": filter.Value}}, nil + case OpLessThanEqual: + return bson.M{field: bson.M{"$lte": filter.Value}}, nil + case OpLike: + // Convert SQL LIKE to MongoDB regex + pattern := filter.Value.(string) + pattern = strings.ReplaceAll(pattern, "%", ".*") + pattern = strings.ReplaceAll(pattern, "_", ".") + return bson.M{field: bson.M{"$regex": pattern, "$options": "i"}}, nil + case OpILike: + // Case-insensitive like + pattern := filter.Value.(string) + pattern = strings.ReplaceAll(pattern, "%", ".*") + pattern = strings.ReplaceAll(pattern, "_", ".") + return bson.M{field: bson.M{"$regex": pattern, "$options": "i"}}, nil + case OpContains: + // Contains substring + pattern := filter.Value.(string) + 
return bson.M{field: bson.M{"$regex": pattern, "$options": "i"}}, nil + case OpNotContains: + // Does not contain substring + pattern := filter.Value.(string) + return bson.M{field: bson.M{"$not": bson.M{"$regex": pattern, "$options": "i"}}}, nil + case OpStartsWith: + // Starts with + pattern := filter.Value.(string) + return bson.M{field: bson.M{"$regex": "^" + pattern, "$options": "i"}}, nil + case OpEndsWith: + // Ends with + pattern := filter.Value.(string) + return bson.M{field: bson.M{"$regex": pattern + "$", "$options": "i"}}, nil + case OpNull: + return bson.M{field: bson.M{"$exists": false}}, nil + case OpNotNull: + return bson.M{field: bson.M{"$exists": true}}, nil + case OpJsonContains: + // JSON contains + return bson.M{field: bson.M{"$elemMatch": filter.Value}}, nil + case OpJsonNotContains: + // JSON does not contain + return bson.M{field: bson.M{"$not": bson.M{"$elemMatch": filter.Value}}}, nil + case OpJsonExists: + // JSON path exists + return bson.M{field + "." + filter.Options["path"].(string): bson.M{"$exists": true}}, nil + case OpJsonNotExists: + // JSON path does not exist + return bson.M{field + "." 
+ filter.Options["path"].(string): bson.M{"$exists": false}}, nil + case OpArrayContains: + // Array contains + return bson.M{field: bson.M{"$elemMatch": bson.M{"$eq": filter.Value}}}, nil + case OpArrayNotContains: + // Array does not contain + return bson.M{field: bson.M{"$not": bson.M{"$elemMatch": bson.M{"$eq": filter.Value}}}}, nil + case OpArrayLength: + // Array length + if lengthOption, ok := filter.Options["length"].(int); ok { + return bson.M{field: bson.M{"$size": lengthOption}}, nil + } + return nil, fmt.Errorf("array_length operator requires 'length' option") + default: + return nil, fmt.Errorf("unsupported operator: %s", filter.Operator) + } +} + +// parseArrayValue parses an array value for MongoDB +func (mqb *MongoQueryBuilder) parseArrayValue(value interface{}) []interface{} { + if value == nil { + return nil + } + if reflect.TypeOf(value).Kind() == reflect.Slice { + v := reflect.ValueOf(value) + result := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + result[i] = v.Index(i).Interface() + } + return result + } + if str, ok := value.(string); ok { + if strings.Contains(str, ",") { + parts := strings.Split(str, ",") + result := make([]interface{}, len(parts)) + for i, part := range parts { + result[i] = strings.TrimSpace(part) + } + return result + } + return []interface{}{str} + } + return []interface{}{value} +} + +// ExecuteFind executes a MongoDB find query +func (mqb *MongoQueryBuilder) ExecuteFind(ctx context.Context, collection *mongo.Collection, query DynamicQuery, dest interface{}) error { + // Security check for collection name + if mqb.enableSecurityChecks && len(mqb.allowedCollections) > 0 && !mqb.allowedCollections[collection.Name()] { + return fmt.Errorf("disallowed collection: %s", collection.Name()) + } + + filter, findOptions, err := mqb.BuildFindQuery(query) + if err != nil { + return err + } + + // Set timeout if not already in context + if _, hasDeadline := ctx.Deadline(); !hasDeadline && mqb.queryTimeout > 0 { + 
var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, mqb.queryTimeout) + defer cancel() + } + + start := time.Now() + cursor, err := collection.Find(ctx, filter, findOptions) + if err != nil { + return err + } + defer cursor.Close(ctx) + err = cursor.All(ctx, dest) + if mqb.enableQueryLogging { + fmt.Printf("[DEBUG] MongoDB Find executed in %v\n", time.Since(start)) + } + return err +} + +// ExecuteAggregate executes a MongoDB aggregation pipeline +func (mqb *MongoQueryBuilder) ExecuteAggregate(ctx context.Context, collection *mongo.Collection, query DynamicQuery, dest interface{}) error { + // Security check for collection name + if mqb.enableSecurityChecks && len(mqb.allowedCollections) > 0 && !mqb.allowedCollections[collection.Name()] { + return fmt.Errorf("disallowed collection: %s", collection.Name()) + } + + pipeline, err := mqb.BuildAggregateQuery(query) + if err != nil { + return err + } + + // Set timeout if not already in context + if _, hasDeadline := ctx.Deadline(); !hasDeadline && mqb.queryTimeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, mqb.queryTimeout) + defer cancel() + } + + start := time.Now() + cursor, err := collection.Aggregate(ctx, pipeline) + if err != nil { + return err + } + defer cursor.Close(ctx) + err = cursor.All(ctx, dest) + if mqb.enableQueryLogging { + fmt.Printf("[DEBUG] MongoDB Aggregate executed in %v\n", time.Since(start)) + } + return err +} + +// ExecuteCount executes a MongoDB count query +func (mqb *MongoQueryBuilder) ExecuteCount(ctx context.Context, collection *mongo.Collection, query DynamicQuery) (int64, error) { + // Security check for collection name + if mqb.enableSecurityChecks && len(mqb.allowedCollections) > 0 && !mqb.allowedCollections[collection.Name()] { + return 0, fmt.Errorf("disallowed collection: %s", collection.Name()) + } + + filter, _, err := mqb.BuildFindQuery(query) + if err != nil { + return 0, err + } + + // Set timeout if not already in context 
+ if _, hasDeadline := ctx.Deadline(); !hasDeadline && mqb.queryTimeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, mqb.queryTimeout) + defer cancel() + } + + start := time.Now() + count, err := collection.CountDocuments(ctx, filter) + if mqb.enableQueryLogging { + fmt.Printf("[DEBUG] MongoDB Count executed in %v\n", time.Since(start)) + } + return count, err +} + +// ExecuteInsert executes a MongoDB insert operation +func (mqb *MongoQueryBuilder) ExecuteInsert(ctx context.Context, collection *mongo.Collection, data InsertData) (*mongo.InsertOneResult, error) { + // Security check for collection name + if mqb.enableSecurityChecks && len(mqb.allowedCollections) > 0 && !mqb.allowedCollections[collection.Name()] { + return nil, fmt.Errorf("disallowed collection: %s", collection.Name()) + } + + document := bson.M{} + for i, col := range data.Columns { + if mqb.allowedFields != nil && !mqb.allowedFields[col] { + return nil, fmt.Errorf("disallowed field: %s", col) + } + document[col] = data.Values[i] + } + + // Handle JSON values + for col, val := range data.JsonValues { + if mqb.allowedFields != nil && !mqb.allowedFields[col] { + return nil, fmt.Errorf("disallowed field: %s", col) + } + document[col] = val + } + + // Set timeout if not already in context + if _, hasDeadline := ctx.Deadline(); !hasDeadline && mqb.queryTimeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, mqb.queryTimeout) + defer cancel() + } + + start := time.Now() + result, err := collection.InsertOne(ctx, document) + if mqb.enableQueryLogging { + fmt.Printf("[DEBUG] MongoDB Insert executed in %v\n", time.Since(start)) + } + return result, err +} + +// ExecuteUpdate executes a MongoDB update operation +func (mqb *MongoQueryBuilder) ExecuteUpdate(ctx context.Context, collection *mongo.Collection, updateData UpdateData, filters []FilterGroup) (*mongo.UpdateResult, error) { + // Security check for collection name + if 
mqb.enableSecurityChecks && len(mqb.allowedCollections) > 0 && !mqb.allowedCollections[collection.Name()] { + return nil, fmt.Errorf("disallowed collection: %s", collection.Name()) + } + + filter, err := mqb.buildFilter(filters) + if err != nil { + return nil, err + } + + update := bson.M{"$set": bson.M{}} + for i, col := range updateData.Columns { + if mqb.allowedFields != nil && !mqb.allowedFields[col] { + return nil, fmt.Errorf("disallowed field: %s", col) + } + update["$set"].(bson.M)[col] = updateData.Values[i] + } + + // Handle JSON updates + for col, jsonUpdate := range updateData.JsonUpdates { + if mqb.allowedFields != nil && !mqb.allowedFields[col] { + return nil, fmt.Errorf("disallowed field: %s", col) + } + // Use dot notation for nested JSON updates + update["$set"].(bson.M)[col+"."+jsonUpdate.Path] = jsonUpdate.Value + } + + // Set timeout if not already in context + if _, hasDeadline := ctx.Deadline(); !hasDeadline && mqb.queryTimeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, mqb.queryTimeout) + defer cancel() + } + + start := time.Now() + result, err := collection.UpdateMany(ctx, filter, update) + if mqb.enableQueryLogging { + fmt.Printf("[DEBUG] MongoDB Update executed in %v\n", time.Since(start)) + } + return result, err +} + +// ExecuteDelete executes a MongoDB delete operation +func (mqb *MongoQueryBuilder) ExecuteDelete(ctx context.Context, collection *mongo.Collection, filters []FilterGroup) (*mongo.DeleteResult, error) { + // Security check for collection name + if mqb.enableSecurityChecks && len(mqb.allowedCollections) > 0 && !mqb.allowedCollections[collection.Name()] { + return nil, fmt.Errorf("disallowed collection: %s", collection.Name()) + } + + filter, err := mqb.buildFilter(filters) + if err != nil { + return nil, err + } + + // Set timeout if not already in context + if _, hasDeadline := ctx.Deadline(); !hasDeadline && mqb.queryTimeout > 0 { + var cancel context.CancelFunc + ctx, cancel = 
context.WithTimeout(ctx, mqb.queryTimeout) + defer cancel() + } + + start := time.Now() + result, err := collection.DeleteMany(ctx, filter) + if mqb.enableQueryLogging { + fmt.Printf("[DEBUG] MongoDB Delete executed in %v\n", time.Since(start)) + } + return result, err +} diff --git a/internal/utils/query/exemple.go.text b/internal/utils/query/exemple.go.text new file mode 100644 index 0000000..a36651a --- /dev/null +++ b/internal/utils/query/exemple.go.text @@ -0,0 +1,918 @@ +package main + +import ( + "context" + "database/sql" + "fmt" + "log" + "time" + + "github.com/jmoiron/sqlx" + _ "github.com/lib/pq" // PostgreSQL driver + "yourpackage/utils" +) + +func main() { + // Inisialisasi koneksi database + db, err := sqlx.Connect("postgres", "user=postgres dbname=testdb sslmode=disable") + if err != nil { + log.Fatalf("Failed to connect to database: %v", err) + } + defer db.Close() + + // Inisialisasi QueryBuilder + qb := utils.NewQueryBuilder(utils.DBTypePostgreSQL) + + // Contoh penggunaan + simpleQueryExample(db, qb) + complexQueryExample(db, qb) + nestedJoinExample(db, qb) + multiJoinExample(db, qb) + commonQueriesExample(db, qb) + jsonQueryExample(db, qb) + windowFunctionExample(db, qb) + cteExample(db, qb) + unionExample(db, qb) + aggregateExample(db, qb) +} + +func simpleQueryExample(db *sqlx.DB, qb *utils.QueryBuilder) { + fmt.Println("\n=== Simple Query Example ===") + + // Query sederhana dengan filter + query := utils.DynamicQuery{ + From: "users", + Fields: []utils.SelectField{ + {Expression: "id", Alias: "user_id"}, + {Expression: "name", Alias: "user_name"}, + {Expression: "email"}, + }, + Filters: []utils.FilterGroup{ + { + Filters: []utils.DynamicFilter{ + {Column: "status", Operator: utils.OpEqual, Value: "active"}, + {Column: "created_at", Operator: utils.OpGreaterThanEqual, Value: time.Now().AddDate(0, -1, 0)}, + }, + LogicOp: "AND", + }, + }, + Sort: []utils.SortField{ + {Column: "name", Order: "ASC"}, + }, + Limit: 10, + } + + var results 
[]map[string]interface{} + err := qb.ExecuteQuery(context.Background(), db, query, &results) + if err != nil { + log.Printf("Error executing simple query: %v", err) + return + } + + fmt.Printf("Found %d users\n", len(results)) + for _, user := range results { + fmt.Printf("User: %+v\n", user) + } +} + +func complexQueryExample(db *sqlx.DB, qb *utils.QueryBuilder) { + fmt.Println("\n=== Complex Query Example ===") + + // Query dengan nested filter dan berbagai operator + query := utils.DynamicQuery{ + From: "orders", + Fields: []utils.SelectField{ + {Expression: "id", Alias: "order_id"}, + {Expression: "customer_id"}, + {Expression: "total_amount"}, + {Expression: "order_date"}, + {Expression: "status"}, + }, + Filters: []utils.FilterGroup{ + { + Filters: []utils.DynamicFilter{ + {Column: "status", Operator: utils.OpIn, Value: []string{"completed", "processing"}}, + {Column: "total_amount", Operator: utils.OpGreaterThan, Value: 1000}, + }, + LogicOp: "AND", + }, + { + Filters: []utils.DynamicFilter{ + {Column: "order_date", Operator: utils.OpBetween, Value: []interface{}{time.Now().AddDate(0, -3, 0), time.Now()}}, + {Column: "customer_id", Operator: utils.OpNotIn, Value: []int{1, 2, 3}}, + }, + LogicOp: "OR", + }, + }, + Sort: []utils.SortField{ + {Column: "order_date", Order: "DESC"}, + {Column: "total_amount", Order: "DESC"}, + }, + Limit: 20, + Offset: 10, + } + + var results []map[string]interface{} + err := qb.ExecuteQuery(context.Background(), db, query, &results) + if err != nil { + log.Printf("Error executing complex query: %v", err) + return + } + + fmt.Printf("Found %d orders\n", len(results)) + for _, order := range results { + fmt.Printf("Order: %+v\n", order) + } +} + +func nestedJoinExample(db *sqlx.DB, qb *utils.QueryBuilder) { + fmt.Println("\n=== Nested Join Example ===") + + // Query dengan nested join + query := utils.DynamicQuery{ + From: "customers", + Fields: []utils.SelectField{ + {Expression: "customers.id", Alias: "customer_id"}, + 
{Expression: "customers.name", Alias: "customer_name"}, + {Expression: "orders.id", Alias: "order_id"}, + {Expression: "orders.total_amount"}, + {Expression: "order_items.product_id"}, + {Expression: "order_items.quantity"}, + {Expression: "products.name", Alias: "product_name"}, + }, + Joins: []utils.Join{ + { + Type: "LEFT", + Table: "orders", + Alias: "orders", + OnConditions: utils.FilterGroup{ + Filters: []utils.DynamicFilter{ + {Column: "customers.id", Operator: utils.OpEqual, Value: "orders.customer_id"}, + }, + }, + }, + { + Type: "LEFT", + Table: "order_items", + Alias: "order_items", + OnConditions: utils.FilterGroup{ + Filters: []utils.DynamicFilter{ + {Column: "orders.id", Operator: utils.OpEqual, Value: "order_items.order_id"}, + }, + }, + }, + { + Type: "LEFT", + Table: "products", + Alias: "products", + OnConditions: utils.FilterGroup{ + Filters: []utils.DynamicFilter{ + {Column: "order_items.product_id", Operator: utils.OpEqual, Value: "products.id"}, + }, + }, + }, + }, + Filters: []utils.FilterGroup{ + { + Filters: []utils.DynamicFilter{ + {Column: "customers.status", Operator: utils.OpEqual, Value: "active"}, + {Column: "orders.status", Operator: utils.OpEqual, Value: "completed"}, + }, + LogicOp: "AND", + }, + }, + Sort: []utils.SortField{ + {Column: "customers.name", Order: "ASC"}, + {Column: "orders.id", Order: "DESC"}, + }, + Limit: 50, + } + + var results []map[string]interface{} + err := qb.ExecuteQuery(context.Background(), db, query, &results) + if err != nil { + log.Printf("Error executing nested join query: %v", err) + return + } + + fmt.Printf("Found %d customer-order-product records\n", len(results)) + for _, record := range results { + fmt.Printf("Record: %+v\n", record) + } +} + +func multiJoinExample(db *sqlx.DB, qb *utils.QueryBuilder) { + fmt.Println("\n=== Multi Join Example ===") + + // Query dengan multiple join types + query := utils.DynamicQuery{ + From: "employees", + Fields: []utils.SelectField{ + {Expression: 
"employees.id", Alias: "employee_id"}, + {Expression: "employees.name", Alias: "employee_name"}, + {Expression: "departments.name", Alias: "department_name"}, + {Expression: "projects.name", Alias: "project_name"}, + {Expression: "tasks.title", Alias: "task_title"}, + {Expression: "task_assignments.assigned_date"}, + }, + Joins: []utils.Join{ + { + Type: "INNER", + Table: "departments", + Alias: "departments", + OnConditions: utils.FilterGroup{ + Filters: []utils.DynamicFilter{ + {Column: "employees.department_id", Operator: utils.OpEqual, Value: "departments.id"}, + }, + }, + }, + { + Type: "LEFT", + Table: "task_assignments", + Alias: "task_assignments", + OnConditions: utils.FilterGroup{ + Filters: []utils.DynamicFilter{ + {Column: "employees.id", Operator: utils.OpEqual, Value: "task_assignments.employee_id"}, + }, + }, + }, + { + Type: "LEFT", + Table: "tasks", + Alias: "tasks", + OnConditions: utils.FilterGroup{ + Filters: []utils.DynamicFilter{ + {Column: "task_assignments.task_id", Operator: utils.OpEqual, Value: "tasks.id"}, + }, + }, + }, + { + Type: "LEFT", + Table: "projects", + Alias: "projects", + OnConditions: utils.FilterGroup{ + Filters: []utils.DynamicFilter{ + {Column: "tasks.project_id", Operator: utils.OpEqual, Value: "projects.id"}, + }, + }, + }, + }, + Filters: []utils.FilterGroup{ + { + Filters: []utils.DynamicFilter{ + {Column: "employees.status", Operator: utils.OpEqual, Value: "active"}, + {Column: "departments.status", Operator: utils.OpEqual, Value: "active"}, + }, + LogicOp: "AND", + }, + }, + Sort: []utils.SortField{ + {Column: "departments.name", Order: "ASC"}, + {Column: "employees.name", Order: "ASC"}, + {Column: "task_assignments.assigned_date", Order: "DESC"}, + }, + Limit: 100, + } + + var results []map[string]interface{} + err := qb.ExecuteQuery(context.Background(), db, query, &results) + if err != nil { + log.Printf("Error executing multi join query: %v", err) + return + } + + fmt.Printf("Found %d employee-task-project 
records\n", len(results)) + for _, record := range results { + fmt.Printf("Record: %+v\n", record) + } +} + +func commonQueriesExample(db *sqlx.DB, qb *utils.QueryBuilder) { + fmt.Println("\n=== Common Queries Example ===") + + // 1. Query dengan LIKE/ILIKE + likeQuery := utils.DynamicQuery{ + From: "products", + Fields: []utils.SelectField{ + {Expression: "id"}, + {Expression: "name"}, + {Expression: "price"}, + {Expression: "category"}, + }, + Filters: []utils.FilterGroup{ + { + Filters: []utils.DynamicFilter{ + {Column: "name", Operator: utils.OpILike, Value: "%laptop%"}, + {Column: "category", Operator: utils.OpEqual, Value: "electronics"}, + }, + LogicOp: "AND", + }, + }, + Sort: []utils.SortField{ + {Column: "price", Order: "ASC"}, + }, + Limit: 10, + } + + var products []map[string]interface{} + err := qb.ExecuteQuery(context.Background(), db, likeQuery, &products) + if err != nil { + log.Printf("Error executing LIKE query: %v", err) + } else { + fmt.Printf("Found %d products matching 'laptop'\n", len(products)) + } + + // 2. Query dengan pagination + page := 2 + pageSize := 20 + paginationQuery := utils.DynamicQuery{ + From: "orders", + Fields: []utils.SelectField{ + {Expression: "id"}, + {Expression: "customer_id"}, + {Expression: "total_amount"}, + {Expression: "order_date"}, + }, + Filters: []utils.FilterGroup{ + { + Filters: []utils.DynamicFilter{ + {Column: "status", Operator: utils.OpEqual, Value: "completed"}, + }, + }, + }, + Sort: []utils.SortField{ + {Column: "order_date", Order: "DESC"}, + }, + Limit: pageSize, + Offset: (page - 1) * pageSize, + } + + var orders []map[string]interface{} + err = qb.ExecuteQuery(context.Background(), db, paginationQuery, &orders) + if err != nil { + log.Printf("Error executing pagination query: %v", err) + } else { + fmt.Printf("Found %d orders on page %d\n", len(orders), page) + } + + // 3. 
Query dengan NULL/NOT NULL + nullQuery := utils.DynamicQuery{ + From: "customers", + Fields: []utils.SelectField{ + {Expression: "id"}, + {Expression: "name"}, + {Expression: "email"}, + {Expression: "phone"}, + }, + Filters: []utils.FilterGroup{ + { + Filters: []utils.DynamicFilter{ + {Column: "email", Operator: utils.OpNotNull}, + {Column: "phone", Operator: utils.OpNull}, + }, + LogicOp: "AND", + }, + }, + Limit: 10, + } + + var customers []map[string]interface{} + err = qb.ExecuteQuery(context.Background(), db, nullQuery, &customers) + if err != nil { + log.Printf("Error executing NULL query: %v", err) + } else { + fmt.Printf("Found %d customers with email but no phone\n", len(customers)) + } + + // 4. Query dengan BETWEEN + betweenQuery := utils.DynamicQuery{ + From: "transactions", + Fields: []utils.SelectField{ + {Expression: "id"}, + {Expression: "account_id"}, + {Expression: "amount"}, + {Expression: "transaction_date"}, + }, + Filters: []utils.FilterGroup{ + { + Filters: []utils.DynamicFilter{ + {Column: "amount", Operator: utils.OpBetween, Value: []interface{}{100, 1000}}, + {Column: "transaction_date", Operator: utils.OpBetween, Value: []interface{}{time.Now().AddDate(0, -1, 0), time.Now()}}, + }, + LogicOp: "AND", + }, + }, + Sort: []utils.SortField{ + {Column: "transaction_date", Order: "DESC"}, + }, + Limit: 20, + } + + var transactions []map[string]interface{} + err = qb.ExecuteQuery(context.Background(), db, betweenQuery, &transactions) + if err != nil { + log.Printf("Error executing BETWEEN query: %v", err) + } else { + fmt.Printf("Found %d transactions between $100 and $1000 in the last month\n", len(transactions)) + } +} + +func jsonQueryExample(db *sqlx.DB, qb *utils.QueryBuilder) { + fmt.Println("\n=== JSON Query Example ===") + + // Query dengan operasi JSON + query := utils.DynamicQuery{ + From: "products", + Fields: []utils.SelectField{ + {Expression: "id"}, + {Expression: "name"}, + {Expression: "price"}, + {Expression: "attributes"}, + }, 
+ JsonOperations: []utils.JsonOperation{ + { + Type: "extract", + Column: "attributes", + Path: "$.color", + Alias: "color", + }, + { + Type: "extract", + Column: "attributes", + Path: "$.size", + Alias: "size", + }, + }, + Filters: []utils.FilterGroup{ + { + Filters: []utils.DynamicFilter{ + { + Column: "attributes", + Operator: utils.OpJsonContains, + Value: map[string]interface{}{"category": "electronics"}, + Options: map[string]interface{}{"path": "$"}, + }, + }, + }, + }, + Sort: []utils.SortField{ + {Column: "name", Order: "ASC"}, + }, + Limit: 10, + } + + var results []map[string]interface{} + err := qb.ExecuteQuery(context.Background(), db, query, &results) + if err != nil { + log.Printf("Error executing JSON query: %v", err) + return + } + + fmt.Printf("Found %d products with JSON attributes\n", len(results)) + for _, product := range results { + fmt.Printf("Product: %+v\n", product) + } +} + +func windowFunctionExample(db *sqlx.DB, qb *utils.QueryBuilder) { + fmt.Println("\n=== Window Function Example ===") + + // Query dengan window functions + query := utils.DynamicQuery{ + From: "sales", + Fields: []utils.SelectField{ + {Expression: "id"}, + {Expression: "salesperson_id"}, + {Expression: "amount"}, + {Expression: "sale_date"}, + }, + WindowFunctions: []utils.WindowFunction{ + { + Function: "ROW_NUMBER", + Over: "salesperson_id", + OrderBy: "amount DESC", + Alias: "sales_rank", + }, + { + Function: "SUM", + Over: "salesperson_id", + OrderBy: "sale_date", + Frame: "ROWS UNBOUNDED PRECEDING", + Alias: "running_total", + }, + }, + Filters: []utils.FilterGroup{ + { + Filters: []utils.DynamicFilter{ + {Column: "sale_date", Operator: utils.OpGreaterThanEqual, Value: time.Now().AddDate(0, -6, 0)}, + }, + }, + }, + Sort: []utils.SortField{ + {Column: "salesperson_id", Order: "ASC"}, + {Column: "amount", Order: "DESC"}, + }, + Limit: 50, + } + + var results []map[string]interface{} + err := qb.ExecuteQuery(context.Background(), db, query, &results) + if err != 
nil { + log.Printf("Error executing window function query: %v", err) + return + } + + fmt.Printf("Found %d sales records with window functions\n", len(results)) + for _, sale := range results { + fmt.Printf("Sale: %+v\n", sale) + } +} + +func cteExample(db *sqlx.DB, qb *utils.QueryBuilder) { + fmt.Println("\n=== CTE Example ===") + + // Query dengan CTE + query := utils.DynamicQuery{ + CTEs: []utils.CTE{ + { + Name: "monthly_sales", + Query: utils.DynamicQuery{ + Fields: []utils.SelectField{ + {Expression: "salesperson_id"}, + {Expression: "EXTRACT(MONTH FROM sale_date) AS month"}, + {Expression: "SUM(amount) AS total"}, + }, + From: "sales", + Filters: []utils.FilterGroup{ + { + Filters: []utils.DynamicFilter{ + {Column: "sale_date", Operator: utils.OpGreaterThanEqual, Value: time.Now().AddDate(-1, 0, 0)}, + }, + }, + }, + GroupBy: []string{"salesperson_id", "EXTRACT(MONTH FROM sale_date)"}, + }, + }, + { + Name: "top_salespeople", + Query: utils.DynamicQuery{ + Fields: []utils.SelectField{ + {Expression: "salesperson_id"}, + {Expression: "SUM(total) AS yearly_total"}, + }, + From: "monthly_sales", + GroupBy: []string{"salesperson_id"}, + Having: []utils.FilterGroup{ + { + Filters: []utils.DynamicFilter{ + {Column: "SUM(total)", Operator: utils.OpGreaterThan, Value: 10000}, + }, + }, + }, + }, + }, + }, + Fields: []utils.SelectField{ + {Expression: "salespeople.id"}, + {Expression: "salespeople.name"}, + {Expression: "top_salespeople.yearly_total"}, + }, + From: "salespeople", + Joins: []utils.Join{ + { + Type: "INNER", + Table: "top_salespeople", + Alias: "top_salespeople", + OnConditions: utils.FilterGroup{ + Filters: []utils.DynamicFilter{ + {Column: "salespeople.id", Operator: utils.OpEqual, Value: "top_salespeople.salesperson_id"}, + }, + }, + }, + }, + Sort: []utils.SortField{ + {Column: "top_salespeople.yearly_total", Order: "DESC"}, + }, + Limit: 10, + } + + var results []map[string]interface{} + err := qb.ExecuteQuery(context.Background(), db, query, 
&results) + if err != nil { + log.Printf("Error executing CTE query: %v", err) + return + } + + fmt.Printf("Found %d top salespeople\n", len(results)) + for _, salesperson := range results { + fmt.Printf("Salesperson: %+v\n", salesperson) + } +} + +func unionExample(db *sqlx.DB, qb *utils.QueryBuilder) { + fmt.Println("\n=== UNION Example ===") + + // Query dengan UNION + query := utils.DynamicQuery{ + Fields: []utils.SelectField{ + {Expression: "id"}, + {Expression: "name"}, + {Expression: "email"}, + {Expression: "'customer' AS user_type"}, + }, + From: "customers", + Filters: []utils.FilterGroup{ + { + Filters: []utils.DynamicFilter{ + {Column: "status", Operator: utils.OpEqual, Value: "active"}, + }, + }, + }, + Unions: []utils.Union{ + { + Type: "UNION ALL", + Query: utils.DynamicQuery{ + Fields: []utils.SelectField{ + {Expression: "id"}, + {Expression: "name"}, + {Expression: "email"}, + {Expression: "'employee' AS user_type"}, + }, + From: "employees", + Filters: []utils.FilterGroup{ + { + Filters: []utils.DynamicFilter{ + {Column: "status", Operator: utils.OpEqual, Value: "active"}, + }, + }, + }, + }, + }, + }, + Sort: []utils.SortField{ + {Column: "name", Order: "ASC"}, + }, + Limit: 20, + } + + var results []map[string]interface{} + err := qb.ExecuteQuery(context.Background(), db, query, &results) + if err != nil { + log.Printf("Error executing UNION query: %v", err) + return + } + + fmt.Printf("Found %d users (customers + employees)\n", len(results)) + for _, user := range results { + fmt.Printf("User: %+v\n", user) + } +} + +func aggregateExample(db *sqlx.DB, qb *utils.QueryBuilder) { + fmt.Println("\n=== Aggregate Example ===") + + // Query dengan fungsi agregasi + query := utils.DynamicQuery{ + Fields: []utils.SelectField{ + {Expression: "category"}, + {Expression: "COUNT(*) AS product_count"}, + {Expression: "AVG(price) AS avg_price"}, + {Expression: "MIN(price) AS min_price"}, + {Expression: "MAX(price) AS max_price"}, + {Expression: 
"SUM(stock_quantity) AS total_stock"}, + }, + From: "products", + Filters: []utils.FilterGroup{ + { + Filters: []utils.DynamicFilter{ + {Column: "status", Operator: utils.OpEqual, Value: "active"}, + }, + }, + }, + GroupBy: []string{"category"}, + Having: []utils.FilterGroup{ + { + Filters: []utils.DynamicFilter{ + {Column: "COUNT(*)", Operator: utils.OpGreaterThan, Value: 5}, + }, + }, + }, + Sort: []utils.SortField{ + {Column: "product_count", Order: "DESC"}, + }, + Limit: 10, + } + + var results []map[string]interface{} + err := qb.ExecuteQuery(context.Background(), db, query, &results) + if err != nil { + log.Printf("Error executing aggregate query: %v", err) + return + } + + fmt.Printf("Found %d product categories\n", len(results)) + for _, category := range results { + fmt.Printf("Category: %+v\n", category) + } +} + +func crudOperationsExample(db *sqlx.DB, qb *utils.QueryBuilder) { + fmt.Println("\n=== CRUD Operations Example ===") + + // INSERT + insertData := utils.InsertData{ + Columns: []string{"name", "email", "status", "created_at"}, + Values: []interface{}{"John Doe", "john@example.com", "active", time.Now()}, + JsonValues: map[string]interface{}{ + "preferences": map[string]interface{}{ + "theme": "dark", + "language": "en", + }, + }, + } + + result, err := qb.ExecuteInsert(context.Background(), db, "customers", insertData, "id") + if err != nil { + log.Printf("Error executing INSERT: %v", err) + return + } + + id, err := result.LastInsertId() + if err != nil { + log.Printf("Error getting inserted ID: %v", err) + return + } + + fmt.Printf("Inserted customer with ID: %d\n", id) + + // UPDATE + updateData := utils.UpdateData{ + Columns: []string{"name", "status"}, + Values: []interface{}{"John Smith", "inactive"}, + JsonUpdates: map[string]utils.JsonUpdate{ + "preferences": { + Path: "$.theme", + Value: "light", + }, + }, + } + + filters := []utils.FilterGroup{ + { + Filters: []utils.DynamicFilter{ + {Column: "id", Operator: utils.OpEqual, Value: id}, 
+ }, + }, + } + + result, err = qb.ExecuteUpdate(context.Background(), db, "customers", updateData, filters, "updated_at") + if err != nil { + log.Printf("Error executing UPDATE: %v", err) + return + } + + rowsAffected, err := result.RowsAffected() + if err != nil { + log.Printf("Error getting rows affected: %v", err) + return + } + + fmt.Printf("Updated %d customer(s)\n", rowsAffected) + + // DELETE + result, err = qb.ExecuteDelete(context.Background(), db, "customers", filters) + if err != nil { + log.Printf("Error executing DELETE: %v", err) + return + } + + rowsAffected, err = result.RowsAffected() + if err != nil { + log.Printf("Error getting rows affected: %v", err) + return + } + + fmt.Printf("Deleted %d customer(s)\n", rowsAffected) +} + + +func mongoExample() { + fmt.Println("\n=== MongoDB Example ===") + + // Inisialisasi koneksi MongoDB + client, err := mongo.Connect(context.Background(), options.Client().ApplyURI("mongodb://localhost:27017")) + if err != nil { + log.Fatalf("Failed to connect to MongoDB: %v", err) + } + defer client.Disconnect(context.Background()) + + db := client.Database("testdb") + collection := db.Collection("users") + + // Inisialisasi MongoQueryBuilder + mqb := utils.NewMongoQueryBuilder() + + // Query sederhana + query := utils.DynamicQuery{ + Fields: []utils.SelectField{ + {Expression: "name"}, + {Expression: "email"}, + {Expression: "status"}, + }, + Filters: []utils.FilterGroup{ + { + Filters: []utils.DynamicFilter{ + {Column: "status", Operator: utils.OpEqual, Value: "active"}, + {Column: "age", Operator: utils.OpGreaterThan, Value: 18}, + }, + LogicOp: "AND", + }, + }, + Sort: []utils.SortField{ + {Column: "name", Order: "ASC"}, + }, + Limit: 10, + } + + var results []map[string]interface{} + err = mqb.ExecuteFind(context.Background(), collection, query, &results) + if err != nil { + log.Printf("Error executing MongoDB query: %v", err) + return + } + + fmt.Printf("Found %d users\n", len(results)) + for _, user := range 
results { + fmt.Printf("User: %+v\n", user) + } + + // Aggregation pipeline + aggQuery := utils.DynamicQuery{ + Fields: []utils.SelectField{ + {Expression: "department", Alias: "_id"}, + {Expression: "COUNT(*)", Alias: "employee_count"}, + {Expression: "AVG(salary)", Alias: "avg_salary"}, + }, + Filters: []utils.FilterGroup{ + { + Filters: []utils.DynamicFilter{ + {Column: "status", Operator: utils.OpEqual, Value: "active"}, + }, + }, + }, + GroupBy: []string{"department"}, + Sort: []utils.SortField{ + {Column: "employee_count", Order: "DESC"}, + }, + Limit: 10, + } + + var aggResults []map[string]interface{} + err = mqb.ExecuteAggregate(context.Background(), collection, aggQuery, &aggResults) + if err != nil { + log.Printf("Error executing MongoDB aggregation: %v", err) + return + } + + fmt.Printf("Found %d departments\n", len(aggResults)) + for _, dept := range aggResults { + fmt.Printf("Department: %+v\n", dept) + } +} + +func queryParserExample(db *sqlx.DB, qb *utils.QueryBuilder) { + fmt.Println("\n=== Query Parser Example ===") + + // Inisialisasi QueryParser + qp := utils.NewQueryParser() + + // Parse URL query parameters + values := url.Values{} + values.Add("fields", "id,name,email,status") + values.Add("filter[status][_eq]", "active") + values.Add("filter[created_at][_gte]", "2023-01-01") + values.Add("filter[age][_between]", "18,65") + values.Add("sort", "+name,-created_at") + values.Add("limit", "20") + values.Add("offset", "10") + + // Parse query parameters into DynamicQuery + query, err := qp.ParseQuery(values, "users") + if err != nil { + log.Printf("Error parsing query: %v", err) + return + } + + // Execute the parsed query + var results []map[string]interface{} + err = qb.ExecuteQuery(context.Background(), db, query, &results) + if err != nil { + log.Printf("Error executing parsed query: %v", err) + return + } + + fmt.Printf("Found %d users using parsed query\n", len(results)) + for _, user := range results { + fmt.Printf("User: %+v\n", user) + } 
+} diff --git a/internal/utils/query/exemple.go.txt b/internal/utils/query/exemple.go.txt new file mode 100644 index 0000000..348c467 --- /dev/null +++ b/internal/utils/query/exemple.go.txt @@ -0,0 +1,943 @@ +package main + +import ( + "context" + "fmt" + "log" + "net/url" + "time" + + "api-service/internal/config" + "api-service/internal/database" + "api-service/internal/utils/query" + "api-service/internal/validation" + + "github.com/jmoiron/sqlx" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" +) + +// This file provides comprehensive examples of using the query builder library +// for performing various database operations including CRUD, transactions, joins, etc. +// Each example function demonstrates how to build queries, print them, and execute them. +// ============================================================================= +// DEFINISI MODEL (CONTOH) +// ============================================================================= + +// User adalah contoh struct untuk tabel 'users'. +type User struct { + ID int `db:"id" bson:"_id,omitempty"` + Name string `db:"name" bson:"name"` + Email string `db:"email" bson:"email"` + Status string `db:"status" bson:"status"` + CreatedAt time.Time `db:"created_at" bson:"created_at"` +} + +// Post adalah contoh struct untuk tabel 'posts'. +type Post struct { + ID int `db:"id" bson:"_id,omitempty"` + UserID int `db:"user_id" bson:"user_id"` + Title string `db:"title" bson:"title"` + Content string `db:"content" bson:"content"` + CreatedAt time.Time `db:"created_at" bson:"created_at"` +} + +// Employee adalah contoh struct untuk tabel 'employees' dengan kolom JSON. 
+type Employee struct { + ID int `db:"id" bson:"_id,omitempty"` + Name string `db:"name" bson:"name"` + Department string `db:"department" bson:"department"` + Salary float64 `db:"salary" bson:"salary"` + Metadata map[string]interface{} `db:"metadata" bson:"metadata"` // Kolom JSON/JSONB +} + +// ============================================================================= +// FUNGSI UTAMA +// ============================================================================= + +func main() { + cfg := setupConfig() + dbService := database.New(cfg) + + fmt.Println("============================================================") + fmt.Println(" CONTOH 1: QUERY DASAR (SELECT, INSERT, UPDATE, DELETE)") + fmt.Println("============================================================") + basicCRUDExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 2: TRANSAKSI SQL (POSTGRESQL)") + fmt.Println("============================================================") + sqlTransactionExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 3: TRANSAKSI MONGODB") + fmt.Println("============================================================") + mongoTransactionExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 4: QUERY DENGAN FILTER DAN PAGINASI") + fmt.Println("============================================================") + filterAndPaginationExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 5: QUERY DENGAN JOIN") + fmt.Println("============================================================") + joinExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 6: QUERY DENGAN CTE (COMMON TABLE EXPRESSION)") + 
fmt.Println("============================================================") + cteExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 7: QUERY DENGAN WINDOW FUNCTION") + fmt.Println("============================================================") + windowFunctionExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 8: VALIDASI DATA DINAMIS") + fmt.Println("============================================================") + validationExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 9: OPERASI JSON") + fmt.Println("============================================================") + jsonQueryExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 10: QUERY MONGODB (CRUD & AGGREGATION)") + fmt.Println("============================================================") + mongodbExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 11: PENGGUNAAN READ REPLICA") + fmt.Println("============================================================") + readReplicaExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 12: HEALTH CHECK DATABASE") + fmt.Println("============================================================") + healthCheckExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 13: PARSING QUERY DARI URL") + fmt.Println("============================================================") + urlQueryParsingExample(dbService) +} + +func setupConfig() *config.Config { + return &config.Config{ + Databases: map[string]config.DatabaseConfig{ + "main": { + Type: "postgres", + Host: "localhost", + Port: 5432, + 
Username: "user", + Password: "password", + Database: "company_db", + SSLMode: "disable", + MaxOpenConns: 25, + MaxIdleConns: 5, + ConnMaxLifetime: time.Hour, + }, + }, + "mongodb": config.DatabaseConfig{ + Type: "mongodb", + Host: "localhost", + Port: 27017, + Database: "company_db", + Username: "user", + Password: "password", + }, + } +} + +// ============================================================================= +// CONTOH 1: QUERY DASAR (CRUD) +// ============================================================================= + +// basicCRUDExample demonstrates basic Create, Read, Update, Delete operations using the query builder. +// It shows how to build SQL queries, print them, and execute them while displaying results. +// Expected output: Prints INSERT SQL and result (new ID), SELECT SQL and user data, UPDATE SQL and affected rows, DELETE SQL and affected rows. +// Example raw queries: +// INSERT: INSERT INTO users (name, email, status) VALUES ($1, $2, $3) RETURNING id +// SELECT: SELECT * FROM users WHERE id = $1 +// UPDATE: UPDATE users SET status = $1 WHERE id = $2 +// DELETE: DELETE FROM users WHERE id = $1 +func basicCRUDExample(dbService database.Service) { + ctx := context.Background() + db, err := dbService.GetSQLXDB("main") + if err != nil { + log.Printf("Gagal mendapatkan koneksi DB: %v", err) + return + } + qb := query.NewQueryBuilder(query.DBTypePostgreSQL) + + // --- INSERT --- + fmt.Println("\n--- Operasi INSERT ---") + insertData := query.InsertData{ + Columns: []string{"name", "email", "status"}, + Values: []interface{}{"Alice", "alice@example.com", "active"}, + } + sql, args, err := qb.BuildInsertQuery("users", insertData, "id") + if err != nil { + log.Printf("Error building INSERT: %v", err) + return + } + fmt.Printf("Generated INSERT SQL: %s\nArgs: %v\n", sql, args) + result, err := qb.ExecuteInsert(ctx, db, "users", insertData, "id") + if err != nil { + log.Printf("Error INSERT: %v", err) + return + } + newID, _ := 
result.LastInsertId() + fmt.Printf("-> INSERT: Berhasil menambah user dengan ID: %d\n", newID) + + // --- SELECT (Single Row) --- + fmt.Println("\n--- Operasi SELECT ---") + var user User + selectQuery := query.DynamicQuery{ + Fields: []query.SelectField{{Expression: "*"}}, + From: "users", + Filters: []query.FilterGroup{{ + Filters: []query.DynamicFilter{{Column: "id", Operator: query.OpEqual, Value: newID}}, + }}, + } + sql, args, err = qb.BuildQuery(selectQuery) + if err != nil { + log.Printf("Error building SELECT: %v", err) + return + } + fmt.Printf("Generated SELECT SQL: %s\nArgs: %v\n", sql, args) + err = qb.ExecuteQueryRow(ctx, db, selectQuery, &user) + if err != nil { + log.Printf("Error SELECT single row: %v", err) + return + } + fmt.Printf("-> SELECT (Single Row): Berhasil mengambil user: %+v\n", user) + + // --- UPDATE --- + fmt.Println("\n--- Operasi UPDATE ---") + updateData := query.UpdateData{ + Columns: []string{"status"}, + Values: []interface{}{"inactive"}, + } + updateFilter := []query.FilterGroup{{ + Filters: []query.DynamicFilter{{Column: "id", Operator: query.OpEqual, Value: newID}}, + }} + sql, args, err = qb.BuildUpdateQuery("users", updateData, updateFilter) + if err != nil { + log.Printf("Error building UPDATE: %v", err) + return + } + fmt.Printf("Generated UPDATE SQL: %s\nArgs: %v\n", sql, args) + _, err = qb.ExecuteUpdate(ctx, db, "users", updateData, updateFilter) + if err != nil { + log.Printf("Error UPDATE: %v", err) + return + } + fmt.Printf("-> UPDATE: Berhasil memperbarui status user dengan ID: %d\n", newID) + + // --- DELETE --- + fmt.Println("\n--- Operasi DELETE ---") + deleteFilter := []query.FilterGroup{{ + Filters: []query.DynamicFilter{{Column: "id", Operator: query.OpEqual, Value: newID}}, + }} + sql, args, err = qb.BuildDeleteQuery("users", deleteFilter) + if err != nil { + log.Printf("Error building DELETE: %v", err) + return + } + fmt.Printf("Generated DELETE SQL: %s\nArgs: %v\n", sql, args) + _, err = 
qb.ExecuteDelete(ctx, db, "users", deleteFilter) + if err != nil { + log.Printf("Error DELETE: %v", err) + return + } + fmt.Printf("-> DELETE: Berhasil menghapus user dengan ID: %d\n", newID) +} + +// ============================================================================= +// CONTOH 2: TRANSAKSI SQL (POSTGRESQL) +// ============================================================================= + +// sqlTransactionExample demonstrates how to perform atomic transactions involving updates +// across multiple tables using the Query Builder. It builds and prints SQL queries before execution. +// Expected output: Prints UPDATE SQL for salaries and employees, transaction commit/rollback status, and validation results. +// Example raw queries: +// UPDATE salaries: UPDATE salaries SET salary = $1 WHERE employee_id = $2 +// UPDATE employees: UPDATE employees SET last_name = $1 WHERE employee_id = $2 +func sqlTransactionExample(dbService database.Service) { + ctx := context.Background() + employeeID := 123 + newSalary := 75000 + newLastName := "Doe" + + db, err := dbService.GetSQLXDB("main") + if err != nil { + log.Fatalf("Gagal mendapatkan koneksi database SQL: %v", err) + } + qb := query.NewQueryBuilder(query.DBTypePostgreSQL) + + tx, err := db.BeginTxx(ctx, nil) + if err != nil { + log.Fatalf("Gagal memulai transaksi SQL: %v", err) + } + + defer func() { + if p := recover(); p != nil { + fmt.Println("Terjadi panic, melakukan rollback transaksi...") + _ = tx.Rollback() + panic(p) + } else if err != nil { + fmt.Printf("Transaksi dibatalkan (ROLLBACK) karena error: %v\n", err) + _ = tx.Rollback() + } else { + fmt.Println("Tidak ada error, melakukan COMMIT transaksi...") + err = tx.Commit() + if err != nil { + log.Printf("Gagal melakukan COMMIT transaksi: %v", err) + } + } + }() + + fmt.Printf("Memulai transaksi untuk employee_id: %d\n", employeeID) + + // --- Operasi 1: Update gaji di tabel 'salaries' --- + fmt.Println("\n--- Operasi 1: UPDATE salaries ---") + 
salariesUpdateData := query.UpdateData{ + Columns: []string{"salary"}, + Values: []interface{}{newSalary}, + } + salariesFilter := []query.FilterGroup{ + { + Filters: []query.DynamicFilter{ + {Column: "employee_id", Operator: query.OpEqual, Value: employeeID}, + }, + }, + } + sql, args, err := qb.BuildUpdateQuery("salaries", salariesUpdateData, salariesFilter) + if err != nil { + log.Printf("Error building UPDATE salaries: %v", err) + return + } + fmt.Printf("Generated UPDATE salaries SQL: %s\nArgs: %v\n", sql, args) + salariesResult, err := qb.ExecuteUpdate(ctx, tx, "salaries", salariesUpdateData, salariesFilter) + if err != nil { + return + } + salariesRowsAffected, _ := salariesResult.RowsAffected() + fmt.Printf("-> UPDATE salaries: %d baris terpengaruh.\n", salariesRowsAffected) + + // --- Operasi 2: Update informasi di tabel 'employees' --- + fmt.Println("\n--- Operasi 2: UPDATE employees ---") + employeesUpdateData := query.UpdateData{ + Columns: []string{"last_name"}, + Values: []interface{}{newLastName}, + } + employeesFilter := []query.FilterGroup{ + { + Filters: []query.DynamicFilter{ + {Column: "employee_id", Operator: query.OpEqual, Value: employeeID}, + }, + }, + } + sql, args, err = qb.BuildUpdateQuery("employees", employeesUpdateData, employeesFilter) + if err != nil { + log.Printf("Error building UPDATE employees: %v", err) + return + } + fmt.Printf("Generated UPDATE employees SQL: %s\nArgs: %v\n", sql, args) + employeesResult, err := qb.ExecuteUpdate(ctx, tx, "employees", employeesUpdateData, employeesFilter) + if err != nil { + return + } + employeesRowsAffected, _ := employeesResult.RowsAffected() + fmt.Printf("-> UPDATE employees: %d baris terpengaruh.\n", employeesRowsAffected) + + // --- Validasi Akhir Transaksi --- + if salariesRowsAffected == 1 && employeesRowsAffected == 1 { + fmt.Println("-> Validasi BERHASIL: Kedua tabel berhasil diperbarui.") + } else { + err = fmt.Errorf("validasi GAGAL: diharapkan 1 baris terupdate di setiap tabel, 
tetapi mendapat %d (salaries) dan %d (employees)", salariesRowsAffected, employeesRowsAffected) + return + } +} + +// ============================================================================= +// CONTOH 3: TRANSAKSI MONGODB +// ============================================================================= + +// mongoTransactionExample demonstrates MongoDB transactions using the query builder. +// It prints the filters and update operations before executing them in a transaction. +// Expected output: Prints MongoDB filters and update operations for salaries and employees, transaction commit/abort status, and validation results. +// Example raw queries: +// MongoDB filters: {"employee_id": 123} +// MongoDB updates: {"$set": {"salary": 75000}}, {"$set": {"last_name": "Doe"}} +func mongoTransactionExample(dbService database.Service) { + ctx := context.Background() + employeeID := 123 + newSalary := 75000 + newLastName := "Doe" + + client, err := dbService.GetMongoClient("mongodb") + if err != nil { + log.Fatalf("Gagal mendapatkan klien MongoDB: %v", err) + } + + salariesCollection := client.Database("company_db").Collection("salaries") + employeesCollection := client.Database("company_db").Collection("employees") + + session, err := client.StartSession() + if err != nil { + log.Fatalf("Gagal memulai sesi MongoDB: %v", err) + } + defer session.EndSession(ctx) + + fmt.Printf("Memulai transaksi MongoDB untuk employee_id: %d\n", employeeID) + + _, err = session.WithTransaction(ctx, func(sessCtx mongo.SessionContext) (interface{}, error) { + // --- Operasi 1: Update gaji di koleksi 'salaries' --- + fmt.Println("\n--- Operasi 1: UPDATE salaries ---") + salariesFilter := bson.M{"employee_id": employeeID} + salariesUpdate := bson.M{"$set": bson.M{"salary": newSalary}} + fmt.Printf("-> MongoDB Update Salaries Filter: %#v\n", salariesFilter) + fmt.Printf("-> MongoDB Update Salaries Operation: %#v\n", salariesUpdate) + + salariesResult, err := 
salariesCollection.UpdateOne(sessCtx, salariesFilter, salariesUpdate) + if err != nil { + return nil, fmt.Errorf("gagal update koleksi salaries: %w", err) + } + fmt.Printf("-> UPDATE salaries: %d dokumen cocok (matched).\n", salariesResult.MatchedCount) + + // --- Operasi 2: Update informasi di koleksi 'employees' --- + fmt.Println("\n--- Operasi 2: UPDATE employees ---") + employeesFilter := bson.M{"employee_id": employeeID} + employeesUpdate := bson.M{"$set": bson.M{"last_name": newLastName}} + fmt.Printf("-> MongoDB Update Employees Filter: %#v\n", employeesFilter) + fmt.Printf("-> MongoDB Update Employees Operation: %#v\n", employeesUpdate) + + employeesResult, err := employeesCollection.UpdateOne(sessCtx, employeesFilter, employeesUpdate) + if err != nil { + return nil, fmt.Errorf("gagal update koleksi employees: %w", err) + } + fmt.Printf("-> UPDATE employees: %d dokumen cocok (matched).\n", employeesResult.MatchedCount) + + // --- Validasi Akhir Transaksi --- + if salariesResult.MatchedCount == 1 && employeesResult.MatchedCount == 1 { + fmt.Println("-> Validasi BERHASIL: Kedua koleksi berhasil diperbarui.") + return nil, nil + } + + return nil, fmt.Errorf("validasi GAGAL: diharapkan 1 dokumen terupdate di setiap koleksi, tetapi mendapat %d (salaries) dan %d (employees)", salariesResult.MatchedCount, employeesResult.MatchedCount) + }) + + if err != nil { + fmt.Printf("Transaksi MongoDB dibatalkan (ABORT) karena error: %v\n", err) + } else { + fmt.Println("Transaksi MongoDB berhasil di-commit.") + } +} + +// ============================================================================= +// CONTOH 4: FILTER DAN PAGINASI +// ============================================================================= + +// filterAndPaginationExample demonstrates querying with filters and pagination. +// It builds and prints the SELECT query before executing it. +// Expected output: Prints SELECT SQL with filters and pagination, and the number of active users found. 
+// Example raw query: +// SELECT id, name FROM users WHERE (status = $1 AND created_at > $2) ORDER BY name ASC LIMIT 5 OFFSET 10 +func filterAndPaginationExample(dbService database.Service) { + ctx := context.Background() + db, err := dbService.GetSQLXDB("main") + if err != nil { + log.Printf("Gagal mendapatkan koneksi DB: %v", err) + return + } + qb := query.NewQueryBuilder(query.DBTypePostgreSQL) + + query := query.DynamicQuery{ + Fields: []query.SelectField{ + {Expression: "id"}, + {Expression: "name"}, + }, + From: "users", + Filters: []query.FilterGroup{ + { + LogicOp: "AND", + Filters: []query.DynamicFilter{ + {Column: "status", Operator: query.OpEqual, Value: "active"}, + {Column: "created_at", Operator: query.OpGreaterThan, Value: time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC)}, + }, + }, + }, + Sort: []query.SortField{{Column: "name", Order: "ASC"}}, + Limit: 5, + Offset: 10, + } + + var users []User + sql, args, err := qb.BuildQuery(query) + if err != nil { + log.Printf("Error building SELECT: %v", err) + return + } + fmt.Printf("Generated SELECT SQL: %s\nArgs: %v\n", sql, args) + err = qb.ExecuteQuery(ctx, db, query, &users) + if err != nil { + log.Printf("Error query dengan filter: %v", err) + return + } + fmt.Printf("-> Filter & Paginasi: Ditemukan %d user aktif (halaman 3).\n", len(users)) +} + +// ============================================================================= +// CONTOH 5: QUERY DENGAN JOIN +// ============================================================================= + +// joinExample demonstrates querying with JOIN operations. +// It builds and prints the JOIN query before executing it. +// Expected output: Prints JOIN SQL query and the number of posts with author names found. 
+// Example raw query: +// SELECT p.id AS post_id, p.title, u.name AS author_name FROM posts p INNER JOIN users u ON p.user_id = u.id LIMIT 10 +func joinExample(dbService database.Service) { + ctx := context.Background() + db, err := dbService.GetSQLXDB("main") + if err != nil { + log.Printf("Gagal mendapatkan koneksi DB: %v", err) + return + } + qb := query.NewQueryBuilder(query.DBTypePostgreSQL) + + query := query.DynamicQuery{ + Fields: []query.SelectField{ + {Expression: "p.id", Alias: "post_id"}, + {Expression: "p.title"}, + {Expression: "u.name", Alias: "author_name"}, + }, + From: "posts", + Aliases: "p", + Joins: []query.Join{ + { + Type: "INNER", + Table: "users", + Alias: "u", + OnConditions: query.FilterGroup{ + Filters: []query.DynamicFilter{ + {Column: "p.user_id", Operator: query.OpEqual, Value: "u.id"}, + }, + }, + }, + }, + Limit: 10, + } + + var results []struct { + PostID int `db:"post_id"` + Title string `db:"title"` + AuthorName string `db:"author_name"` + } + sql, args, err := qb.BuildQuery(query) + if err != nil { + log.Printf("Error building JOIN: %v", err) + return + } + fmt.Printf("Generated JOIN SQL: %s\nArgs: %v\n", sql, args) + err = qb.ExecuteQuery(ctx, db, query, &results) + if err != nil { + log.Printf("Error query JOIN: %v", err) + return + } + fmt.Printf("-> JOIN: Ditemukan %d post dengan nama penulis.\n", len(results)) +} + +// ============================================================================= +// CONTOH 6: QUERY DENGAN CTE +// ============================================================================= + +// cteExample demonstrates querying with Common Table Expressions (CTE). +// It builds and prints the CTE query before executing it. +// Expected output: Prints CTE SQL query and the number of users with more than 5 posts. 
+func cteExample(dbService database.Service) { + ctx := context.Background() + db, err := dbService.GetSQLXDB("main") + if err != nil { + log.Printf("Gagal mendapatkan koneksi DB: %v", err) + return + } + qb := query.NewQueryBuilder(query.DBTypePostgreSQL) + + query := query.DynamicQuery{ + CTEs: []query.CTE{ + { + Name: "user_post_counts", + Query: query.DynamicQuery{ + Fields: []query.SelectField{ + {Expression: "user_id"}, + {Expression: "COUNT(*)", Alias: "post_count"}, + }, + From: "posts", + GroupBy: []string{"user_id"}, + }, + }, + }, + Fields: []query.SelectField{ + {Expression: "u.name"}, + {Expression: "upc.post_count"}, + }, + From: "users u", + Joins: []query.Join{ + { + Type: "INNER", + Table: "user_post_counts", + Alias: "upc", + OnConditions: query.FilterGroup{ + Filters: []query.DynamicFilter{ + {Column: "u.id", Operator: query.OpEqual, Value: "upc.user_id"}, + }, + }, + }, + }, + Filters: []query.FilterGroup{ + { + Filters: []query.DynamicFilter{ + {Column: "upc.post_count", Operator: query.OpGreaterThan, Value: 5}, + }, + }, + }, + } + + var results []struct { + Name string `db:"name"` + PostCount int `db:"post_count"` + } + sql, args, err := qb.BuildQuery(query) + if err != nil { + log.Printf("Error building CTE: %v", err) + return + } + fmt.Printf("Generated CTE SQL: %s\nArgs: %v\n", sql, args) + err = qb.ExecuteQuery(ctx, db, query, &results) + if err != nil { + log.Printf("Error query CTE: %v", err) + return + } + fmt.Printf("-> CTE: Ditemukan %d user dengan lebih dari 5 post.\n", len(results)) +} + +// ============================================================================= +// CONTOH 7: WINDOW FUNCTION +// ============================================================================= + +// windowFunctionExample demonstrates querying with window functions. +// It builds and prints the window function query before executing it. +// Expected output: Prints window function SQL query and the number of employees with salary rankings. 
+func windowFunctionExample(dbService database.Service) { + ctx := context.Background() + db, err := dbService.GetSQLXDB("main") + if err != nil { + log.Printf("Gagal mendapatkan koneksi DB: %v", err) + return + } + qb := query.NewQueryBuilder(query.DBTypePostgreSQL) + + query := query.DynamicQuery{ + Fields: []query.SelectField{ + {Expression: "name"}, + {Expression: "department"}, + {Expression: "salary"}, + }, + From: "employees", + WindowFunctions: []query.WindowFunction{ + { + Function: "RANK", + Over: "department", + OrderBy: "salary DESC", + Alias: "salary_rank", + }, + }, + Filters: []query.FilterGroup{ + { + Filters: []query.DynamicFilter{ + {Column: "department", Operator: query.OpEqual, Value: "Engineering"}, + }, + }, + }, + } + + var results []struct { + Name string `db:"name"` + Department string `db:"department"` + Salary float64 `db:"salary"` + SalaryRank int `db:"salary_rank"` + } + sql, args, err := qb.BuildQuery(query) + if err != nil { + log.Printf("Error building Window Function: %v", err) + return + } + fmt.Printf("Generated Window Function SQL: %s\nArgs: %v\n", sql, args) + err = qb.ExecuteQuery(ctx, db, query, &results) + if err != nil { + log.Printf("Error query Window Function: %v", err) + return + } + fmt.Printf("-> Window Function: Ditemukan %d employee di departemen Engineering dengan peringkat gaji.\n", len(results)) +} + +// ============================================================================= +// CONTOH 8: VALIDASI DATA DINAMIS +// ============================================================================= + +// validationExample demonstrates dynamic data validation using the query builder. +// It builds and prints the validation query before executing it. +// Expected output: Prints validation SQL query and whether the email is duplicate or available. 
+func validationExample(dbService database.Service) { + ctx := context.Background() + db, err := dbService.GetSQLXDB("main") + if err != nil { + log.Printf("Gagal mendapatkan koneksi DB: %v", err) + return + } + qb := query.NewQueryBuilder(query.DBTypePostgreSQL) + validator := validation.NewDynamicValidator(qb) + + userData := map[string]interface{}{"email": "test@example.com"} + emailRule := validation.NewUniqueFieldRule("users", "email") + + // Build and print the validation query + countQuery := query.DynamicQuery{ + From: "users", + Filters: []query.FilterGroup{{ + Filters: []query.DynamicFilter{{Column: "email", Operator: query.OpEqual, Value: "test@example.com"}}, + }}, + } + sql, args, err := qb.BuildCountQuery(countQuery) + if err != nil { + log.Printf("Error building validation query: %v", err) + return + } + fmt.Printf("Generated Validation SQL: %s\nArgs: %v\n", sql, args) + + isDuplicate, err := validator.Validate(ctx, db, emailRule, userData) + if err != nil { + log.Printf("Error validasi: %v", err) + return + } + + if isDuplicate { + fmt.Println("-> Validasi: Email 'test@example.com' sudah ada.") + } else { + fmt.Println("-> Validasi: Email 'test@example.com' tersedia.") + } +} + +// ============================================================================= +// CONTOH 9: OPERASI JSON +// ============================================================================= + +// jsonQueryExample demonstrates JSON operations in queries. +// It builds and prints the JSON queries before executing them. +// Expected output: Prints JSON SELECT and UPDATE SQL queries, number of employees found, and update success message. 
+func jsonQueryExample(dbService database.Service) { + ctx := context.Background() + db, err := dbService.GetSQLXDB("main") + if err != nil { + log.Printf("Gagal mendapatkan koneksi DB: %v", err) + return + } + qb := query.NewQueryBuilder(query.DBTypePostgreSQL) + + query := query.DynamicQuery{ + Fields: []query.SelectField{{Expression: "*"}}, + From: "employees", + Filters: []query.FilterGroup{{ + Filters: []query.DynamicFilter{ + { + Column: "metadata", + Operator: query.OpJsonEqual, + Value: "Engineering", + Options: map[string]interface{}{"path": "department"}, + }, + }, + }}, + } + + var employees []Employee + sql, args, err := qb.BuildQuery(query) + if err != nil { + log.Printf("Error building JSON query: %v", err) + return + } + fmt.Printf("Generated JSON SELECT SQL: %s\nArgs: %v\n", sql, args) + err = qb.ExecuteQuery(ctx, db, query, &employees) + if err != nil { + log.Printf("Error query JSON: %v", err) + return + } + fmt.Printf("-> Operasi JSON: Ditemukan %d employee di departemen Engineering (dari metadata JSON).\n", len(employees)) + + updateData := query.UpdateData{ + JsonUpdates: map[string]query.JsonUpdate{ + "metadata": {Path: "role", Value: "Senior Developer"}, + }, + } + filter := []query.FilterGroup{{Filters: []query.DynamicFilter{{Column: "id", Operator: query.OpEqual, Value: 1}}}} + sql, args, err = qb.BuildUpdateQuery("employees", updateData, filter) + if err != nil { + log.Printf("Error building JSON update: %v", err) + return + } + fmt.Printf("Generated JSON UPDATE SQL: %s\nArgs: %v\n", sql, args) + _, err = qb.ExecuteUpdate(ctx, db, "employees", updateData, filter) + if err != nil { + log.Printf("Error update JSON: %v", err) + return + } + fmt.Println("-> Operasi JSON: Berhasil memperbarui 'role' di metadata untuk employee ID 1.") +} + +// ============================================================================= +// CONTOH 10: QUERY MONGODB +// ============================================================================= + +// 
mongodbExample demonstrates MongoDB queries using the query builder. +// It prints the built filters and pipelines before executing them. +// Expected output: Prints MongoDB find filter, number of active users, aggregation pipeline, and number of departments. +func mongodbExample(dbService database.Service) { + ctx := context.Background() + client, err := dbService.GetMongoClient("mongodb") + if err != nil { + log.Printf("Gagal mendapatkan klien MongoDB: %v", err) + return + } + collection := client.Database("company_db").Collection("users") + mqb := query.NewMongoQueryBuilder() + + // --- FIND --- + fmt.Println("\n--- Operasi FIND ---") + findQuery := query.DynamicQuery{ + Filters: []query.FilterGroup{{Filters: []query.DynamicFilter{{Column: "status", Operator: query.OpEqual, Value: "active"}}}}, + Limit: 5, + } + filter, _, _ := mqb.BuildFindQuery(findQuery) + fmt.Printf("-> MongoDB Find Filter: %#v\n", filter) + + var users []User + err = mqb.ExecuteFind(ctx, collection, findQuery, &users) + if err != nil { + log.Printf("Error MongoDB Find: %v", err) + return + } + fmt.Printf("-> MongoDB Find: Ditemukan %d user aktif.\n", len(users)) + + // --- AGGREGATION --- + fmt.Println("\n--- Operasi AGGREGATION ---") + aggQuery := query.DynamicQuery{ + Fields: []query.SelectField{ + {Expression: "department", Alias: "_id"}, + {Expression: "COUNT(*)", Alias: "count"}, + }, + GroupBy: []string{"department"}, + } + pipeline, _ := mqb.BuildAggregateQuery(aggQuery) + fmt.Printf("-> MongoDB Aggregation Pipeline: %#v\n", pipeline) + + var aggResults []struct { + ID string `bson:"_id"` + Count int `bson:"count"` + } + err = mqb.ExecuteAggregate(ctx, collection, aggQuery, &aggResults) + if err != nil { + log.Printf("Error MongoDB Aggregate: %v", err) + return + } + fmt.Printf("-> MongoDB Aggregate: Ditemukan user di %d departemen.\n", len(aggResults)) +} + +// ============================================================================= +// CONTOH 11: PENGGUNAAN READ REPLICA +// 
============================================================================= + +// readReplicaExample demonstrates using read replicas for queries. +// It builds and prints the count query before executing it on the read replica. +// Expected output: Prints COUNT SQL query and the total number of users from the read replica. +// Example raw query: +// SELECT COUNT(*) FROM users +func readReplicaExample(dbService database.Service) { + ctx := context.Background() + readDB, err := dbService.GetReadDB("main") + if err != nil { + log.Printf("Gagal mendapatkan read replica: %v", err) + return + } + readxDB := sqlx.NewDb(readDB, "pgx") + qb := query.NewQueryBuilder(query.DBTypePostgreSQL) + + countQuery := query.DynamicQuery{From: "users"} + sql, args, err := qb.BuildCountQuery(countQuery) + if err != nil { + log.Printf("Error building count query: %v", err) + return + } + fmt.Printf("Generated COUNT SQL: %s\nArgs: %v\n", sql, args) + count, err := qb.ExecuteCount(ctx, readxDB, countQuery) + if err != nil { + log.Printf("Error query di read replica: %v", err) + return + } + fmt.Printf("-> Read Replica: Total user (dari read replica): %d\n", count) +} + +// ============================================================================= +// CONTOH 12: HEALTH CHECK DATABASE +// ============================================================================= + +// healthCheckExample demonstrates database health checks. +// It prints the health status of all databases. +// Expected output: Prints health status for each database (up/down with type or error). 
+func healthCheckExample(dbService database.Service) { + healthStatus := dbService.Health() + fmt.Println("-> Health Check Status:") + for dbName, status := range healthStatus { + if status["status"] == "up" { + fmt.Printf(" - Database %s: SEHAT (%s)\n", dbName, status["type"]) + } else { + fmt.Printf(" - Database %s: TIDAK SEHAT - %s\n", dbName, status["error"]) + } + } +} + +// ============================================================================= +// CONTOH 13: PARSING QUERY DARI URL +// ============================================================================= + +// urlQueryParsingExample demonstrates parsing query parameters from URL. +// It parses the URL query and prints the resulting dynamic query structure. +// Expected output: Prints parsed fields, filters, sort, and limit from the URL query. +func urlQueryParsingExample(dbService database.Service) { + values := url.Values{} + values.Set("fields", "id,name") + values.Set("filter[status][_eq]", "active") + values.Set("filter[age][_gt]", "25") + values.Set("sort", "-name") + values.Set("limit", "10") + + parser := query.NewQueryParser() + dynamicQuery, err := parser.ParseQuery(values, "users") + if err != nil { + log.Printf("Error parsing URL query: %v", err) + return + } + + fmt.Println("-> Parsing URL Query:") + fmt.Printf(" Fields: %v\n", dynamicQuery.Fields) + fmt.Printf(" Filters: %+v\n", dynamicQuery.Filters) + fmt.Printf(" Sort: %+v\n", dynamicQuery.Sort) + fmt.Printf(" Limit: %d\n", dynamicQuery.Limit) +} + +// ============================================================================= +// AKHIR FILE +// ============================================================================= diff --git a/internal/utils/validation/duplicate.go b/internal/utils/validation/duplicate.go new file mode 100644 index 0000000..979ad87 --- /dev/null +++ b/internal/utils/validation/duplicate.go @@ -0,0 +1,244 @@ +package validation + +import ( + "context" + "fmt" + + queryUtils 
"antrian-operasi/internal/utils/query" + + "github.com/jmoiron/sqlx" +) + +// ============================================================================= +// DYNAMIC VALIDATION RULE +// ============================================================================= + +// ValidationRule mendefinisikan aturan untuk memeriksa duplikat atau kondisi lain. +// Struct ini membuat validator dapat digunakan kembali untuk tabel apa pun. +type ValidationRule struct { + // TableName adalah nama tabel yang akan diperiksa. + TableName string + + // UniqueColumns adalah daftar kolom yang, jika digabungkan, harus unik. + // Contoh: []string{"email"} atau []string{"first_name", "last_name", "dob"} + UniqueColumns []string + + // Conditions adalah filter tambahan yang harus dipenuhi. + // Ini sangat berguna untuk aturan bisnis, seperti "status != 'deleted'". + // Gunakan queryUtils.DynamicFilter untuk fleksibilitas penuh. + Conditions []queryUtils.DynamicFilter + + // ExcludeIDColumn dan ExcludeIDValue digunakan untuk operasi UPDATE, + // untuk memastikan bahwa record tidak membandingkan dirinya sendiri. + ExcludeIDColumn string + ExcludeIDValue interface{} +} + +// NewUniqueFieldRule adalah helper untuk membuat aturan validasi unik untuk satu kolom. +// Ini adalah cara cepat untuk membuat aturan yang paling umum. +func NewUniqueFieldRule(tableName, uniqueColumn string, additionalConditions ...queryUtils.DynamicFilter) ValidationRule { + return ValidationRule{ + TableName: tableName, + UniqueColumns: []string{uniqueColumn}, + Conditions: additionalConditions, + } +} + +// ============================================================================= +// DYNAMIC VALIDATOR +// ============================================================================= + +// DynamicValidator menyediakan metode untuk menjalankan validasi berdasarkan ValidationRule. +// Ini sepenuhnya generik dan tidak terikat pada tabel atau model tertentu. 
+type DynamicValidator struct {
+	qb *queryUtils.QueryBuilder
+}
+
+// NewDynamicValidator creates a new DynamicValidator instance.
+func NewDynamicValidator(qb *queryUtils.QueryBuilder) *DynamicValidator {
+	return &DynamicValidator{qb: qb}
+}
+
+// Validate runs the validation against the given rule.
+// `data` is a map containing the values for the columns to check (typically from the request body).
+// Returns `true` if a duplicate was found (validation failed), `false` if no duplicate exists (validation passed).
+func (dv *DynamicValidator) Validate(ctx context.Context, db *sqlx.DB, rule ValidationRule, data map[string]interface{}) (bool, error) {
+	// LOGGING: validation start. NOTE(review): fmt.Printf debug output throughout; prefer the log package in production.
+	fmt.Printf("[VALIDATION] Starting validation for table: %s, unique columns: %v, data: %v\n", rule.TableName, rule.UniqueColumns, data)
+
+	if len(rule.UniqueColumns) == 0 {
+		fmt.Printf("[VALIDATION] ERROR: ValidationRule must have at least one UniqueColumn\n")
+		return false, fmt.Errorf("ValidationRule must have at least one UniqueColumn")
+	}
+
+	// 1. Collect all filters from the rule
+	var allFilters []queryUtils.DynamicFilter
+
+	// Add the extra conditions (e.g. status != 'deleted')
+	allFilters = append(allFilters, rule.Conditions...)
+	fmt.Printf("[VALIDATION] Added %d condition filters\n", len(rule.Conditions))
+
+	// 2. Build equality filters for the unique columns from the provided data
+	for _, colName := range rule.UniqueColumns {
+		value, exists := data[colName]
+		if !exists {
+			// Missing data for a unique column is a programming error.
+			fmt.Printf("[VALIDATION] ERROR: data for unique column '%s' not found in provided data map\n", colName)
+			return false, fmt.Errorf("data for unique column '%s' not found in provided data map", colName)
+		}
+		allFilters = append(allFilters, queryUtils.DynamicFilter{
+			Column:   colName,
+			Operator: queryUtils.OpEqual,
+			Value:    value,
+		})
+		fmt.Printf("[VALIDATION] Added filter for column '%s' with value: %v\n", colName, value)
+	}
+
+	// 3. Add the ID-exclusion filter (for UPDATE operations)
+	if rule.ExcludeIDColumn != "" {
+		allFilters = append(allFilters, queryUtils.DynamicFilter{
+			Column:   rule.ExcludeIDColumn,
+			Operator: queryUtils.OpNotEqual,
+			Value:    rule.ExcludeIDValue,
+		})
+		fmt.Printf("[VALIDATION] Added exclude filter for column '%s' with value: %v\n", rule.ExcludeIDColumn, rule.ExcludeIDValue)
+	}
+
+	// 4. Build and execute a query that counts the matching records
+	query := queryUtils.DynamicQuery{
+		From:    rule.TableName,
+		Filters: []queryUtils.FilterGroup{{Filters: allFilters, LogicOp: "AND"}},
+	}
+
+	fmt.Printf("[VALIDATION] Built query with %d total filters\n", len(allFilters))
+
+	count, err := dv.qb.ExecuteCount(ctx, db, query)
+	if err != nil {
+		fmt.Printf("[VALIDATION] ERROR: failed to execute validation query for table %s: %v\n", rule.TableName, err)
+		return false, fmt.Errorf("failed to execute validation query for table %s: %w", rule.TableName, err)
+	}
+
+	fmt.Printf("[VALIDATION] Query executed successfully, count result: %d\n", count)
+
+	// 5. Return the result (true means a duplicate exists)
+	result := count > 0
+	fmt.Printf("[VALIDATION] Validation result: isDuplicate=%t (count > 0: %d > 0 = %t)\n", result, count, result)
+	return result, nil
+}
+
+// =============================================================================
+// USAGE EXAMPLES (TO BE PLACED IN YOUR HANDLER)
+// =============================================================================
+
+/*
+// --- How to use in RetribusiHandler ---
+
+// 1.
Add the DynamicValidator to the handler struct
+type RetribusiHandler struct {
+	// ...
+	validator *validation.DynamicValidator
+}
+
+// 2. Initialize it in the constructor
+func NewRetribusiHandler() *RetribusiHandler {
+	qb := queryUtils.NewQueryBuilder(queryUtils.DBTypePostgreSQL).SetAllowedColumns(...)
+
+	return &RetribusiHandler{
+		// ...
+		validator: validation.NewDynamicValidator(qb),
+	}
+}
+
+// 3. Use it in CreateRetribusi
+func (h *RetribusiHandler) CreateRetribusi(c *gin.Context) {
+	var req retribusi.RetribusiCreateRequest
+	// ... bind and validate the request ...
+
+	// Prepare the validation rule: KodeTarif must be unique among non-deleted records.
+	rule := validation.NewUniqueFieldRule(
+		"data_retribusi", // Table name
+		"Kode_tarif", // Column that must be unique
+		queryUtils.DynamicFilter{ // Additional condition
+			Column:   "status",
+			Operator: queryUtils.OpNotEqual,
+			Value:    "deleted",
+		},
+	)
+
+	// Prepare the request data to validate
+	dataToValidate := map[string]interface{}{
+		"Kode_tarif": req.KodeTarif,
+	}
+
+	// Run the validation
+	isDuplicate, err := h.validator.Validate(ctx, dbConn, rule, dataToValidate)
+	if err != nil {
+		h.logAndRespondError(c, "Failed to validate Kode Tarif", err, http.StatusInternalServerError)
+		return
+	}
+
+	if isDuplicate {
+		h.respondError(c, "Kode Tarif already exists", fmt.Errorf("duplicate Kode Tarif: %s", req.KodeTarif), http.StatusConflict)
+		return
+	}
+
+	// ... continue with the create process ...
+}
+
+// 4. Use it in UpdateRetribusi
+func (h *RetribusiHandler) UpdateRetribusi(c *gin.Context) {
+	id := c.Param("id")
+	var req retribusi.RetribusiUpdateRequest
+	// ... bind and validate the request ...
+
+	// Prepare the validation rule: KodeTarif must be unique, except for the record with this ID.
+	rule := validation.ValidationRule{
+		TableName:     "data_retribusi",
+		UniqueColumns: []string{"Kode_tarif"},
+		Conditions: []queryUtils.DynamicFilter{
+			{Column: "status", Operator: queryUtils.OpNotEqual, Value: "deleted"},
+		},
+		ExcludeIDColumn: "id", // Exclude based on the 'id' column
+		ExcludeIDValue:  id,   // ...with the ID value from the path parameter
+	}
+
+	dataToValidate := map[string]interface{}{
+		"Kode_tarif": req.KodeTarif,
+	}
+
+	isDuplicate, err := h.validator.Validate(ctx, dbConn, rule, dataToValidate)
+	if err != nil {
+		h.logAndRespondError(c, "Failed to validate Kode Tarif", err, http.StatusInternalServerError)
+		return
+	}
+
+	if isDuplicate {
+		h.respondError(c, "Kode Tarif already exists", fmt.Errorf("duplicate Kode Tarif: %s", req.KodeTarif), http.StatusConflict)
+		return
+	}
+
+	// ... continue with the update process ...
+}
+
+// --- Examples for Other Cases ---
+
+// Example: composite-unique validation for the 'users' table
+// (email and company_id must be unique together)
+func (h *UserHandler) CreateUser(c *gin.Context) {
+	// ...
+
+	rule := validation.ValidationRule{
+		TableName:     "users",
+		UniqueColumns: []string{"email", "company_id"}, // Composite unique
+	}
+
+	dataToValidate := map[string]interface{}{
+		"email":      req.Email,
+		"company_id": req.CompanyID,
+	}
+
+	isDuplicate, err := h.validator.Validate(ctx, dbConn, rule, dataToValidate)
+	// ...
handle error dan duplicate +} + +*/ diff --git a/migrations/DDL.sql b/migrations/DDL.sql new file mode 100644 index 0000000..f8cfc62 --- /dev/null +++ b/migrations/DDL.sql @@ -0,0 +1,423 @@ +-- +-- PostgreSQL database dump +-- + +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; + +-- +-- TOC entry 5 (class 2615 OID 18291) +-- Name: public; Type: SCHEMA; Schema: -; Owner: postgres +-- + +-- *not* creating schema, since initdb creates it + + +--ALTER SCHEMA public OWNER TO postgres; + +-- +-- TOC entry 5092 (class 0 OID 0) +-- Dependencies: 5 +-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: postgres +-- + +COMMENT ON SCHEMA public IS ''; + + +SET default_tablespace = ''; + +SET default_table_access_method = heap; + +-- +-- TOC entry 220 (class 1259 OID 18294) +-- Name: daftar_kategori_operasi; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.daftar_kategori_operasi ( + id integer NOT NULL, + status character varying(255) DEFAULT 'draft'::character varying NOT NULL, + date_created timestamp without time zone, + date_updated timestamp without time zone, + "Kategori" character varying(255) +); + + +--ALTER TABLE public.daftar_kategori_operasi OWNER TO postgres; + +-- +-- TOC entry 219 (class 1259 OID 18293) +-- Name: daftar_kategori_operasi_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.daftar_kategori_operasi_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +--ALTER SEQUENCE public.daftar_kategori_operasi_id_seq OWNER TO postgres; + +-- +-- TOC entry 5094 (class 0 OID 0) +-- Dependencies: 219 +-- Name: daftar_kategori_operasi_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres 
+-- + +ALTER SEQUENCE public.daftar_kategori_operasi_id_seq OWNED BY public.daftar_kategori_operasi.id; + + +-- +-- TOC entry 233 (class 1259 OID 18376) +-- Name: daftar_ksm; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.daftar_ksm ( + id integer NOT NULL, + "Nama_ksm" character varying(255) +); + + +--ALTER TABLE public.daftar_ksm OWNER TO postgres; + +-- +-- TOC entry 232 (class 1259 OID 18375) +-- Name: daftar_ksm_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.daftar_ksm_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +--ALTER SEQUENCE public.daftar_ksm_id_seq OWNER TO postgres; + +-- +-- TOC entry 5095 (class 0 OID 0) +-- Dependencies: 232 +-- Name: daftar_ksm_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.daftar_ksm_id_seq OWNED BY public.daftar_ksm.id; + + +-- +-- TOC entry 223 (class 1259 OID 18317) +-- Name: daftar_spesialis; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.daftar_spesialis ( + id integer NOT NULL, + "Kode" character varying(255), + "Spesialis" character varying(255), + "Id_group_location_simgos" character varying(255) +); + + +--ALTER TABLE public.daftar_spesialis OWNER TO postgres; + +-- +-- TOC entry 222 (class 1259 OID 18316) +-- Name: daftar_spesialis_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.daftar_spesialis_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +--ALTER SEQUENCE public.daftar_spesialis_id_seq OWNER TO postgres; + +-- +-- TOC entry 5096 (class 0 OID 0) +-- Dependencies: 222 +-- Name: daftar_spesialis_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.daftar_spesialis_id_seq OWNED BY public.daftar_spesialis.id; + + +-- +-- TOC entry 225 (class 1259 OID 18327) +-- Name: daftar_subspesialis; Type: TABLE; Schema: 
public; Owner: postgres +-- + +CREATE TABLE public.daftar_subspesialis ( + id integer NOT NULL, + "Kode" character varying(255), + "Subspesialis" character varying(255), + "FK_daftar_spesialis_ID" integer +); + + +--ALTER TABLE public.daftar_subspesialis OWNER TO postgres; + +-- +-- TOC entry 224 (class 1259 OID 18326) +-- Name: daftar_subspesialis_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.daftar_subspesialis_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +--ALTER SEQUENCE public.daftar_subspesialis_id_seq OWNER TO postgres; + +-- +-- TOC entry 5097 (class 0 OID 0) +-- Dependencies: 224 +-- Name: daftar_subspesialis_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.daftar_subspesialis_id_seq OWNED BY public.daftar_subspesialis.id; + + +-- +-- TOC entry 230 (class 1259 OID 18358) +-- Name: data_diagnosa_pasien_operasi; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.data_diagnosa_pasien_operasi ( + id uuid NOT NULL, + user_created uuid, + date_created timestamp with time zone, + user_updated uuid, + date_updated timestamp with time zone, + "Kode_diagnosa" character varying(255), + "Diagnosa" character varying(255), + "Keterangan" character varying(255), + "FK_pasien_operasi_diagnosa_pasien_operasi_ID" uuid, + "Jenis_diagnosa" character varying(255) +); + + +--ALTER TABLE public.data_diagnosa_pasien_operasi OWNER TO postgres; + +-- +-- TOC entry 221 (class 1259 OID 18305) +-- Name: data_pasien_operasi; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.data_pasien_operasi ( + id uuid NOT NULL, + status character varying(20) DEFAULT 'draft'::character varying NOT NULL, + sort integer, + user_created uuid, + date_created timestamp with time zone, + user_updated uuid, + date_updated timestamp with time zone, + "No_rekam_medis" character varying(255), + "No_KTP" character varying(255), + 
"Nama_pasien" character varying(255), + "Jenis_kelamin" character varying(255), + "Tanggal_lahir" date, + "Umur" character varying(255), + "Alamat" text, + "Tanggal_daftar" timestamp without time zone, + "Kategori_operasi" integer, + "Rencana_operasi" text, + "Keterangan" text, + "Tanggal_selesai_operasi" timestamp without time zone, + "Status_operasi" character varying(255), + "Nomor" integer, + "Old_kategori" integer, + "Spesialis" integer, + "Sub_spesialis" integer NOT NULL, + "Keterangan_status_pasien" text, + "Nomor_spesialis" integer, + "Nomor_sub_spesialis" integer, + "Old_spesialis" integer, + "Old_sub_spesialis" integer +); + + +--ALTER TABLE public.data_pasien_operasi OWNER TO postgres; + +-- +-- TOC entry 229 (class 1259 OID 18351) +-- Name: data_pasien_operasi_data_pegawai; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.data_pasien_operasi_data_pegawai ( + id integer NOT NULL, + data_pasien_operasi_id uuid, + data_pegawai_id uuid +); + + +--ALTER TABLE public.data_pasien_operasi_data_pegawai OWNER TO postgres; + +-- +-- TOC entry 228 (class 1259 OID 18350) +-- Name: data_pasien_operasi_data_pegawai_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.data_pasien_operasi_data_pegawai_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +--ALTER SEQUENCE public.data_pasien_operasi_data_pegawai_id_seq OWNER TO postgres; + +-- +-- TOC entry 5098 (class 0 OID 0) +-- Dependencies: 228 +-- Name: data_pasien_operasi_data_pegawai_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.data_pasien_operasi_data_pegawai_id_seq OWNED BY public.data_pasien_operasi_data_pegawai.id; + + +-- +-- TOC entry 231 (class 1259 OID 18366) +-- Name: data_pegawai; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.data_pegawai ( + id uuid NOT NULL, + status character varying(255) DEFAULT 'draft'::character varying, + sort 
integer, + user_created uuid, + date_created timestamp with time zone, + user_updated uuid, + date_updated timestamp with time zone, + "Foto_pegawai" uuid, + "Kartu_pegawai" uuid, + "KTP" uuid, + "Nomor_kartu_pegawai" character varying(255), + "NIP" character varying(255), + "NIP_lama" character varying(255), + "Jabatan" integer, + "Golongan" integer, + "Rumpun" integer, + "KSM" integer, + "Tanggal_masuk" date, + "Tanggal_pensiun" date, + "TMT_CPNS" date, + "TMT_PNS" date, + "Satuan_kerja" integer, + "FK_spt_pegawai_id" uuid, + "Nama_depan" character varying(255), + "Nama_belakang" character varying(255), + "Username" character varying(255), + "User_email" character varying(255), + "Id_user" character varying(255), + "Practicioner" uuid, + "HFIS_code" character varying(255), + "HFIS_display" character varying(255), + "Code_dpjp" character varying(255), + "KDDOKTER" bigint, + "Practicioner_detail" bigint, + "Kode_satusehat" character varying(255), + "No_whatsapp" character varying(255), + "Posisi_ruang" integer, + "Substansi" integer, + "NDE" uuid, + "Subspesialis" integer, + "Status_pegawai" integer, + "File_status" uuid, + "Ketarangan" integer, + "Kode_DPJP" character varying(255) +); + + +--ALTER TABLE public.data_pegawai OWNER TO postgres; + +-- +-- TOC entry 226 (class 1259 OID 18336) +-- Name: data_telepon_pasien_operasi; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.data_telepon_pasien_operasi ( + id uuid NOT NULL, + "Nomor_telepon" character varying(255), + "FK_pasien_operasi_telepon_pasien_operasi_ID" uuid +); + + +--ALTER TABLE public.data_telepon_pasien_operasi OWNER TO postgres; + +-- +-- TOC entry 227 (class 1259 OID 18342) +-- Name: data_tindakan_pasien_operasi; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.data_tindakan_pasien_operasi ( + id uuid NOT NULL, + user_created uuid, + date_created timestamp with time zone, + user_updated uuid, + date_updated timestamp with time zone, + "Kode_tindakan" 
character varying(255), + "Tindakan" character varying(255), + "Tindakan_tambahan" text, + "FK_pasien_operasi_tindakan_pasien_operasi_ID" uuid +); + + +--ALTER TABLE public.data_tindakan_pasien_operasi OWNER TO postgres; + +-- +-- TOC entry 4896 (class 2604 OID 18297) +-- Name: daftar_kategori_operasi id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.daftar_kategori_operasi ALTER COLUMN id SET DEFAULT nextval('public.daftar_kategori_operasi_id_seq'::regclass); + + +-- +-- TOC entry 4903 (class 2604 OID 18379) +-- Name: daftar_ksm id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.daftar_ksm ALTER COLUMN id SET DEFAULT nextval('public.daftar_ksm_id_seq'::regclass); + + +-- +-- TOC entry 4899 (class 2604 OID 18320) +-- Name: daftar_spesialis id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.daftar_spesialis ALTER COLUMN id SET DEFAULT nextval('public.daftar_spesialis_id_seq'::regclass); + + +-- +-- TOC entry 4900 (class 2604 OID 18330) +-- Name: daftar_subspesialis id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.daftar_subspesialis ALTER COLUMN id SET DEFAULT nextval('public.daftar_subspesialis_id_seq'::regclass); + + +-- +-- TOC entry 4901 (class 2604 OID 18354) +-- Name: data_pasien_operasi_data_pegawai id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.data_pasien_operasi_data_pegawai ALTER COLUMN id SET DEFAULT nextval('public.data_pasien_operasi_data_pegawai_id_seq'::regclass); \ No newline at end of file diff --git a/migrations/seed.sql b/migrations/seed.sql new file mode 100644 index 0000000..8e98512 --- /dev/null +++ b/migrations/seed.sql @@ -0,0 +1,215 @@ +INSERT INTO public.daftar_kategori_operasi +(id, status, date_created, date_updated, "Kategori") +VALUES +(1,'published','2026-01-23 08:22:12.314894',NULL,'1.1 Bedah Umum'), +(2,'published','2026-01-23 08:22:12.314894',NULL,'1.2 Bedah Syaraf'), 
+(3,'published','2026-01-23 08:22:12.314894',NULL,'1.3 Orthopedi'), +(4,'published','2026-01-23 08:22:12.314894',NULL,'1.4 Obgyn'), +(5,'published','2026-01-23 08:22:12.314894',NULL,'1.5 Bedah Plastik'); + +INSERT INTO public.daftar_ksm (id, "Nama_ksm") +VALUES +(1, 'KSM Bedah'), +(2, 'KSM Anestesi'), +(3, 'KSM Penyakit Dalam'); + +INSERT INTO public.daftar_spesialis +(id, "Kode", "Spesialis", "Id_group_location_simgos") +VALUES +(1, 'SPB', 'Spesialis Bedah', NULL), +(2, 'SPOT', 'Spesialis Orthopedi', NULL), +(3, 'SPM', 'Spesialis Mata', NULL); + +INSERT INTO public.daftar_subspesialis +(id, "Kode", "Subspesialis", "FK_daftar_spesialis_ID") +VALUES +(1, 'SUB-DIG', 'Bedah Digestif', 1), +(2, 'SUB-ONK', 'Bedah Onkologi', 1), +(3, 'SUB-SPINE', 'Tulang Belakang', 2), +(4, 'SUB-HIP', 'Panggul & Lutut', 2), +(5, 'SUB-RET', 'Retina', 3); + +INSERT INTO public.data_diagnosa_pasien_operasi +( + id, + user_created, + date_created, + user_updated, + date_updated, + "Kode_diagnosa", + "Diagnosa", + "Keterangan", + "FK_pasien_operasi_diagnosa_pasien_operasi_ID", + "Jenis_diagnosa" +) +VALUES +('8a1cdcbb-b5a3-4766-acaf-14c079768852', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-10-98', 'Hernia Inguinalis', NULL, '54b80a14-e557-482a-9c02-cf43312fe2b0', 'Utama'), +('2fe173de-929d-48e5-8df6-b364b6fb82a9', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-10-95', 'Tumor Mammae', NULL, '441d2ffb-7167-431c-aed2-c28069c88e56', 'Utama'), +('3ef80141-c98e-452a-b13e-f42e57cb2ce4', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-10-6', 'Appendicitis Akut', NULL, '8d995e32-56da-493f-9fdf-f7adba543e68', 'Utama'), +('63ab8f85-ebf8-4db0-bbc4-486719ed9bdf', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-10-85', 'Tonsilitis', NULL, '4ad135b4-6f85-463d-86d5-4bd5347b5122', 'Utama'), +('72dda5ec-2531-49f5-9fb7-7f4b4d910c42', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-10-45', 'Tonsilitis', NULL, '9ca415b2-a22d-4886-a0ff-d23a16c493a5', 'Utama'), 
+('26d604e8-ed6b-4aad-9535-e3316ee1679e', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-10-66', 'Hemoroid', NULL, '5fe0d3b3-243c-4323-996e-856122dd1483', 'Utama'), +('2c1fada6-bc19-4c51-8ed6-d31d7e1300d9', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-10-76', 'Fraktur Femur', NULL, 'd98a42fe-69e6-4e85-83eb-2ed9bec79462', 'Utama'), +('19bcf454-b77f-44c1-b9d2-0315071d8e98', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-10-64', 'Appendicitis Akut', NULL, 'ddcef114-7ffc-4d5b-88d8-9229904ce2f3', 'Utama'), +('299cbcfa-a052-498c-a74d-fd95e2f346b6', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-10-75', 'Hemoroid', NULL, '46f428fc-03cf-4977-ab56-382132c17c66', 'Utama'), +('925b88c4-0f0b-4e57-a027-a5414ba64828', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-10-23', 'Hernia Inguinalis', NULL, '2f34b2f0-79ef-49de-b92b-e7b3b181a513', 'Utama'), +('0549368c-ce8d-422c-91e1-4aebe5d2b630', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-10-65', 'Caesar Section', NULL, '0c244c0f-4f04-41cd-958d-c53f1dbfaa8d', 'Utama'), +('6f16f291-6629-4136-bc78-06c7b4470322', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-10-64', 'Tonsilitis', NULL, 'f11f320f-663d-412a-9bea-eb4f9c306b4c', 'Utama'), +('70c963ca-7806-49da-a127-03e57b6d335b', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-10-6', 'Hernia Inguinalis', NULL, 'a636d329-08a0-4487-8660-39820bafb845', 'Utama'), +('f8b78a00-ee3e-4113-b6c4-2d19e8a3acc4', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-10-56', 'Katarak', NULL, '023aa46b-f669-4be8-9917-bd61db3d014d', 'Utama'), +('b259bbfd-be7f-434f-bbd6-a05ea06e6619', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-10-74', 'Batu Empedu', NULL, '6cce5ca3-0565-437a-bdd7-8981a5198e47', 'Utama'), +('c6d2db45-b13c-46b3-a239-a9af27d1bab7', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-10-92', 'Fraktur Femur', NULL, 'b525e1e5-6e6d-4e75-8359-1d66765a7b83', 'Utama'); + +INSERT INTO 
public.data_pasien_operasi +( + id, + status, + sort, + user_created, + date_created, + user_updated, + date_updated, + "No_rekam_medis", + "No_KTP", + "Nama_pasien", + "Jenis_kelamin", + "Tanggal_lahir", + "Umur", + "Alamat", + "Tanggal_daftar", + "Kategori_operasi", + "Rencana_operasi", + "Keterangan", + "Tanggal_selesai_operasi", + "Status_operasi", + "Nomor", + "Old_kategori", + "Spesialis", + "Sub_spesialis", + "Keterangan_status_pasien", + "Nomor_spesialis", + "Nomor_sub_spesialis", + "Old_spesialis", + "Old_sub_spesialis" +) +VALUES +('54b80a14-e557-482a-9c02-cf43312fe2b0','waiting',NULL,NULL,'2026-01-23 08:22:48.003155+07',NULL,NULL,'RM-000001','3574160117775539','Agus Simanjuntak','Laki-laki','1998-02-16','22 Tahun','Jl. Gatot Subroto No.51','2026-01-23 08:22:48.003155',4,'Hemorrhoidectomy',NULL,NULL,'Terjadwal',1,NULL,2,3,NULL,2,3,NULL,NULL), +('441d2ffb-7167-431c-aed2-c28069c88e56','waiting',NULL,NULL,'2026-01-23 08:22:48.003155+07',NULL,NULL,'RM-000002','3572712812442323','Lina Setiawan','Laki-laki','1974-09-08','22 Tahun','Jl. Merdeka No.21','2026-01-23 08:22:48.003155',4,'Hemorrhoidectomy',NULL,NULL,'Terjadwal',2,NULL,1,1,NULL,1,1,NULL,NULL), +('8d995e32-56da-493f-9fdf-f7adba543e68','waiting',NULL,NULL,'2026-01-23 08:22:48.003155+07',NULL,NULL,'RM-000003','3573658249713122','Hana Rahmawati','Laki-laki','2018-07-01','27 Tahun','Jl. Sudirman No.19','2026-01-23 08:22:48.003155',3,'Mastectomy',NULL,NULL,'Terjadwal',3,NULL,1,2,NULL,1,2,NULL,NULL), +('4ad135b4-6f85-463d-86d5-4bd5347b5122','waiting',NULL,NULL,'2026-01-23 08:22:48.003155+07',NULL,NULL,'RM-000004','3575264211898563','Nina Hidayat','Laki-laki','2004-07-03','62 Tahun','Jl. Ahmad Yani No.29','2026-01-23 08:22:48.003155',4,'Tonsillectomy',NULL,NULL,'Terjadwal',4,NULL,3,5,NULL,3,5,NULL,NULL), +('9ca415b2-a22d-4886-a0ff-d23a16c493a5','waiting',NULL,NULL,'2026-01-23 08:22:48.003155+07',NULL,NULL,'RM-000005','3578971794776205','Yudi Susanti','Laki-laki','2005-05-21','53 Tahun','Jl. 
Gatot Subroto No.27','2026-01-23 08:22:48.003155',2,'Phacoemulsification',NULL,NULL,'Terjadwal',5,NULL,1,2,NULL,1,2,NULL,NULL), +('5fe0d3b3-243c-4323-996e-856122dd1483','waiting',NULL,NULL,'2026-01-23 08:22:48.003155+07',NULL,NULL,'RM-000006','3577087502020920','Oscar Kusuma','Laki-laki','2023-04-20','26 Tahun','Jl. Gatot Subroto No.85','2026-01-23 08:22:48.003155',2,'Herniotomy',NULL,NULL,'Terjadwal',6,NULL,2,4,NULL,2,4,NULL,NULL), +('d98a42fe-69e6-4e85-83eb-2ed9bec79462','waiting',NULL,NULL,'2026-01-23 08:22:48.003155+07',NULL,NULL,'RM-000007','3574004915616037','Oscar Pratama','Laki-laki','1985-05-23','65 Tahun','Jl. Diponegoro No.99','2026-01-23 08:22:48.003155',3,'ORIF',NULL,NULL,'Terjadwal',7,NULL,2,4,NULL,2,4,NULL,NULL), +('ddcef114-7ffc-4d5b-88d8-9229904ce2f3','waiting',NULL,NULL,'2026-01-23 08:22:48.003155+07',NULL,NULL,'RM-000008','3578978139931591','Oscar Lestari','Laki-laki','1971-11-30','62 Tahun','Jl. Gatot Subroto No.28','2026-01-23 08:22:48.003155',2,'Phacoemulsification',NULL,NULL,'Terjadwal',8,NULL,3,5,NULL,3,5,NULL,NULL), +('46f428fc-03cf-4977-ab56-382132c17c66','waiting',NULL,NULL,'2026-01-23 08:22:48.003155+07',NULL,NULL,'RM-000009','3575019167672450','Siti Wijaya','Laki-laki','2020-06-06','19 Tahun','Jl. Diponegoro No.17','2026-01-23 08:22:48.003155',3,'Sectio Caesarea',NULL,NULL,'Terjadwal',9,NULL,2,4,NULL,2,4,NULL,NULL), +('2f34b2f0-79ef-49de-b92b-e7b3b181a513','waiting',NULL,NULL,'2026-01-23 08:22:48.003155+07',NULL,NULL,'RM-000010','3575876121081869','Nina Santoso','Laki-laki','2000-03-11','36 Tahun','Jl. Diponegoro No.90','2026-01-23 08:22:48.003155',2,'Herniotomy',NULL,NULL,'Terjadwal',10,NULL,1,2,NULL,1,2,NULL,NULL), +('0c244c0f-4f04-41cd-958d-c53f1dbfaa8d','waiting',NULL,NULL,'2026-01-23 08:22:48.003155+07',NULL,NULL,'RM-000011','3571621494638527','Oscar Simanjuntak','Laki-laki','2013-12-11','24 Tahun','Jl. 
Gatot Subroto No.57','2026-01-23 08:22:48.003155',4,'ORIF',NULL,NULL,'Terjadwal',11,NULL,2,3,NULL,2,3,NULL,NULL); + +INSERT INTO public.data_pasien_operasi_data_pegawai +(id, data_pasien_operasi_id, data_pegawai_id) +VALUES +(1, '54b80a14-e557-482a-9c02-cf43312fe2b0', 'b4c116a4-6201-4e00-9b55-2bfe77ac262f'), +(2, '441d2ffb-7167-431c-aed2-c28069c88e56', 'a6b02554-ab33-4e9d-916c-75e23f0bbe80'), +(3, '8d995e32-56da-493f-9fdf-f7adba543e68', '7b861cf5-ffe7-4a42-9323-104bbf257aa1'), +(4, '4ad135b4-6f85-463d-86d5-4bd5347b5122', '41f13636-53e0-48de-9585-a80e8194e846'), +(5, '9ca415b2-a22d-4886-a0ff-d23a16c493a5', '7b861cf5-ffe7-4a42-9323-104bbf257aa1'), +(6, '5fe0d3b3-243c-4323-996e-856122dd1483', '7b861cf5-ffe7-4a42-9323-104bbf257aa1'), +(7, 'd98a42fe-69e6-4e85-83eb-2ed9bec79462', 'b4c116a4-6201-4e00-9b55-2bfe77ac262f'), +(8, 'ddcef114-7ffc-4d5b-88d8-9229904ce2f3', 'a6b02554-ab33-4e9d-916c-75e23f0bbe80'), +(9, '46f428fc-03cf-4977-ab56-382132c17c66', 'b4c116a4-6201-4e00-9b55-2bfe77ac262f'), +(10, '2f34b2f0-79ef-49de-b92b-e7b3b181a513', 'a6b02554-ab33-4e9d-916c-75e23f0bbe80'), +(11, '0c244c0f-4f04-41cd-958d-c53f1dbfaa8d', 'a6b02554-ab33-4e9d-916c-75e23f0bbe80'); + +INSERT INTO public.data_pegawai +( + id, + status, + sort, + user_created, + date_created, + user_updated, + date_updated, + "Foto_pegawai", + "Kartu_pegawai", + "KTP", + "Nomor_kartu_pegawai", + "NIP", + "NIP_lama", + "Jabatan", + "Golongan", + "Rumpun", + "KSM", + "Tanggal_masuk", + "Tanggal_pensiun", + "TMT_CPNS", + "TMT_PNS", + "Satuan_kerja", + "FK_spt_pegawai_id", + "Nama_depan", + "Nama_belakang", + "Username", + "User_email", + "Id_user", + "Practicioner", + "HFIS_code", + "HFIS_display", + "Code_dpjp", + "KDDOKTER", + "Practicioner_detail", + "Kode_satusehat", + "No_whatsapp", + "Posisi_ruang", + "Substansi", + "NDE", + "Subspesialis", + "Status_pegawai", + "File_status", + "Ketarangan", + "Kode_DPJP" +) +VALUES +('7b861cf5-ffe7-4a42-9323-104bbf257aa1','draft',NULL,NULL,'2026-01-23 
08:20:15.406222+07',NULL,NULL,NULL,NULL,NULL,NULL,'198001012010011001',NULL,NULL,NULL,NULL,1,NULL,NULL,NULL,NULL,NULL,NULL,'Dr. Andi','Setiawan',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,1,1,NULL,NULL,NULL), +('b4c116a4-6201-4e00-9b55-2bfe77ac262f','draft',NULL,NULL,'2026-01-23 08:20:15.406222+07',NULL,NULL,NULL,NULL,NULL,NULL,'198205052010011002',NULL,NULL,NULL,NULL,1,NULL,NULL,NULL,NULL,NULL,NULL,'Dr. Budi','Santoso',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,2,1,NULL,NULL,NULL), +('a6b02554-ab33-4e9d-916c-75e23f0bbe80','draft',NULL,NULL,'2026-01-23 08:20:15.406222+07',NULL,NULL,NULL,NULL,NULL,NULL,'198503032012012003',NULL,NULL,NULL,NULL,2,NULL,NULL,NULL,NULL,NULL,NULL,'Dr. Citra','Lestari',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,3,1,NULL,NULL,NULL), +('41f13636-53e0-48de-9585-a80e8194e846','draft',NULL,NULL,'2026-01-23 08:20:15.406222+07',NULL,NULL,NULL,NULL,NULL,NULL,'198807072015012004',NULL,NULL,NULL,NULL,2,NULL,NULL,NULL,NULL,NULL,NULL,'Dr. Dewi','Anggraini',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,4,1,NULL,NULL,NULL), +('1338b3ff-5efa-4fe0-b070-485c818b7551','draft',NULL,NULL,'2026-01-23 08:20:15.406222+07',NULL,NULL,NULL,NULL,NULL,NULL,'199009092018011005',NULL,NULL,NULL,NULL,3,NULL,NULL,NULL,NULL,NULL,NULL,'Dr. 
Eko','Prasetio',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,5,1,NULL,NULL,NULL); + +INSERT INTO public.data_telepon_pasien_operasi +(id, "Nomor_telepon", "FK_pasien_operasi_telepon_pasien_operasi_ID") +VALUES +('0df2c6e1-5ea2-4b47-a38d-acbc4e87acf3', '08594929242', '54b80a14-e557-482a-9c02-cf43312fe2b0'), +('87f8e21f-6332-4c65-a681-356acaaee379', '08840004491', '441d2ffb-7167-431c-aed2-c28069c88e56'), +('a42dabda-3a64-4bda-a3d2-619944ce93c0', '08644234690', '8d995e32-56da-493f-9fdf-f7adba543e68'), +('b4df90e4-0048-4944-8852-20bfcc9192ce', '08671298031', '4ad135b4-6f85-463d-86d5-4bd5347b5122'), +('c0f97b1e-2047-4b18-bb74-afdd32fb82bf', '08239962618', '9ca415b2-a22d-4886-a0ff-d23a16c493a5'), +('127b6583-904c-44cc-aae0-4f3877c95349', '08112641507', '5fe0d3b3-243c-4323-996e-856122dd1483'), +('0d71b0b8-7aa9-4542-8fd0-4e06c42b5379', '08836247929', 'd98a42fe-69e6-4e85-83eb-2ed9bec79462'), +('0390102d-95de-489b-a3f5-7b72fb8c4c7a', '08777144416', 'ddcef114-7ffc-4d5b-88d8-9229904ce2f3'), +('2354bf8d-f349-47c0-8bf6-e5ffcd07aa3a', '08138458824', '46f428fc-03cf-4977-ab56-382132c17c66'), +('541cc463-7104-45bd-b3da-27db82cd6722', '08695133731', '2f34b2f0-79ef-49de-b92b-e7b3b181a513'), +('8519adf3-b728-4ca2-a7ae-fada50978a6d', '08515321902', '0c244c0f-4f04-41cd-958d-c53f1dbfaa8d'); + +INSERT INTO public.data_tindakan_pasien_operasi +( + id, + user_created, + date_created, + user_updated, + date_updated, + "Kode_tindakan", + "Tindakan", + "Tindakan_tambahan", + "FK_pasien_operasi_tindakan_pasien_operasi_ID" +) +VALUES +('e1c9cb34-b65d-4eda-9b5d-acc18375e095', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-9-12', 'Hemorrhoidectomy', NULL, '54b80a14-e557-482a-9c02-cf43312fe2b0'), +('14b24c41-440a-4c7e-8ca7-6578777c51eb', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-9-28', 'Hemorrhoidectomy', NULL, '441d2ffb-7167-431c-aed2-c28069c88e56'), +('441455cc-9e97-4b34-82eb-076f8e9dbcbc', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 
'ICD-9-5', 'Mastectomy', NULL, '8d995e32-56da-493f-9fdf-f7adba543e68'), +('e70b1de6-19dc-4e96-9d25-016346b460d2', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-9-93', 'Tonsillectomy', NULL, '4ad135b4-6f85-463d-86d5-4bd5347b5122'), +('afb5a9a6-2d3f-48f0-b3e5-110db806a77f', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-9-84', 'Phacoemulsification', NULL, '9ca415b2-a22d-4886-a0ff-d23a16c493a5'), +('d70d31e4-add4-4bd9-91b2-9e349f117379', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-9-58', 'Herniotomy', NULL, '5fe0d3b3-243c-4323-996e-856122dd1483'), +('473c3c5a-00f0-499e-95e5-aa622e160eaf', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-9-42', 'ORIF', NULL, 'd98a42fe-69e6-4e85-83eb-2ed9bec79462'), +('2e666e5a-b10d-4077-b5a1-3d6a6942a11c', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-9-80', 'Phacoemulsification', NULL, 'ddcef114-7ffc-4d5b-88d8-9229904ce2f3'), +('62a5b0ec-8a96-4111-a562-1fbea465b3fa', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-9-93', 'Sectio Caesarea', NULL, '46f428fc-03cf-4977-ab56-382132c17c66'), +('6460fe0b-85d6-4895-84b8-3a71fb889138', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-9-12', 'Herniotomy', NULL, '2f34b2f0-79ef-49de-b92b-e7b3b181a513'), +('e072a775-13bb-4a0d-b946-8b64c463020d', NULL, '2026-01-23 08:22:48.003155+07', NULL, NULL, 'ICD-9-25', 'ORIF', NULL, '0c244c0f-4f04-41cd-958d-c53f1dbfaa8d');