diff --git a/Dockerfile b/Dockerfile index 4b4c9ce..ed06b59 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,8 +12,7 @@ RUN go build -o main cmd/api/main.go FROM alpine:3.20.1 AS prod WORKDIR /app COPY --from=build /app/main /app/main -COPY --from=build /app/.env /app/.env -EXPOSE 8080 +EXPOSE 8010 CMD ["./main"] diff --git a/docker-compose.yml b/docker-compose.yml index ad18c36..2710f18 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -15,15 +15,24 @@ services: GIN_MODE: release JWT_SECRET: goRSSA@jay@2025 # Default Database Configuration (PostgreSQL) - DB_CONNECTION: postgres - DB_USERNAME: stim - DB_PASSWORD: stim*RS54 - DB_HOST: 10.10.123.165 - DB_DATABASE: satu_db - DB_PORT: 5432 - DB_SSLMODE: disable + # DB_CONNECTION: postgres + # DB_USERNAME: stim + # DB_PASSWORD: stim*RS54 + # DB_HOST: 10.10.123.165 + # DB_DATABASE: satu_db + # DB_PORT: 5432 + # DB_SSLMODE: disable - # satudata Database Configuration (PostgreSQL) + # SIMRS Database Configuration (PostgreSQL) + POSTGRES_SIMRS_CONNECTION: postgres + POSTGRES_SIMRS_USERNAME: brawijaya + POSTGRES_SIMRS_PASSWORD: ub*2025 + POSTGRES_SIMRS_HOST: 10.10.123.238 + POSTGRES_SIMRS_DATABASE: simrs + POSTGRES_SIMRS_PORT: 5432 + POSTGRES_SIMRS_SSLMODE: disable + + # SATUDATA Database Configuration (PostgreSQL) POSTGRES_SATUDATA_CONNECTION: postgres POSTGRES_SATUDATA_USERNAME: stim POSTGRES_SATUDATA_PASSWORD: stim*RS54 @@ -33,14 +42,14 @@ services: POSTGRES_SATUDATA_SSLMODE: disable # Mongo Database - MONGODB_MONGOHL7_CONNECTION: mongodb - MONGODB_MONGOHL7_HOST: 10.10.123.206 - MONGODB_MONGOHL7_PORT: 27017 - MONGODB_MONGOHL7_USER: admin - MONGODB_MONGOHL7_PASS: stim*rs54 - MONGODB_MONGOHL7_MASTER: master - MONGODB_MONGOHL7_LOCAL: local - MONGODB_MONGOHL7_SSLMODE: disable + # MONGODB_MONGOHL7_CONNECTION: mongodb + # MONGODB_MONGOHL7_HOST: 10.10.123.206 + # MONGODB_MONGOHL7_PORT: 27017 + # MONGODB_MONGOHL7_USER: admin + # MONGODB_MONGOHL7_PASS: stim*rs54 + # MONGODB_MONGOHL7_MASTER: master + # MONGODB_MONGOHL7_LOCAL: local + # MONGODB_MONGOHL7_SSLMODE: disable # MYSQL Antrian Database # MYSQL_ANTRIAN_CONNECTION: mysql @@ -52,21 +61,21 @@ services: # MYSQL_ANTRIAN_SSLMODE: disable # MYSQL Medical Database - MYSQL_MEDICAL_CONNECTION: mysql - MYSQL_MEDICAL_HOST: 10.10.123.163 - MYSQL_MEDICAL_USERNAME: meninjardev - MYSQL_MEDICAL_PASSWORD: meninjar*RS54 - MYSQL_MEDICAL_DATABASE: healtcare_database - MYSQL_MEDICAL_PORT: 3306 - MYSQL_MEDICAL_SSLMODE: disable + # MYSQL_MEDICAL_CONNECTION: mysql + # MYSQL_MEDICAL_HOST: 10.10.123.163 + # MYSQL_MEDICAL_USERNAME: meninjardev + # MYSQL_MEDICAL_PASSWORD: meninjar*RS54 + # MYSQL_MEDICAL_DATABASE: healtcare_database + # MYSQL_MEDICAL_PORT: 3306 + # MYSQL_MEDICAL_SSLMODE: disable - # Keycloak Configuration + # KEYCLOAK Configuration KEYCLOAK_ISSUER: https://auth.rssa.top/realms/sandbox KEYCLOAK_AUDIENCE: nuxtsim-pendaftaran KEYCLOAK_JWKS_URL: https://auth.rssa.top/realms/sandbox/protocol/openid-connect/certs - KEYCLOAK_ENABLED: true + KEYCLOAK_ENABLED: "true" - # Auth Configuration + # AUTH Configuration AUTH_TYPE: hybrid AUTH_STATIC_TOKENS: token5,token6,token7,token8 AUTH_FALLBACK_TO: jwt @@ -88,21 +97,21 @@ services: BRIDGING_SATUSEHAT_KFA_URL: https://api-satusehat.kemkes.go.id/kfa-v2 # Swagger Configuration - SWAGGER_TITLE: My Custom API Service + SWAGGER_TITLE: General API Service SWAGGER_DESCRIPTION: This is a custom API service for managing various resources SWAGGER_VERSION: 2.0.0 SWAGGER_CONTACT_NAME: Support Team - SWAGGER_HOST: api.mycompany.com:8080 + SWAGGER_HOST: meninjar.dev.rssa.id:8010 
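Note on the environment block above: the image no longer bakes in a `.env` file, so all configuration now arrives through the Compose `environment` map, and `KEYCLOAK_ENABLED` is quoted as `"true"` because YAML would otherwise type the bare token as a boolean, which Compose rejects for environment values (they must be strings). The loader presumably parses it back with the `getEnvAsBool` helper that later hunks of this diff call; a minimal sketch of such a helper, assuming the usual `strconv.ParseBool` semantics:

```go
package config

import (
	"os"
	"strconv"
)

// Assumed shape of the getEnvAsBool helper referenced later in this diff;
// the real body is not shown here.
func getEnvAsBool(key string, fallback bool) bool {
	if raw, ok := os.LookupEnv(key); ok {
		if v, err := strconv.ParseBool(raw); err == nil {
			return v // ParseBool accepts "true", "1", "t", "false", "0", "f"
		}
	}
	return fallback
}
```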
SWAGGER_BASE_PATH: /api/v2 SWAGGER_SCHEMES: https # API Configuration - API_TITLE: API Service UJICOBA + API_TITLE: API Service General API_DESCRIPTION: Dokumentation SWAGGER API_VERSION: 3.0.0 # Security - SECURITY_TRUSTED_ORIGINS: http://meninjar.dev.rssa.id:8050,https://yourdomain.com + SECURITY_TRUSTED_ORIGINS: http://meninjar.dev.rssa.id:8010,https://yourdomain.com SECURITY_MAX_INPUT_LENGTH: 500 RATE_LIMIT_REQUESTS_PER_MINUTE: 120 REDIS_HOST: localhost diff --git a/docs/docs.go b/docs/docs.go index 768acfe..6060163 100644 --- a/docs/docs.go +++ b/docs/docs.go @@ -1,5 +1,4 @@ -// Code generated by swaggo/swag. DO NOT EDIT. - +// Package docs Code generated by swaggo/swag. DO NOT EDIT package docs import "github.com/swaggo/swag" @@ -45,7 +44,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/models.LoginRequest" + "$ref": "#/definitions/api-service_internal_models_auth.LoginRequest" } } ], @@ -53,7 +52,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.TokenResponse" + "$ref": "#/definitions/api-service_internal_models_auth.TokenResponse" } }, "400": { @@ -96,7 +95,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.User" + "$ref": "#/definitions/api-service_internal_models_auth.User" } }, "401": { @@ -142,7 +141,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.TokenResponse" + "$ref": "#/definitions/api-service_internal_models_auth.TokenResponse" } }, "400": { @@ -241,25 +240,25 @@ const docTemplate = `{ "200": { "description": "Success response", "schema": { - "$ref": "#/definitions/retribusi.RetribusiGetByIDResponse" + "$ref": "#/definitions/api-service_internal_models_retribusi.RetribusiGetByIDResponse" } }, "400": { "description": "Invalid ID format", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } }, "404": { "description": "Retribusi not found", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } }, "500": { "description": "Internal server error", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } } } @@ -290,7 +289,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/retribusi.RetribusiUpdateRequest" + "$ref": "#/definitions/api-service_internal_models_retribusi.RetribusiUpdateRequest" } } ], @@ -298,25 +297,25 @@ const docTemplate = `{ "200": { "description": "Retribusi updated successfully", "schema": { - "$ref": "#/definitions/retribusi.RetribusiUpdateResponse" + "$ref": "#/definitions/api-service_internal_models_retribusi.RetribusiUpdateResponse" } }, "400": { "description": "Bad request or validation error", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } }, "404": { "description": "Retribusi not found", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } }, "500": { "description": "Internal server error", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } } } @@ -346,25 +345,25 @@ const docTemplate = `{ "200": { "description": "Retribusi deleted successfully", "schema": 
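From this hunk onward, every `$ref` moves from short definition names (`models.X`, `retribusi.X`) to fully qualified ones (`api-service_internal_models_*.X`). swag emits path-qualified names like these when model packages share a base name (here, several `models` packages under `internal/`), so the renames look like a regeneration artifact rather than hand edits. A hypothetical regeneration directive, with flags that would need checking against the project's actual setup:

```go
//go:generate swag init -g cmd/api/main.go -o docs
```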
{ - "$ref": "#/definitions/retribusi.RetribusiDeleteResponse" + "$ref": "#/definitions/api-service_internal_models_retribusi.RetribusiDeleteResponse" } }, "400": { "description": "Invalid ID format", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } }, "404": { "description": "Retribusi not found", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } }, "500": { "description": "Internal server error", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } } } @@ -434,19 +433,19 @@ const docTemplate = `{ "200": { "description": "Success response", "schema": { - "$ref": "#/definitions/retribusi.RetribusiGetResponse" + "$ref": "#/definitions/api-service_internal_models_retribusi.RetribusiGetResponse" } }, "400": { "description": "Bad request", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } }, "500": { "description": "Internal server error", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } } } @@ -470,7 +469,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/retribusi.RetribusiCreateRequest" + "$ref": "#/definitions/api-service_internal_models_retribusi.RetribusiCreateRequest" } } ], @@ -478,19 +477,19 @@ const docTemplate = `{ "201": { "description": "Retribusi created successfully", "schema": { - "$ref": "#/definitions/retribusi.RetribusiCreateResponse" + "$ref": "#/definitions/api-service_internal_models_retribusi.RetribusiCreateResponse" } }, "400": { "description": "Bad request or validation error", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } }, "500": { "description": "Internal server error", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } } } @@ -547,19 +546,19 @@ const docTemplate = `{ "200": { "description": "Success response", "schema": { - "$ref": "#/definitions/retribusi.RetribusiGetResponse" + "$ref": "#/definitions/api-service_internal_models_retribusi.RetribusiGetResponse" } }, "400": { "description": "Bad request", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } }, "500": { "description": "Internal server error", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } } } @@ -590,13 +589,13 @@ const docTemplate = `{ "200": { "description": "Statistics data", "schema": { - "$ref": "#/definitions/models.AggregateData" + "$ref": "#/definitions/api-service_internal_models.AggregateData" } }, "500": { "description": "Internal server error", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } } } @@ -604,7 +603,7 @@ const docTemplate = `{ }, "/api/v1/token/generate": { "post": { - "description": "Generate a JWT token for a user", + "description": "Generate a JWT token for testing purposes", "consumes": [ "application/json" ], @@ -617,12 +616,13 @@ const docTemplate = `{ "summary": "Generate JWT token", "parameters": [ { - "description": "User credentials", + 
"description": "Token generation data", "name": "token", "in": "body", "required": true, "schema": { - "$ref": "#/definitions/models.LoginRequest" + "type": "object", + "additionalProperties": true } } ], @@ -630,7 +630,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.TokenResponse" + "$ref": "#/definitions/api-service_internal_models_auth.TokenResponse" } }, "400": { @@ -641,22 +641,13 @@ const docTemplate = `{ "type": "string" } } - }, - "401": { - "description": "Unauthorized", - "schema": { - "type": "object", - "additionalProperties": { - "type": "string" - } - } } } } }, "/api/v1/token/generate-direct": { "post": { - "description": "Generate a JWT token directly without password verification (for testing)", + "description": "Generate a JWT token directly with provided data", "consumes": [ "application/json" ], @@ -666,18 +657,16 @@ const docTemplate = `{ "tags": [ "Token" ], - "summary": "Generate token directly", + "summary": "Generate JWT token directly", "parameters": [ { - "description": "User info", - "name": "user", + "description": "Token generation data", + "name": "token", "in": "body", "required": true, "schema": { "type": "object", - "additionalProperties": { - "type": "string" - } + "additionalProperties": true } } ], @@ -685,7 +674,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.TokenResponse" + "$ref": "#/definitions/api-service_internal_models_auth.TokenResponse" } }, "400": { @@ -702,7 +691,7 @@ const docTemplate = `{ } }, "definitions": { - "models.AggregateData": { + "api-service_internal_models.AggregateData": { "type": "object", "properties": { "by_dinas": { @@ -743,7 +732,7 @@ const docTemplate = `{ } } }, - "models.ErrorResponse": { + "api-service_internal_models.ErrorResponse": { "type": "object", "properties": { "code": { @@ -760,22 +749,7 @@ const docTemplate = `{ } } }, - "models.LoginRequest": { - "type": "object", - "required": [ - "password", - "username" - ], - "properties": { - "password": { - "type": "string" - }, - "username": { - "type": "string" - } - } - }, - "models.MetaResponse": { + "api-service_internal_models.MetaResponse": { "type": "object", "properties": { "current_page": { @@ -801,7 +775,7 @@ const docTemplate = `{ } } }, - "models.NullableInt32": { + "api-service_internal_models.NullableInt32": { "type": "object", "properties": { "int32": { @@ -812,7 +786,7 @@ const docTemplate = `{ } } }, - "models.NullableString": { + "api-service_internal_models.NullableString": { "type": "object", "properties": { "string": { @@ -823,7 +797,7 @@ const docTemplate = `{ } } }, - "models.NullableTime": { + "api-service_internal_models.NullableTime": { "type": "object", "properties": { "time": { @@ -834,21 +808,41 @@ const docTemplate = `{ } } }, - "models.TokenResponse": { + "api-service_internal_models_auth.LoginRequest": { + "type": "object", + "required": [ + "password", + "username" + ], + "properties": { + "password": { + "type": "string" + }, + "username": { + "type": "string" + } + } + }, + "api-service_internal_models_auth.TokenResponse": { "type": "object", "properties": { "access_token": { "type": "string" }, "expires_in": { + "description": "Durasi dalam detik", "type": "integer" }, + "refresh_token": { + "type": "string" + }, "token_type": { + "description": "Biasanya \"Bearer\"", "type": "string" } } }, - "models.User": { + "api-service_internal_models_auth.User": { "type": "object", "properties": { "email": { @@ -865,75 +859,75 @@ const 
docTemplate = `{ } } }, - "retribusi.Retribusi": { + "api-service_internal_models_retribusi.Retribusi": { "type": "object", "properties": { "date_created": { - "$ref": "#/definitions/models.NullableTime" + "$ref": "#/definitions/api-service_internal_models.NullableTime" }, "date_updated": { - "$ref": "#/definitions/models.NullableTime" + "$ref": "#/definitions/api-service_internal_models.NullableTime" }, "dinas": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "id": { "type": "string" }, "jenis": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "kelompok_obyek": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "kode_tarif": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "pelayanan": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "rekening_denda": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "rekening_pokok": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "satuan": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "satuan_overtime": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "sort": { - "$ref": "#/definitions/models.NullableInt32" + "$ref": "#/definitions/api-service_internal_models.NullableInt32" }, "status": { "type": "string" }, "tarif": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "tarif_overtime": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "uraian_1": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "uraian_2": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "uraian_3": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "user_created": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "user_updated": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" } } }, - "retribusi.RetribusiCreateRequest": { + "api-service_internal_models_retribusi.RetribusiCreateRequest": { "type": "object", "required": [ "status" @@ -1009,18 +1003,18 @@ const docTemplate = `{ } } }, - "retribusi.RetribusiCreateResponse": { + "api-service_internal_models_retribusi.RetribusiCreateResponse": { "type": "object", "properties": { "data": { - "$ref": "#/definitions/retribusi.Retribusi" + "$ref": "#/definitions/api-service_internal_models_retribusi.Retribusi" }, "message": { "type": "string" } } }, - "retribusi.RetribusiDeleteResponse": { + "api-service_internal_models_retribusi.RetribusiDeleteResponse": { "type": "object", "properties": { "id": { @@ -1031,38 +1025,38 @@ const docTemplate = `{ } } }, - "retribusi.RetribusiGetByIDResponse": { + 
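Every nullable column on `Retribusi` is documented as a two-field wrapper (a value plus `valid`), i.e. the `database/sql` null types serialized as-is. A sketch of the string variant matching the `NullableString` definition above (the real type lives under `internal/models` and is not part of this diff):

```go
package models

// NullableString mirrors the Swagger definition with "string" and "valid"
// properties; it has the same shape as database/sql's sql.NullString.
type NullableString struct {
	String string `json:"string"`
	Valid  bool   `json:"valid"` // false means the column was NULL
}
```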
"api-service_internal_models_retribusi.RetribusiGetByIDResponse": { "type": "object", "properties": { "data": { - "$ref": "#/definitions/retribusi.Retribusi" + "$ref": "#/definitions/api-service_internal_models_retribusi.Retribusi" }, "message": { "type": "string" } } }, - "retribusi.RetribusiGetResponse": { + "api-service_internal_models_retribusi.RetribusiGetResponse": { "type": "object", "properties": { "data": { "type": "array", "items": { - "$ref": "#/definitions/retribusi.Retribusi" + "$ref": "#/definitions/api-service_internal_models_retribusi.Retribusi" } }, "message": { "type": "string" }, "meta": { - "$ref": "#/definitions/models.MetaResponse" + "$ref": "#/definitions/api-service_internal_models.MetaResponse" }, "summary": { - "$ref": "#/definitions/models.AggregateData" + "$ref": "#/definitions/api-service_internal_models.AggregateData" } } }, - "retribusi.RetribusiUpdateRequest": { + "api-service_internal_models_retribusi.RetribusiUpdateRequest": { "type": "object", "required": [ "status" @@ -1138,11 +1132,11 @@ const docTemplate = `{ } } }, - "retribusi.RetribusiUpdateResponse": { + "api-service_internal_models_retribusi.RetribusiUpdateResponse": { "type": "object", "properties": { "data": { - "$ref": "#/definitions/retribusi.Retribusi" + "$ref": "#/definitions/api-service_internal_models_retribusi.Retribusi" }, "message": { "type": "string" @@ -1162,6 +1156,8 @@ var SwaggerInfo = &swag.Spec{ Description: "A comprehensive Go API service with Swagger documentation", InfoInstanceName: "swagger", SwaggerTemplate: docTemplate, + LeftDelim: "{{", + RightDelim: "}}", } func init() { diff --git a/docs/swagger.json b/docs/swagger.json index ff74f9c..87661e0 100644 --- a/docs/swagger.json +++ b/docs/swagger.json @@ -42,7 +42,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/models.LoginRequest" + "$ref": "#/definitions/api-service_internal_models_auth.LoginRequest" } } ], @@ -50,7 +50,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.TokenResponse" + "$ref": "#/definitions/api-service_internal_models_auth.TokenResponse" } }, "400": { @@ -93,7 +93,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.User" + "$ref": "#/definitions/api-service_internal_models_auth.User" } }, "401": { @@ -139,7 +139,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.TokenResponse" + "$ref": "#/definitions/api-service_internal_models_auth.TokenResponse" } }, "400": { @@ -238,25 +238,25 @@ "200": { "description": "Success response", "schema": { - "$ref": "#/definitions/retribusi.RetribusiGetByIDResponse" + "$ref": "#/definitions/api-service_internal_models_retribusi.RetribusiGetByIDResponse" } }, "400": { "description": "Invalid ID format", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } }, "404": { "description": "Retribusi not found", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } }, "500": { "description": "Internal server error", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } } } @@ -287,7 +287,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/retribusi.RetribusiUpdateRequest" + "$ref": "#/definitions/api-service_internal_models_retribusi.RetribusiUpdateRequest" } } ], @@ -295,25 +295,25 @@ "200": { "description": "Retribusi updated 
successfully", "schema": { - "$ref": "#/definitions/retribusi.RetribusiUpdateResponse" + "$ref": "#/definitions/api-service_internal_models_retribusi.RetribusiUpdateResponse" } }, "400": { "description": "Bad request or validation error", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } }, "404": { "description": "Retribusi not found", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } }, "500": { "description": "Internal server error", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } } } @@ -343,25 +343,25 @@ "200": { "description": "Retribusi deleted successfully", "schema": { - "$ref": "#/definitions/retribusi.RetribusiDeleteResponse" + "$ref": "#/definitions/api-service_internal_models_retribusi.RetribusiDeleteResponse" } }, "400": { "description": "Invalid ID format", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } }, "404": { "description": "Retribusi not found", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } }, "500": { "description": "Internal server error", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } } } @@ -431,19 +431,19 @@ "200": { "description": "Success response", "schema": { - "$ref": "#/definitions/retribusi.RetribusiGetResponse" + "$ref": "#/definitions/api-service_internal_models_retribusi.RetribusiGetResponse" } }, "400": { "description": "Bad request", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } }, "500": { "description": "Internal server error", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } } } @@ -467,7 +467,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/retribusi.RetribusiCreateRequest" + "$ref": "#/definitions/api-service_internal_models_retribusi.RetribusiCreateRequest" } } ], @@ -475,19 +475,19 @@ "201": { "description": "Retribusi created successfully", "schema": { - "$ref": "#/definitions/retribusi.RetribusiCreateResponse" + "$ref": "#/definitions/api-service_internal_models_retribusi.RetribusiCreateResponse" } }, "400": { "description": "Bad request or validation error", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } }, "500": { "description": "Internal server error", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } } } @@ -544,19 +544,19 @@ "200": { "description": "Success response", "schema": { - "$ref": "#/definitions/retribusi.RetribusiGetResponse" + "$ref": "#/definitions/api-service_internal_models_retribusi.RetribusiGetResponse" } }, "400": { "description": "Bad request", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } }, "500": { "description": "Internal server error", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } } } @@ -587,13 +587,13 @@ "200": { "description": 
"Statistics data", "schema": { - "$ref": "#/definitions/models.AggregateData" + "$ref": "#/definitions/api-service_internal_models.AggregateData" } }, "500": { "description": "Internal server error", "schema": { - "$ref": "#/definitions/models.ErrorResponse" + "$ref": "#/definitions/api-service_internal_models.ErrorResponse" } } } @@ -601,7 +601,7 @@ }, "/api/v1/token/generate": { "post": { - "description": "Generate a JWT token for a user", + "description": "Generate a JWT token for testing purposes", "consumes": [ "application/json" ], @@ -614,12 +614,13 @@ "summary": "Generate JWT token", "parameters": [ { - "description": "User credentials", + "description": "Token generation data", "name": "token", "in": "body", "required": true, "schema": { - "$ref": "#/definitions/models.LoginRequest" + "type": "object", + "additionalProperties": true } } ], @@ -627,7 +628,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.TokenResponse" + "$ref": "#/definitions/api-service_internal_models_auth.TokenResponse" } }, "400": { @@ -638,22 +639,13 @@ "type": "string" } } - }, - "401": { - "description": "Unauthorized", - "schema": { - "type": "object", - "additionalProperties": { - "type": "string" - } - } } } } }, "/api/v1/token/generate-direct": { "post": { - "description": "Generate a JWT token directly without password verification (for testing)", + "description": "Generate a JWT token directly with provided data", "consumes": [ "application/json" ], @@ -663,18 +655,16 @@ "tags": [ "Token" ], - "summary": "Generate token directly", + "summary": "Generate JWT token directly", "parameters": [ { - "description": "User info", - "name": "user", + "description": "Token generation data", + "name": "token", "in": "body", "required": true, "schema": { "type": "object", - "additionalProperties": { - "type": "string" - } + "additionalProperties": true } } ], @@ -682,7 +672,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.TokenResponse" + "$ref": "#/definitions/api-service_internal_models_auth.TokenResponse" } }, "400": { @@ -699,7 +689,7 @@ } }, "definitions": { - "models.AggregateData": { + "api-service_internal_models.AggregateData": { "type": "object", "properties": { "by_dinas": { @@ -740,7 +730,7 @@ } } }, - "models.ErrorResponse": { + "api-service_internal_models.ErrorResponse": { "type": "object", "properties": { "code": { @@ -757,22 +747,7 @@ } } }, - "models.LoginRequest": { - "type": "object", - "required": [ - "password", - "username" - ], - "properties": { - "password": { - "type": "string" - }, - "username": { - "type": "string" - } - } - }, - "models.MetaResponse": { + "api-service_internal_models.MetaResponse": { "type": "object", "properties": { "current_page": { @@ -798,7 +773,7 @@ } } }, - "models.NullableInt32": { + "api-service_internal_models.NullableInt32": { "type": "object", "properties": { "int32": { @@ -809,7 +784,7 @@ } } }, - "models.NullableString": { + "api-service_internal_models.NullableString": { "type": "object", "properties": { "string": { @@ -820,7 +795,7 @@ } } }, - "models.NullableTime": { + "api-service_internal_models.NullableTime": { "type": "object", "properties": { "time": { @@ -831,21 +806,41 @@ } } }, - "models.TokenResponse": { + "api-service_internal_models_auth.LoginRequest": { + "type": "object", + "required": [ + "password", + "username" + ], + "properties": { + "password": { + "type": "string" + }, + "username": { + "type": "string" + } + } + }, + "api-service_internal_models_auth.TokenResponse": { 
"type": "object", "properties": { "access_token": { "type": "string" }, "expires_in": { + "description": "Durasi dalam detik", "type": "integer" }, + "refresh_token": { + "type": "string" + }, "token_type": { + "description": "Biasanya \"Bearer\"", "type": "string" } } }, - "models.User": { + "api-service_internal_models_auth.User": { "type": "object", "properties": { "email": { @@ -862,75 +857,75 @@ } } }, - "retribusi.Retribusi": { + "api-service_internal_models_retribusi.Retribusi": { "type": "object", "properties": { "date_created": { - "$ref": "#/definitions/models.NullableTime" + "$ref": "#/definitions/api-service_internal_models.NullableTime" }, "date_updated": { - "$ref": "#/definitions/models.NullableTime" + "$ref": "#/definitions/api-service_internal_models.NullableTime" }, "dinas": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "id": { "type": "string" }, "jenis": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "kelompok_obyek": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "kode_tarif": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "pelayanan": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "rekening_denda": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "rekening_pokok": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "satuan": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "satuan_overtime": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "sort": { - "$ref": "#/definitions/models.NullableInt32" + "$ref": "#/definitions/api-service_internal_models.NullableInt32" }, "status": { "type": "string" }, "tarif": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "tarif_overtime": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "uraian_1": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "uraian_2": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "uraian_3": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "user_created": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" }, "user_updated": { - "$ref": "#/definitions/models.NullableString" + "$ref": "#/definitions/api-service_internal_models.NullableString" } } }, - "retribusi.RetribusiCreateRequest": { + "api-service_internal_models_retribusi.RetribusiCreateRequest": { "type": "object", "required": [ "status" @@ -1006,18 +1001,18 @@ } } }, - "retribusi.RetribusiCreateResponse": { + "api-service_internal_models_retribusi.RetribusiCreateResponse": { "type": "object", "properties": { "data": { - "$ref": "#/definitions/retribusi.Retribusi" + "$ref": 
"#/definitions/api-service_internal_models_retribusi.Retribusi" }, "message": { "type": "string" } } }, - "retribusi.RetribusiDeleteResponse": { + "api-service_internal_models_retribusi.RetribusiDeleteResponse": { "type": "object", "properties": { "id": { @@ -1028,38 +1023,38 @@ } } }, - "retribusi.RetribusiGetByIDResponse": { + "api-service_internal_models_retribusi.RetribusiGetByIDResponse": { "type": "object", "properties": { "data": { - "$ref": "#/definitions/retribusi.Retribusi" + "$ref": "#/definitions/api-service_internal_models_retribusi.Retribusi" }, "message": { "type": "string" } } }, - "retribusi.RetribusiGetResponse": { + "api-service_internal_models_retribusi.RetribusiGetResponse": { "type": "object", "properties": { "data": { "type": "array", "items": { - "$ref": "#/definitions/retribusi.Retribusi" + "$ref": "#/definitions/api-service_internal_models_retribusi.Retribusi" } }, "message": { "type": "string" }, "meta": { - "$ref": "#/definitions/models.MetaResponse" + "$ref": "#/definitions/api-service_internal_models.MetaResponse" }, "summary": { - "$ref": "#/definitions/models.AggregateData" + "$ref": "#/definitions/api-service_internal_models.AggregateData" } } }, - "retribusi.RetribusiUpdateRequest": { + "api-service_internal_models_retribusi.RetribusiUpdateRequest": { "type": "object", "required": [ "status" @@ -1135,11 +1130,11 @@ } } }, - "retribusi.RetribusiUpdateResponse": { + "api-service_internal_models_retribusi.RetribusiUpdateResponse": { "type": "object", "properties": { "data": { - "$ref": "#/definitions/retribusi.Retribusi" + "$ref": "#/definitions/api-service_internal_models_retribusi.Retribusi" }, "message": { "type": "string" diff --git a/docs/swagger.yaml b/docs/swagger.yaml index 677284f..654bca0 100644 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -1,6 +1,6 @@ basePath: /api/v1 definitions: - models.AggregateData: + api-service_internal_models.AggregateData: properties: by_dinas: additionalProperties: @@ -27,7 +27,7 @@ definitions: updated_today: type: integer type: object - models.ErrorResponse: + api-service_internal_models.ErrorResponse: properties: code: type: integer @@ -38,17 +38,7 @@ definitions: timestamp: type: string type: object - models.LoginRequest: - properties: - password: - type: string - username: - type: string - required: - - password - - username - type: object - models.MetaResponse: + api-service_internal_models.MetaResponse: properties: current_page: type: integer @@ -65,37 +55,51 @@ definitions: total_pages: type: integer type: object - models.NullableInt32: + api-service_internal_models.NullableInt32: properties: int32: type: integer valid: type: boolean type: object - models.NullableString: + api-service_internal_models.NullableString: properties: string: type: string valid: type: boolean type: object - models.NullableTime: + api-service_internal_models.NullableTime: properties: time: type: string valid: type: boolean type: object - models.TokenResponse: + api-service_internal_models_auth.LoginRequest: + properties: + password: + type: string + username: + type: string + required: + - password + - username + type: object + api-service_internal_models_auth.TokenResponse: properties: access_token: type: string expires_in: + description: Durasi dalam detik type: integer + refresh_token: + type: string token_type: + description: Biasanya "Bearer" type: string type: object - models.User: + api-service_internal_models_auth.User: properties: email: type: string @@ -106,52 +110,52 @@ definitions: username: type: string type: object - 
retribusi.Retribusi: + api-service_internal_models_retribusi.Retribusi: properties: date_created: - $ref: '#/definitions/models.NullableTime' + $ref: '#/definitions/api-service_internal_models.NullableTime' date_updated: - $ref: '#/definitions/models.NullableTime' + $ref: '#/definitions/api-service_internal_models.NullableTime' dinas: - $ref: '#/definitions/models.NullableString' + $ref: '#/definitions/api-service_internal_models.NullableString' id: type: string jenis: - $ref: '#/definitions/models.NullableString' + $ref: '#/definitions/api-service_internal_models.NullableString' kelompok_obyek: - $ref: '#/definitions/models.NullableString' + $ref: '#/definitions/api-service_internal_models.NullableString' kode_tarif: - $ref: '#/definitions/models.NullableString' + $ref: '#/definitions/api-service_internal_models.NullableString' pelayanan: - $ref: '#/definitions/models.NullableString' + $ref: '#/definitions/api-service_internal_models.NullableString' rekening_denda: - $ref: '#/definitions/models.NullableString' + $ref: '#/definitions/api-service_internal_models.NullableString' rekening_pokok: - $ref: '#/definitions/models.NullableString' + $ref: '#/definitions/api-service_internal_models.NullableString' satuan: - $ref: '#/definitions/models.NullableString' + $ref: '#/definitions/api-service_internal_models.NullableString' satuan_overtime: - $ref: '#/definitions/models.NullableString' + $ref: '#/definitions/api-service_internal_models.NullableString' sort: - $ref: '#/definitions/models.NullableInt32' + $ref: '#/definitions/api-service_internal_models.NullableInt32' status: type: string tarif: - $ref: '#/definitions/models.NullableString' + $ref: '#/definitions/api-service_internal_models.NullableString' tarif_overtime: - $ref: '#/definitions/models.NullableString' + $ref: '#/definitions/api-service_internal_models.NullableString' uraian_1: - $ref: '#/definitions/models.NullableString' + $ref: '#/definitions/api-service_internal_models.NullableString' uraian_2: - $ref: '#/definitions/models.NullableString' + $ref: '#/definitions/api-service_internal_models.NullableString' uraian_3: - $ref: '#/definitions/models.NullableString' + $ref: '#/definitions/api-service_internal_models.NullableString' user_created: - $ref: '#/definitions/models.NullableString' + $ref: '#/definitions/api-service_internal_models.NullableString' user_updated: - $ref: '#/definitions/models.NullableString' + $ref: '#/definitions/api-service_internal_models.NullableString' type: object - retribusi.RetribusiCreateRequest: + api-service_internal_models_retribusi.RetribusiCreateRequest: properties: dinas: maxLength: 255 @@ -208,41 +212,41 @@ definitions: required: - status type: object - retribusi.RetribusiCreateResponse: + api-service_internal_models_retribusi.RetribusiCreateResponse: properties: data: - $ref: '#/definitions/retribusi.Retribusi' + $ref: '#/definitions/api-service_internal_models_retribusi.Retribusi' message: type: string type: object - retribusi.RetribusiDeleteResponse: + api-service_internal_models_retribusi.RetribusiDeleteResponse: properties: id: type: string message: type: string type: object - retribusi.RetribusiGetByIDResponse: + api-service_internal_models_retribusi.RetribusiGetByIDResponse: properties: data: - $ref: '#/definitions/retribusi.Retribusi' + $ref: '#/definitions/api-service_internal_models_retribusi.Retribusi' message: type: string type: object - retribusi.RetribusiGetResponse: + api-service_internal_models_retribusi.RetribusiGetResponse: properties: data: items: - $ref: 
'#/definitions/retribusi.Retribusi' + $ref: '#/definitions/api-service_internal_models_retribusi.Retribusi' type: array message: type: string meta: - $ref: '#/definitions/models.MetaResponse' + $ref: '#/definitions/api-service_internal_models.MetaResponse' summary: - $ref: '#/definitions/models.AggregateData' + $ref: '#/definitions/api-service_internal_models.AggregateData' type: object - retribusi.RetribusiUpdateRequest: + api-service_internal_models_retribusi.RetribusiUpdateRequest: properties: dinas: maxLength: 255 @@ -299,10 +303,10 @@ definitions: required: - status type: object - retribusi.RetribusiUpdateResponse: + api-service_internal_models_retribusi.RetribusiUpdateResponse: properties: data: - $ref: '#/definitions/retribusi.Retribusi' + $ref: '#/definitions/api-service_internal_models_retribusi.Retribusi' message: type: string type: object @@ -331,14 +335,14 @@ paths: name: login required: true schema: - $ref: '#/definitions/models.LoginRequest' + $ref: '#/definitions/api-service_internal_models_auth.LoginRequest' produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/models.TokenResponse' + $ref: '#/definitions/api-service_internal_models_auth.TokenResponse' "400": description: Bad request schema: @@ -363,7 +367,7 @@ paths: "200": description: OK schema: - $ref: '#/definitions/models.User' + $ref: '#/definitions/api-service_internal_models_auth.User' "401": description: Unauthorized schema: @@ -395,7 +399,7 @@ paths: "200": description: OK schema: - $ref: '#/definitions/models.TokenResponse' + $ref: '#/definitions/api-service_internal_models_auth.TokenResponse' "400": description: Bad request schema: @@ -460,19 +464,19 @@ paths: "200": description: Retribusi deleted successfully schema: - $ref: '#/definitions/retribusi.RetribusiDeleteResponse' + $ref: '#/definitions/api-service_internal_models_retribusi.RetribusiDeleteResponse' "400": description: Invalid ID format schema: - $ref: '#/definitions/models.ErrorResponse' + $ref: '#/definitions/api-service_internal_models.ErrorResponse' "404": description: Retribusi not found schema: - $ref: '#/definitions/models.ErrorResponse' + $ref: '#/definitions/api-service_internal_models.ErrorResponse' "500": description: Internal server error schema: - $ref: '#/definitions/models.ErrorResponse' + $ref: '#/definitions/api-service_internal_models.ErrorResponse' summary: Delete retribusi tags: - Retribusi @@ -492,19 +496,19 @@ paths: "200": description: Success response schema: - $ref: '#/definitions/retribusi.RetribusiGetByIDResponse' + $ref: '#/definitions/api-service_internal_models_retribusi.RetribusiGetByIDResponse' "400": description: Invalid ID format schema: - $ref: '#/definitions/models.ErrorResponse' + $ref: '#/definitions/api-service_internal_models.ErrorResponse' "404": description: Retribusi not found schema: - $ref: '#/definitions/models.ErrorResponse' + $ref: '#/definitions/api-service_internal_models.ErrorResponse' "500": description: Internal server error schema: - $ref: '#/definitions/models.ErrorResponse' + $ref: '#/definitions/api-service_internal_models.ErrorResponse' summary: Get Retribusi by ID tags: - Retribusi @@ -523,26 +527,26 @@ paths: name: request required: true schema: - $ref: '#/definitions/retribusi.RetribusiUpdateRequest' + $ref: '#/definitions/api-service_internal_models_retribusi.RetribusiUpdateRequest' produces: - application/json responses: "200": description: Retribusi updated successfully schema: - $ref: '#/definitions/retribusi.RetribusiUpdateResponse' + $ref: 
'#/definitions/api-service_internal_models_retribusi.RetribusiUpdateResponse' "400": description: Bad request or validation error schema: - $ref: '#/definitions/models.ErrorResponse' + $ref: '#/definitions/api-service_internal_models.ErrorResponse' "404": description: Retribusi not found schema: - $ref: '#/definitions/models.ErrorResponse' + $ref: '#/definitions/api-service_internal_models.ErrorResponse' "500": description: Internal server error schema: - $ref: '#/definitions/models.ErrorResponse' + $ref: '#/definitions/api-service_internal_models.ErrorResponse' summary: Update retribusi tags: - Retribusi @@ -589,15 +593,15 @@ paths: "200": description: Success response schema: - $ref: '#/definitions/retribusi.RetribusiGetResponse' + $ref: '#/definitions/api-service_internal_models_retribusi.RetribusiGetResponse' "400": description: Bad request schema: - $ref: '#/definitions/models.ErrorResponse' + $ref: '#/definitions/api-service_internal_models.ErrorResponse' "500": description: Internal server error schema: - $ref: '#/definitions/models.ErrorResponse' + $ref: '#/definitions/api-service_internal_models.ErrorResponse' summary: Get retribusi with pagination and optional aggregation tags: - Retribusi @@ -611,22 +615,22 @@ paths: name: request required: true schema: - $ref: '#/definitions/retribusi.RetribusiCreateRequest' + $ref: '#/definitions/api-service_internal_models_retribusi.RetribusiCreateRequest' produces: - application/json responses: "201": description: Retribusi created successfully schema: - $ref: '#/definitions/retribusi.RetribusiCreateResponse' + $ref: '#/definitions/api-service_internal_models_retribusi.RetribusiCreateResponse' "400": description: Bad request or validation error schema: - $ref: '#/definitions/models.ErrorResponse' + $ref: '#/definitions/api-service_internal_models.ErrorResponse' "500": description: Internal server error schema: - $ref: '#/definitions/models.ErrorResponse' + $ref: '#/definitions/api-service_internal_models.ErrorResponse' summary: Create retribusi tags: - Retribusi @@ -664,15 +668,15 @@ paths: "200": description: Success response schema: - $ref: '#/definitions/retribusi.RetribusiGetResponse' + $ref: '#/definitions/api-service_internal_models_retribusi.RetribusiGetResponse' "400": description: Bad request schema: - $ref: '#/definitions/models.ErrorResponse' + $ref: '#/definitions/api-service_internal_models.ErrorResponse' "500": description: Internal server error schema: - $ref: '#/definitions/models.ErrorResponse' + $ref: '#/definitions/api-service_internal_models.ErrorResponse' summary: Get retribusi with dynamic filtering tags: - Retribusi @@ -692,11 +696,11 @@ paths: "200": description: Statistics data schema: - $ref: '#/definitions/models.AggregateData' + $ref: '#/definitions/api-service_internal_models.AggregateData' "500": description: Internal server error schema: - $ref: '#/definitions/models.ErrorResponse' + $ref: '#/definitions/api-service_internal_models.ErrorResponse' summary: Get retribusi statistics tags: - Retribusi @@ -704,33 +708,28 @@ paths: post: consumes: - application/json - description: Generate a JWT token for a user + description: Generate a JWT token for testing purposes parameters: - - description: User credentials + - description: Token generation data in: body name: token required: true schema: - $ref: '#/definitions/models.LoginRequest' + additionalProperties: true + type: object produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/models.TokenResponse' + $ref: 
'#/definitions/api-service_internal_models_auth.TokenResponse' "400": description: Bad request schema: additionalProperties: type: string type: object - "401": - description: Unauthorized - schema: - additionalProperties: - type: string - type: object summary: Generate JWT token tags: - Token @@ -738,16 +737,14 @@ paths: post: consumes: - application/json - description: Generate a JWT token directly without password verification (for - testing) + description: Generate a JWT token directly with provided data parameters: - - description: User info + - description: Token generation data in: body - name: user + name: token required: true schema: - additionalProperties: - type: string + additionalProperties: true type: object produces: - application/json @@ -755,14 +752,14 @@ paths: "200": description: OK schema: - $ref: '#/definitions/models.TokenResponse' + $ref: '#/definitions/api-service_internal_models_auth.TokenResponse' "400": description: Bad request schema: additionalProperties: type: string type: object - summary: Generate token directly + summary: Generate JWT token directly tags: - Token schemes: diff --git a/go.mod b/go.mod index cb2442d..6477df7 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,6 @@ require ( github.com/gin-contrib/cors v1.7.6 github.com/go-playground/validator/v10 v10.27.0 github.com/go-redis/redis_rate/v10 v10.0.1 - github.com/go-sql-driver/mysql v1.8.1 github.com/jmoiron/sqlx v1.4.0 github.com/joho/godotenv v1.5.1 github.com/lib/pq v1.10.9 @@ -53,6 +52,7 @@ require ( github.com/go-openapi/swag v0.19.15 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-sql-driver/mysql v1.8.1 // indirect github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/goccy/go-json v0.10.5 // indirect github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect diff --git a/internal/config/config.go b/internal/config/config.go index c9bf8a1..20ea90c 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -20,7 +20,7 @@ import ( type Config struct { Server ServerConfig Databases map[string]DatabaseConfig - ReadReplicas map[string][]DatabaseConfig // For read replicas + ReadReplicas map[string][]DatabaseConfig Auth AuthConfig Keycloak KeycloakConfig Bpjs BpjsConfig @@ -65,6 +65,20 @@ type DatabaseConfig struct { MaxOpenConns int // Max open connections MaxIdleConns int // Max idle connections ConnMaxLifetime time.Duration // Connection max lifetime + // Security settings + RequireSSL bool // Require SSL connection + SSLRootCert string // Path to SSL root certificate + SSLCert string // Path to SSL client certificate + SSLKey string // Path to SSL client key + Timeout time.Duration // Connection timeout + ConnectTimeout time.Duration // Connect timeout + ReadTimeout time.Duration // Read timeout + WriteTimeout time.Duration // Write timeout + StatementTimeout time.Duration // Statement timeout for PostgreSQL + // Connection pool settings + MaxLifetime time.Duration // Maximum amount of time a connection may be reused + MaxIdleTime time.Duration // Maximum amount of time a connection may be idle + HealthCheckPeriod time.Duration // Health check period } type AuthConfig struct { @@ -79,6 +93,7 @@ type AuthYAMLConfig struct { StaticTokens []string `yaml:"static_tokens"` FallbackTo string `yaml:"fallback_to"` } + type KeycloakYAMLConfig struct { Issuer string `yaml:"issuer"` Audience string `yaml:"audience"` @@ -121,6 +136,10 @@ type SecurityConfig struct { 
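Two things happen here: `go.mod` demotes the MySQL driver to an indirect dependency (suggesting the direct import was removed in code not shown in this diff), and `DatabaseConfig` grows SSL, timeout, and pool-tuning fields. The diff never shows where the new fields are consumed; a hypothetical mapping onto a libpq-style Postgres DSN illustrates the intent (libpq takes `connect_timeout` in seconds and `statement_timeout` in milliseconds):

```go
package config

import "fmt"

// postgresDSN is a hypothetical consumer of the new fields; the project's
// real DSN builder is outside this diff.
func postgresDSN(c DatabaseConfig) string {
	dsn := fmt.Sprintf(
		"host=%s port=%d user=%s password=%s dbname=%s sslmode=%s search_path=%s",
		c.Host, c.Port, c.Username, c.Password, c.Database, c.SSLMode, c.Schema,
	)
	if c.ConnectTimeout > 0 {
		dsn += fmt.Sprintf(" connect_timeout=%d", int(c.ConnectTimeout.Seconds()))
	}
	if c.StatementTimeout > 0 {
		dsn += fmt.Sprintf(" options='-c statement_timeout=%d'", c.StatementTimeout.Milliseconds())
	}
	if c.SSLRootCert != "" {
		dsn += " sslrootcert=" + c.SSLRootCert
	}
	return dsn
}
```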
RateLimit RateLimitConfig `mapstructure:"rate_limit"` // Input Validation MaxInputLength int `mapstructure:"max_input_length"` + // SQL Injection Protection + SanitizeQueries bool `mapstructure:"sanitize_queries"` + // Connection Security + RequireSecureConnections bool `mapstructure:"require_secure_connections"` } // RateLimitConfig berisi pengaturan untuk rate limiter @@ -229,6 +248,8 @@ func LoadConfig() *Config { DB: getEnvAsInt("REDIS_DB", 0), }, }, + SanitizeQueries: getEnvAsBool("SECURITY_SANITIZE_QUERIES", true), + RequireSecureConnections: getEnvAsBool("SECURITY_REQUIRE_SECURE_CONNECTIONS", false), }, } log.Printf("DEBUG: Final Config Object. MaxInputLength is: %d", config.Security.MaxInputLength) @@ -372,32 +393,60 @@ func loadKeycloakConfig() KeycloakConfig { } func (c *Config) loadDatabaseConfigs() { - // Simplified approach: Directly load from environment variables - // This ensures we get the exact values specified in .env - - // // Primary database configuration - // c.Databases["default"] = DatabaseConfig{ - // Name: "default", - // Type: getEnv("DB_CONNECTION", "postgres"), - // Host: getEnv("DB_HOST", "localhost"), - // Port: getEnvAsInt("DB_PORT", 5432), - // Username: getEnv("DB_USERNAME", ""), - // Password: getEnv("DB_PASSWORD", ""), - // Database: getEnv("DB_DATABASE", "satu_db"), - // Schema: getEnv("DB_SCHEMA", "public"), - // SSLMode: getEnv("DB_SSLMODE", "disable"), - // MaxOpenConns: getEnvAsInt("DB_MAX_OPEN_CONNS", 25), - // MaxIdleConns: getEnvAsInt("DB_MAX_IDLE_CONNS", 25), - // ConnMaxLifetime: parseDuration(getEnv("DB_CONN_MAX_LIFETIME", "5m")), - // } - - // SATUDATA database configuration + // Load PostgreSQL configurations c.addPostgreSQLConfigs() - // MongoDB database configuration + // Load MySQL configurations + c.addMySQLConfigs() + + // Load MongoDB configurations c.addMongoDBConfigs() - // Legacy support for backward compatibility + // Load SQLite configurations + c.addSQLiteConfigs() + + // Load custom database configurations from environment variables + c.loadCustomDatabaseConfigs() + + // Remove duplicate database configurations + c.removeDuplicateDatabases() +} + +func (c *Config) removeDuplicateDatabases() { + // Create a map to track unique database connections + uniqueDBs := make(map[string]DatabaseConfig) + duplicates := make(map[string][]string) + + // First pass: identify duplicates + for name, config := range c.Databases { + // Create a unique key based on connection parameters + key := fmt.Sprintf("%s:%s:%d:%s", config.Type, config.Host, config.Port, config.Database) + + if existing, exists := uniqueDBs[key]; exists { + // Found a duplicate + if duplicates[key] == nil { + duplicates[key] = []string{existing.Name} + } + duplicates[key] = append(duplicates[key], name) + log.Printf("⚠️ Database %s is a duplicate of %s (same connection parameters)", name, existing.Name) + } else { + uniqueDBs[key] = config + } + } + + // Second pass: remove duplicates, keeping the first one + for _, dupNames := range duplicates { + // Keep the first database name, remove the rest + keepName := dupNames[0] + for i := 1; i < len(dupNames); i++ { + removeName := dupNames[i] + delete(c.Databases, removeName) + log.Printf("🗑️ Removed duplicate database configuration: %s (kept: %s)", removeName, keepName) + } + } +} + +func (c *Config) loadCustomDatabaseConfigs() { envVars := os.Environ() dbConfigs := make(map[string]map[string]string) @@ -437,28 +486,45 @@ func (c *Config) loadDatabaseConfigs() { continue } - dbConfig := DatabaseConfig{ - Name: name, - Type: 
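One caveat on `removeDuplicateDatabases`: it keys connections on `type:host:port:database` and intends to keep "the first one", but the first pass ranges over a Go map, whose iteration order is unspecified, so which duplicate survives is nondeterministic across runs. That matters if the retained `Name` is referenced elsewhere. Illustrative behavior (names and addresses invented):

```go
func ExampleConfig_removeDuplicateDatabases() {
	cfg := &Config{Databases: map[string]DatabaseConfig{
		"satudata":      {Name: "satudata", Type: "postgres", Host: "10.0.0.1", Port: 5432, Database: "satu_db"},
		"satudata_copy": {Name: "satudata_copy", Type: "postgres", Host: "10.0.0.1", Port: 5432, Database: "satu_db"},
	}}
	cfg.removeDuplicateDatabases()
	// One entry remains; which of the two names is kept can vary per run.
}
```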
getEnvFromMap(config, "connection", getEnvFromMap(config, "type", "postgres")), - Host: getEnvFromMap(config, "host", "localhost"), - Port: getEnvAsIntFromMap(config, "port", 5432), - Username: getEnvFromMap(config, "username", ""), - Password: getEnvFromMap(config, "password", ""), - Database: getEnvFromMap(config, "database", getEnvFromMap(config, "name", name)), - Schema: getEnvFromMap(config, "schema", "public"), - SSLMode: getEnvFromMap(config, "sslmode", "disable"), - Path: getEnvFromMap(config, "path", ""), - Options: getEnvFromMap(config, "options", ""), - MaxOpenConns: getEnvAsIntFromMap(config, "max_open_conns", 25), - MaxIdleConns: getEnvAsIntFromMap(config, "max_idle_conns", 25), - ConnMaxLifetime: parseDuration(getEnvFromMap(config, "conn_max_lifetime", "5m")), - } + dbType := getEnvFromMap(config, "connection", getEnvFromMap(config, "type", "postgres")) // Skip if username is empty and it's not a system config - if dbConfig.Username == "" && !strings.HasPrefix(name, "chrome") { + username := getEnvFromMap(config, "username", "") + if username == "" && !strings.HasPrefix(name, "chrome") { continue } + dbConfig := DatabaseConfig{ + Name: name, + Type: dbType, + Host: getEnvFromMap(config, "host", "localhost"), + Port: getEnvAsIntFromMap(config, "port", getDefaultPort(dbType)), + Username: username, + Password: getEnvFromMap(config, "password", ""), + Database: getEnvFromMap(config, "database", getEnvFromMap(config, "name", name)), + Schema: getEnvFromMap(config, "schema", getDefaultSchema(dbType)), + SSLMode: getEnvFromMap(config, "sslmode", getDefaultSSLMode(dbType)), + Path: getEnvFromMap(config, "path", ""), + Options: getEnvFromMap(config, "options", ""), + MaxOpenConns: getEnvAsIntFromMap(config, "max_open_conns", getDefaultMaxOpenConns(dbType)), + MaxIdleConns: getEnvAsIntFromMap(config, "max_idle_conns", getDefaultMaxIdleConns(dbType)), + ConnMaxLifetime: parseDuration(getEnvFromMap(config, "conn_max_lifetime", getDefaultConnMaxLifetime(dbType))), + // Security settings + RequireSSL: getEnvAsBoolFromMap(config, "require_ssl", false), + SSLRootCert: getEnvFromMap(config, "ssl_root_cert", ""), + SSLCert: getEnvFromMap(config, "ssl_cert", ""), + SSLKey: getEnvFromMap(config, "ssl_key", ""), + Timeout: parseDuration(getEnvFromMap(config, "timeout", "30s")), + ConnectTimeout: parseDuration(getEnvFromMap(config, "connect_timeout", "10s")), + ReadTimeout: parseDuration(getEnvFromMap(config, "read_timeout", "30s")), + WriteTimeout: parseDuration(getEnvFromMap(config, "write_timeout", "30s")), + StatementTimeout: parseDuration(getEnvFromMap(config, "statement_timeout", "120s")), + // Connection pool settings + MaxLifetime: parseDuration(getEnvFromMap(config, "max_lifetime", "1h")), + MaxIdleTime: parseDuration(getEnvFromMap(config, "max_idle_time", "5m")), + HealthCheckPeriod: parseDuration(getEnvFromMap(config, "health_check_period", "1m")), + } + c.Databases[name] = dbConfig } } @@ -499,20 +565,41 @@ func (c *Config) loadReadReplicaConfigs() { } if replicaConfig == nil { - // Create new replica config + // Get primary DB config as base + primaryDB, exists := c.Databases[dbName] + if !exists { + log.Printf("Warning: Primary database %s not found for replica configuration", dbName) + continue + } + + // Create new replica config based on primary newConfig := DatabaseConfig{ Name: replicaKey, - Type: c.Databases[dbName].Type, - Host: getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_HOST", c.Databases[dbName].Host), - Port: 
getEnvAsInt("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_PORT", c.Databases[dbName].Port), - Username: getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_USERNAME", c.Databases[dbName].Username), - Password: getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_PASSWORD", c.Databases[dbName].Password), - Database: getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_DATABASE", c.Databases[dbName].Database), - Schema: getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_SCHEMA", c.Databases[dbName].Schema), - SSLMode: getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_SSLMODE", c.Databases[dbName].SSLMode), - MaxOpenConns: getEnvAsInt("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_MAX_OPEN_CONNS", c.Databases[dbName].MaxOpenConns), - MaxIdleConns: getEnvAsInt("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_MAX_IDLE_CONNS", c.Databases[dbName].MaxIdleConns), - ConnMaxLifetime: parseDuration(getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_CONN_MAX_LIFETIME", "5m")), + Type: primaryDB.Type, + Host: getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_HOST", primaryDB.Host), + Port: getEnvAsInt("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_PORT", primaryDB.Port), + Username: getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_USERNAME", primaryDB.Username), + Password: getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_PASSWORD", primaryDB.Password), + Database: getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_DATABASE", primaryDB.Database), + Schema: getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_SCHEMA", primaryDB.Schema), + SSLMode: getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_SSLMODE", primaryDB.SSLMode), + MaxOpenConns: getEnvAsInt("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_MAX_OPEN_CONNS", primaryDB.MaxOpenConns), + MaxIdleConns: getEnvAsInt("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_MAX_IDLE_CONNS", primaryDB.MaxIdleConns), + ConnMaxLifetime: parseDuration(getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_CONN_MAX_LIFETIME", primaryDB.ConnMaxLifetime.String())), + // Security settings + RequireSSL: getEnvAsBool("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_REQUIRE_SSL", primaryDB.RequireSSL), + SSLRootCert: getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_SSL_ROOT_CERT", primaryDB.SSLRootCert), + SSLCert: getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_SSL_CERT", primaryDB.SSLCert), + SSLKey: getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_SSL_KEY", primaryDB.SSLKey), + Timeout: parseDuration(getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_TIMEOUT", primaryDB.Timeout.String())), + ConnectTimeout: parseDuration(getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_CONNECT_TIMEOUT", primaryDB.ConnectTimeout.String())), + ReadTimeout: parseDuration(getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_READ_TIMEOUT", primaryDB.ReadTimeout.String())), + WriteTimeout: parseDuration(getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_WRITE_TIMEOUT", primaryDB.WriteTimeout.String())), + StatementTimeout: parseDuration(getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_STATEMENT_TIMEOUT", primaryDB.StatementTimeout.String())), + // Connection pool settings + MaxLifetime: 
parseDuration(getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_MAX_LIFETIME", primaryDB.MaxLifetime.String())), + MaxIdleTime: parseDuration(getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_MAX_IDLE_TIME", primaryDB.MaxIdleTime.String())), + HealthCheckPeriod: parseDuration(getEnv("DB_"+strings.ToUpper(dbName)+"_REPLICA_"+replicaIndex+"_HEALTH_CHECK_PERIOD", primaryDB.HealthCheckPeriod.String())), } c.ReadReplicas[dbName] = append(c.ReadReplicas[dbName], newConfig) replicaConfig = &c.ReadReplicas[dbName][len(c.ReadReplicas[dbName])-1] @@ -540,6 +627,30 @@ func (c *Config) loadReadReplicaConfigs() { replicaConfig.MaxIdleConns = getEnvAsInt(key, 25) case "conn_max_lifetime": replicaConfig.ConnMaxLifetime = parseDuration(value) + case "require_ssl": + replicaConfig.RequireSSL = getEnvAsBool(key, false) + case "ssl_root_cert": + replicaConfig.SSLRootCert = value + case "ssl_cert": + replicaConfig.SSLCert = value + case "ssl_key": + replicaConfig.SSLKey = value + case "timeout": + replicaConfig.Timeout = parseDuration(value) + case "connect_timeout": + replicaConfig.ConnectTimeout = parseDuration(value) + case "read_timeout": + replicaConfig.ReadTimeout = parseDuration(value) + case "write_timeout": + replicaConfig.WriteTimeout = parseDuration(value) + case "statement_timeout": + replicaConfig.StatementTimeout = parseDuration(value) + case "max_lifetime": + replicaConfig.MaxLifetime = parseDuration(value) + case "max_idle_time": + replicaConfig.MaxIdleTime = parseDuration(value) + case "health_check_period": + replicaConfig.HealthCheckPeriod = parseDuration(value) } } } @@ -554,15 +665,29 @@ func (c *Config) addSpecificDatabase(prefix, defaultType string) { Name: prefix, Type: connection, Host: host, - Port: getEnvAsInt(strings.ToUpper(prefix)+"_PORT", 5432), + Port: getEnvAsInt(strings.ToUpper(prefix)+"_PORT", getDefaultPort(connection)), Username: getEnv(strings.ToUpper(prefix)+"_USERNAME", ""), Password: getEnv(strings.ToUpper(prefix)+"_PASSWORD", ""), Database: getEnv(strings.ToUpper(prefix)+"_DATABASE", getEnv(strings.ToUpper(prefix)+"_NAME", prefix)), - Schema: getEnv(strings.ToUpper(prefix)+"_SCHEMA", "public"), - SSLMode: getEnv(strings.ToUpper(prefix)+"_SSLMODE", "disable"), - MaxOpenConns: getEnvAsInt(strings.ToUpper(prefix)+"_MAX_OPEN_CONNS", 25), - MaxIdleConns: getEnvAsInt(strings.ToUpper(prefix)+"_MAX_IDLE_CONNS", 25), - ConnMaxLifetime: parseDuration(getEnv(strings.ToUpper(prefix)+"_CONN_MAX_LIFETIME", "5m")), + Schema: getEnv(strings.ToUpper(prefix)+"_SCHEMA", getDefaultSchema(connection)), + SSLMode: getEnv(strings.ToUpper(prefix)+"_SSLMODE", getDefaultSSLMode(connection)), + MaxOpenConns: getEnvAsInt(strings.ToUpper(prefix)+"_MAX_OPEN_CONNS", getDefaultMaxOpenConns(connection)), + MaxIdleConns: getEnvAsInt(strings.ToUpper(prefix)+"_MAX_IDLE_CONNS", getDefaultMaxIdleConns(connection)), + ConnMaxLifetime: parseDuration(getEnv(strings.ToUpper(prefix)+"_CONN_MAX_LIFETIME", getDefaultConnMaxLifetime(connection))), + // Security settings + RequireSSL: getEnvAsBool(strings.ToUpper(prefix)+"_REQUIRE_SSL", false), + SSLRootCert: getEnv(strings.ToUpper(prefix)+"_SSL_ROOT_CERT", ""), + SSLCert: getEnv(strings.ToUpper(prefix)+"_SSL_CERT", ""), + SSLKey: getEnv(strings.ToUpper(prefix)+"_SSL_KEY", ""), + Timeout: parseDuration(getEnv(strings.ToUpper(prefix)+"_TIMEOUT", "30s")), + ConnectTimeout: parseDuration(getEnv(strings.ToUpper(prefix)+"_CONNECT_TIMEOUT", "10s")), + ReadTimeout: parseDuration(getEnv(strings.ToUpper(prefix)+"_READ_TIMEOUT", "30s")), + 
WriteTimeout: parseDuration(getEnv(strings.ToUpper(prefix)+"_WRITE_TIMEOUT", "30s")), + StatementTimeout: parseDuration(getEnv(strings.ToUpper(prefix)+"_STATEMENT_TIMEOUT", "120s")), + // Connection pool settings + MaxLifetime: parseDuration(getEnv(strings.ToUpper(prefix)+"_MAX_LIFETIME", "1h")), + MaxIdleTime: parseDuration(getEnv(strings.ToUpper(prefix)+"_MAX_IDLE_TIME", "5m")), + HealthCheckPeriod: parseDuration(getEnv(strings.ToUpper(prefix)+"_HEALTH_CHECK_PERIOD", "1m")), } c.Databases[prefix] = dbConfig } @@ -570,25 +695,6 @@ func (c *Config) addSpecificDatabase(prefix, defaultType string) { // PostgreSQL database func (c *Config) addPostgreSQLConfigs() { - // SATUDATA database configuration - // defaultPOSTGRESHost := getEnv("POSTGRES_HOST", "localhost") - // if defaultPOSTGRESHost != "" { - // c.Databases["postgres"] = DatabaseConfig{ - // Name: "postgres", - // Type: getEnv("POSTGRES_CONNECTION", "postgres"), - // Host: defaultPOSTGRESHost, - // Port: getEnvAsInt("POSTGRES_PORT", 5432), - // Username: getEnv("POSTGRES_USERNAME", ""), - // Password: getEnv("POSTGRES_PASSWORD", ""), - // Database: getEnv("POSTGRES_DATABASE", "postgres"), - // Schema: getEnv("POSTGRES_SCHEMA", "public"), - // SSLMode: getEnv("POSTGRES_SSLMODE", "disable"), - // MaxOpenConns: getEnvAsInt("POSTGRES_MAX_OPEN_CONNS", 25), - // MaxIdleConns: getEnvAsInt("POSTGRES_MAX_IDLE_CONNS", 25), - // ConnMaxLifetime: parseDuration(getEnv("POSTGRES_CONN_MAX_LIFETIME", "5m")), - // } - // } - // Support for custom PostgreSQL configurations with POSTGRES_ prefix envVars := os.Environ() for _, envVar := range envVars { @@ -624,6 +730,20 @@ func (c *Config) addPostgreSQLConfigs() { MaxOpenConns: getEnvAsInt("POSTGRES_MAX_OPEN_CONNS", 25), MaxIdleConns: getEnvAsInt("POSTGRES_MAX_IDLE_CONNS", 25), ConnMaxLifetime: parseDuration(getEnv("POSTGRES_CONN_MAX_LIFETIME", "5m")), + // Security settings + RequireSSL: getEnvAsBool("POSTGRES_"+strings.ToUpper(dbName)+"_REQUIRE_SSL", false), + SSLRootCert: getEnv("POSTGRES_"+strings.ToUpper(dbName)+"_SSL_ROOT_CERT", ""), + SSLCert: getEnv("POSTGRES_"+strings.ToUpper(dbName)+"_SSL_CERT", ""), + SSLKey: getEnv("POSTGRES_"+strings.ToUpper(dbName)+"_SSL_KEY", ""), + Timeout: parseDuration(getEnv("POSTGRES_"+strings.ToUpper(dbName)+"_TIMEOUT", "30s")), + ConnectTimeout: parseDuration(getEnv("POSTGRES_"+strings.ToUpper(dbName)+"_CONNECT_TIMEOUT", "10s")), + ReadTimeout: parseDuration(getEnv("POSTGRES_"+strings.ToUpper(dbName)+"_READ_TIMEOUT", "30s")), + WriteTimeout: parseDuration(getEnv("POSTGRES_"+strings.ToUpper(dbName)+"_WRITE_TIMEOUT", "30s")), + StatementTimeout: parseDuration(getEnv("POSTGRES_"+strings.ToUpper(dbName)+"_STATEMENT_TIMEOUT", "120s")), + // Connection pool settings + MaxLifetime: parseDuration(getEnv("POSTGRES_"+strings.ToUpper(dbName)+"_MAX_LIFETIME", "1h")), + MaxIdleTime: parseDuration(getEnv("POSTGRES_"+strings.ToUpper(dbName)+"_MAX_IDLE_TIME", "5m")), + HealthCheckPeriod: parseDuration(getEnv("POSTGRES_"+strings.ToUpper(dbName)+"_HEALTH_CHECK_PERIOD", "1m")), } } } @@ -648,6 +768,19 @@ func (c *Config) addMySQLConfigs() { MaxOpenConns: getEnvAsInt("MYSQL_MAX_OPEN_CONNS", 25), MaxIdleConns: getEnvAsInt("MYSQL_MAX_IDLE_CONNS", 25), ConnMaxLifetime: parseDuration(getEnv("MYSQL_CONN_MAX_LIFETIME", "5m")), + // Security settings + RequireSSL: getEnvAsBool("MYSQL_REQUIRE_SSL", false), + SSLRootCert: getEnv("MYSQL_SSL_ROOT_CERT", ""), + SSLCert: getEnv("MYSQL_SSL_CERT", ""), + SSLKey: getEnv("MYSQL_SSL_KEY", ""), + Timeout: parseDuration(getEnv("MYSQL_TIMEOUT", "30s")), + 
ConnectTimeout: parseDuration(getEnv("MYSQL_CONNECT_TIMEOUT", "10s")), + ReadTimeout: parseDuration(getEnv("MYSQL_READ_TIMEOUT", "30s")), + WriteTimeout: parseDuration(getEnv("MYSQL_WRITE_TIMEOUT", "30s")), + // Connection pool settings + MaxLifetime: parseDuration(getEnv("MYSQL_MAX_LIFETIME", "1h")), + MaxIdleTime: parseDuration(getEnv("MYSQL_MAX_IDLE_TIME", "5m")), + HealthCheckPeriod: parseDuration(getEnv("MYSQL_HEALTH_CHECK_PERIOD", "1m")), } } @@ -687,6 +820,19 @@ func (c *Config) addMySQLConfigs() { MaxOpenConns: getEnvAsInt("MYSQL_MAX_OPEN_CONNS", 25), MaxIdleConns: getEnvAsInt("MYSQL_MAX_IDLE_CONNS", 25), ConnMaxLifetime: parseDuration(getEnv("MYSQL_CONN_MAX_LIFETIME", "5m")), + // Security settings + RequireSSL: getEnvAsBool("MYSQL_"+strings.ToUpper(dbName)+"_REQUIRE_SSL", false), + SSLRootCert: getEnv("MYSQL_"+strings.ToUpper(dbName)+"_SSL_ROOT_CERT", ""), + SSLCert: getEnv("MYSQL_"+strings.ToUpper(dbName)+"_SSL_CERT", ""), + SSLKey: getEnv("MYSQL_"+strings.ToUpper(dbName)+"_SSL_KEY", ""), + Timeout: parseDuration(getEnv("MYSQL_"+strings.ToUpper(dbName)+"_TIMEOUT", "30s")), + ConnectTimeout: parseDuration(getEnv("MYSQL_"+strings.ToUpper(dbName)+"_CONNECT_TIMEOUT", "10s")), + ReadTimeout: parseDuration(getEnv("MYSQL_"+strings.ToUpper(dbName)+"_READ_TIMEOUT", "30s")), + WriteTimeout: parseDuration(getEnv("MYSQL_"+strings.ToUpper(dbName)+"_WRITE_TIMEOUT", "30s")), + // Connection pool settings + MaxLifetime: parseDuration(getEnv("MYSQL_"+strings.ToUpper(dbName)+"_MAX_LIFETIME", "1h")), + MaxIdleTime: parseDuration(getEnv("MYSQL_"+strings.ToUpper(dbName)+"_MAX_IDLE_TIME", "5m")), + HealthCheckPeriod: parseDuration(getEnv("MYSQL_"+strings.ToUpper(dbName)+"_HEALTH_CHECK_PERIOD", "1m")), } } } @@ -712,6 +858,19 @@ func (c *Config) addMongoDBConfigs() { MaxOpenConns: getEnvAsInt("MONGODB_MAX_OPEN_CONNS", 100), MaxIdleConns: getEnvAsInt("MONGODB_MAX_IDLE_CONNS", 10), ConnMaxLifetime: parseDuration(getEnv("MONGODB_CONN_MAX_LIFETIME", "30m")), + // Security settings + RequireSSL: getEnvAsBool("MONGODB_REQUIRE_SSL", false), + SSLRootCert: getEnv("MONGODB_SSL_ROOT_CERT", ""), + SSLCert: getEnv("MONGODB_SSL_CERT", ""), + SSLKey: getEnv("MONGODB_SSL_KEY", ""), + Timeout: parseDuration(getEnv("MONGODB_TIMEOUT", "30s")), + ConnectTimeout: parseDuration(getEnv("MONGODB_CONNECT_TIMEOUT", "10s")), + ReadTimeout: parseDuration(getEnv("MONGODB_READ_TIMEOUT", "30s")), + WriteTimeout: parseDuration(getEnv("MONGODB_WRITE_TIMEOUT", "30s")), + // Connection pool settings + MaxLifetime: parseDuration(getEnv("MONGODB_MAX_LIFETIME", "1h")), + MaxIdleTime: parseDuration(getEnv("MONGODB_MAX_IDLE_TIME", "5m")), + HealthCheckPeriod: parseDuration(getEnv("MONGODB_HEALTH_CHECK_PERIOD", "1m")), } } @@ -730,6 +889,19 @@ func (c *Config) addMongoDBConfigs() { MaxOpenConns: getEnvAsInt("MONGODB_MAX_OPEN_CONNS", 100), MaxIdleConns: getEnvAsInt("MONGODB_MAX_IDLE_CONNS", 10), ConnMaxLifetime: parseDuration(getEnv("MONGODB_CONN_MAX_LIFETIME", "30m")), + // Security settings + RequireSSL: getEnvAsBool("MONGODB_LOCAL_REQUIRE_SSL", false), + SSLRootCert: getEnv("MONGODB_LOCAL_SSL_ROOT_CERT", ""), + SSLCert: getEnv("MONGODB_LOCAL_SSL_CERT", ""), + SSLKey: getEnv("MONGODB_LOCAL_SSL_KEY", ""), + Timeout: parseDuration(getEnv("MONGODB_LOCAL_TIMEOUT", "30s")), + ConnectTimeout: parseDuration(getEnv("MONGODB_LOCAL_CONNECT_TIMEOUT", "10s")), + ReadTimeout: parseDuration(getEnv("MONGODB_LOCAL_READ_TIMEOUT", "30s")), + WriteTimeout: parseDuration(getEnv("MONGODB_LOCAL_WRITE_TIMEOUT", "30s")), + // Connection pool settings + 
MaxLifetime: parseDuration(getEnv("MONGODB_LOCAL_MAX_LIFETIME", "1h")), + MaxIdleTime: parseDuration(getEnv("MONGODB_LOCAL_MAX_IDLE_TIME", "5m")), + HealthCheckPeriod: parseDuration(getEnv("MONGODB_LOCAL_HEALTH_CHECK_PERIOD", "1m")), } } @@ -766,6 +938,19 @@ func (c *Config) addMongoDBConfigs() { MaxOpenConns: getEnvAsInt("MONGODB_MAX_OPEN_CONNS", 100), MaxIdleConns: getEnvAsInt("MONGODB_MAX_IDLE_CONNS", 10), ConnMaxLifetime: parseDuration(getEnv("MONGODB_CONN_MAX_LIFETIME", "30m")), + // Security settings + RequireSSL: getEnvAsBool("MONGODB_"+strings.ToUpper(dbName)+"_REQUIRE_SSL", false), + SSLRootCert: getEnv("MONGODB_"+strings.ToUpper(dbName)+"_SSL_ROOT_CERT", ""), + SSLCert: getEnv("MONGODB_"+strings.ToUpper(dbName)+"_SSL_CERT", ""), + SSLKey: getEnv("MONGODB_"+strings.ToUpper(dbName)+"_SSL_KEY", ""), + Timeout: parseDuration(getEnv("MONGODB_"+strings.ToUpper(dbName)+"_TIMEOUT", "30s")), + ConnectTimeout: parseDuration(getEnv("MONGODB_"+strings.ToUpper(dbName)+"_CONNECT_TIMEOUT", "10s")), + ReadTimeout: parseDuration(getEnv("MONGODB_"+strings.ToUpper(dbName)+"_READ_TIMEOUT", "30s")), + WriteTimeout: parseDuration(getEnv("MONGODB_"+strings.ToUpper(dbName)+"_WRITE_TIMEOUT", "30s")), + // Connection pool settings + MaxLifetime: parseDuration(getEnv("MONGODB_"+strings.ToUpper(dbName)+"_MAX_LIFETIME", "1h")), + MaxIdleTime: parseDuration(getEnv("MONGODB_"+strings.ToUpper(dbName)+"_MAX_IDLE_TIME", "5m")), + HealthCheckPeriod: parseDuration(getEnv("MONGODB_"+strings.ToUpper(dbName)+"_HEALTH_CHECK_PERIOD", "1m")), } } } @@ -773,6 +958,155 @@ func (c *Config) addMongoDBConfigs() { } } +// addSQLiteConfigs adds SQLite database configurations from environment variables +func (c *Config) addSQLiteConfigs() { + // Support for custom SQLite configurations with SQLITE_ prefix + envVars := os.Environ() + for _, envVar := range envVars { + parts := strings.SplitN(envVar, "=", 2) + if len(parts) != 2 { + continue + } + + key := parts[0] + // Parse SQLite configurations (format: SQLITE_[NAME]_[PROPERTY]) + if strings.HasPrefix(key, "SQLITE_") && strings.Contains(key, "_") { + segments := strings.Split(key, "_") + if len(segments) >= 3 { + dbName := strings.ToLower(strings.Join(segments[1:len(segments)-1], "_")) + + // Skip if it's a standard SQLite configuration + if dbName == "connection" || dbName == "dev" || dbName == "default" { + continue + } + + // Create or update SQLite configuration + if _, exists := c.Databases[dbName]; !exists { + sqlitePath := getEnv("SQLITE_"+strings.ToUpper(dbName)+"_PATH", "") + if sqlitePath != "" { + c.Databases[dbName] = DatabaseConfig{ + Name: dbName, + Type: "sqlite", + Path: sqlitePath, + Database: getEnv("SQLITE_"+strings.ToUpper(dbName)+"_DATABASE", dbName), + MaxOpenConns: getEnvAsInt("SQLITE_MAX_OPEN_CONNS", 25), + MaxIdleConns: getEnvAsInt("SQLITE_MAX_IDLE_CONNS", 25), + ConnMaxLifetime: parseDuration(getEnv("SQLITE_CONN_MAX_LIFETIME", "5m")), + // Connection pool settings + MaxLifetime: parseDuration(getEnv("SQLITE_"+strings.ToUpper(dbName)+"_MAX_LIFETIME", "1h")), + MaxIdleTime: parseDuration(getEnv("SQLITE_"+strings.ToUpper(dbName)+"_MAX_IDLE_TIME", "5m")), + HealthCheckPeriod: parseDuration(getEnv("SQLITE_"+strings.ToUpper(dbName)+"_HEALTH_CHECK_PERIOD", "1m")), + } + } + } + } + } + } +} + +// Helper functions for getting default values based on database type +func getDefaultPort(dbType string) int { + switch dbType { + case "postgres": + return 5432 + case "mysql": + return 3306 + case "sqlserver": + return 1433 + case "mongodb": + return 27017 + case 
"sqlite": + return 0 // SQLite doesn't use port + default: + return 5432 + } +} + +func getDefaultSchema(dbType string) string { + switch dbType { + case "postgres": + return "public" + case "mysql": + return "" + case "sqlserver": + return "dbo" + case "mongodb": + return "" + case "sqlite": + return "" + default: + return "public" + } +} + +func getDefaultSSLMode(dbType string) string { + switch dbType { + case "postgres": + return "disable" + case "mysql": + return "false" + case "sqlserver": + return "false" + case "mongodb": + return "false" + case "sqlite": + return "" + default: + return "disable" + } +} + +func getDefaultMaxOpenConns(dbType string) int { + switch dbType { + case "postgres": + return 25 + case "mysql": + return 25 + case "sqlserver": + return 25 + case "mongodb": + return 100 + case "sqlite": + return 1 // SQLite only supports one writer at a time + default: + return 25 + } +} + +func getDefaultMaxIdleConns(dbType string) int { + switch dbType { + case "postgres": + return 25 + case "mysql": + return 25 + case "sqlserver": + return 25 + case "mongodb": + return 10 + case "sqlite": + return 1 // SQLite only supports one writer at a time + default: + return 25 + } +} + +func getDefaultConnMaxLifetime(dbType string) string { + switch dbType { + case "postgres": + return "5m" + case "mysql": + return "5m" + case "sqlserver": + return "5m" + case "mongodb": + return "30m" + case "sqlite": + return "5m" + default: + return "5m" + } +} + func getEnvFromMap(config map[string]string, key, defaultValue string) string { if value, exists := config[key]; exists { return value @@ -789,6 +1123,15 @@ func getEnvAsIntFromMap(config map[string]string, key string, defaultValue int) return defaultValue } +func getEnvAsBoolFromMap(config map[string]string, key string, defaultValue bool) bool { + if value, exists := config[key]; exists { + if boolValue, err := strconv.ParseBool(value); err == nil { + return boolValue + } + } + return defaultValue +} + func parseDuration(durationStr string) time.Duration { if duration, err := time.ParseDuration(durationStr); err == nil { return duration @@ -869,16 +1212,19 @@ func (c *Config) Validate() error { } for name, db := range c.Databases { - if db.Host == "" { + if db.Type != "sqlite" && db.Host == "" { errs = append(errs, fmt.Sprintf("database host is required for %s", name)) } - if db.Username == "" { + if db.Type != "sqlite" && db.Username == "" { errs = append(errs, fmt.Sprintf("database username is required for %s", name)) } - if db.Password == "" { + if db.Type != "sqlite" && db.Password == "" { errs = append(errs, fmt.Sprintf("database password is required for %s", name)) } - if db.Database == "" { + if db.Type == "sqlite" && db.Path == "" { + errs = append(errs, fmt.Sprintf("database path is required for SQLite database %s", name)) + } + if db.Type != "sqlite" && db.Database == "" { errs = append(errs, fmt.Sprintf("database name is required for %s", name)) } } diff --git a/internal/database/database.go b/internal/database/database.go index b7f5b4f..f2a5db7 100644 --- a/internal/database/database.go +++ b/internal/database/database.go @@ -2,24 +2,22 @@ package database import ( "context" + "crypto/tls" "database/sql" "fmt" - "log" // Import runtime package - - // Import debug package + "log" "strconv" "sync" "time" "api-service/internal/config" - _ "github.com/jackc/pgx/v5" // Import pgx driver + _ "github.com/jackc/pgx/v5" + "github.com/jmoiron/sqlx" "github.com/lib/pq" - _ "gorm.io/driver/postgres" // Import GORM PostgreSQL driver - - _ 
"github.com/go-sql-driver/mysql" // MySQL driver for database/sql - _ "gorm.io/driver/mysql" // GORM MySQL driver - _ "gorm.io/driver/sqlserver" // GORM SQL Server driver + _ "gorm.io/driver/mysql" + _ "gorm.io/driver/postgres" + _ "gorm.io/driver/sqlserver" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" @@ -40,27 +38,31 @@ const ( type Service interface { Health() map[string]map[string]string GetDB(name string) (*sql.DB, error) + GetSQLXDB(name string) (*sqlx.DB, error) // Tambahkan metode ini GetMongoClient(name string) (*mongo.Client, error) - GetReadDB(name string) (*sql.DB, error) // For read replicas + GetReadDB(name string) (*sql.DB, error) Close() error ListDBs() []string GetDBType(name string) (DatabaseType, error) - // Tambahkan method untuk WebSocket notifications ListenForChanges(ctx context.Context, dbName string, channels []string, callback func(string, string)) error NotifyChange(dbName, channel, payload string) error - GetPrimaryDB(name string) (*sql.DB, error) // Helper untuk get primary DB + GetPrimaryDB(name string) (*sql.DB, error) + ExecuteQuery(ctx context.Context, dbName string, query string, args ...interface{}) (*sql.Rows, error) + ExecuteQueryRow(ctx context.Context, dbName string, query string, args ...interface{}) *sql.Row + Exec(ctx context.Context, dbName string, query string, args ...interface{}) (sql.Result, error) } type service struct { - sqlDatabases map[string]*sql.DB - mongoClients map[string]*mongo.Client - readReplicas map[string][]*sql.DB // Read replicas for load balancing - configs map[string]config.DatabaseConfig - readConfigs map[string][]config.DatabaseConfig - mu sync.RWMutex - readBalancer map[string]int // Round-robin counter for read replicas - listeners map[string]*pq.Listener // Tambahkan untuk tracking listeners - listenersMu sync.RWMutex + sqlDatabases map[string]*sql.DB + sqlxDatabases map[string]*sqlx.DB // Tambahkan map untuk sqlx.DB + mongoClients map[string]*mongo.Client + readReplicas map[string][]*sql.DB + configs map[string]config.DatabaseConfig + readConfigs map[string][]config.DatabaseConfig + mu sync.RWMutex + readBalancer map[string]int + listeners map[string]*pq.Listener + listenersMu sync.RWMutex } var ( @@ -72,18 +74,17 @@ var ( func New(cfg *config.Config) Service { once.Do(func() { dbManager = &service{ - sqlDatabases: make(map[string]*sql.DB), - mongoClients: make(map[string]*mongo.Client), - readReplicas: make(map[string][]*sql.DB), - configs: make(map[string]config.DatabaseConfig), - readConfigs: make(map[string][]config.DatabaseConfig), - readBalancer: make(map[string]int), - listeners: make(map[string]*pq.Listener), + sqlDatabases: make(map[string]*sql.DB), + sqlxDatabases: make(map[string]*sqlx.DB), // Inisialisasi map sqlx + mongoClients: make(map[string]*mongo.Client), + readReplicas: make(map[string][]*sql.DB), + configs: make(map[string]config.DatabaseConfig), + readConfigs: make(map[string][]config.DatabaseConfig), + readBalancer: make(map[string]int), + listeners: make(map[string]*pq.Listener), } - log.Println("Initializing database service...") // Log when the initialization starts - // log.Printf("Current Goroutine ID: %d", runtime.NumGoroutine()) // Log the number of goroutines - // log.Printf("Stack Trace: %s", debug.Stack()) // Log the stack trace + log.Println("Initializing database service...") dbManager.loadFromConfig(cfg) // Initialize all databases @@ -125,14 +126,17 @@ func (s *service) addDatabase(name string, config config.DatabaseConfig) error { s.mu.Lock() defer 
-	log.Printf("=== Database Connection Debug ===")
-	// log.Printf("Database: %s", name)
-	// log.Printf("Type: %s", config.Type)
-	// log.Printf("Host: %s", config.Host)
-	// log.Printf("Port: %d", config.Port)
-	// log.Printf("Database: %s", config.Database)
-	// log.Printf("Username: %s", config.Username)
-	// log.Printf("SSLMode: %s", config.SSLMode)
+	// Check for duplicate database connections
+	for existingName, existingConfig := range s.configs {
+		if existingName != name &&
+			existingConfig.Host == config.Host &&
+			existingConfig.Port == config.Port &&
+			existingConfig.Database == config.Database &&
+			existingConfig.Type == config.Type {
+			log.Printf("⚠️ Database %s appears to be a duplicate of %s (same host:port:database), skipping connection", name, existingName)
+			return nil
+		}
+	}

 	var db *sql.DB
 	var err error
@@ -156,12 +160,11 @@ func (s *service) addDatabase(name string, config config.DatabaseConfig) error {
 	if err != nil {
 		log.Printf("❌ Error connecting to database %s: %v", name, err)
-		log.Printf("   Database: %s@%s:%d/%s", config.Username, config.Host, config.Port, config.Database)
 		return err
 	}

 	log.Printf("✅ Successfully connected to database: %s", name)
-	return s.configureSQLDB(name, db, config.MaxOpenConns, config.MaxIdleConns, config.ConnMaxLifetime)
+	return s.configureSQLDB(name, db, config)
 }

 func (s *service) addReadReplica(name string, index int, config config.DatabaseConfig) error {
@@ -206,19 +209,32 @@ }

 func (s *service) openPostgresConnection(config config.DatabaseConfig) (*sql.DB, error) {
-	connStr := fmt.Sprintf("postgres://%s:%s@%s:%d/%s?sslmode=%s",
-		config.Username,
-		config.Password,
+	// Build connection string with security parameters.
+	// connect_timeout is expressed in seconds; statement_timeout, when given
+	// without a unit, is interpreted by PostgreSQL as milliseconds.
+	connectTimeoutSec := int(config.ConnectTimeout.Seconds())
+	statementTimeoutMs := int(config.StatementTimeout.Milliseconds())
+
+	connStr := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=%d statement_timeout=%d",
 		config.Host,
 		config.Port,
+		config.Username,
+		config.Password,
 		config.Database,
 		config.SSLMode,
+		connectTimeoutSec,
+		statementTimeoutMs,
 	)

 	if config.Schema != "" {
-		connStr += "&search_path=" + config.Schema
+		connStr += " search_path=" + config.Schema
 	}

+	// Add SSL configuration if required; append each file path only when it is
+	// set, since empty sslcert/sslkey/sslrootcert values can break the conninfo string
+	if config.RequireSSL {
+		if config.SSLCert != "" {
+			connStr += " sslcert=" + config.SSLCert
+		}
+		if config.SSLKey != "" {
+			connStr += " sslkey=" + config.SSLKey
+		}
+		if config.SSLRootCert != "" {
+			connStr += " sslrootcert=" + config.SSLRootCert
+		}
+	}
+
+	// Open connection using standard database/sql interface
 	db, err := sql.Open("pgx", connStr)
 	if err != nil {
 		return nil, fmt.Errorf("failed to open PostgreSQL connection: %w", err)
@@ -228,14 +244,33 @@ }

 func (s *service) openMySQLConnection(config config.DatabaseConfig) (*sql.DB, error) {
-	connStr := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=true",
+	// Build connection string with security parameters
+	connStr := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=true&timeout=%s&readTimeout=%s&writeTimeout=%s",
 		config.Username,
 		config.Password,
 		config.Host,
 		config.Port,
 		config.Database,
+		config.Timeout,
+		config.ReadTimeout,
+		config.WriteTimeout,
 	)

+	// Add SSL configuration if required. NOTE: go-sql-driver/mysql has no
+	// ssl-ca/ssl-cert/ssl-key DSN keys; custom root CAs and client certificates
+	// must be registered with mysql.RegisterTLSConfig and referenced as
+	// tls=<name>. tls=true verifies the server against the system root CAs.
+	if config.RequireSSL {
+		connStr += "&tls=true"
+	}
+
+	// Open connection
 	db, err := sql.Open("mysql", connStr)
 	if err != nil {
 		return nil, fmt.Errorf("failed to open MySQL connection: %w", err)
@@ -245,14 +280,30 @@ }

 func (s *service) openSQLServerConnection(config config.DatabaseConfig) (*sql.DB, error) {
-	connStr := fmt.Sprintf("sqlserver://%s:%s@%s:%d?database=%s",
+	// Build connection string with security parameters
+	// Convert timeout to seconds for SQL Server
+	connectTimeoutSec := int(config.ConnectTimeout.Seconds())
+
+	connStr := fmt.Sprintf("sqlserver://%s:%s@%s:%d?database=%s&connection timeout=%d",
 		config.Username,
 		config.Password,
 		config.Host,
 		config.Port,
 		config.Database,
+		connectTimeoutSec,
 	)

+	// Add SSL configuration if required
+	if config.RequireSSL {
+		connStr += "&encrypt=true"
+		if config.SSLRootCert != "" {
+			connStr += "&trustServerCertificate=false"
+		} else {
+			connStr += "&trustServerCertificate=true"
+		}
+	}
+
+	// Open connection
 	db, err := sql.Open("sqlserver", connStr)
 	if err != nil {
 		return nil, fmt.Errorf("failed to open SQL Server connection: %w", err)
@@ -262,23 +313,26 @@ }

 func (s *service) openSQLiteConnection(config config.DatabaseConfig) (*sql.DB, error) {
-	dbPath := config.Path
-	if dbPath == "" {
-		dbPath = fmt.Sprintf("./data/%s.db", config.Database)
-	}
-
-	db, err := sql.Open("sqlite3", dbPath)
+	// Enable foreign keys and WAL via DSN parameters so every pooled connection
+	// gets them; PRAGMAs issued with Exec only affect the single connection that
+	// ran them. The _foreign_keys/_journal_mode parameters assume mattn/go-sqlite3.
+	db, err := sql.Open("sqlite3", config.Path+"?_foreign_keys=on&_journal_mode=WAL")
 	if err != nil {
 		return nil, fmt.Errorf("failed to open SQLite connection: %w", err)
 	}

 	return db, nil
 }

 func (s *service) addMongoDB(name string, config config.DatabaseConfig) error {
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), config.Timeout)
 	defer cancel()

+	// Build MongoDB URI with authentication and TLS options
 	uri := fmt.Sprintf("mongodb://%s:%s@%s:%d/%s",
 		config.Username,
 		config.Password,
 		config.Host,
 		config.Port,
 		config.Database,
 	)

-	client, err := mongo.Connect(ctx, options.Client().ApplyURI(uri))
+	// Configure client options with security settings
+	clientOptions := options.Client().ApplyURI(uri)
+
+	// Set TLS configuration if needed
+	if config.RequireSSL {
+		clientOptions.SetTLSConfig(&tls.Config{
+			InsecureSkipVerify: config.SSLMode == "require",
+			MinVersion:         tls.VersionTLS12,
+		})
+	}
+
+	// Set connection timeout
+	clientOptions.SetConnectTimeout(config.ConnectTimeout)
+	clientOptions.SetServerSelectionTimeout(config.Timeout)
+
+	client, err := mongo.Connect(ctx, clientOptions)
 	if err != nil {
 		return fmt.Errorf("failed to connect to MongoDB: %w", err)
 	}

+	// Ping to verify connection
+	if err := client.Ping(ctx, nil); err != nil {
+		return fmt.Errorf("failed to ping MongoDB: %w", err)
+	}
+
 	s.mongoClients[name] = client
 	log.Printf("Successfully connected to MongoDB: %s", name)

 	return nil
 }

-func (s *service) configureSQLDB(name string, db *sql.DB, maxOpenConns, maxIdleConns int, connMaxLifetime time.Duration) error {
-	db.SetMaxOpenConns(maxOpenConns)
-	db.SetMaxIdleConns(maxIdleConns)
-	db.SetConnMaxLifetime(connMaxLifetime)
+func (s *service) configureSQLDB(name string, db *sql.DB, config config.DatabaseConfig) error {
+	// Set connection pool limits
+	db.SetMaxOpenConns(config.MaxOpenConns)
+	db.SetMaxIdleConns(config.MaxIdleConns)
+	db.SetConnMaxLifetime(config.ConnMaxLifetime)
+	db.SetConnMaxIdleTime(config.MaxIdleTime)

-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), config.Timeout)
 	defer cancel()

 	if err := db.PingContext(ctx); err != nil {
@@ -312,6 +388,28 @@ }

 	s.sqlDatabases[name] = db
+
+	// CHANGE: Also build an sqlx.DB wrapper around the existing sql.DB
+	dbType := DatabaseType(config.Type)
+	var driverName string
+
+	switch dbType {
+	case Postgres:
+		driverName = "pgx"
+	case MySQL:
+		driverName = "mysql"
+	case SQLServer:
+		driverName = "sqlserver"
+	case SQLite:
+		driverName = "sqlite3"
+	default:
+		return fmt.Errorf("unsupported database type for sqlx: %s", config.Type)
+	}
+
+	// Create the sqlx.DB from the existing sql.DB
+	sqlxDB := sqlx.NewDb(db, driverName)
+	s.sqlxDatabases[name] = sqlxDB

 	log.Printf("Successfully connected to SQL database: %s", name)

 	return nil
@@ -439,26 +537,27 @@ func (s *service) Health() map[string]map[string]string {

 // GetDB returns a specific SQL database connection by name
 func (s *service) GetDB(name string) (*sql.DB, error) {
-	log.Printf("Attempting to get database connection for: %s", name)
 	s.mu.RLock()
 	defer s.mu.RUnlock()

 	db, exists := s.sqlDatabases[name]
 	if !exists {
-		log.Printf("Error: database %s not found", name) // Log the error
 		return nil, fmt.Errorf("database %s not found", name)
 	}

-	log.Printf("Current connection pool state for %s: Open: %d, In Use: %d, Idle: %d",
-		name, db.Stats().OpenConnections, db.Stats().InUse, db.Stats().Idle)
+	return db, nil
+}
+
+// CHANGE: Add the GetSQLXDB method
+// GetSQLXDB returns a specific SQLX database connection by name
+func (s *service) GetSQLXDB(name string) (*sqlx.DB, error) {
 	s.mu.RLock()
 	defer s.mu.RUnlock()

-	// db, exists := s.sqlDatabases[name]
-	// if !exists {
-	// 	log.Printf("Error: database %s not found", name) // Log the error
-	// 	return nil, fmt.Errorf("database %s not found", name)
-	// }
+	db, exists := s.sqlxDatabases[name]
+	if !exists {
+		return nil, fmt.Errorf("database %s not found", name)
+	}

 	return db, nil
 }
@@ -537,6 +636,13 @@ func (s *service) Close() error {

 	var errs []error

+	// Close listeners first
+	for name, listener := range s.listeners {
+		if err := listener.Close(); err != nil {
+			errs = append(errs, fmt.Errorf("failed to close listener for %s: %w", name, err))
+		}
+	}
+
 	for name, db := range s.sqlDatabases {
 		if err := db.Close(); err != nil {
 			errs = append(errs, fmt.Errorf("failed to close database %s: %w", name, err))
@@ -566,10 +672,12 @@ func (s *service) Close() error {
 	}

 	s.sqlDatabases = make(map[string]*sql.DB)
+	s.sqlxDatabases = make(map[string]*sqlx.DB) // Reset the sqlx map
 	s.mongoClients = make(map[string]*mongo.Client)
 	s.readReplicas = make(map[string][]*sql.DB)
 	s.configs = make(map[string]config.DatabaseConfig)
 	s.readConfigs = make(map[string][]config.DatabaseConfig)
+	s.listeners = make(map[string]*pq.Listener)

 	if len(errs) > 0 {
 		return fmt.Errorf("errors closing databases: %v", errs)
@@ -583,6 +691,51 @@ func (s *service) GetPrimaryDB(name string) (*sql.DB, error) {
 	return s.GetDB(name)
 }
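Aside: a sketch of how a caller would use the query helpers added below — context-first, with parameterized arguments only. The service variable, database name, and query text here are assumptions for illustration:

	rows, err := svc.ExecuteQuery(ctx, "postgres_satudata",
		"SELECT id, status FROM data_retribusi WHERE status = $1 LIMIT $2", "published", 10)
	if err != nil {
		return err
	}
	defer rows.Close() // the caller owns the rows and must close them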
+// ExecuteQuery executes a query with parameters and returns rows
+func (s *service) ExecuteQuery(ctx context.Context, dbName string, query string, args ...interface{}) (*sql.Rows, error) {
+	db, err := s.GetDB(dbName)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get database %s: %w", dbName, err)
+	}
+
+	// Use parameterized queries to prevent SQL injection
+	rows, err := db.QueryContext(ctx, query, args...)
+	if err != nil {
+		return nil, fmt.Errorf("failed to execute query: %w", err)
+	}
+
+	return rows, nil
+}
+
+// ExecuteQueryRow executes a query with parameters and returns a single row
+func (s *service) ExecuteQueryRow(ctx context.Context, dbName string, query string, args ...interface{}) *sql.Row {
+	db, err := s.GetDB(dbName)
+	if err != nil {
+		// sql.Row cannot carry a custom error (its fields are unexported), and
+		// calling Scan on a zero-value Row panics. Log the lookup failure and
+		// return nil; callers must treat a nil row as "database not found".
+		log.Printf("ExecuteQueryRow: failed to get database %s: %v", dbName, err)
+		return nil
+	}
+
+	// Use parameterized queries to prevent SQL injection
+	return db.QueryRowContext(ctx, query, args...)
+}
+
+// Exec executes a query with parameters and returns the result
+func (s *service) Exec(ctx context.Context, dbName string, query string, args ...interface{}) (sql.Result, error) {
+	db, err := s.GetDB(dbName)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get database %s: %w", dbName, err)
+	}
+
+	// Use parameterized queries to prevent SQL injection
+	result, err := db.ExecContext(ctx, query, args...)
+	if err != nil {
+		return nil, fmt.Errorf("failed to execute query: %w", err)
+	}
+
+	return result, nil
+}
+
 // ListenForChanges implements PostgreSQL LISTEN/NOTIFY for real-time updates
 func (s *service) ListenForChanges(ctx context.Context, dbName string, channels []string, callback func(string, string)) error {
 	s.mu.RLock()
@@ -599,13 +752,17 @@ func (s *service) ListenForChanges(ctx context.Context, dbName string, channels
 	}

 	// Create connection string for listener
-	connStr := fmt.Sprintf("postgres://%s:%s@%s:%d/%s?sslmode=%s",
+	// Convert timeout to seconds for pq
+	connectTimeoutSec := int(config.ConnectTimeout.Seconds())
+
+	connStr := fmt.Sprintf("postgres://%s:%s@%s:%d/%s?sslmode=%s&connect_timeout=%d",
 		config.Username,
 		config.Password,
 		config.Host,
 		config.Port,
 		config.Database,
 		config.SSLMode,
+		connectTimeoutSec,
 	)

 	// Create listener
@@ -687,7 +844,7 @@ func (s *service) NotifyChange(dbName, channel, payload string) error {
 		return fmt.Errorf("NOTIFY only supported for PostgreSQL databases")
 	}

-	// Execute NOTIFY
+	// Execute NOTIFY with parameterized query to prevent SQL injection
 	query := "SELECT pg_notify($1, $2)"
 	_, err = db.Exec(query, channel, payload)
 	if err != nil {
diff --git a/internal/handlers/retribusi/retribusi.go b/internal/handlers/retribusi/retribusi.go
index 5dd0362..667a5a2 100644
--- a/internal/handlers/retribusi/retribusi.go
+++ b/internal/handlers/retribusi/retribusi.go
@@ -3,21 +3,25 @@ package handlers

 import (
 	"api-service/internal/config"
 	"api-service/internal/database"
-	models "api-service/internal/models"
+	"api-service/internal/models"
 	"api-service/internal/models/retribusi"
 	queryUtils "api-service/internal/utils/query"
+	"api-service/internal/utils/validation"
 	"api-service/pkg/logger"
 	"context"
 	"database/sql"
 	"fmt"
 	"net/http"
 	"strconv"
+	"strings"
 	"sync"
 	"time"

 	"github.com/gin-gonic/gin"
 	"github.com/go-playground/validator/v10"
 	"github.com/google/uuid"
+	"github.com/jmoiron/sqlx"
+	"github.com/lib/pq"
 )

 // =============================================================================
@@ -48,6 +52,72 @@ func validateRetribusiStatus(fl validator.FieldLevel) bool {
 	return models.IsValidStatus(fl.Field().String())
 }
+// =============================================================================
+// CACHE IMPLEMENTATION
+// =============================================================================
+
+// CacheEntry represents an entry in the cache
+type CacheEntry struct {
+	Data      interface{}
+	ExpiresAt time.Time
+}
+
+// IsExpired checks if the cache entry has expired
+func (e *CacheEntry) IsExpired() bool {
+	return time.Now().After(e.ExpiresAt)
+}
+
+// InMemoryCache implements a simple in-memory cache with TTL.
+// sync.Map does its own locking, so no extra mutex is needed.
+type InMemoryCache struct {
+	items sync.Map
+}
+
+// NewInMemoryCache creates a new in-memory cache
+func NewInMemoryCache() *InMemoryCache {
+	return &InMemoryCache{}
+}
+
+// Get retrieves an item from the cache
+func (c *InMemoryCache) Get(key string) (interface{}, bool) {
+	val, ok := c.items.Load(key)
+	if !ok {
+		return nil, false
+	}
+
+	entry, ok := val.(*CacheEntry)
+	if !ok || entry.IsExpired() {
+		c.items.Delete(key)
+		return nil, false
+	}
+
+	return entry.Data, true
+}
+
+// Set stores an item in the cache with a TTL
+func (c *InMemoryCache) Set(key string, value interface{}, ttl time.Duration) {
+	entry := &CacheEntry{
+		Data:      value,
+		ExpiresAt: time.Now().Add(ttl),
+	}
+	c.items.Store(key, entry)
+}
+
+// Delete removes an item from the cache
+func (c *InMemoryCache) Delete(key string) {
+	c.items.Delete(key)
+}
+
+// DeleteByPrefix removes all items with a specific prefix
+func (c *InMemoryCache) DeleteByPrefix(prefix string) {
+	c.items.Range(func(key, value interface{}) bool {
+		if keyStr, ok := key.(string); ok && strings.HasPrefix(keyStr, prefix) {
+			c.items.Delete(key)
+		}
+		return true
+	})
+}
+
 // =============================================================================
 // RETRIBUSI HANDLER STRUCT
 // =============================================================================
@@ -56,11 +126,13 @@ func validateRetribusiStatus(fl validator.FieldLevel) bool {
 type RetribusiHandler struct {
 	db           database.Service
 	queryBuilder *queryUtils.QueryBuilder
+	validator    *validation.DynamicValidator
+	cache        *InMemoryCache // in-memory cache
 }

 // NewRetribusiHandler creates a new RetribusiHandler with a pre-configured QueryBuilder
 func NewRetribusiHandler() *RetribusiHandler {
-	// PERUBAHAN: Inisialisasi QueryBuilder dengan daftar kolom yang diizinkan untuk keamanan.
+	// CHANGE: Initialize QueryBuilder with an allowed-columns list for security.
 	queryBuilder := queryUtils.NewQueryBuilder(queryUtils.DBTypePostgreSQL).
 		SetAllowedColumns([]string{
 			"id", "status", "sort", "user_created", "date_created",
@@ -73,6 +145,8 @@ func NewRetribusiHandler() *RetribusiHandler {
 	return &RetribusiHandler{
 		db:           db,
 		queryBuilder: queryBuilder,
+		validator:    validation.NewDynamicValidator(queryBuilder),
+		cache:        NewInMemoryCache(), // Initialize in-memory cache
 	}
 }

@@ -98,8 +172,12 @@ func NewRetribusiHandler() *RetribusiHandler {
 // @Failure 500 {object} models.ErrorResponse "Internal server error"
 // @Router /api/v1/retribusis [get]
 func (h *RetribusiHandler) GetRetribusi(c *gin.Context) {
-	// PERUBAHAN: Gunakan fungsi inti fetchRetribusisDynamic untuk semua logika pengambilan data.
-	// Kita hanya perlu membangun DynamicQuery dari parameter sederhana.
+	// CHANGE: Increase timeout for complex queries
+	ctx, cancel := context.WithTimeout(c.Request.Context(), 120*time.Second)
+	defer cancel()
+
+	// CHANGE: Use the core fetchRetribusisDynamic function for all data retrieval logic.
+	// We only need to build DynamicQuery from simple parameters.
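Aside: given the column allow-list configured in NewRetribusiHandler, the DynamicQuery assembled below should render to parameterized SQL roughly like the sketch here; the exact quoting and placeholder numbering depend on queryUtils.QueryBuilder and are an assumption:

	// SELECT * FROM data_retribusi
	// WHERE (status != $1)
	//   AND ("Jenis" ILIKE $2 OR "Pelayanan" ILIKE $3 OR ...)
	// LIMIT $4 OFFSET $5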
 	query := queryUtils.DynamicQuery{
 		From:   "data_retribusi",
 		Fields: []queryUtils.SelectField{{Expression: "*"}},
@@ -114,6 +192,13 @@ func (h *RetribusiHandler) GetRetribusi(c *gin.Context) {
 		query.Offset = offset
 	}

+	// CHANGE: Use GetSQLXDB to obtain an sqlx.DB connection
+	dbConn, err := h.db.GetSQLXDB("postgres_satudata")
+	if err != nil {
+		h.logAndRespondError(c, "Database connection failed", err, http.StatusInternalServerError)
+		return
+	}
+
 	// Parse simple filters
 	var filters []queryUtils.DynamicFilter
 	if status := c.Query("status"); status != "" && models.IsValidStatus(status) {
@@ -125,15 +210,80 @@ func (h *RetribusiHandler) GetRetribusi(c *gin.Context) {
 	if dinas := c.Query("dinas"); dinas != "" {
 		filters = append(filters, queryUtils.DynamicFilter{Column: "Dinas", Operator: queryUtils.OpILike, Value: "%" + dinas + "%"})
 	}
-	if search := c.Query("search"); search != "" {
-		// Jika ada search, buat grup filter OR
-		searchFilters := []queryUtils.DynamicFilter{
+
+	// CHANGE: Optimize search queries with caching
+	search := c.Query("search")
+	var searchFilters []queryUtils.DynamicFilter
+	var cacheKey string
+	var useCache bool
+
+	// FIX: Initialize searchFilters before using it in the cache hit section
+	if search != "" {
+		// Cap the search term length so queries don't become too slow
+		if len(search) > 50 {
+			search = search[:50]
+		}
+
+		// Generate the cache key for this search
+		cacheKey = fmt.Sprintf("retribusi:search:%s:%d:%d", search, query.Limit, query.Offset)
+
+		// Initialize searchFilters here
+		searchFilters = []queryUtils.DynamicFilter{
 			{Column: "Jenis", Operator: queryUtils.OpILike, Value: "%" + search + "%"},
 			{Column: "Pelayanan", Operator: queryUtils.OpILike, Value: "%" + search + "%"},
 			{Column: "Dinas", Operator: queryUtils.OpILike, Value: "%" + search + "%"},
 			{Column: "Kode_tarif", Operator: queryUtils.OpILike, Value: "%" + search + "%"},
 			{Column: "Uraian_1", Operator: queryUtils.OpILike, Value: "%" + search + "%"},
 		}
+
+		// Try the cache first
+		if cachedData, found := h.cache.Get(cacheKey); found {
+			logger.Info("Cache hit for search", map[string]interface{}{"search": search, "cache_key": cacheKey})
+
+			// Convert from interface{} to the expected type
+			retribusis, ok := cachedData.([]retribusi.Retribusi)
+			if !ok {
+				logger.Error("Failed to convert cached data", map[string]interface{}{"cache_key": cacheKey})
+			} else {
+				// If requested, fetch aggregate data
+				var aggregateData *models.AggregateData
+				if c.Query("include_summary") == "true" {
+					// CHANGE: parseFilterParams was removed; we reuse the filters built above.
+					// Build full filter groups for aggregate data (including search filters)
+					fullFilterGroups := []queryUtils.FilterGroup{
+						{Filters: searchFilters, LogicOp: "OR"},
+					}
+					if len(filters) > 0 {
+						fullFilterGroups = append(fullFilterGroups, queryUtils.FilterGroup{Filters: filters, LogicOp: "AND"})
+					}
+					aggregateData, err = h.getAggregateData(ctx, dbConn, fullFilterGroups)
+					if err != nil {
+						h.logAndRespondError(c, "Failed to get aggregate data", err, http.StatusInternalServerError)
+						return
+					}
+				}
+
+				// Build the response
+				meta := h.calculateMeta(query.Limit, query.Offset, len(retribusis))
+				response := retribusi.RetribusiGetResponse{
+					Message: "Data retribusi berhasil diambil (dari cache)",
+					Data:    retribusis,
+					Meta:    meta,
+				}
+
+				if aggregateData != nil {
+					response.Summary = aggregateData
+				}
+
+				c.JSON(http.StatusOK, response)
+				return
+			}
+		}
+
+		// On a cache miss, mark the result to be stored after the query
+		useCache = true
+
+		// If a search term is present, build an OR filter group
 		query.Filters = append(query.Filters, queryUtils.FilterGroup{Filters: searchFilters, LogicOp: "OR"})
 	}

@@ -142,25 +292,22 @@ func (h *RetribusiHandler) GetRetribusi(c *gin.Context) {
 		query.Filters = append(query.Filters, queryUtils.FilterGroup{Filters: filters, LogicOp: "AND"})
 	}

-	// Eksekusi query inti
-	dbConn, err := h.db.GetDB("postgres_satudata")
-	if err != nil {
-		h.logAndRespondError(c, "Database connection failed", err, http.StatusInternalServerError)
-		return
-	}
-	ctx, cancel := context.WithTimeout(c.Request.Context(), 30*time.Second)
-	defer cancel()
-
 	retribusis, total, err := h.fetchRetribusisDynamic(ctx, dbConn, query)
 	if err != nil {
 		h.logAndRespondError(c, "Failed to fetch data", err, http.StatusInternalServerError)
 		return
 	}

+	// CHANGE: Store search results in the cache when a search parameter is present
+	if useCache && len(retribusis) > 0 {
+		h.cache.Set(cacheKey, retribusis, 15*time.Minute) // Cache for 15 minutes
+		logger.Info("Cached search results", map[string]interface{}{"search": search, "cache_key": cacheKey, "count": len(retribusis)})
+	}
+
 	// Jika diminta, ambil data agregasi
 	var aggregateData *models.AggregateData
 	if c.Query("include_summary") == "true" {
-		// PERUBAHAN: parseFilterParams dihapus, kita gunakan filter yang sudah dibuat.
+		// CHANGE: parseFilterParams was removed; we reuse the filters built above.
 		aggregateData, err = h.getAggregateData(ctx, dbConn, query.Filters)
 		if err != nil {
 			h.logAndRespondError(c, "Failed to get aggregate data", err, http.StatusInternalServerError)
@@ -202,7 +349,24 @@ func (h *RetribusiHandler) GetRetribusiByID(c *gin.Context) {
 		return
 	}

-	dbConn, err := h.db.GetDB("postgres_satudata")
+	// CHANGE: Try the cache first
+	cacheKey := fmt.Sprintf("retribusi:id:%s", id)
+	if cachedData, found := h.cache.Get(cacheKey); found {
+		logger.Info("Cache hit for ID", map[string]interface{}{"id": id, "cache_key": cacheKey})
+
+		// Convert from interface{} to the expected type
+		if cachedRetribusi, ok := cachedData.(retribusi.Retribusi); ok {
+			response := retribusi.RetribusiGetByIDResponse{
+				Message: "Retribusi details retrieved successfully (dari cache)",
+				Data:    &cachedRetribusi,
+			}
+			c.JSON(http.StatusOK, response)
+			return
+		}
+	}
+
+	// CHANGE: Use GetSQLXDB to obtain an sqlx.DB connection
+	dbConn, err := h.db.GetSQLXDB("postgres_satudata")
 	if err != nil {
 		h.logAndRespondError(c, "Database connection failed", err, http.StatusInternalServerError)
 		return
@@ -223,14 +387,8 @@ func (h *RetribusiHandler) GetRetribusiByID(c *gin.Context) {
 		Limit: 1,
 	}

-	query, args, err := h.queryBuilder.BuildQuery(dynamicQuery)
-	if err != nil {
-		h.logAndRespondError(c, "Failed to build query", err, http.StatusInternalServerError)
-		return
-	}
-
-	row := dbConn.QueryRowContext(ctx, query, args...)
-	dataretribusi, err := h.scanRetribusiFromRow(row)
+	var dataretribusi retribusi.Retribusi
+	err = h.queryBuilder.ExecuteQueryRow(ctx, dbConn, dynamicQuery, &dataretribusi)
 	if err != nil {
 		if err == sql.ErrNoRows {
 			h.respondError(c, "Retribusi not found", err, http.StatusNotFound)
@@ -240,6 +398,9 @@ func (h *RetribusiHandler) GetRetribusiByID(c *gin.Context) {
 		return
 	}

+	// CHANGE: Store in the cache
+	h.cache.Set(cacheKey, dataretribusi, 30*time.Minute) // Cache for 30 minutes
+
 	response := retribusi.RetribusiGetByIDResponse{
 		Message: "Retribusi details retrieved successfully",
 		Data:    &dataretribusi,
@@ -276,7 +437,27 @@ func (h *RetribusiHandler) GetRetribusiDynamic(c *gin.Context) {
 		LogicOp: "AND",
 	}}, dynamicQuery.Filters...)
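Aside: the dynamic endpoint below keys its cache on c.Request.URL.RawQuery, so two requests that differ only in parameter order miss each other's entries. A hypothetical normalization helper (not in the codebase; assumes net/url, sort, and strings imports) could raise the hit rate:

	// normalizedCacheKey renders query values with sorted keys so parameter
	// order no longer affects the cache key.
	func normalizedCacheKey(values url.Values) string {
		keys := make([]string, 0, len(values))
		for k := range values {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		var b strings.Builder
		for _, k := range keys {
			b.WriteString(k)
			b.WriteByte('=')
			b.WriteString(strings.Join(values[k], ","))
			b.WriteByte('&')
		}
		return b.String()
	}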
-	dbConn, err := h.db.GetDB("postgres_satudata")
+	// CHANGE: Try the cache first
+	// Build the cache key from the query string
+	cacheKey := fmt.Sprintf("retribusi:dynamic:%s", c.Request.URL.RawQuery)
+	if cachedData, found := h.cache.Get(cacheKey); found {
+		logger.Info("Cache hit for dynamic query", map[string]interface{}{"cache_key": cacheKey})
+
+		// Convert from interface{} to the expected type
+		if retribusis, ok := cachedData.([]retribusi.Retribusi); ok {
+			meta := h.calculateMeta(dynamicQuery.Limit, dynamicQuery.Offset, len(retribusis))
+			response := retribusi.RetribusiGetResponse{
+				Message: "Data retribusi berhasil diambil (dari cache)",
+				Data:    retribusis,
+				Meta:    meta,
+			}
+			c.JSON(http.StatusOK, response)
+			return
+		}
+	}
+
+	// CHANGE: Use GetSQLXDB to obtain an sqlx.DB connection
+	dbConn, err := h.db.GetSQLXDB("postgres_satudata")
 	if err != nil {
 		h.logAndRespondError(c, "Database connection failed", err, http.StatusInternalServerError)
 		return
@@ -290,6 +471,9 @@ func (h *RetribusiHandler) GetRetribusiDynamic(c *gin.Context) {
 		return
 	}

+	// CHANGE: Store in the cache
+	h.cache.Set(cacheKey, retribusis, 10*time.Minute) // Cache for 10 minutes
+
 	meta := h.calculateMeta(dynamicQuery.Limit, dynamicQuery.Offset, total)
 	response := retribusi.RetribusiGetResponse{
 		Message: "Data retribusi berhasil diambil",
@@ -321,7 +505,8 @@ func (h *RetribusiHandler) CreateRetribusi(c *gin.Context) {
 		return
 	}

-	dbConn, err := h.db.GetDB("postgres_satudata")
+	// CHANGE: Use GetSQLXDB to obtain an sqlx.DB connection
+	dbConn, err := h.db.GetSQLXDB("postgres_satudata")
 	if err != nil {
 		h.logAndRespondError(c, "Database connection failed", err, http.StatusInternalServerError)
 		return
@@ -329,6 +514,36 @@ func (h *RetribusiHandler) CreateRetribusi(c *gin.Context) {
 	ctx, cancel := context.WithTimeout(c.Request.Context(), 15*time.Second)
 	defer cancel()

+	// CHANGE: Validate that KodeTarif is unique
+	if req.KodeTarif != nil && *req.KodeTarif != "" {
+		rule := validation.NewUniqueFieldRule(
+			"data_retribusi",         // Table name
+			"Kode_tarif",             // Column that must be unique
+			queryUtils.DynamicFilter{ // Additional condition
+				Column:   "status",
+				Operator: queryUtils.OpNotEqual,
+				Value:    "deleted",
+			},
+		)
+
+		// Prepare the request data to validate
+		dataToValidate := map[string]interface{}{
+			"Kode_tarif": *req.KodeTarif,
+		}
+
+		// Run the validation
+		isDuplicate, err := h.validator.Validate(ctx, dbConn, rule, dataToValidate)
+		if err != nil {
+			h.logAndRespondError(c, "Failed to validate Kode Tarif", err, http.StatusInternalServerError)
+			return
+		}
+
+		if isDuplicate {
+			h.respondError(c, "Kode Tarif already exists", fmt.Errorf("duplicate Kode Tarif: %s", *req.KodeTarif), http.StatusConflict)
+			return
+		}
+	}
+
 	data := queryUtils.InsertData{
 		Columns: []string{
 			"id", "status", "date_created", "date_updated",
@@ -349,17 +564,23 @@
 		"Tarif", "Satuan", "Tarif_overtime", "Satuan_overtime", "Rekening_pokok", "Rekening_denda", "Uraian_1", "Uraian_2", "Uraian_3",
 	}
-	queryStr, args, err := h.queryBuilder.BuildInsertQuery("data_retribusi", data, returningCols...)
+
+	// Kept as queryStr: naming this variable "sql" would shadow the database/sql package
+	queryStr, args, err := h.queryBuilder.BuildInsertQuery("data_retribusi", data, returningCols...)
 	if err != nil {
 		h.logAndRespondError(c, "Failed to build insert query", err, http.StatusInternalServerError)
 		return
 	}
-	row := dbConn.QueryRowContext(ctx, queryStr, args...)
-	dataretribusi, err := h.scanRetribusiFromRow(row)
+
+	var dataretribusi retribusi.Retribusi
+	err = dbConn.GetContext(ctx, &dataretribusi, queryStr, args...)
 	if err != nil {
 		h.logAndRespondError(c, "Failed to create retribusi", err, http.StatusInternalServerError)
 		return
 	}
+
+	// CHANGE: Invalidate cache entries that may be affected
+	h.invalidateRelatedCache(dataretribusi.Jenis.String, dataretribusi.Dinas.String, dataretribusi.KodeTarif.String)
+
 	response := retribusi.RetribusiCreateResponse{Message: "Retribusi berhasil dibuat", Data: &dataretribusi}
 	c.JSON(http.StatusCreated, response)
 }
@@ -393,13 +614,69 @@ func (h *RetribusiHandler) UpdateRetribusi(c *gin.Context) {
 		h.respondError(c, "Validation failed", err, http.StatusBadRequest)
 		return
 	}
-	dbConn, err := h.db.GetDB("postgres_satudata")
+
+	// CHANGE: Fetch the old row for cache invalidation
+	var oldData retribusi.Retribusi
+	dbConn, err := h.db.GetSQLXDB("postgres_satudata")
+	if err == nil {
+		ctx, cancel := context.WithTimeout(c.Request.Context(), 5*time.Second)
+		defer cancel()
+
+		dynamicQuery := queryUtils.DynamicQuery{
+			From:   "data_retribusi",
+			Fields: []queryUtils.SelectField{{Expression: "*"}},
+			Filters: []queryUtils.FilterGroup{{
+				Filters: []queryUtils.DynamicFilter{
+					{Column: "id", Operator: queryUtils.OpEqual, Value: id},
+				},
+				LogicOp: "AND",
+			}},
+			Limit: 1,
+		}
+
+		err = h.queryBuilder.ExecuteQueryRow(ctx, dbConn, dynamicQuery, &oldData)
+		if err != nil {
+			logger.Error("Failed to fetch old data for cache invalidation", map[string]interface{}{"error": err.Error(), "id": id})
+		}
+	}
+
+	// CHANGE: Use GetSQLXDB to obtain an sqlx.DB connection
+	dbConn, err = h.db.GetSQLXDB("postgres_satudata")
 	if err != nil {
 		h.logAndRespondError(c, "Database connection failed", err, http.StatusInternalServerError)
 		return
 	}
 	ctx, cancel := context.WithTimeout(c.Request.Context(), 15*time.Second)
 	defer cancel()
+
+	// CHANGE: Validate that KodeTarif is unique, except for the record with this ID
+	if req.KodeTarif != nil && *req.KodeTarif != "" {
+		rule := validation.ValidationRule{
+			TableName:     "data_retribusi",
+			UniqueColumns: []string{"Kode_tarif"},
+			Conditions: []queryUtils.DynamicFilter{
+				{Column: "status", Operator: queryUtils.OpNotEqual, Value: "deleted"},
+			},
+			ExcludeIDColumn: "id", // Exclude by the 'id' column...
+			ExcludeIDValue:  id,   // ...using the ID value from the path parameter
+		}
+
+		dataToValidate := map[string]interface{}{
+			"Kode_tarif": *req.KodeTarif,
+		}
+
+		isDuplicate, err := h.validator.Validate(ctx, dbConn, rule, dataToValidate)
+		if err != nil {
+			h.logAndRespondError(c, "Failed to validate Kode Tarif", err, http.StatusInternalServerError)
+			return
+		}
+
+		if isDuplicate {
+			h.respondError(c, "Kode Tarif already exists", fmt.Errorf("duplicate Kode Tarif: %s", *req.KodeTarif), http.StatusConflict)
+			return
+		}
+	}
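Aside: the check-then-write uniqueness validation above (and in CreateRetribusi) is racy under concurrent requests — two writers can both pass the check before either commits. Assuming PostgreSQL, a partial unique index would enforce this atomically, with the handler's 409 response kept as the friendly error path; the index name is an assumption:

	// assumed one-off migration DDL, not part of this change set
	const uxKodeTarif = `
	CREATE UNIQUE INDEX IF NOT EXISTS ux_data_retribusi_kode_tarif
	    ON data_retribusi ("Kode_tarif")
	    WHERE status != 'deleted';`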
+
 	updateData := queryUtils.UpdateData{
 		Columns: []string{
 			"status", "date_updated", "Jenis", "Pelayanan", "Dinas", "Kelompok_obyek", "Kode_tarif",
@@ -425,21 +702,35 @@ func (h *RetribusiHandler) UpdateRetribusi(c *gin.Context) {
 		"Tarif", "Satuan", "Tarif_overtime", "Satuan_overtime", "Rekening_pokok", "Rekening_denda", "Uraian_1", "Uraian_2", "Uraian_3",
 	}
-	queryStr, args, err := h.queryBuilder.BuildUpdateQuery("data_retribusi", updateData, filters, returningCols...)
+
+	// Again named queryStr rather than "sql" to avoid shadowing the database/sql package
+	queryStr, args, err := h.queryBuilder.BuildUpdateQuery("data_retribusi", updateData, filters, returningCols...)
 	if err != nil {
 		h.logAndRespondError(c, "Failed to build update query", err, http.StatusInternalServerError)
 		return
 	}
-	row := dbConn.QueryRowContext(ctx, queryStr, args...)
-	dataretribusi, err := h.scanRetribusiFromRow(row)
+
+	var dataretribusi retribusi.Retribusi
+	err = dbConn.GetContext(ctx, &dataretribusi, queryStr, args...)
 	if err != nil {
 		if err == sql.ErrNoRows { // sqlx's GetContext returns sql.ErrNoRows directly
 			h.respondError(c, "Retribusi not found", err, http.StatusNotFound)
 		} else {
 			h.logAndRespondError(c, "Failed to update retribusi", err, http.StatusInternalServerError)
 		}
 		return
 	}
+
+	// CHANGE: Invalidate cache entries that may be affected
+	// Invalidate the cache entry for the updated ID
+	cacheKey := fmt.Sprintf("retribusi:id:%s", id)
+	h.cache.Delete(cacheKey)
+
+	// Invalidate cache for both the old and the new data
+	if oldData.ID != "" {
+		h.invalidateRelatedCache(oldData.Jenis.String, oldData.Dinas.String, oldData.KodeTarif.String)
+	}
+	h.invalidateRelatedCache(dataretribusi.Jenis.String, dataretribusi.Dinas.String, dataretribusi.KodeTarif.String)
+
 	response := retribusi.RetribusiUpdateResponse{Message: "Retribusi berhasil diperbarui", Data: &dataretribusi}
 	c.JSON(http.StatusOK, response)
 }
@@ -462,13 +753,42 @@ func (h *RetribusiHandler) DeleteRetribusi(c *gin.Context) {
 		h.respondError(c, "Invalid ID format", err, http.StatusBadRequest)
 		return
 	}
-	dbConn, err := h.db.GetDB("postgres_satudata")
+
+	// CHANGE: Fetch the row for cache invalidation
+	var dataToDelete retribusi.Retribusi
+	dbConn, err := h.db.GetSQLXDB("postgres_satudata")
+	if err == nil {
+		ctx, cancel := context.WithTimeout(c.Request.Context(), 5*time.Second)
+		defer cancel()
+
+		dynamicQuery := queryUtils.DynamicQuery{
+			From:   "data_retribusi",
+			Fields: []queryUtils.SelectField{{Expression: "*"}},
+			Filters: []queryUtils.FilterGroup{{
+				Filters: []queryUtils.DynamicFilter{
+					{Column: "id", Operator: queryUtils.OpEqual, Value: id},
+				},
+				LogicOp: "AND",
+			}},
+			Limit: 1,
+		}
+
+		err = h.queryBuilder.ExecuteQueryRow(ctx, dbConn, dynamicQuery, &dataToDelete)
+		if err != nil {
+			logger.Error("Failed to fetch data for cache invalidation", map[string]interface{}{"error": err.Error(), "id": id})
+		}
+	}
+
+	// CHANGE: Use GetSQLXDB to obtain an sqlx.DB connection
+	dbConn, err = h.db.GetSQLXDB("postgres_satudata")
 	if err != nil {
 		h.logAndRespondError(c, "Database connection failed", err, http.StatusInternalServerError)
 		return
 	}
 	ctx, cancel := context.WithTimeout(c.Request.Context(), 15*time.Second)
 	defer cancel()
+
+	// CHANGE: Soft delete via ExecuteUpdate by flipping the status
 	updateData := queryUtils.UpdateData{
 		Columns: []string{"status", "date_updated"},
 		Values:  []interface{}{"deleted", time.Now()},
@@ -480,16 +800,14 @@ func (h *RetribusiHandler) DeleteRetribusi(c *gin.Context) {
 		},
 		LogicOp: "AND",
 	}}
-	queryStr, args, err := h.queryBuilder.BuildUpdateQuery("data_retribusi", updateData, filters)
-	if err != nil {
-		h.logAndRespondError(c, "Failed to build delete query", err, http.StatusInternalServerError)
-		return
-	}
-	result, err := dbConn.ExecContext(ctx, queryStr, args...)
+
+	// CHANGE: Use ExecuteUpdate instead of ExecuteDelete
+	result, err := h.queryBuilder.ExecuteUpdate(ctx, dbConn, "data_retribusi", updateData, filters)
 	if err != nil {
 		h.logAndRespondError(c, "Failed to delete retribusi", err, http.StatusInternalServerError)
 		return
 	}
+
 	rowsAffected, err := result.RowsAffected()
 	if err != nil {
 		h.logAndRespondError(c, "Failed to get affected rows", err, http.StatusInternalServerError)
@@ -499,6 +817,17 @@ func (h *RetribusiHandler) DeleteRetribusi(c *gin.Context) {
 		h.respondError(c, "Retribusi not found", sql.ErrNoRows, http.StatusNotFound)
 		return
 	}
+
+	// CHANGE: Invalidate cache entries that may be affected
+	// Invalidate the cache entry for the deleted ID
+	cacheKey := fmt.Sprintf("retribusi:id:%s", id)
+	h.cache.Delete(cacheKey)
+
+	// Invalidate cache for the deleted data
+	if dataToDelete.ID != "" {
+		h.invalidateRelatedCache(dataToDelete.Jenis.String, dataToDelete.Dinas.String, dataToDelete.KodeTarif.String)
+	}
+
 	response := retribusi.RetribusiDeleteResponse{Message: "Retribusi berhasil dihapus", ID: id}
 	c.JSON(http.StatusOK, response)
 }
@@ -514,7 +843,23 @@ func (h *RetribusiHandler) DeleteRetribusi(c *gin.Context) {
 // @Failure 500 {object} models.ErrorResponse "Internal server error"
 // @Router /api/v1/retribusis/stats [get]
 func (h *RetribusiHandler) GetRetribusiStats(c *gin.Context) {
-	dbConn, err := h.db.GetDB("postgres_satudata")
+	// CHANGE: Try the cache first
+	cacheKey := fmt.Sprintf("retribusi:stats:%s", c.Query("status"))
+	if cachedData, found := h.cache.Get(cacheKey); found {
+		logger.Info("Cache hit for stats", map[string]interface{}{"cache_key": cacheKey})

+		// Convert from interface{} to the expected type
+		if aggregateData, ok := cachedData.(*models.AggregateData); ok {
+			c.JSON(http.StatusOK, gin.H{
+				"message": "Statistik retribusi berhasil diambil (dari cache)",
+				"data":    aggregateData,
+			})
+			return
+		}
+	}
+
+	// CHANGE: Use GetSQLXDB to obtain an sqlx.DB connection
+	dbConn, err := h.db.GetSQLXDB("postgres_satudata")
 	if err != nil {
 		h.logAndRespondError(c, "Database connection failed", err, http.StatusInternalServerError)
 		return
@@ -522,20 +867,31 @@ func (h *RetribusiHandler) GetRetribusiStats(c *gin.Context) {
 	ctx, cancel := context.WithTimeout(c.Request.Context(), 15*time.Second)
 	defer cancel()

-	// PERUBAHAN: Kita tidak lagi parseFilterParams, kita bisa menggunakan QueryParser di sini juga jika perlu
+	// CHANGE: We no longer call parseFilterParams; a QueryParser could be used here too if needed,
 	// atau membangun filter secara manual seperti di GetRetribusi.
-	// Untuk contoh, kita asumsikan tidak ada filter selain default.
+	// For this example, we assume no filters beyond the default.
@@ -514,7 +843,23 @@
 // @Failure 500 {object} models.ErrorResponse "Internal server error"
 // @Router /api/v1/retribusis/stats [get]
 func (h *RetribusiHandler) GetRetribusiStats(c *gin.Context) {
-	dbConn, err := h.db.GetDB("postgres_satudata")
+	// CHANGE: try the cache first
+	cacheKey := fmt.Sprintf("retribusi:stats:%s", c.Query("status"))
+	if cachedData, found := h.cache.Get(cacheKey); found {
+		logger.Info("Cache hit for stats", map[string]interface{}{"cache_key": cacheKey})
+
+		// Convert from interface{} back to the expected type
+		if aggregateData, ok := cachedData.(*models.AggregateData); ok {
+			c.JSON(http.StatusOK, gin.H{
+				"message": "Statistik retribusi berhasil diambil (dari cache)",
+				"data":    aggregateData,
+			})
+			return
+		}
+	}
+
+	// CHANGE: use GetSQLXDB to obtain an *sqlx.DB connection
+	dbConn, err := h.db.GetSQLXDB("postgres_satudata")
 	if err != nil {
 		h.logAndRespondError(c, "Database connection failed", err, http.StatusInternalServerError)
 		return
@@ -522,20 +867,31 @@
 	ctx, cancel := context.WithTimeout(c.Request.Context(), 15*time.Second)
 	defer cancel()

-	// PERUBAHAN: Kita tidak lagi parseFilterParams, kita bisa menggunakan QueryParser di sini juga jika perlu
+	// CHANGE: parseFilterParams is no longer used; the QueryParser could serve here as well,
 	// atau membangun filter secara manual seperti di GetRetribusi.
-	// Untuk contoh, kita asumsikan tidak ada filter selain default.
+	// For this example, assume no filters beyond the default.
 	filterGroups := []queryUtils.FilterGroup{{
 		Filters: []queryUtils.DynamicFilter{{Column: "status", Operator: queryUtils.OpNotEqual, Value: "deleted"}},
 		LogicOp: "AND",
 	}}
+	// Add a status filter when one is provided and valid
+	if status := c.Query("status"); status != "" && models.IsValidStatus(status) {
+		filterGroups = append(filterGroups, queryUtils.FilterGroup{
+			Filters: []queryUtils.DynamicFilter{{Column: "status", Operator: queryUtils.OpEqual, Value: status}},
+			LogicOp: "AND",
+		})
+	}
+
 	aggregateData, err := h.getAggregateData(ctx, dbConn, filterGroups)
 	if err != nil {
 		h.logAndRespondError(c, "Failed to get statistics", err, http.StatusInternalServerError)
 		return
 	}

+	// CHANGE: store the result in the cache
+	h.cache.Set(cacheKey, aggregateData, 5*time.Minute) // cache stats for 5 minutes
+
 	c.JSON(http.StatusOK, gin.H{
 		"message": "Statistik retribusi berhasil diambil",
 		"data":    aggregateData,
@@ -546,38 +902,210 @@
 // =============================================================================
 // HELPER FUNCTIONS
 // =============================================================================

-// fetchRetribusisDynamic executes a dynamic query to get retribusi data and total count
-func (h *RetribusiHandler) fetchRetribusisDynamic(ctx context.Context, dbConn *sql.DB, query queryUtils.DynamicQuery) ([]retribusi.Retribusi, int, error) {
+// CHANGE: invalidate cache entries related to the changed data
+func (h *RetribusiHandler) invalidateRelatedCache(jenis, dinas, kodeTarif string) {
+	// Drop search/list/stats caches that may have gone stale
+	h.cache.DeleteByPrefix("retribusi:search:")
+	h.cache.DeleteByPrefix("retribusi:dynamic:")
+	h.cache.DeleteByPrefix("retribusi:stats:")
+
+	// Drop caches for the specific filter values
+	if jenis != "" {
+		cacheKey := fmt.Sprintf("retribusi:filter:jenis:%s", jenis)
+		h.cache.Delete(cacheKey)
+	}
+
+	if dinas != "" {
+		cacheKey := fmt.Sprintf("retribusi:filter:dinas:%s", dinas)
+		h.cache.Delete(cacheKey)
+	}
+
+	if kodeTarif != "" {
+		cacheKey := fmt.Sprintf("retribusi:filter:kode_tarif:%s", kodeTarif)
+		h.cache.Delete(cacheKey)
+	}
+}
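+
+// The cache keys touched above form a small, prefix-structured namespace
+// (inferred from this handler; the writers of the retribusi:filter:* keys are
+// not visible in this diff):
+//
+//	retribusi:id:<id>                                  single record, dropped on update/delete
+//	retribusi:stats:<status>                           aggregates per status filter, cached for 5 minutes
+//	retribusi:search:* / retribusi:dynamic:*           list and search results, dropped by prefix
+//	retribusi:filter:{jenis|dinas|kode_tarif}:<value>  per-attribute entries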
+
+// CHANGE: fetchRetribusisDynamic reworked to handle timeouts without failing the request
+func (h *RetribusiHandler) fetchRetribusisDynamic(ctx context.Context, dbConn *sqlx.DB, query queryUtils.DynamicQuery) ([]retribusi.Retribusi, int, error) {
+	logger.Info("Starting fetchRetribusisDynamic", map[string]interface{}{
+		"limit":  query.Limit,
+		"offset": query.Offset,
+		"from":   query.From,
+	})
+
 	var total int
 	var retribusis []retribusi.Retribusi

-	// 1. Get total count
-	countQuery := query
-	countQuery.Limit = 0
-	countQuery.Offset = 0
-	countQueryStr, countArgs, err := h.queryBuilder.BuildCountQuery(countQuery)
-	if err != nil {
-		return nil, 0, fmt.Errorf("failed to build count query: %w", err)
-	}
-	if err := dbConn.QueryRowContext(ctx, countQueryStr, countArgs...).Scan(&total); err != nil {
-		return nil, 0, fmt.Errorf("failed to get total count: %w", err)
+	// CHANGE: queries that carry a search term take a different path
+	hasSearch := false
+	for _, filterGroup := range query.Filters {
+		for _, filter := range filterGroup.Filters {
+			if filter.Operator == queryUtils.OpILike {
+				hasSearch = true
+				break
+			}
+		}
+		if hasSearch {
+			break
+		}
 	}
-	// 2. Get main data
-	mainQuery, mainArgs, err := h.queryBuilder.BuildQuery(query)
-	if err != nil {
-		return nil, 0, fmt.Errorf("failed to build main query: %w", err)
-	}
-	retribusis, err = h.fetchWithSQL(ctx, dbConn, mainQuery, mainArgs)
-	if err != nil {
-		return nil, 0, fmt.Errorf("failed to execute main query: %w", err)
+
+	logger.Info("Query analysis", map[string]interface{}{
+		"hasSearch":    hasSearch,
+		"totalFilters": len(query.Filters),
+	})
+
+	// CHANGE: use a shorter context for the search and count queries to prevent timeouts
+	queryCtx, queryCancel := context.WithTimeout(ctx, 30*time.Second)
+	defer queryCancel()
+
+	logger.Info("Context setup", map[string]interface{}{
+		"contextTimeout": "30s",
+		"hasSearch":      hasSearch,
+	})
+
+	// For search queries, cap the limit and estimate the total
+	if hasSearch {
+		search := getSearchTerm(query)
+		logger.Info("Executing search query with timeout context", map[string]interface{}{"search_term": search})
+
+		// CHANGE: cap the maximum limit for search queries to prevent timeouts
+		maxSearchLimit := 50
+		if query.Limit > maxSearchLimit {
+			originalLimit := query.Limit
+			query.Limit = maxSearchLimit
+			logger.Info("Reduced search limit to prevent timeout", map[string]interface{}{
+				"original_limit": originalLimit,
+				"new_limit":      maxSearchLimit,
+			})
+		}
+
+		// Execute the search query
+		err := h.queryBuilder.ExecuteQuery(queryCtx, dbConn, query, &retribusis)
+		if err != nil {
+			// SQLSTATE 57014 is PostgreSQL's query_canceled, raised when statement_timeout fires
+			if pqErr, ok := err.(*pq.Error); ok && pqErr.Code == "57014" {
+				logger.Warn("Search query timed out, trying fallback strategy", map[string]interface{}{
+					"search_term": search,
+				})

+				// Fallback: Search only in the most relevant column (e.g., 'Uraian_1')
+				// We need to rebuild the filters for the fallback
+				var fallbackFilters []queryUtils.FilterGroup
+				// Add other non-search filters back (e.g., status, dinas)
+				for _, fg := range query.Filters {
+					if fg.LogicOp == "AND" {
+						fallbackFilters = append(fallbackFilters, fg)
+					}
+				}
+				// Add the single, more specific search filter
+				fallbackFilters = append([]queryUtils.FilterGroup{{
+					Filters: []queryUtils.DynamicFilter{
+						{Column: "Uraian_1", Operator: queryUtils.OpILike, Value: "%" + search + "%"},
+					},
+					LogicOp: "AND",
+				}}, fallbackFilters...)

+				fallbackQuery := query
+				fallbackQuery.Filters = fallbackFilters

+				// Execute the fallback query with a shorter timeout
+				fallbackCtx, fallbackCancel := context.WithTimeout(ctx, 10*time.Second)
+				defer fallbackCancel()

+				err = h.queryBuilder.ExecuteQuery(fallbackCtx, dbConn, fallbackQuery, &retribusis)
+				if err != nil {
+					logger.Error("Fallback search query also failed", map[string]interface{}{
+						"error": err.Error(),
+						"query": fallbackQuery,
+					})
+					// Return a more user-friendly error
+					return nil, 0, fmt.Errorf("search timed out. The search term '%s' is too general. Please try a more specific term", search)
+				}
+				logger.Info("Fallback search query successful", map[string]interface{}{
+					"recordsFetched": len(retribusis),
+				})
+			} else {
+				// It's a different error, handle it as before
+				logger.Error("Failed to execute search query", map[string]interface{}{
+					"error": err.Error(),
+					"query": query,
+				})
+				return nil, 0, fmt.Errorf("failed to execute search query: %w", err)
+			}
+		}
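+
+		// Worked example of the estimate below (illustrative numbers): with
+		// offset=40 and limit=20, a full page of 20 rows yields total = 40+20+100 = 160,
+		// a deliberate overestimate so clients keep paginating; a short page of,
+		// say, 7 rows yields the exact total = 40+7 = 47.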
+		// Estimate the total for search queries (no exact count, for performance)
+		if len(retribusis) == query.Limit {
+			// A full page suggests there is more data beyond it
+			total = query.Offset + query.Limit + 100
+		} else {
+			total = query.Offset + len(retribusis)
+		}
+	} else {
+		logger.Info("Executing regular query without search")

+		// For non-search queries, compute the exact count with a shorter timeout
+		countCtx, countCancel := context.WithTimeout(ctx, 15*time.Second)
+		defer countCancel()

+		count, err := h.queryBuilder.ExecuteCount(countCtx, dbConn, query)
+		if err != nil {
+			// If the count fails, fall back to an estimate rather than erroring out
+			logger.Warn("Failed to get exact count, using estimation", map[string]interface{}{"error": err.Error()})
+			total = query.Offset + query.Limit + 100 // conservative estimate
+		} else {
+			total = int(count)
+			logger.Info("Count query successful", map[string]interface{}{
+				"count": total,
+			})
+		}

+		// Execute the main data query
+		err = h.queryBuilder.ExecuteQuery(queryCtx, dbConn, query, &retribusis)
+		if err != nil {
+			logger.Error("Failed to execute main query", map[string]interface{}{
+				"error": err.Error(),
+				"query": query,
+			})
+			return nil, 0, fmt.Errorf("failed to execute main query: %w", err)
+		}

+		logger.Info("Data query successful", map[string]interface{}{
+			"recordsFetched": len(retribusis),
+		})
	}

+	logger.Info("Query execution completed", map[string]interface{}{
+		"totalRecords":    total,
+		"returnedRecords": len(retribusis),
+		"hasSearch":       hasSearch,
+	})
+
 	return retribusis, total, nil
 }

-// PERUBAHAN: Fungsi agregasi sekarang sepenuhnya menggunakan QueryBuilder dan menghilangkan SQL manual.
-func (h *RetribusiHandler) getAggregateData(ctx context.Context, dbConn *sql.DB, filterGroups []queryUtils.FilterGroup) (*models.AggregateData, error) {
+// getSearchTerm extracts the search term from a DynamicQuery object.
+// It assumes the search is the first filter group with an "OR" logic operator.
+func getSearchTerm(query queryUtils.DynamicQuery) string {
+	for _, filterGroup := range query.Filters {
+		if filterGroup.LogicOp == "OR" && len(filterGroup.Filters) > 0 {
+			if valueStr, ok := filterGroup.Filters[0].Value.(string); ok {
+				return strings.Trim(valueStr, "%")
+			}
+		}
+	}
+	return ""
+}
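+
+// Illustration of the shape getSearchTerm expects (hypothetical values): a query
+// whose first OR group carries ILIKE filters such as
+//
+//	{Column: "Uraian_1", Operator: queryUtils.OpILike, Value: "%pasar%"}
+//
+// yields "pasar"; the surrounding % wildcards are trimmed off.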
+
+// CHANGE: the parameter type changed from *sql.DB to *sqlx.DB, and the
+// aggregation now relies entirely on the QueryBuilder, dropping hand-written SQL.
+func (h *RetribusiHandler) getAggregateData(ctx context.Context, dbConn *sqlx.DB, filterGroups []queryUtils.FilterGroup) (*models.AggregateData, error) {
 	aggregate := &models.AggregateData{
 		ByStatus: make(map[string]int),
 		ByDinas:  make(map[string]int),
@@ -592,6 +1120,10 @@
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
+		// CHANGE: use a shorter per-query timeout
+		queryCtx, queryCancel := context.WithTimeout(ctx, 20*time.Second)
+		defer queryCancel()
+
 		query := queryUtils.DynamicQuery{
 			From: "data_retribusi",
 			Fields: []queryUtils.SelectField{
@@ -601,30 +1133,25 @@
 			Filters: filterGroups,
 			GroupBy: []string{"status"},
 		}
-		sql, args, _ := h.queryBuilder.BuildQuery(query)
-		rows, err := dbConn.QueryContext(ctx, sql, args...)
+		var results []struct {
+			Status string `db:"status"`
+			Count  int    `db:"count"`
+		}
+		err := h.queryBuilder.ExecuteQuery(queryCtx, dbConn, query, &results)
 		if err != nil {
 			errChan <- fmt.Errorf("status query failed: %w", err)
 			return
 		}
-		defer rows.Close()
 		mu.Lock()
-		for rows.Next() {
-			var status string
-			var count int
-			if err := rows.Scan(&status, &count); err != nil {
-				mu.Unlock()
-				errChan <- fmt.Errorf("status scan failed: %w", err)
-				return
-			}
-			aggregate.ByStatus[status] = count
-			switch status {
+		for _, result := range results {
+			aggregate.ByStatus[result.Status] = result.Count
+			switch result.Status {
 			case "active":
-				aggregate.TotalActive = count
+				aggregate.TotalActive = result.Count
 			case "draft":
-				aggregate.TotalDraft = count
+				aggregate.TotalDraft = result.Count
 			case "inactive":
-				aggregate.TotalInactive = count
+				aggregate.TotalInactive = result.Count
 			}
 		}
 		mu.Unlock()
@@ -634,6 +1161,10 @@
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
+		// CHANGE: use a shorter per-query timeout
+		queryCtx, queryCancel := context.WithTimeout(ctx, 20*time.Second)
+		defer queryCancel()
+
 		query := queryUtils.DynamicQuery{
 			From: "data_retribusi",
 			Fields: []queryUtils.SelectField{
@@ -651,23 +1182,18 @@
 			Sort:  []queryUtils.SortField{{Column: "count", Order: "DESC"}},
 			Limit: 10,
 		}
-		sql, args, _ := h.queryBuilder.BuildQuery(query)
-		rows, err := dbConn.QueryContext(ctx, sql, args...)
+		var results []struct {
+			Dinas string `db:"dinas"`
+			Count int    `db:"count"`
+		}
+		err := h.queryBuilder.ExecuteQuery(queryCtx, dbConn, query, &results)
 		if err != nil {
 			errChan <- fmt.Errorf("dinas query failed: %w", err)
 			return
 		}
-		defer rows.Close()
 		mu.Lock()
-		for rows.Next() {
-			var dinas string
-			var count int
-			if err := rows.Scan(&dinas, &count); err != nil {
-				mu.Unlock()
-				errChan <- fmt.Errorf("dinas scan failed: %w", err)
-				return
-			}
-			aggregate.ByDinas[dinas] = count
+		for _, result := range results {
+			aggregate.ByDinas[result.Dinas] = result.Count
 		}
 		mu.Unlock()
 	}()
@@ -676,6 +1202,10 @@
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
+		// CHANGE: use a shorter per-query timeout
+		queryCtx, queryCancel := context.WithTimeout(ctx, 20*time.Second)
+		defer queryCancel()
+
 		query := queryUtils.DynamicQuery{
 			From: "data_retribusi",
 			Fields: []queryUtils.SelectField{
@@ -693,23 +1223,18 @@
 			Sort:  []queryUtils.SortField{{Column: "count", Order: "DESC"}},
 			Limit: 10,
 		}
-		sql, args, _ := h.queryBuilder.BuildQuery(query)
-		rows, err := dbConn.QueryContext(ctx, sql, args...)
+		var results []struct {
+			Jenis string `db:"jenis"`
+			Count int    `db:"count"`
+		}
+		err := h.queryBuilder.ExecuteQuery(queryCtx, dbConn, query, &results)
 		if err != nil {
 			errChan <- fmt.Errorf("jenis query failed: %w", err)
 			return
 		}
-		defer rows.Close()
 		mu.Lock()
-		for rows.Next() {
-			var jenis string
-			var count int
-			if err := rows.Scan(&jenis, &count); err != nil {
-				mu.Unlock()
-				errChan <- fmt.Errorf("jenis scan failed: %w", err)
-				return
-			}
-			aggregate.ByJenis[jenis] = count
+		for _, result := range results {
+			aggregate.ByJenis[result.Jenis] = result.Count
 		}
 		mu.Unlock()
 	}()
@@ -718,41 +1243,66 @@
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
+		// CHANGE: use a shorter per-query timeout
+		queryCtx, queryCancel := context.WithTimeout(ctx, 20*time.Second)
+		defer queryCancel()
+
 		// Last updated
 		query1 := queryUtils.DynamicQuery{
 			From:    "data_retribusi",
 			Fields:  []queryUtils.SelectField{{Expression: "MAX(date_updated)"}},
 			Filters: filterGroups,
 		}
-		sql1, args1, _ := h.queryBuilder.BuildQuery(query1)
 		var lastUpdated sql.NullTime
-		if err := dbConn.QueryRowContext(ctx, sql1, args1...).Scan(&lastUpdated); err != nil {
+		err := h.queryBuilder.ExecuteQueryRow(queryCtx, dbConn, query1, &lastUpdated)
+		if err != nil {
 			errChan <- fmt.Errorf("last updated query failed: %w", err)
 			return
 		}
-		// PERUBAHAN: Hari ini, kita bangun WHERE clause menggunakan QueryBuilder untuk keamanan,
-		// lalu sisipkan ke query manual untuk CASE statement.
-		whereQuery := queryUtils.DynamicQuery{From: "data_retribusi", Filters: filterGroups}
-		whereSQL, whereArgs, err := h.queryBuilder.BuildWhereClause(whereQuery.Filters)
+		// CHANGE: build today's statistics with the QueryBuilder as well
+		today := time.Now().Format("2006-01-02")

+		// Query for created_today.
+		// NOTE: copy filterGroups before appending; two plain appends onto the same
+		// slice could otherwise share, and clobber, one backing array.
+		createdFilters := append(append([]queryUtils.FilterGroup{}, filterGroups...), queryUtils.FilterGroup{
+			Filters: []queryUtils.DynamicFilter{
+				{Column: "DATE(date_created)", Operator: queryUtils.OpEqual, Value: today},
+			},
+			LogicOp: "AND",
+		})
+		createdTodayQuery := queryUtils.DynamicQuery{
+			From: "data_retribusi",
+			Fields: []queryUtils.SelectField{
+				{Expression: "COUNT(*)", Alias: "count"},
+			},
+			Filters: createdFilters,
+		}

+		var createdToday int
+		err = h.queryBuilder.ExecuteQueryRow(queryCtx, dbConn, createdTodayQuery, &createdToday)
 		if err != nil {
-			errChan <- fmt.Errorf("failed to build where clause for stats: %w", err)
+			errChan <- fmt.Errorf("created today query failed: %w", err)
 			return
 		}
-		today := time.Now().Format("2006-01-02")
-		// PERUBAHAN: Menggunakan fmt.Sprintf dengan placeholder yang benar untuk PostgreSQL ($1, $2)
-		todayStatsQuery := fmt.Sprintf(`
-			SELECT
-				SUM(CASE WHEN DATE(date_created) = $1 THEN 1 ELSE 0 END) as created_today,
-				SUM(CASE WHEN DATE(date_updated) = $1 AND DATE(date_created) != $1 THEN 1 ELSE 0 END) as updated_today
-			FROM data_retribusi WHERE %s`, whereSQL)
+		// Query for updated_today (updated today but not created today)
+		updatedFilters := append(append([]queryUtils.FilterGroup{}, filterGroups...), queryUtils.FilterGroup{
+			Filters: []queryUtils.DynamicFilter{
+				{Column: "DATE(date_updated)", Operator: queryUtils.OpEqual, Value: today},
+				{Column: "DATE(date_created)", Operator: queryUtils.OpNotEqual, Value: today},
+			},
+			LogicOp: "AND",
+		})
+		updatedTodayQuery := queryUtils.DynamicQuery{
+			From: "data_retribusi",
+			Fields: []queryUtils.SelectField{
+				{Expression: "COUNT(*)", Alias: "count"},
+			},
+			Filters: updatedFilters,
+		}

-		// Argumen pertama untuk 'today', sisanya untuk where clause
-		args := append([]interface{}{today}, whereArgs...)
-		var createdToday, updatedToday int
-		if err := dbConn.QueryRowContext(ctx, todayStatsQuery, args...).Scan(&createdToday, &updatedToday); err != nil {
-			errChan <- fmt.Errorf("today stats query failed: %w", err)
+		var updatedToday int
+		err = h.queryBuilder.ExecuteQueryRow(queryCtx, dbConn, updatedTodayQuery, &updatedToday)
+		if err != nil {
+			errChan <- fmt.Errorf("updated today query failed: %w", err)
 			return
 		}
@@ -777,49 +1327,6 @@ func (h *RetribusiHandler) getAggregateData(ctx context.Context, dbConn *sql.DB,
 	return aggregate, nil
 }

-// fetchWithSQL executes a query and scans the results into a slice of Retribusi
-func (h *RetribusiHandler) fetchWithSQL(ctx context.Context, dbConn *sql.DB, sql string, args []interface{}) ([]retribusi.Retribusi, error) {
-	rows, err := dbConn.QueryContext(ctx, sql, args...)
- if err != nil { - return nil, err - } - defer rows.Close() - - var retribusis []retribusi.Retribusi - for rows.Next() { - retribusi, err := h.scanRetribusi(rows) - if err != nil { - return nil, err - } - retribusis = append(retribusis, retribusi) - } - return retribusis, rows.Err() -} - -// scanRetribusi scans a single row from a sql.Rows object into a Retribusi struct -func (h *RetribusiHandler) scanRetribusi(rows *sql.Rows) (retribusi.Retribusi, error) { - var r retribusi.Retribusi - err := rows.Scan( - &r.ID, &r.Status, &r.Sort, &r.UserCreated, &r.DateCreated, &r.UserUpdated, &r.DateUpdated, - &r.Jenis, &r.Pelayanan, &r.Dinas, &r.KelompokObyek, &r.KodeTarif, - &r.Tarif, &r.Satuan, &r.TarifOvertime, &r.SatuanOvertime, - &r.RekeningPokok, &r.RekeningDenda, &r.Uraian1, &r.Uraian2, &r.Uraian3, - ) - return r, err -} - -// scanRetribusiFromRow scans a single sql.Row object into a Retribusi struct -func (h *RetribusiHandler) scanRetribusiFromRow(row *sql.Row) (retribusi.Retribusi, error) { - var r retribusi.Retribusi - err := row.Scan( - &r.ID, &r.Status, &r.Sort, &r.UserCreated, &r.DateCreated, &r.UserUpdated, &r.DateUpdated, - &r.Jenis, &r.Pelayanan, &r.Dinas, &r.KelompokObyek, &r.KodeTarif, - &r.Tarif, &r.Satuan, &r.TarifOvertime, &r.SatuanOvertime, - &r.RekeningPokok, &r.RekeningDenda, &r.Uraian1, &r.Uraian2, &r.Uraian3, - ) - return r, err -} - // logAndRespondError logs an error and sends a JSON response func (h *RetribusiHandler) logAndRespondError(c *gin.Context, message string, err error, statusCode int) { logger.Error(message, map[string]interface{}{"error": err.Error(), "status_code": statusCode}) @@ -847,10 +1354,3 @@ func (h *RetribusiHandler) calculateMeta(limit, offset, total int) models.MetaRe CurrentPage: currentPage, HasNext: offset+limit < total, HasPrev: offset > 0, } } - -// validateRetribusiSubmission can be used for custom business logic validation before creation/update -func (h *RetribusiHandler) validateRetribusiSubmission(ctx context.Context, dbConn *sql.DB, req *retribusi.RetribusiCreateRequest) error { - // TODO: Implementasikan validasi duplikat atau logika bisnis lainnya di sini. - // Contoh: validasi bahwa KodeTarif belum ada. 
- return nil -} diff --git a/internal/utils/query/builder.go b/internal/utils/query/builder.go index 5f67839..ca351b6 100644 --- a/internal/utils/query/builder.go +++ b/internal/utils/query/builder.go @@ -472,9 +472,9 @@ func (qb *QueryBuilder) BuildQuery(query DynamicQuery) (string, []interface{}, e finalSQL := strings.Join(queryParts, " ") - // Security check for dangerous patterns + // Security check for dangerous patterns in user input values if qb.enableSecurityChecks { - if err := qb.checkForSqlInjection(finalSQL); err != nil { + if err := qb.checkForSqlInjectionInArgs(allArgs); err != nil { return "", nil, err } } @@ -1327,16 +1327,20 @@ func (qb *QueryBuilder) escapeIdentifier(col string) string { } } -// checkForSqlInjection checks for potential SQL injection patterns -func (qb *QueryBuilder) checkForSqlInjection(sql string) error { +// checkForSqlInjectionInArgs checks for potential SQL injection patterns in query arguments +func (qb *QueryBuilder) checkForSqlInjectionInArgs(args []interface{}) error { if !qb.enableSecurityChecks { return nil } - lowerSQL := strings.ToLower(sql) - for _, pattern := range qb.dangerousPatterns { - if pattern.MatchString(lowerSQL) { - return fmt.Errorf("potential SQL injection detected: pattern %s matched", pattern.String()) + for _, arg := range args { + if str, ok := arg.(string); ok { + lowerStr := strings.ToLower(str) + for _, pattern := range qb.dangerousPatterns { + if pattern.MatchString(lowerStr) { + return fmt.Errorf("potential SQL injection detected in query argument: pattern %s matched", pattern.String()) + } + } } } return nil diff --git a/internal/utils/query/exemple.go.exemple b/internal/utils/query/exemple.go.exemple new file mode 100644 index 0000000..348c467 --- /dev/null +++ b/internal/utils/query/exemple.go.exemple @@ -0,0 +1,943 @@ +package main + +import ( + "context" + "fmt" + "log" + "net/url" + "time" + + "api-service/internal/config" + "api-service/internal/database" + "api-service/internal/utils/query" + "api-service/internal/validation" + + "github.com/jmoiron/sqlx" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" +) + +// This file provides comprehensive examples of using the query builder library +// for performing various database operations including CRUD, transactions, joins, etc. +// Each example function demonstrates how to build queries, print them, and execute them. +// ============================================================================= +// DEFINISI MODEL (CONTOH) +// ============================================================================= + +// User adalah contoh struct untuk tabel 'users'. +type User struct { + ID int `db:"id" bson:"_id,omitempty"` + Name string `db:"name" bson:"name"` + Email string `db:"email" bson:"email"` + Status string `db:"status" bson:"status"` + CreatedAt time.Time `db:"created_at" bson:"created_at"` +} + +// Post adalah contoh struct untuk tabel 'posts'. +type Post struct { + ID int `db:"id" bson:"_id,omitempty"` + UserID int `db:"user_id" bson:"user_id"` + Title string `db:"title" bson:"title"` + Content string `db:"content" bson:"content"` + CreatedAt time.Time `db:"created_at" bson:"created_at"` +} + +// Employee adalah contoh struct untuk tabel 'employees' dengan kolom JSON. 
+type Employee struct { + ID int `db:"id" bson:"_id,omitempty"` + Name string `db:"name" bson:"name"` + Department string `db:"department" bson:"department"` + Salary float64 `db:"salary" bson:"salary"` + Metadata map[string]interface{} `db:"metadata" bson:"metadata"` // Kolom JSON/JSONB +} + +// ============================================================================= +// FUNGSI UTAMA +// ============================================================================= + +func main() { + cfg := setupConfig() + dbService := database.New(cfg) + + fmt.Println("============================================================") + fmt.Println(" CONTOH 1: QUERY DASAR (SELECT, INSERT, UPDATE, DELETE)") + fmt.Println("============================================================") + basicCRUDExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 2: TRANSAKSI SQL (POSTGRESQL)") + fmt.Println("============================================================") + sqlTransactionExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 3: TRANSAKSI MONGODB") + fmt.Println("============================================================") + mongoTransactionExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 4: QUERY DENGAN FILTER DAN PAGINASI") + fmt.Println("============================================================") + filterAndPaginationExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 5: QUERY DENGAN JOIN") + fmt.Println("============================================================") + joinExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 6: QUERY DENGAN CTE (COMMON TABLE EXPRESSION)") + fmt.Println("============================================================") + cteExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 7: QUERY DENGAN WINDOW FUNCTION") + fmt.Println("============================================================") + windowFunctionExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 8: VALIDASI DATA DINAMIS") + fmt.Println("============================================================") + validationExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 9: OPERASI JSON") + fmt.Println("============================================================") + jsonQueryExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 10: QUERY MONGODB (CRUD & AGGREGATION)") + fmt.Println("============================================================") + mongodbExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 11: PENGGUNAAN READ REPLICA") + fmt.Println("============================================================") + readReplicaExample(dbService) + + fmt.Println("\n============================================================") + fmt.Println(" CONTOH 12: HEALTH CHECK DATABASE") + fmt.Println("============================================================") + healthCheckExample(dbService) + + 
fmt.Println("\n============================================================") + fmt.Println(" CONTOH 13: PARSING QUERY DARI URL") + fmt.Println("============================================================") + urlQueryParsingExample(dbService) +} + +func setupConfig() *config.Config { + return &config.Config{ + Databases: map[string]config.DatabaseConfig{ + "main": { + Type: "postgres", + Host: "localhost", + Port: 5432, + Username: "user", + Password: "password", + Database: "company_db", + SSLMode: "disable", + MaxOpenConns: 25, + MaxIdleConns: 5, + ConnMaxLifetime: time.Hour, + }, + }, + "mongodb": config.DatabaseConfig{ + Type: "mongodb", + Host: "localhost", + Port: 27017, + Database: "company_db", + Username: "user", + Password: "password", + }, + } +} + +// ============================================================================= +// CONTOH 1: QUERY DASAR (CRUD) +// ============================================================================= + +// basicCRUDExample demonstrates basic Create, Read, Update, Delete operations using the query builder. +// It shows how to build SQL queries, print them, and execute them while displaying results. +// Expected output: Prints INSERT SQL and result (new ID), SELECT SQL and user data, UPDATE SQL and affected rows, DELETE SQL and affected rows. +// Example raw queries: +// INSERT: INSERT INTO users (name, email, status) VALUES ($1, $2, $3) RETURNING id +// SELECT: SELECT * FROM users WHERE id = $1 +// UPDATE: UPDATE users SET status = $1 WHERE id = $2 +// DELETE: DELETE FROM users WHERE id = $1 +func basicCRUDExample(dbService database.Service) { + ctx := context.Background() + db, err := dbService.GetSQLXDB("main") + if err != nil { + log.Printf("Gagal mendapatkan koneksi DB: %v", err) + return + } + qb := query.NewQueryBuilder(query.DBTypePostgreSQL) + + // --- INSERT --- + fmt.Println("\n--- Operasi INSERT ---") + insertData := query.InsertData{ + Columns: []string{"name", "email", "status"}, + Values: []interface{}{"Alice", "alice@example.com", "active"}, + } + sql, args, err := qb.BuildInsertQuery("users", insertData, "id") + if err != nil { + log.Printf("Error building INSERT: %v", err) + return + } + fmt.Printf("Generated INSERT SQL: %s\nArgs: %v\n", sql, args) + result, err := qb.ExecuteInsert(ctx, db, "users", insertData, "id") + if err != nil { + log.Printf("Error INSERT: %v", err) + return + } + newID, _ := result.LastInsertId() + fmt.Printf("-> INSERT: Berhasil menambah user dengan ID: %d\n", newID) + + // --- SELECT (Single Row) --- + fmt.Println("\n--- Operasi SELECT ---") + var user User + selectQuery := query.DynamicQuery{ + Fields: []query.SelectField{{Expression: "*"}}, + From: "users", + Filters: []query.FilterGroup{{ + Filters: []query.DynamicFilter{{Column: "id", Operator: query.OpEqual, Value: newID}}, + }}, + } + sql, args, err = qb.BuildQuery(selectQuery) + if err != nil { + log.Printf("Error building SELECT: %v", err) + return + } + fmt.Printf("Generated SELECT SQL: %s\nArgs: %v\n", sql, args) + err = qb.ExecuteQueryRow(ctx, db, selectQuery, &user) + if err != nil { + log.Printf("Error SELECT single row: %v", err) + return + } + fmt.Printf("-> SELECT (Single Row): Berhasil mengambil user: %+v\n", user) + + // --- UPDATE --- + fmt.Println("\n--- Operasi UPDATE ---") + updateData := query.UpdateData{ + Columns: []string{"status"}, + Values: []interface{}{"inactive"}, + } + updateFilter := []query.FilterGroup{{ + Filters: []query.DynamicFilter{{Column: "id", Operator: query.OpEqual, Value: newID}}, + }} + sql, args, 
err = qb.BuildUpdateQuery("users", updateData, updateFilter) + if err != nil { + log.Printf("Error building UPDATE: %v", err) + return + } + fmt.Printf("Generated UPDATE SQL: %s\nArgs: %v\n", sql, args) + _, err = qb.ExecuteUpdate(ctx, db, "users", updateData, updateFilter) + if err != nil { + log.Printf("Error UPDATE: %v", err) + return + } + fmt.Printf("-> UPDATE: Berhasil memperbarui status user dengan ID: %d\n", newID) + + // --- DELETE --- + fmt.Println("\n--- Operasi DELETE ---") + deleteFilter := []query.FilterGroup{{ + Filters: []query.DynamicFilter{{Column: "id", Operator: query.OpEqual, Value: newID}}, + }} + sql, args, err = qb.BuildDeleteQuery("users", deleteFilter) + if err != nil { + log.Printf("Error building DELETE: %v", err) + return + } + fmt.Printf("Generated DELETE SQL: %s\nArgs: %v\n", sql, args) + _, err = qb.ExecuteDelete(ctx, db, "users", deleteFilter) + if err != nil { + log.Printf("Error DELETE: %v", err) + return + } + fmt.Printf("-> DELETE: Berhasil menghapus user dengan ID: %d\n", newID) +} + +// ============================================================================= +// CONTOH 2: TRANSAKSI SQL (POSTGRESQL) +// ============================================================================= + +// sqlTransactionExample demonstrates how to perform atomic transactions involving updates +// across multiple tables using the Query Builder. It builds and prints SQL queries before execution. +// Expected output: Prints UPDATE SQL for salaries and employees, transaction commit/rollback status, and validation results. +// Example raw queries: +// UPDATE salaries: UPDATE salaries SET salary = $1 WHERE employee_id = $2 +// UPDATE employees: UPDATE employees SET last_name = $1 WHERE employee_id = $2 +func sqlTransactionExample(dbService database.Service) { + ctx := context.Background() + employeeID := 123 + newSalary := 75000 + newLastName := "Doe" + + db, err := dbService.GetSQLXDB("main") + if err != nil { + log.Fatalf("Gagal mendapatkan koneksi database SQL: %v", err) + } + qb := query.NewQueryBuilder(query.DBTypePostgreSQL) + + tx, err := db.BeginTxx(ctx, nil) + if err != nil { + log.Fatalf("Gagal memulai transaksi SQL: %v", err) + } + + defer func() { + if p := recover(); p != nil { + fmt.Println("Terjadi panic, melakukan rollback transaksi...") + _ = tx.Rollback() + panic(p) + } else if err != nil { + fmt.Printf("Transaksi dibatalkan (ROLLBACK) karena error: %v\n", err) + _ = tx.Rollback() + } else { + fmt.Println("Tidak ada error, melakukan COMMIT transaksi...") + err = tx.Commit() + if err != nil { + log.Printf("Gagal melakukan COMMIT transaksi: %v", err) + } + } + }() + + fmt.Printf("Memulai transaksi untuk employee_id: %d\n", employeeID) + + // --- Operasi 1: Update gaji di tabel 'salaries' --- + fmt.Println("\n--- Operasi 1: UPDATE salaries ---") + salariesUpdateData := query.UpdateData{ + Columns: []string{"salary"}, + Values: []interface{}{newSalary}, + } + salariesFilter := []query.FilterGroup{ + { + Filters: []query.DynamicFilter{ + {Column: "employee_id", Operator: query.OpEqual, Value: employeeID}, + }, + }, + } + sql, args, err := qb.BuildUpdateQuery("salaries", salariesUpdateData, salariesFilter) + if err != nil { + log.Printf("Error building UPDATE salaries: %v", err) + return + } + fmt.Printf("Generated UPDATE salaries SQL: %s\nArgs: %v\n", sql, args) + salariesResult, err := qb.ExecuteUpdate(ctx, tx, "salaries", salariesUpdateData, salariesFilter) + if err != nil { + return + } + salariesRowsAffected, _ := salariesResult.RowsAffected() + 
fmt.Printf("-> UPDATE salaries: %d baris terpengaruh.\n", salariesRowsAffected) + + // --- Operasi 2: Update informasi di tabel 'employees' --- + fmt.Println("\n--- Operasi 2: UPDATE employees ---") + employeesUpdateData := query.UpdateData{ + Columns: []string{"last_name"}, + Values: []interface{}{newLastName}, + } + employeesFilter := []query.FilterGroup{ + { + Filters: []query.DynamicFilter{ + {Column: "employee_id", Operator: query.OpEqual, Value: employeeID}, + }, + }, + } + sql, args, err = qb.BuildUpdateQuery("employees", employeesUpdateData, employeesFilter) + if err != nil { + log.Printf("Error building UPDATE employees: %v", err) + return + } + fmt.Printf("Generated UPDATE employees SQL: %s\nArgs: %v\n", sql, args) + employeesResult, err := qb.ExecuteUpdate(ctx, tx, "employees", employeesUpdateData, employeesFilter) + if err != nil { + return + } + employeesRowsAffected, _ := employeesResult.RowsAffected() + fmt.Printf("-> UPDATE employees: %d baris terpengaruh.\n", employeesRowsAffected) + + // --- Validasi Akhir Transaksi --- + if salariesRowsAffected == 1 && employeesRowsAffected == 1 { + fmt.Println("-> Validasi BERHASIL: Kedua tabel berhasil diperbarui.") + } else { + err = fmt.Errorf("validasi GAGAL: diharapkan 1 baris terupdate di setiap tabel, tetapi mendapat %d (salaries) dan %d (employees)", salariesRowsAffected, employeesRowsAffected) + return + } +} + +// ============================================================================= +// CONTOH 3: TRANSAKSI MONGODB +// ============================================================================= + +// mongoTransactionExample demonstrates MongoDB transactions using the query builder. +// It prints the filters and update operations before executing them in a transaction. +// Expected output: Prints MongoDB filters and update operations for salaries and employees, transaction commit/abort status, and validation results. 
+// Example raw queries: +// MongoDB filters: {"employee_id": 123} +// MongoDB updates: {"$set": {"salary": 75000}}, {"$set": {"last_name": "Doe"}} +func mongoTransactionExample(dbService database.Service) { + ctx := context.Background() + employeeID := 123 + newSalary := 75000 + newLastName := "Doe" + + client, err := dbService.GetMongoClient("mongodb") + if err != nil { + log.Fatalf("Gagal mendapatkan klien MongoDB: %v", err) + } + + salariesCollection := client.Database("company_db").Collection("salaries") + employeesCollection := client.Database("company_db").Collection("employees") + + session, err := client.StartSession() + if err != nil { + log.Fatalf("Gagal memulai sesi MongoDB: %v", err) + } + defer session.EndSession(ctx) + + fmt.Printf("Memulai transaksi MongoDB untuk employee_id: %d\n", employeeID) + + _, err = session.WithTransaction(ctx, func(sessCtx mongo.SessionContext) (interface{}, error) { + // --- Operasi 1: Update gaji di koleksi 'salaries' --- + fmt.Println("\n--- Operasi 1: UPDATE salaries ---") + salariesFilter := bson.M{"employee_id": employeeID} + salariesUpdate := bson.M{"$set": bson.M{"salary": newSalary}} + fmt.Printf("-> MongoDB Update Salaries Filter: %#v\n", salariesFilter) + fmt.Printf("-> MongoDB Update Salaries Operation: %#v\n", salariesUpdate) + + salariesResult, err := salariesCollection.UpdateOne(sessCtx, salariesFilter, salariesUpdate) + if err != nil { + return nil, fmt.Errorf("gagal update koleksi salaries: %w", err) + } + fmt.Printf("-> UPDATE salaries: %d dokumen cocok (matched).\n", salariesResult.MatchedCount) + + // --- Operasi 2: Update informasi di koleksi 'employees' --- + fmt.Println("\n--- Operasi 2: UPDATE employees ---") + employeesFilter := bson.M{"employee_id": employeeID} + employeesUpdate := bson.M{"$set": bson.M{"last_name": newLastName}} + fmt.Printf("-> MongoDB Update Employees Filter: %#v\n", employeesFilter) + fmt.Printf("-> MongoDB Update Employees Operation: %#v\n", employeesUpdate) + + employeesResult, err := employeesCollection.UpdateOne(sessCtx, employeesFilter, employeesUpdate) + if err != nil { + return nil, fmt.Errorf("gagal update koleksi employees: %w", err) + } + fmt.Printf("-> UPDATE employees: %d dokumen cocok (matched).\n", employeesResult.MatchedCount) + + // --- Validasi Akhir Transaksi --- + if salariesResult.MatchedCount == 1 && employeesResult.MatchedCount == 1 { + fmt.Println("-> Validasi BERHASIL: Kedua koleksi berhasil diperbarui.") + return nil, nil + } + + return nil, fmt.Errorf("validasi GAGAL: diharapkan 1 dokumen terupdate di setiap koleksi, tetapi mendapat %d (salaries) dan %d (employees)", salariesResult.MatchedCount, employeesResult.MatchedCount) + }) + + if err != nil { + fmt.Printf("Transaksi MongoDB dibatalkan (ABORT) karena error: %v\n", err) + } else { + fmt.Println("Transaksi MongoDB berhasil di-commit.") + } +} + +// ============================================================================= +// CONTOH 4: FILTER DAN PAGINASI +// ============================================================================= + +// filterAndPaginationExample demonstrates querying with filters and pagination. +// It builds and prints the SELECT query before executing it. +// Expected output: Prints SELECT SQL with filters and pagination, and the number of active users found. 
+// Example raw query: +// SELECT id, name FROM users WHERE (status = $1 AND created_at > $2) ORDER BY name ASC LIMIT 5 OFFSET 10 +func filterAndPaginationExample(dbService database.Service) { + ctx := context.Background() + db, err := dbService.GetSQLXDB("main") + if err != nil { + log.Printf("Gagal mendapatkan koneksi DB: %v", err) + return + } + qb := query.NewQueryBuilder(query.DBTypePostgreSQL) + + query := query.DynamicQuery{ + Fields: []query.SelectField{ + {Expression: "id"}, + {Expression: "name"}, + }, + From: "users", + Filters: []query.FilterGroup{ + { + LogicOp: "AND", + Filters: []query.DynamicFilter{ + {Column: "status", Operator: query.OpEqual, Value: "active"}, + {Column: "created_at", Operator: query.OpGreaterThan, Value: time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC)}, + }, + }, + }, + Sort: []query.SortField{{Column: "name", Order: "ASC"}}, + Limit: 5, + Offset: 10, + } + + var users []User + sql, args, err := qb.BuildQuery(query) + if err != nil { + log.Printf("Error building SELECT: %v", err) + return + } + fmt.Printf("Generated SELECT SQL: %s\nArgs: %v\n", sql, args) + err = qb.ExecuteQuery(ctx, db, query, &users) + if err != nil { + log.Printf("Error query dengan filter: %v", err) + return + } + fmt.Printf("-> Filter & Paginasi: Ditemukan %d user aktif (halaman 3).\n", len(users)) +} + +// ============================================================================= +// CONTOH 5: QUERY DENGAN JOIN +// ============================================================================= + +// joinExample demonstrates querying with JOIN operations. +// It builds and prints the JOIN query before executing it. +// Expected output: Prints JOIN SQL query and the number of posts with author names found. +// Example raw query: +// SELECT p.id AS post_id, p.title, u.name AS author_name FROM posts p INNER JOIN users u ON p.user_id = u.id LIMIT 10 +func joinExample(dbService database.Service) { + ctx := context.Background() + db, err := dbService.GetSQLXDB("main") + if err != nil { + log.Printf("Gagal mendapatkan koneksi DB: %v", err) + return + } + qb := query.NewQueryBuilder(query.DBTypePostgreSQL) + + query := query.DynamicQuery{ + Fields: []query.SelectField{ + {Expression: "p.id", Alias: "post_id"}, + {Expression: "p.title"}, + {Expression: "u.name", Alias: "author_name"}, + }, + From: "posts", + Aliases: "p", + Joins: []query.Join{ + { + Type: "INNER", + Table: "users", + Alias: "u", + OnConditions: query.FilterGroup{ + Filters: []query.DynamicFilter{ + {Column: "p.user_id", Operator: query.OpEqual, Value: "u.id"}, + }, + }, + }, + }, + Limit: 10, + } + + var results []struct { + PostID int `db:"post_id"` + Title string `db:"title"` + AuthorName string `db:"author_name"` + } + sql, args, err := qb.BuildQuery(query) + if err != nil { + log.Printf("Error building JOIN: %v", err) + return + } + fmt.Printf("Generated JOIN SQL: %s\nArgs: %v\n", sql, args) + err = qb.ExecuteQuery(ctx, db, query, &results) + if err != nil { + log.Printf("Error query JOIN: %v", err) + return + } + fmt.Printf("-> JOIN: Ditemukan %d post dengan nama penulis.\n", len(results)) +} + +// ============================================================================= +// CONTOH 6: QUERY DENGAN CTE +// ============================================================================= + +// cteExample demonstrates querying with Common Table Expressions (CTE). +// It builds and prints the CTE query before executing it. +// Expected output: Prints CTE SQL query and the number of users with more than 5 posts. 
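+// Example raw query (illustrative; the builder's exact formatting and
+// placeholder numbering may differ):
+// WITH user_post_counts AS (SELECT user_id, COUNT(*) AS post_count FROM posts GROUP BY user_id)
+// SELECT u.name, upc.post_count FROM users u INNER JOIN user_post_counts upc ON u.id = upc.user_id WHERE (upc.post_count > $1)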
+func cteExample(dbService database.Service) { + ctx := context.Background() + db, err := dbService.GetSQLXDB("main") + if err != nil { + log.Printf("Gagal mendapatkan koneksi DB: %v", err) + return + } + qb := query.NewQueryBuilder(query.DBTypePostgreSQL) + + query := query.DynamicQuery{ + CTEs: []query.CTE{ + { + Name: "user_post_counts", + Query: query.DynamicQuery{ + Fields: []query.SelectField{ + {Expression: "user_id"}, + {Expression: "COUNT(*)", Alias: "post_count"}, + }, + From: "posts", + GroupBy: []string{"user_id"}, + }, + }, + }, + Fields: []query.SelectField{ + {Expression: "u.name"}, + {Expression: "upc.post_count"}, + }, + From: "users u", + Joins: []query.Join{ + { + Type: "INNER", + Table: "user_post_counts", + Alias: "upc", + OnConditions: query.FilterGroup{ + Filters: []query.DynamicFilter{ + {Column: "u.id", Operator: query.OpEqual, Value: "upc.user_id"}, + }, + }, + }, + }, + Filters: []query.FilterGroup{ + { + Filters: []query.DynamicFilter{ + {Column: "upc.post_count", Operator: query.OpGreaterThan, Value: 5}, + }, + }, + }, + } + + var results []struct { + Name string `db:"name"` + PostCount int `db:"post_count"` + } + sql, args, err := qb.BuildQuery(query) + if err != nil { + log.Printf("Error building CTE: %v", err) + return + } + fmt.Printf("Generated CTE SQL: %s\nArgs: %v\n", sql, args) + err = qb.ExecuteQuery(ctx, db, query, &results) + if err != nil { + log.Printf("Error query CTE: %v", err) + return + } + fmt.Printf("-> CTE: Ditemukan %d user dengan lebih dari 5 post.\n", len(results)) +} + +// ============================================================================= +// CONTOH 7: WINDOW FUNCTION +// ============================================================================= + +// windowFunctionExample demonstrates querying with window functions. +// It builds and prints the window function query before executing it. +// Expected output: Prints window function SQL query and the number of employees with salary rankings. 
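+// Example raw query (illustrative; actual output may differ):
+// SELECT name, department, salary, RANK() OVER (PARTITION BY department ORDER BY salary DESC) AS salary_rank
+// FROM employees WHERE (department = $1)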
+func windowFunctionExample(dbService database.Service) { + ctx := context.Background() + db, err := dbService.GetSQLXDB("main") + if err != nil { + log.Printf("Gagal mendapatkan koneksi DB: %v", err) + return + } + qb := query.NewQueryBuilder(query.DBTypePostgreSQL) + + query := query.DynamicQuery{ + Fields: []query.SelectField{ + {Expression: "name"}, + {Expression: "department"}, + {Expression: "salary"}, + }, + From: "employees", + WindowFunctions: []query.WindowFunction{ + { + Function: "RANK", + Over: "department", + OrderBy: "salary DESC", + Alias: "salary_rank", + }, + }, + Filters: []query.FilterGroup{ + { + Filters: []query.DynamicFilter{ + {Column: "department", Operator: query.OpEqual, Value: "Engineering"}, + }, + }, + }, + } + + var results []struct { + Name string `db:"name"` + Department string `db:"department"` + Salary float64 `db:"salary"` + SalaryRank int `db:"salary_rank"` + } + sql, args, err := qb.BuildQuery(query) + if err != nil { + log.Printf("Error building Window Function: %v", err) + return + } + fmt.Printf("Generated Window Function SQL: %s\nArgs: %v\n", sql, args) + err = qb.ExecuteQuery(ctx, db, query, &results) + if err != nil { + log.Printf("Error query Window Function: %v", err) + return + } + fmt.Printf("-> Window Function: Ditemukan %d employee di departemen Engineering dengan peringkat gaji.\n", len(results)) +} + +// ============================================================================= +// CONTOH 8: VALIDASI DATA DINAMIS +// ============================================================================= + +// validationExample demonstrates dynamic data validation using the query builder. +// It builds and prints the validation query before executing it. +// Expected output: Prints validation SQL query and whether the email is duplicate or available. +func validationExample(dbService database.Service) { + ctx := context.Background() + db, err := dbService.GetSQLXDB("main") + if err != nil { + log.Printf("Gagal mendapatkan koneksi DB: %v", err) + return + } + qb := query.NewQueryBuilder(query.DBTypePostgreSQL) + validator := validation.NewDynamicValidator(qb) + + userData := map[string]interface{}{"email": "test@example.com"} + emailRule := validation.NewUniqueFieldRule("users", "email") + + // Build and print the validation query + countQuery := query.DynamicQuery{ + From: "users", + Filters: []query.FilterGroup{{ + Filters: []query.DynamicFilter{{Column: "email", Operator: query.OpEqual, Value: "test@example.com"}}, + }}, + } + sql, args, err := qb.BuildCountQuery(countQuery) + if err != nil { + log.Printf("Error building validation query: %v", err) + return + } + fmt.Printf("Generated Validation SQL: %s\nArgs: %v\n", sql, args) + + isDuplicate, err := validator.Validate(ctx, db, emailRule, userData) + if err != nil { + log.Printf("Error validasi: %v", err) + return + } + + if isDuplicate { + fmt.Println("-> Validasi: Email 'test@example.com' sudah ada.") + } else { + fmt.Println("-> Validasi: Email 'test@example.com' tersedia.") + } +} + +// ============================================================================= +// CONTOH 9: OPERASI JSON +// ============================================================================= + +// jsonQueryExample demonstrates JSON operations in queries. +// It builds and prints the JSON queries before executing them. +// Expected output: Prints JSON SELECT and UPDATE SQL queries, number of employees found, and update success message. 
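+// Example raw queries (illustrative; PostgreSQL JSONB operators assumed):
+// SELECT: SELECT * FROM employees WHERE (metadata->>'department' = $1)
+// UPDATE: UPDATE employees SET metadata = jsonb_set(metadata, '{role}', to_jsonb($1::text)) WHERE (id = $2)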
+func jsonQueryExample(dbService database.Service) { + ctx := context.Background() + db, err := dbService.GetSQLXDB("main") + if err != nil { + log.Printf("Gagal mendapatkan koneksi DB: %v", err) + return + } + qb := query.NewQueryBuilder(query.DBTypePostgreSQL) + + query := query.DynamicQuery{ + Fields: []query.SelectField{{Expression: "*"}}, + From: "employees", + Filters: []query.FilterGroup{{ + Filters: []query.DynamicFilter{ + { + Column: "metadata", + Operator: query.OpJsonEqual, + Value: "Engineering", + Options: map[string]interface{}{"path": "department"}, + }, + }, + }}, + } + + var employees []Employee + sql, args, err := qb.BuildQuery(query) + if err != nil { + log.Printf("Error building JSON query: %v", err) + return + } + fmt.Printf("Generated JSON SELECT SQL: %s\nArgs: %v\n", sql, args) + err = qb.ExecuteQuery(ctx, db, query, &employees) + if err != nil { + log.Printf("Error query JSON: %v", err) + return + } + fmt.Printf("-> Operasi JSON: Ditemukan %d employee di departemen Engineering (dari metadata JSON).\n", len(employees)) + + updateData := query.UpdateData{ + JsonUpdates: map[string]query.JsonUpdate{ + "metadata": {Path: "role", Value: "Senior Developer"}, + }, + } + filter := []query.FilterGroup{{Filters: []query.DynamicFilter{{Column: "id", Operator: query.OpEqual, Value: 1}}}} + sql, args, err = qb.BuildUpdateQuery("employees", updateData, filter) + if err != nil { + log.Printf("Error building JSON update: %v", err) + return + } + fmt.Printf("Generated JSON UPDATE SQL: %s\nArgs: %v\n", sql, args) + _, err = qb.ExecuteUpdate(ctx, db, "employees", updateData, filter) + if err != nil { + log.Printf("Error update JSON: %v", err) + return + } + fmt.Println("-> Operasi JSON: Berhasil memperbarui 'role' di metadata untuk employee ID 1.") +} + +// ============================================================================= +// CONTOH 10: QUERY MONGODB +// ============================================================================= + +// mongodbExample demonstrates MongoDB queries using the query builder. +// It prints the built filters and pipelines before executing them. +// Expected output: Prints MongoDB find filter, number of active users, aggregation pipeline, and number of departments. 
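+// Example operations (illustrative):
+// Find filter: {"status": "active"}, limit 5
+// Aggregation pipeline: [{"$group": {"_id": "$department", "count": {"$sum": 1}}}]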
+func mongodbExample(dbService database.Service) { + ctx := context.Background() + client, err := dbService.GetMongoClient("mongodb") + if err != nil { + log.Printf("Gagal mendapatkan klien MongoDB: %v", err) + return + } + collection := client.Database("company_db").Collection("users") + mqb := query.NewMongoQueryBuilder() + + // --- FIND --- + fmt.Println("\n--- Operasi FIND ---") + findQuery := query.DynamicQuery{ + Filters: []query.FilterGroup{{Filters: []query.DynamicFilter{{Column: "status", Operator: query.OpEqual, Value: "active"}}}}, + Limit: 5, + } + filter, _, _ := mqb.BuildFindQuery(findQuery) + fmt.Printf("-> MongoDB Find Filter: %#v\n", filter) + + var users []User + err = mqb.ExecuteFind(ctx, collection, findQuery, &users) + if err != nil { + log.Printf("Error MongoDB Find: %v", err) + return + } + fmt.Printf("-> MongoDB Find: Ditemukan %d user aktif.\n", len(users)) + + // --- AGGREGATION --- + fmt.Println("\n--- Operasi AGGREGATION ---") + aggQuery := query.DynamicQuery{ + Fields: []query.SelectField{ + {Expression: "department", Alias: "_id"}, + {Expression: "COUNT(*)", Alias: "count"}, + }, + GroupBy: []string{"department"}, + } + pipeline, _ := mqb.BuildAggregateQuery(aggQuery) + fmt.Printf("-> MongoDB Aggregation Pipeline: %#v\n", pipeline) + + var aggResults []struct { + ID string `bson:"_id"` + Count int `bson:"count"` + } + err = mqb.ExecuteAggregate(ctx, collection, aggQuery, &aggResults) + if err != nil { + log.Printf("Error MongoDB Aggregate: %v", err) + return + } + fmt.Printf("-> MongoDB Aggregate: Ditemukan user di %d departemen.\n", len(aggResults)) +} + +// ============================================================================= +// CONTOH 11: PENGGUNAAN READ REPLICA +// ============================================================================= + +// readReplicaExample demonstrates using read replicas for queries. +// It builds and prints the count query before executing it on the read replica. +// Expected output: Prints COUNT SQL query and the total number of users from the read replica. +// Example raw query: +// SELECT COUNT(*) FROM users +func readReplicaExample(dbService database.Service) { + ctx := context.Background() + readDB, err := dbService.GetReadDB("main") + if err != nil { + log.Printf("Gagal mendapatkan read replica: %v", err) + return + } + readxDB := sqlx.NewDb(readDB, "pgx") + qb := query.NewQueryBuilder(query.DBTypePostgreSQL) + + countQuery := query.DynamicQuery{From: "users"} + sql, args, err := qb.BuildCountQuery(countQuery) + if err != nil { + log.Printf("Error building count query: %v", err) + return + } + fmt.Printf("Generated COUNT SQL: %s\nArgs: %v\n", sql, args) + count, err := qb.ExecuteCount(ctx, readxDB, countQuery) + if err != nil { + log.Printf("Error query di read replica: %v", err) + return + } + fmt.Printf("-> Read Replica: Total user (dari read replica): %d\n", count) +} + +// ============================================================================= +// CONTOH 12: HEALTH CHECK DATABASE +// ============================================================================= + +// healthCheckExample demonstrates database health checks. +// It prints the health status of all databases. +// Expected output: Prints health status for each database (up/down with type or error). 
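+// Example output (illustrative):
+//   - Database main: SEHAT (postgres)
+//   - Database mongodb: TIDAK SEHAT - failed to ping: connection refused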
+func healthCheckExample(dbService database.Service) { + healthStatus := dbService.Health() + fmt.Println("-> Health Check Status:") + for dbName, status := range healthStatus { + if status["status"] == "up" { + fmt.Printf(" - Database %s: SEHAT (%s)\n", dbName, status["type"]) + } else { + fmt.Printf(" - Database %s: TIDAK SEHAT - %s\n", dbName, status["error"]) + } + } +} + +// ============================================================================= +// CONTOH 13: PARSING QUERY DARI URL +// ============================================================================= + +// urlQueryParsingExample demonstrates parsing query parameters from URL. +// It parses the URL query and prints the resulting dynamic query structure. +// Expected output: Prints parsed fields, filters, sort, and limit from the URL query. +func urlQueryParsingExample(dbService database.Service) { + values := url.Values{} + values.Set("fields", "id,name") + values.Set("filter[status][_eq]", "active") + values.Set("filter[age][_gt]", "25") + values.Set("sort", "-name") + values.Set("limit", "10") + + parser := query.NewQueryParser() + dynamicQuery, err := parser.ParseQuery(values, "users") + if err != nil { + log.Printf("Error parsing URL query: %v", err) + return + } + + fmt.Println("-> Parsing URL Query:") + fmt.Printf(" Fields: %v\n", dynamicQuery.Fields) + fmt.Printf(" Filters: %+v\n", dynamicQuery.Filters) + fmt.Printf(" Sort: %+v\n", dynamicQuery.Sort) + fmt.Printf(" Limit: %d\n", dynamicQuery.Limit) +} + +// ============================================================================= +// AKHIR FILE +// ============================================================================= diff --git a/internal/utils/validation/duplicate_validator.go b/internal/utils/validation/duplicate.go similarity index 87% rename from internal/utils/validation/duplicate_validator.go rename to internal/utils/validation/duplicate.go index 01d18f8..d28fcbf 100644 --- a/internal/utils/validation/duplicate_validator.go +++ b/internal/utils/validation/duplicate.go @@ -63,7 +63,11 @@ func NewDynamicValidator(qb *queryUtils.QueryBuilder) *DynamicValidator { // `data` adalah map yang berisi nilai untuk kolom yang akan diperiksa (biasanya dari request body). // Mengembalikan `true` jika ada duplikat yang ditemukan (validasi gagal), `false` jika tidak ada duplikat (validasi berhasil). func (dv *DynamicValidator) Validate(ctx context.Context, db *sqlx.DB, rule ValidationRule, data map[string]interface{}) (bool, error) { + // LOGGING: Log validation start + fmt.Printf("[VALIDATION] Starting validation for table: %s, unique columns: %v, data: %v\n", rule.TableName, rule.UniqueColumns, data) + if len(rule.UniqueColumns) == 0 { + fmt.Printf("[VALIDATION] ERROR: ValidationRule must have at least one UniqueColumn\n") return false, fmt.Errorf("ValidationRule must have at least one UniqueColumn") } @@ -72,12 +76,14 @@ func (dv *DynamicValidator) Validate(ctx context.Context, db *sqlx.DB, rule Vali // Tambahkan kondisi tambahan (misalnya, status != 'deleted') allFilters = append(allFilters, rule.Conditions...) + fmt.Printf("[VALIDATION] Added %d condition filters\n", len(rule.Conditions)) // 2. Bangun filter untuk kolom unik berdasarkan data yang diberikan for _, colName := range rule.UniqueColumns { value, exists := data[colName] if !exists { // Jika data untuk kolom unik tidak ada, ini adalah kesalahan pemrograman. 
+ fmt.Printf("[VALIDATION] ERROR: data for unique column '%s' not found in provided data map\n", colName) return false, fmt.Errorf("data for unique column '%s' not found in provided data map", colName) } allFilters = append(allFilters, queryUtils.DynamicFilter{ @@ -85,6 +91,7 @@ func (dv *DynamicValidator) Validate(ctx context.Context, db *sqlx.DB, rule Vali Operator: queryUtils.OpEqual, Value: value, }) + fmt.Printf("[VALIDATION] Added filter for column '%s' with value: %v\n", colName, value) } // 3. Tambahkan filter pengecualian ID (untuk operasi UPDATE) @@ -94,6 +101,7 @@ func (dv *DynamicValidator) Validate(ctx context.Context, db *sqlx.DB, rule Vali Operator: queryUtils.OpNotEqual, Value: rule.ExcludeIDValue, }) + fmt.Printf("[VALIDATION] Added exclude filter for column '%s' with value: %v\n", rule.ExcludeIDColumn, rule.ExcludeIDValue) } // 4. Bangun dan eksekusi query untuk menghitung jumlah record yang cocok @@ -102,13 +110,20 @@ func (dv *DynamicValidator) Validate(ctx context.Context, db *sqlx.DB, rule Vali Filters: []queryUtils.FilterGroup{{Filters: allFilters, LogicOp: "AND"}}, } + fmt.Printf("[VALIDATION] Built query with %d total filters\n", len(allFilters)) + count, err := dv.qb.ExecuteCount(ctx, db, query) if err != nil { + fmt.Printf("[VALIDATION] ERROR: failed to execute validation query for table %s: %v\n", rule.TableName, err) return false, fmt.Errorf("failed to execute validation query for table %s: %w", rule.TableName, err) } + fmt.Printf("[VALIDATION] Query executed successfully, count result: %d\n", count) + // 5. Kembalikan hasil - return count > 0, nil + result := count > 0 + fmt.Printf("[VALIDATION] Validation result: isDuplicate=%t (count > 0: %d > 0 = %t)\n", result, count, result) + return result, nil } // ============================================================================= diff --git a/tools/general/services-config.yaml b/tools/general/services-config.yaml index b647c33..658695e 100644 --- a/tools/general/services-config.yaml +++ b/tools/general/services-config.yaml @@ -3,165 +3,278 @@ global: output_dir: "internal/handlers" enable_swagger: true enable_logging: true + database: + default_connection: "postgres_satudata" + timeout_seconds: 30 services: - schedule: - name: "Jadwal Dokter" - category: "schedule" - package: "schedule" - description: "Jadwal Dokter management" - base_url: "" - timeout: 30 - retry_count: 3 - - endpoints: - schedule: - description: "Jadwal dokter management" - handler_folder: "master" - handler_file: "schedule.go" - handler_name: "schedule" - table_name: "daftar_jadwal_dokter" - # Definisikan skema tabel di sini - # = - schema: - columns: - - name: "id" - type: "serial4" - primary_key: true - go_type: "string" # Override tipe Go, UUID biasanya string - - name: "Hari" - type: "int4" - nullable: true - - name: "Nama_hari" - type: "varchar" - nullable: true - - name: "Waktu" - type: "varchar" - nullable: true - - name: "Dokter" - type: "uuid" - nullable: true - go_type: "string" # Override tipe Go - - name: "Spesialis" - type: "int4" - nullable: true - - name: "Sub_spesialis" - type: "int4" - nullable: true - - name: "Status" - type: "int4" - nullable: true - # ====================================================================== - functions: - list: - methods: ["GET"] - path: "/" - get_routes: "/" - get_path: "/" - model: "Schedule" - response_model: "ScheduleGetResponse" - description: "Get schedule list with pagination and filters" - summary: "Get Schedule List" - tags: ["Schedule"] - require_auth: true - cache_enabled: true 
diff --git a/tools/general/services-config.yaml b/tools/general/services-config.yaml
index b647c33..658695e 100644
--- a/tools/general/services-config.yaml
+++ b/tools/general/services-config.yaml
@@ -3,165 +3,278 @@ global:
   output_dir: "internal/handlers"
   enable_swagger: true
   enable_logging: true
+  database:
+    default_connection: "postgres_satudata"
+    timeout_seconds: 30
 
 services:
-  schedule:
-    name: "Jadwal Dokter"
-    category: "schedule"
-    package: "schedule"
-    description: "Jadwal Dokter management"
-    base_url: ""
-    timeout: 30
-    retry_count: 3
-
-    endpoints:
-      schedule:
-        description: "Jadwal dokter management"
-        handler_folder: "master"
-        handler_file: "schedule.go"
-        handler_name: "schedule"
-        table_name: "daftar_jadwal_dokter"
-        # Definisikan skema tabel di sini
-        # =
-        schema:
-          columns:
-            - name: "id"
-              type: "serial4"
-              primary_key: true
-              go_type: "string" # Override tipe Go, UUID biasanya string
-            - name: "Hari"
-              type: "int4"
-              nullable: true
-            - name: "Nama_hari"
-              type: "varchar"
-              nullable: true
-            - name: "Waktu"
-              type: "varchar"
-              nullable: true
-            - name: "Dokter"
-              type: "uuid"
-              nullable: true
-              go_type: "string" # Override tipe Go
-            - name: "Spesialis"
-              type: "int4"
-              nullable: true
-            - name: "Sub_spesialis"
-              type: "int4"
-              nullable: true
-            - name: "Status"
-              type: "int4"
-              nullable: true
-        # ======================================================================
-        functions:
-          list:
-            methods: ["GET"]
-            path: "/"
-            get_routes: "/"
-            get_path: "/"
-            model: "Schedule"
-            response_model: "ScheduleGetResponse"
-            description: "Get schedule list with pagination and filters"
-            summary: "Get Schedule List"
-            tags: ["Schedule"]
-            require_auth: true
-            cache_enabled: true
-            enable_database: true
-            cache_ttl: 300
-            has_pagination: true
-            has_filter: true
-            has_search: true
-            has_stats: true
-
-          get:
-            methods: ["GET"]
-            path: "/:id"
-            get_routes: "/:id"
-            get_path: "/:id"
-            model: "Schedule"
-            response_model: "ScheduleGetByIDResponse"
-            description: "Get schedule by ID"
-            summary: "Get schedule by ID"
-            tags: ["Schedule"]
-            require_auth: true
-            cache_enabled: true
-            enable_database: true
-            cache_ttl: 300
+  pasien:
+    name: "Manajemen Data Pasien"
+    category: "pasien"
+    package: "pasien"
+    description: "API untuk mengelola data pasien dengan informasi lokasi lengkap"
+    base_url: ""
+    timeout: 30
+    retry_count: 3
+    table_name: "m_pasien"
-
-          search:
-            methods: ["GET"]
-            path: "/search"
-            get_routes: "/search"
-            get_path: "/search"
-            model: "Schedule"
-            response_model: "ScheduleGetResponse"
-            description: "Search schedule"
-            summary: "Search Schedule"
-            tags: ["Schedule"]
-            require_auth: true
-            cache_enabled: true
-            enable_database: true
-            cache_ttl: 300
-            has_search: true
-
-          create:
-            methods: ["POST"]
-            path: "/"
-            post_routes: "/"
-            post_path: "/"
-            model: "Schedule"
-            response_model: "ScheduleCreateResponse"
-            request_model: "ScheduleCreateRequest"
-            description: "Create new schedule"
-            summary: "Create Schedule"
-            tags: ["Schedule"]
-            require_auth: true
-            cache_enabled: false
-            enable_database: true
-            cache_ttl: 0
-
-          update:
-            methods: ["PUT"]
-            path: "/:id"
-            put_routes: "/:id"
-            put_path: "/:id"
-            model: "Schedule"
-            response_model: "ScheduleUpdateResponse"
-            request_model: "ScheduleUpdateRequest"
-            description: "Update schedule"
-            summary: "Update Schedule"
-            tags: ["Schedule"]
-            require_auth: true
-            cache_enabled: false
-            enable_database: true
-            cache_ttl: 0
-
-          delete:
-            methods: ["DELETE"]
-            path: "/:id"
-            delete_routes: "/:id"
-            delete_path: "/:id"
-            model: "Schedule"
-            response_model: "ScheduleDeleteResponse"
-            description: "Delete schedule"
-            summary: "Delete Schedule"
-            tags: ["Schedule"]
-            require_auth: true
-            cache_enabled: false
-            enable_database: true
-            cache_ttl: 0
-
-          stats:
-            methods: ["GET"]
-            path: "/stats"
-            get_routes: "/stats"
-            get_path: "/stats"
-            model: "AggregateData"
-            response_model: "AggregateData"
-            description: "Get retribusi statistics"
-            summary: "Get Retribusi Stats"
-            tags: ["Retribusi"]
-            require_auth: true
-            cache_enabled: true
-            enable_database: true
-            cache_ttl: 180
-            has_stats: true
\ No newline at end of file
+
+    # Define all columns once for reuse
+    columns:
+      - name: "nomr"
+        type: "varchar"
+        nullable: true
+        go_type: "string"
+        description: "Nomor Rekam Medis"
+      - name: "title"
+        type: "varchar"
+        nullable: true
+        go_type: "string"
+        description: "Gelar pasien (Tn, Ny, Sdr, dll)"
+      - name: "nama"
+        type: "varchar"
+        nullable: true
+        go_type: "string"
+        validation: "required,min=1,max=100"
+        description: "Nama lengkap pasien"
+      - name: "tempat"
+        type: "varchar"
+        nullable: true
+        go_type: "string"
+        description: "Tempat lahir pasien"
+      - name: "tgllahir"
+        type: "date"
+        nullable: true
+        go_type: "time.Time"
+        description: "Tanggal lahir pasien"
+      - name: "jeniskelamin"
+        type: "varchar"
+        nullable: true
+        go_type: "string"
+        validation: "oneof=L P"
+        description: "Jenis kelamin (L/P)"
+      - name: "alamat"
+        type: "varchar"
+        nullable: true
+        go_type: "string"
+        description: "Alamat lengkap pasien"
+      - name: "kelurahan"
+        type: "int8"
+        nullable: true
+        go_type: "int64"
+        description: "ID Kelurahan"
+      - name: "kdkecamatan"
+        type: "int4"
+        nullable: true
+        go_type: "int32"
+        description: "ID Kecamatan"
+      - name: "kota"
+        type: "int4"
+        nullable: true
+        go_type: "int32"
+        description: "ID Kota"
+      - name: "kdprovinsi"
+        type: "int4"
+        nullable: true
+        go_type: "int32"
+        description: "ID Provinsi"
+      - name: "agama"
+        type: "int4"
+        nullable: true
+        go_type: "int32"
+        description: "ID Agama"
+      - name: "no_kartu"
+        type: "varchar"
+        nullable: true
+        go_type: "string"
+        description: "Nomor kartu identitas"
+      - name: "noktp_baru"
+        type: "varchar"
+        nullable: true
+        go_type: "string"
+        description: "Nomor KTP baru"
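To make the column list above concrete, here is a hypothetical rendering of the model the generator might emit. The struct name, tag layout, and the pointers-for-nullable convention are assumptions rather than confirmed generator output; types follow the `go_type` overrides in the config.

```go
package pasien

import "time"

// Pasien is an assumed rendering of the m_pasien column list: nullable
// columns become pointers, and go_type overrides win over the SQL type.
type Pasien struct {
	NOMR         *string    `db:"nomr" json:"nomr"`         // Nomor Rekam Medis
	Title        *string    `db:"title" json:"title"`       // honorific (Tn, Ny, Sdr, ...)
	Nama         *string    `db:"nama" json:"nama" validate:"required,min=1,max=100"`
	Tempat       *string    `db:"tempat" json:"tempat"`     // place of birth
	TglLahir     *time.Time `db:"tgllahir" json:"tgllahir"` // date of birth
	JenisKelamin *string    `db:"jeniskelamin" json:"jeniskelamin" validate:"oneof=L P"`
	Alamat       *string    `db:"alamat" json:"alamat"`
	Kelurahan    *int64     `db:"kelurahan" json:"kelurahan"`
	KdKecamatan  *int32     `db:"kdkecamatan" json:"kdkecamatan"`
	Kota         *int32     `db:"kota" json:"kota"`
	KdProvinsi   *int32     `db:"kdprovinsi" json:"kdprovinsi"`
	Agama        *int32     `db:"agama" json:"agama"`
	NoKartu      *string    `db:"no_kartu" json:"no_kartu"`
	NoKTPBaru    *string    `db:"noktp_baru" json:"noktp_baru"`
}
```

Nullable columns surface as pointers so that absent values round-trip as JSON null rather than as zero values.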
"kota" + type: "int4" + nullable: true + go_type: "int32" + description: "ID Kota" + - name: "kdprovinsi" + type: "int4" + nullable: true + go_type: "int32" + description: "ID Provinsi" + - name: "agama" + type: "int4" + nullable: true + go_type: "int32" + description: "ID Agama" + - name: "no_kartu" + type: "varchar" + nullable: true + go_type: "string" + description: "Nomor kartu identitas" + - name: "noktp_baru" + type: "varchar" + nullable: true + go_type: "string" + description: "Nomor KTP baru" + + # Define relationships with other tables + relationships: + - name: "provinsi" + table: "m_provinsi" + foreign_key: "kdprovinsi" + local_key: "idprovinsi" + columns: + - name: "idprovinsi" + type: "int4" + nullable: false + go_type: "int32" + primary_key: true + - name: "namaprovinsi" + type: "varchar" + nullable: true + go_type: "string" + description: "Nama provinsi" + - name: "kota" + table: "m_kota" + foreign_key: "kota" + local_key: "idkota" + columns: + - name: "idkota" + type: "int4" + nullable: false + go_type: "int32" + primary_key: true + - name: "namakota" + type: "varchar" + nullable: true + go_type: "string" + description: "Nama kota" + - name: "kecamatan" + table: "m_kecamatan" + foreign_key: "kdkecamatan" + local_key: "idkecamatan" + columns: + - name: "idkecamatan" + type: "int8" + nullable: false + go_type: "int64" + primary_key: true + - name: "namakecamatan" + type: "varchar" + nullable: true + go_type: "string" + description: "Nama kecamatan" + - name: "kelurahan" + table: "m_kelurahan" + foreign_key: "kelurahan" + local_key: "idkelurahan" + columns: + - name: "idkelurahan" + type: "int8" + nullable: false + go_type: "int64" + primary_key: true + - name: "namakelurahan" + type: "varchar" + nullable: true + go_type: "string" + description: "Nama kelurahan" + + # Define reusable field groups + field_groups: + base_fields: ["nomr", "title", "nama", "tempat", "tgllahir", "jeniskelamin"] + location_fields: ["alamat", "kelurahan", "kdkecamatan", "kota", "kdprovinsi"] + identity_fields: ["agama", "no_kartu", "noktp_baru"] + all_fields: ["nomr", "title", "nama", "tempat", "tgllahir", "jeniskelamin", "alamat", "kelurahan", "kdkecamatan", "kota", "kdprovinsi", "agama", "no_kartu", "noktp_baru"] + with_location_names: ["nomr", "title", "nama", "tempat", "tgllahir", "jeniskelamin", "alamat", "kelurahan", "namakelurahan", "kdkecamatan", "namakecamatan", "kota", "namakota", "kdprovinsi", "namaprovinsi", "agama", "no_kartu", "noktp_baru"] + + # Define endpoints with reusable configurations + endpoints: + list: + methods: ["GET"] + path: "/" + description: "Get list of pasien with pagination and filters" + summary: "Get Pasien List" + tags: ["Pasien"] + require_auth: true + cache_enabled: true + cache_ttl: 300 + has_pagination: true + has_filter: true + has_search: true + has_stats: true + fields: "with_location_names" + response_model: "PasienGetResponse" + + get_by_nomr: + methods: ["GET"] + path: "/:nomr" + description: "Get pasien by NOMR" + summary: "Get Pasien by NOMR" + tags: ["Pasien"] + require_auth: true + cache_enabled: true + cache_ttl: 300 + fields: "with_location_names" + response_model: "PasienGetByNOMRResponse" + + create: + methods: ["POST"] + path: "/" + description: "Create a new pasien" + summary: "Create Pasien" + tags: ["Pasien"] + require_auth: true + fields: "all_fields" + request_model: "PasienCreateRequest" + response_model: "PasienCreateResponse" + + update: + methods: ["PUT"] + path: "/:nomr" + description: "Update an existing pasien" + summary: "Update 
Pasien" + tags: ["Pasien"] + require_auth: true + fields: "all_fields" + request_model: "PasienUpdateRequest" + response_model: "PasienUpdateResponse" + + delete: + methods: ["DELETE"] + path: "/:nomr" + description: "Delete a pasien" + summary: "Delete Pasien" + tags: ["Pasien"] + require_auth: true + soft_delete: false + response_model: "PasienDeleteResponse" + + dynamic: + methods: ["GET"] + path: "/dynamic" + description: "Get pasien with dynamic filtering" + summary: "Get Pasien Dynamic" + tags: ["Pasien"] + require_auth: true + has_dynamic: true + fields: "with_location_names" + response_model: "PasienGetResponse" + + search: + methods: ["GET"] + path: "/search" + description: "Search pasien by name or NOMR" + summary: "Search Pasien" + tags: ["Pasien"] + require_auth: true + has_search: true + fields: "with_location_names" + response_model: "PasienGetResponse" + + stats: + methods: ["GET"] + path: "/stats" + description: "Get pasien statistics" + summary: "Get Pasien Stats" + tags: ["Pasien"] + require_auth: true + has_stats: true + response_model: "AggregateData" + + by_location: + methods: ["GET"] + path: "/by-location" + description: "Get pasien by location (provinsi, kota, kecamatan, kelurahan)" + summary: "Get Pasien by Location" + tags: ["Pasien"] + require_auth: true + has_filter: true + fields: "with_location_names" + response_model: "PasienGetResponse" + + by_age: + methods: ["GET"] + path: "/by-age" + description: "Get pasien statistics by age group" + summary: "Get Pasien by Age Group" + tags: ["Pasien"] + require_auth: true + has_stats: true + response_model: "PasienAgeStatsResponse" \ No newline at end of file