From c61b902538e8e4b2bb83136f190e044a6bbcdd9b Mon Sep 17 00:00:00 2001 From: Lunny Xiao Date: Sun, 22 Mar 2020 23:12:55 +0800 Subject: [PATCH] Upgrade xorm to v1.0.0 (#10646) * Upgrade xorm to v1.0.0 * small nit * Fix tests * Update xorm * Update xorm * fix go.sum * fix test * Fix bug when dump * Fix bug * update xorm to latest * Fix migration test * update xorm to latest * Fix import order * Use xorm tag --- go.mod | 5 +- go.sum | 10 +- models/access.go | 7 +- models/list_options.go | 3 + models/log.go | 18 +- models/login_source.go | 20 +- models/migrations/v110.go | 24 +- models/migrations/v81.go | 9 +- models/models.go | 28 +- models/org_team.go | 12 +- models/repo.go | 6 +- models/repo_unit.go | 4 +- models/ssh_key.go | 2 +- models/test_fixtures.go | 3 +- models/unit_tests.go | 4 +- routers/admin/auths.go | 6 +- vendor/modules.txt | 19 +- vendor/xorm.io/builder/.drone.yml | 71 +- vendor/xorm.io/builder/.gitignore | 1 + vendor/xorm.io/builder/builder.go | 60 +- vendor/xorm.io/builder/builder_insert.go | 2 + vendor/xorm.io/builder/builder_limit.go | 9 +- vendor/xorm.io/builder/builder_select.go | 2 +- ...der_union.go => builder_set_operations.go} | 14 +- vendor/xorm.io/builder/builder_update.go | 4 + vendor/xorm.io/builder/cond_eq.go | 4 + vendor/xorm.io/builder/go.mod | 2 +- vendor/xorm.io/builder/go.sum | 4 +- vendor/xorm.io/builder/sql.go | 9 +- vendor/xorm.io/core/.drone.yml | 33 - vendor/xorm.io/core/.gitignore | 1 - vendor/xorm.io/core/LICENSE | 27 - vendor/xorm.io/core/README.md | 118 -- vendor/xorm.io/core/benchmark.sh | 1 - vendor/xorm.io/core/dialect.go | 327 ----- vendor/xorm.io/core/driver.go | 31 - vendor/xorm.io/core/filter.go | 93 -- vendor/xorm.io/core/go.mod | 8 - vendor/xorm.io/core/go.sum | 20 - vendor/xorm.io/core/ilogger.go | 37 - vendor/xorm.io/xorm/.changelog.yml | 53 + vendor/xorm.io/xorm/.drone.yml | 155 +- vendor/xorm.io/xorm/.gitignore | 3 + vendor/xorm.io/xorm/.revive.toml | 25 + vendor/xorm.io/xorm/CHANGELOG.md | 173 +++ vendor/xorm.io/xorm/CONTRIBUTING.md | 41 + vendor/xorm.io/xorm/Makefile | 214 +++ vendor/xorm.io/xorm/README.md | 67 +- vendor/xorm.io/xorm/README_CN.md | 82 +- vendor/xorm.io/{core => xorm/caches}/cache.go | 16 +- vendor/xorm.io/xorm/caches/encode.go | 58 + vendor/xorm.io/xorm/caches/leveldb.go | 94 ++ .../xorm/{cache_lru.go => caches/lru.go} | 20 +- vendor/xorm.io/xorm/caches/manager.go | 56 + .../memory_store.go} | 6 +- .../xorm/{ => contexts}/context_cache.go | 2 +- vendor/xorm.io/xorm/convert.go | 125 ++ .../convert/conversion.go} | 4 +- vendor/xorm.io/{ => xorm}/core/db.go | 69 +- vendor/xorm.io/{ => xorm}/core/error.go | 0 vendor/xorm.io/{ => xorm}/core/rows.go | 0 vendor/xorm.io/{ => xorm}/core/scan.go | 0 vendor/xorm.io/{ => xorm}/core/stmt.go | 68 +- vendor/xorm.io/{ => xorm}/core/tx.go | 91 +- vendor/xorm.io/xorm/dialects/dialect.go | 278 ++++ vendor/xorm.io/xorm/dialects/driver.go | 63 + vendor/xorm.io/xorm/dialects/filter.go | 43 + .../xorm/{ => dialects}/gen_reserved.sh | 0 .../{dialect_mssql.go => dialects/mssql.go} | 197 ++- .../{dialect_mysql.go => dialects/mysql.go} | 234 +-- .../{dialect_oracle.go => dialects/oracle.go} | 227 ++- .../xorm/{ => dialects}/pg_reserved.txt | 0 .../postgres.go} | 338 +++-- vendor/xorm.io/xorm/dialects/quote.go | 15 + .../sqlite3.go} | 218 +-- vendor/xorm.io/xorm/dialects/table_name.go | 90 ++ vendor/xorm.io/xorm/dialects/time.go | 49 + vendor/xorm.io/xorm/doc.go | 2 +- vendor/xorm.io/xorm/engine.go | 820 +++-------- vendor/xorm.io/xorm/engine_cond.go | 232 --- 
vendor/xorm.io/xorm/engine_context.go | 28 - vendor/xorm.io/xorm/engine_group.go | 42 +- vendor/xorm.io/xorm/engine_table.go | 113 -- vendor/xorm.io/xorm/error.go | 27 - vendor/xorm.io/xorm/go.mod | 5 +- vendor/xorm.io/xorm/go.sum | 19 +- vendor/xorm.io/xorm/helpers.go | 332 ----- vendor/xorm.io/xorm/helpler_time.go | 21 - vendor/xorm.io/xorm/interface.go | 41 +- .../xorm.io/xorm/{ => internal/json}/json.go | 2 +- .../xorm.io/xorm/internal/statements/cache.go | 79 ++ .../xorm/internal/statements/column_map.go | 66 + .../statements/expr_param.go} | 37 +- .../xorm/internal/statements/insert.go | 143 ++ vendor/xorm.io/xorm/internal/statements/pk.go | 79 ++ .../xorm.io/xorm/internal/statements/query.go | 441 ++++++ .../xorm/internal/statements/statement.go | 996 +++++++++++++ .../statements}/statement_args.go | 38 +- .../xorm/internal/statements/update.go | 295 ++++ .../xorm/internal/statements/values.go | 154 ++ vendor/xorm.io/xorm/internal/utils/name.go | 13 + vendor/xorm.io/xorm/internal/utils/reflect.go | 13 + vendor/xorm.io/xorm/internal/utils/slice.go | 22 + vendor/xorm.io/xorm/internal/utils/sql.go | 19 + vendor/xorm.io/xorm/internal/utils/strings.go | 30 + vendor/xorm.io/xorm/internal/utils/zero.go | 145 ++ vendor/xorm.io/xorm/{ => log}/logger.go | 106 +- vendor/xorm.io/xorm/log/logger_context.go | 97 ++ vendor/xorm.io/xorm/{ => log}/syslogger.go | 14 +- vendor/xorm.io/{core => xorm/names}/mapper.go | 18 +- vendor/xorm.io/xorm/names/table_name.go | 56 + vendor/xorm.io/xorm/rows.go | 49 +- .../xorm.io/{core => xorm/schemas}/column.go | 53 +- .../xorm.io/{core => xorm/schemas}/index.go | 12 +- vendor/xorm.io/{core => xorm/schemas}/pk.go | 13 +- vendor/xorm.io/xorm/schemas/quote.go | 236 ++++ .../xorm.io/{core => xorm/schemas}/table.go | 31 +- vendor/xorm.io/{core => xorm/schemas}/type.go | 25 +- vendor/xorm.io/xorm/session.go | 191 +-- vendor/xorm.io/xorm/session_cols.go | 21 +- vendor/xorm.io/xorm/session_cond.go | 17 +- vendor/xorm.io/xorm/session_context.go | 23 - vendor/xorm.io/xorm/session_convert.go | 240 +--- vendor/xorm.io/xorm/session_delete.go | 71 +- vendor/xorm.io/xorm/session_exist.go | 77 +- vendor/xorm.io/xorm/session_find.go | 189 +-- vendor/xorm.io/xorm/session_get.go | 54 +- vendor/xorm.io/xorm/session_insert.go | 441 ++---- vendor/xorm.io/xorm/session_iterate.go | 47 +- vendor/xorm.io/xorm/session_query.go | 79 +- vendor/xorm.io/xorm/session_raw.go | 69 +- vendor/xorm.io/xorm/session_schema.go | 178 ++- vendor/xorm.io/xorm/session_stats.go | 31 +- vendor/xorm.io/xorm/session_tx.go | 110 +- vendor/xorm.io/xorm/session_update.go | 220 ++- vendor/xorm.io/xorm/statement.go | 1256 ----------------- vendor/xorm.io/xorm/statement_columnmap.go | 35 - vendor/xorm.io/xorm/statement_quote.go | 19 - vendor/xorm.io/xorm/tags/parser.go | 307 ++++ vendor/xorm.io/xorm/{ => tags}/tag.go | 101 +- vendor/xorm.io/xorm/test_mssql.sh | 1 - vendor/xorm.io/xorm/test_mssql_cache.sh | 1 - vendor/xorm.io/xorm/test_mymysql.sh | 1 - vendor/xorm.io/xorm/test_mymysql_cache.sh | 1 - vendor/xorm.io/xorm/test_mysql.sh | 1 - vendor/xorm.io/xorm/test_mysql_cache.sh | 1 - vendor/xorm.io/xorm/test_postgres.sh | 1 - vendor/xorm.io/xorm/test_postgres_cache.sh | 1 - vendor/xorm.io/xorm/test_sqlite.sh | 1 - vendor/xorm.io/xorm/test_sqlite_cache.sh | 1 - vendor/xorm.io/xorm/test_tidb.sh | 1 - vendor/xorm.io/xorm/transaction.go | 26 - vendor/xorm.io/xorm/types.go | 16 - vendor/xorm.io/xorm/xorm.go | 92 +- 154 files changed, 7175 insertions(+), 5942 deletions(-) create mode 100644 
vendor/xorm.io/builder/.gitignore rename vendor/xorm.io/builder/{builder_union.go => builder_set_operations.go} (69%) delete mode 100644 vendor/xorm.io/core/.drone.yml delete mode 100644 vendor/xorm.io/core/.gitignore delete mode 100644 vendor/xorm.io/core/LICENSE delete mode 100644 vendor/xorm.io/core/README.md delete mode 100644 vendor/xorm.io/core/benchmark.sh delete mode 100644 vendor/xorm.io/core/dialect.go delete mode 100644 vendor/xorm.io/core/driver.go delete mode 100644 vendor/xorm.io/core/filter.go delete mode 100644 vendor/xorm.io/core/go.mod delete mode 100644 vendor/xorm.io/core/go.sum delete mode 100644 vendor/xorm.io/core/ilogger.go create mode 100644 vendor/xorm.io/xorm/.changelog.yml create mode 100644 vendor/xorm.io/xorm/.revive.toml create mode 100644 vendor/xorm.io/xorm/CHANGELOG.md create mode 100644 vendor/xorm.io/xorm/Makefile rename vendor/xorm.io/{core => xorm/caches}/cache.go (85%) create mode 100644 vendor/xorm.io/xorm/caches/encode.go create mode 100644 vendor/xorm.io/xorm/caches/leveldb.go rename vendor/xorm.io/xorm/{cache_lru.go => caches/lru.go} (93%) create mode 100644 vendor/xorm.io/xorm/caches/manager.go rename vendor/xorm.io/xorm/{cache_memory_store.go => caches/memory_store.go} (93%) rename vendor/xorm.io/xorm/{ => contexts}/context_cache.go (97%) rename vendor/xorm.io/{core/converstion.go => xorm/convert/conversion.go} (81%) rename vendor/xorm.io/{ => xorm}/core/db.go (80%) rename vendor/xorm.io/{ => xorm}/core/error.go (100%) rename vendor/xorm.io/{ => xorm}/core/rows.go (100%) rename vendor/xorm.io/{ => xorm}/core/scan.go (100%) rename vendor/xorm.io/{ => xorm}/core/stmt.go (77%) rename vendor/xorm.io/{ => xorm}/core/tx.go (68%) create mode 100644 vendor/xorm.io/xorm/dialects/dialect.go create mode 100644 vendor/xorm.io/xorm/dialects/driver.go create mode 100644 vendor/xorm.io/xorm/dialects/filter.go rename vendor/xorm.io/xorm/{ => dialects}/gen_reserved.sh (100%) rename vendor/xorm.io/xorm/{dialect_mssql.go => dialects/mssql.go} (78%) rename vendor/xorm.io/xorm/{dialect_mysql.go => dialects/mysql.go} (77%) rename vendor/xorm.io/xorm/{dialect_oracle.go => dialects/oracle.go} (83%) rename vendor/xorm.io/xorm/{ => dialects}/pg_reserved.txt (100%) rename vendor/xorm.io/xorm/{dialect_postgres.go => dialects/postgres.go} (84%) create mode 100644 vendor/xorm.io/xorm/dialects/quote.go rename vendor/xorm.io/xorm/{dialect_sqlite3.go => dialects/sqlite3.go} (68%) create mode 100644 vendor/xorm.io/xorm/dialects/table_name.go create mode 100644 vendor/xorm.io/xorm/dialects/time.go delete mode 100644 vendor/xorm.io/xorm/engine_cond.go delete mode 100644 vendor/xorm.io/xorm/engine_context.go delete mode 100644 vendor/xorm.io/xorm/engine_table.go delete mode 100644 vendor/xorm.io/xorm/helpers.go delete mode 100644 vendor/xorm.io/xorm/helpler_time.go rename vendor/xorm.io/xorm/{ => internal/json}/json.go (98%) create mode 100644 vendor/xorm.io/xorm/internal/statements/cache.go create mode 100644 vendor/xorm.io/xorm/internal/statements/column_map.go rename vendor/xorm.io/xorm/{statement_exprparam.go => internal/statements/expr_param.go} (71%) create mode 100644 vendor/xorm.io/xorm/internal/statements/insert.go create mode 100644 vendor/xorm.io/xorm/internal/statements/pk.go create mode 100644 vendor/xorm.io/xorm/internal/statements/query.go create mode 100644 vendor/xorm.io/xorm/internal/statements/statement.go rename vendor/xorm.io/xorm/{ => internal/statements}/statement_args.go (75%) create mode 100644 vendor/xorm.io/xorm/internal/statements/update.go create mode 
100644 vendor/xorm.io/xorm/internal/statements/values.go create mode 100644 vendor/xorm.io/xorm/internal/utils/name.go create mode 100644 vendor/xorm.io/xorm/internal/utils/reflect.go create mode 100644 vendor/xorm.io/xorm/internal/utils/slice.go create mode 100644 vendor/xorm.io/xorm/internal/utils/sql.go create mode 100644 vendor/xorm.io/xorm/internal/utils/strings.go create mode 100644 vendor/xorm.io/xorm/internal/utils/zero.go rename vendor/xorm.io/xorm/{ => log}/logger.go (64%) create mode 100644 vendor/xorm.io/xorm/log/logger_context.go rename vendor/xorm.io/xorm/{ => log}/syslogger.go (88%) rename vendor/xorm.io/{core => xorm/names}/mapper.go (93%) create mode 100644 vendor/xorm.io/xorm/names/table_name.go rename vendor/xorm.io/{core => xorm/schemas}/column.go (77%) rename vendor/xorm.io/{core => xorm/schemas}/index.go (98%) rename vendor/xorm.io/{core => xorm/schemas}/pk.go (77%) create mode 100644 vendor/xorm.io/xorm/schemas/quote.go rename vendor/xorm.io/{core => xorm/schemas}/table.go (95%) rename vendor/xorm.io/{core => xorm/schemas}/type.go (95%) delete mode 100644 vendor/xorm.io/xorm/session_context.go delete mode 100644 vendor/xorm.io/xorm/statement.go delete mode 100644 vendor/xorm.io/xorm/statement_columnmap.go delete mode 100644 vendor/xorm.io/xorm/statement_quote.go create mode 100644 vendor/xorm.io/xorm/tags/parser.go rename vendor/xorm.io/xorm/{ => tags}/tag.go (72%) delete mode 100644 vendor/xorm.io/xorm/test_mssql.sh delete mode 100644 vendor/xorm.io/xorm/test_mssql_cache.sh delete mode 100644 vendor/xorm.io/xorm/test_mymysql.sh delete mode 100644 vendor/xorm.io/xorm/test_mymysql_cache.sh delete mode 100644 vendor/xorm.io/xorm/test_mysql.sh delete mode 100644 vendor/xorm.io/xorm/test_mysql_cache.sh delete mode 100644 vendor/xorm.io/xorm/test_postgres.sh delete mode 100644 vendor/xorm.io/xorm/test_postgres_cache.sh delete mode 100644 vendor/xorm.io/xorm/test_sqlite.sh delete mode 100644 vendor/xorm.io/xorm/test_sqlite_cache.sh delete mode 100644 vendor/xorm.io/xorm/test_tidb.sh delete mode 100644 vendor/xorm.io/xorm/transaction.go delete mode 100644 vendor/xorm.io/xorm/types.go diff --git a/go.mod b/go.mod index 8a4c1e476..559f3064d 100644 --- a/go.mod +++ b/go.mod @@ -116,7 +116,6 @@ require ( gopkg.in/testfixtures.v2 v2.5.0 mvdan.cc/xurls/v2 v2.1.0 strk.kbt.io/projects/go/libravatar v0.0.0-20191008002943-06d1c002b251 - xorm.io/builder v0.3.6 - xorm.io/core v0.7.3 - xorm.io/xorm v0.8.2-0.20200120024500-c37aff9b3a4a + xorm.io/builder v0.3.7 + xorm.io/xorm v1.0.0 ) diff --git a/go.sum b/go.sum index bcf166887..881004b51 100644 --- a/go.sum +++ b/go.sum @@ -39,6 +39,8 @@ gitea.com/macaron/session v0.0.0-20191207215012-613cebf0674d h1:XLww3CvnFZkXVwau gitea.com/macaron/session v0.0.0-20191207215012-613cebf0674d/go.mod h1:FanKy3WjWb5iw/iZBPk4ggoQT9FcM6bkBPvmDmsH6tY= gitea.com/macaron/toolbox v0.0.0-20190822013122-05ff0fc766b7 h1:N9QFoeNsUXLhl14mefLzGluqV7w2mGU3u+iZU+jCeWk= gitea.com/macaron/toolbox v0.0.0-20190822013122-05ff0fc766b7/go.mod h1:kgsbFPPS4P+acDYDOPDa3N4IWWOuDJt5/INKRUz7aks= +gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:lSA0F4e9A2NcQSqGqTOXqu2aRi/XEQxDCBwM8yJtE6s= +gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:EXuID2Zs0pAQhH8yz+DNjUbjppKQzKFAn28TMYPB6IU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod 
h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -793,11 +795,11 @@ strk.kbt.io/projects/go/libravatar v0.0.0-20191008002943-06d1c002b251 h1:mUcz5b3 strk.kbt.io/projects/go/libravatar v0.0.0-20191008002943-06d1c002b251/go.mod h1:FJGmPh3vz9jSos1L/F91iAgnC/aejc0wIIrF2ZwJxdY= xorm.io/builder v0.3.6 h1:ha28mQ2M+TFx96Hxo+iq6tQgnkC9IZkM6D8w9sKHHF8= xorm.io/builder v0.3.6/go.mod h1:LEFAPISnRzG+zxaxj2vPicRwz67BdhFreKg8yv8/TgU= +xorm.io/builder v0.3.7 h1:2pETdKRK+2QG4mLX4oODHEhn5Z8j1m8sXa7jfu+/SZI= +xorm.io/builder v0.3.7/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE= xorm.io/core v0.7.2 h1:mEO22A2Z7a3fPaZMk6gKL/jMD80iiyNwRrX5HOv3XLw= xorm.io/core v0.7.2/go.mod h1:jJfd0UAEzZ4t87nbQYtVjmqpIODugN6PD2D9E+dJvdM= -xorm.io/core v0.7.3 h1:W8ws1PlrnkS1CZU1YWaYLMQcQilwAmQXU0BJDJon+H0= -xorm.io/core v0.7.3/go.mod h1:jJfd0UAEzZ4t87nbQYtVjmqpIODugN6PD2D9E+dJvdM= xorm.io/xorm v0.8.0 h1:iALxgJrX8O00f8Jk22GbZwPmxJNgssV5Mv4uc2HL9PM= xorm.io/xorm v0.8.0/go.mod h1:ZkJLEYLoVyg7amJK/5r779bHyzs2AU8f8VMiP6BM7uY= -xorm.io/xorm v0.8.2-0.20200120024500-c37aff9b3a4a h1:hzGd080rlkZ5a7v6Tr3x8PJJnWPfKxGMMl92c8DNcww= -xorm.io/xorm v0.8.2-0.20200120024500-c37aff9b3a4a/go.mod h1:ZkJLEYLoVyg7amJK/5r779bHyzs2AU8f8VMiP6BM7uY= +xorm.io/xorm v1.0.0 h1:ceiwUTrJHqfNFxIUcWjkcbz6kt7sINf2dOXlgLLhaQM= +xorm.io/xorm v1.0.0/go.mod h1:o4vnEsQ5V2F1/WK6w4XTwmiWJeGj82tqjAnHe44wVHY= diff --git a/models/access.go b/models/access.go index c50986706..4b72a752b 100644 --- a/models/access.go +++ b/models/access.go @@ -207,7 +207,12 @@ func (repo *Repository) refreshAccesses(e Engine, accessMap map[int64]*userAcces // Delete old accesses and insert new ones for repository. if _, err = e.Delete(&Access{RepoID: repo.ID}); err != nil { return fmt.Errorf("delete old accesses: %v", err) - } else if _, err = e.Insert(newAccesses); err != nil { + } + if len(newAccesses) == 0 { + return nil + } + + if _, err = e.Insert(newAccesses); err != nil { return fmt.Errorf("insert new accesses: %v", err) } return nil diff --git a/models/list_options.go b/models/list_options.go index c49b83d1f..0946917fe 100644 --- a/models/list_options.go +++ b/models/list_options.go @@ -25,6 +25,9 @@ func (opts ListOptions) getPaginatedSession() *xorm.Session { func (opts ListOptions) setSessionPagination(sess *xorm.Session) *xorm.Session { opts.setDefaultValues() + if opts.PageSize <= 0 { + return sess + } return sess.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize) } diff --git a/models/log.go b/models/log.go index 4caea9a8b..1cfddd90a 100644 --- a/models/log.go +++ b/models/log.go @@ -9,7 +9,7 @@ import ( "code.gitea.io/gitea/modules/log" - "xorm.io/core" + xormlog "xorm.io/xorm/log" ) // XORMLogBridge a logger bridge from Logger to xorm @@ -19,7 +19,7 @@ type XORMLogBridge struct { } // NewXORMLogger inits a log bridge for xorm -func NewXORMLogger(showSQL bool) core.ILogger { +func NewXORMLogger(showSQL bool) xormlog.Logger { return &XORMLogBridge{ showSQL: showSQL, logger: log.GetLogger("xorm"), @@ -72,22 +72,22 @@ func (l *XORMLogBridge) Warnf(format string, v ...interface{}) { } // Level get logger level -func (l *XORMLogBridge) Level() core.LogLevel { +func (l *XORMLogBridge) Level() xormlog.LogLevel { switch l.logger.GetLevel() { case log.TRACE, log.DEBUG: - return core.LOG_DEBUG + return xormlog.LOG_DEBUG case log.INFO: - return core.LOG_INFO + return xormlog.LOG_INFO case log.WARN: - return core.LOG_WARNING + return xormlog.LOG_WARNING case log.ERROR, log.CRITICAL: - return core.LOG_ERR + return xormlog.LOG_ERR } - return core.LOG_OFF + return xormlog.LOG_OFF } // 
SetLevel set the logger level -func (l *XORMLogBridge) SetLevel(lvl core.LogLevel) { +func (l *XORMLogBridge) SetLevel(lvl xormlog.LogLevel) { } // ShowSQL set if record SQL diff --git a/models/login_source.go b/models/login_source.go index 88028283e..535044623 100644 --- a/models/login_source.go +++ b/models/login_source.go @@ -22,8 +22,8 @@ import ( "code.gitea.io/gitea/modules/timeutil" "github.com/unknwon/com" - "xorm.io/core" "xorm.io/xorm" + "xorm.io/xorm/convert" ) // LoginType represents an login type. @@ -60,11 +60,11 @@ var SecurityProtocolNames = map[ldap.SecurityProtocol]string{ // Ensure structs implemented interface. var ( - _ core.Conversion = &LDAPConfig{} - _ core.Conversion = &SMTPConfig{} - _ core.Conversion = &PAMConfig{} - _ core.Conversion = &OAuth2Config{} - _ core.Conversion = &SSPIConfig{} + _ convert.Conversion = &LDAPConfig{} + _ convert.Conversion = &SMTPConfig{} + _ convert.Conversion = &PAMConfig{} + _ convert.Conversion = &OAuth2Config{} + _ convert.Conversion = &SSPIConfig{} ) // LDAPConfig holds configuration for LDAP login source. @@ -165,10 +165,10 @@ func (cfg *SSPIConfig) ToDB() ([]byte, error) { type LoginSource struct { ID int64 `xorm:"pk autoincr"` Type LoginType - Name string `xorm:"UNIQUE"` - IsActived bool `xorm:"INDEX NOT NULL DEFAULT false"` - IsSyncEnabled bool `xorm:"INDEX NOT NULL DEFAULT false"` - Cfg core.Conversion `xorm:"TEXT"` + Name string `xorm:"UNIQUE"` + IsActived bool `xorm:"INDEX NOT NULL DEFAULT false"` + IsSyncEnabled bool `xorm:"INDEX NOT NULL DEFAULT false"` + Cfg convert.Conversion `xorm:"TEXT"` CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` diff --git a/models/migrations/v110.go b/models/migrations/v110.go index dc46c2ef7..4a1c3c47a 100644 --- a/models/migrations/v110.go +++ b/models/migrations/v110.go @@ -5,32 +5,26 @@ package migrations import ( - "xorm.io/core" "xorm.io/xorm" + "xorm.io/xorm/schemas" ) func changeReviewContentToText(x *xorm.Engine) error { - - if x.Dialect().DBType() == core.MYSQL { + switch x.Dialect().URI().DBType { + case schemas.MYSQL: _, err := x.Exec("ALTER TABLE review MODIFY COLUMN content TEXT") return err - } - - if x.Dialect().DBType() == core.ORACLE { + case schemas.ORACLE: _, err := x.Exec("ALTER TABLE review MODIFY content TEXT") return err - } - - if x.Dialect().DBType() == core.MSSQL { + case schemas.MSSQL: _, err := x.Exec("ALTER TABLE review ALTER COLUMN content TEXT") return err - } - - if x.Dialect().DBType() == core.POSTGRES { + case schemas.POSTGRES: _, err := x.Exec("ALTER TABLE review ALTER COLUMN content TYPE TEXT") return err + default: + // SQLite doesn't support ALTER COLUMN, and it seem to already make String to _TEXT_ default so no migration needed + return nil } - - // SQLite doesn't support ALTER COLUMN, and it seem to already make String to _TEXT_ default so no migration needed - return nil } diff --git a/models/migrations/v81.go b/models/migrations/v81.go index 271d479a6..4e9e7658e 100644 --- a/models/migrations/v81.go +++ b/models/migrations/v81.go @@ -8,17 +8,18 @@ import ( "fmt" "xorm.io/xorm" + "xorm.io/xorm/schemas" ) func changeU2FCounterType(x *xorm.Engine) error { var err error - switch x.Dialect().DriverName() { - case "mysql": + switch x.Dialect().URI().DBType { + case schemas.MYSQL: _, err = x.Exec("ALTER TABLE `u2f_registration` MODIFY `counter` BIGINT") - case "postgres": + case schemas.POSTGRES: _, err = x.Exec("ALTER TABLE `u2f_registration` ALTER COLUMN `counter` SET DATA TYPE bigint") - case 
"mssql": + case schemas.MSSQL: _, err = x.Exec("ALTER TABLE `u2f_registration` ALTER COLUMN `counter` BIGINT") } diff --git a/models/models.go b/models/models.go index 088445590..08c778031 100644 --- a/models/models.go +++ b/models/models.go @@ -15,8 +15,9 @@ import ( // Needed for the MySQL driver _ "github.com/go-sql-driver/mysql" - "xorm.io/core" "xorm.io/xorm" + "xorm.io/xorm/names" + "xorm.io/xorm/schemas" // Needed for the Postgresql driver _ "github.com/lib/pq" @@ -127,7 +128,7 @@ func init() { gonicNames := []string{"SSL", "UID"} for _, name := range gonicNames { - core.LintGonicMapper[name] = true + names.LintGonicMapper[name] = true } } @@ -152,8 +153,7 @@ func NewTestEngine(x *xorm.Engine) (err error) { return fmt.Errorf("Connect to database: %v", err) } - x.ShowExecTime(true) - x.SetMapper(core.GonicMapper{}) + x.SetMapper(names.GonicMapper{}) x.SetLogger(NewXORMLogger(!setting.ProdMode)) x.ShowSQL(!setting.ProdMode) return x.StoreEngine("InnoDB").Sync2(tables...) @@ -166,8 +166,7 @@ func SetEngine() (err error) { return fmt.Errorf("Failed to connect to database: %v", err) } - x.ShowExecTime(true) - x.SetMapper(core.GonicMapper{}) + x.SetMapper(names.GonicMapper{}) // WARNING: for serv command, MUST remove the output to os.stdout, // so use log file to instead print to stdout. x.SetLogger(NewXORMLogger(setting.Database.LogSQL)) @@ -249,21 +248,26 @@ func Ping() error { // DumpDatabase dumps all data from database according the special database SQL syntax to file system. func DumpDatabase(filePath string, dbType string) error { - var tbs []*core.Table + var tbs []*schemas.Table for _, t := range tables { - t := x.TableInfo(t) - t.Table.Name = t.Name - tbs = append(tbs, t.Table) + t, err := x.TableInfo(t) + if err != nil { + return err + } + tbs = append(tbs, t) } if len(dbType) > 0 { - return x.DumpTablesToFile(tbs, filePath, core.DbType(dbType)) + return x.DumpTablesToFile(tbs, filePath, schemas.DBType(dbType)) } return x.DumpTablesToFile(tbs, filePath) } // MaxBatchInsertSize returns the table's max batch insert size func MaxBatchInsertSize(bean interface{}) int { - t := x.TableInfo(bean) + t, err := x.TableInfo(bean) + if err != nil { + return 50 + } return 999 / len(t.ColumnsSeq()) } diff --git a/models/org_team.go b/models/org_team.go index f8013d12c..82c27b2c0 100644 --- a/models/org_team.go +++ b/models/org_team.go @@ -1072,12 +1072,14 @@ func UpdateTeamUnits(team *Team, units []TeamUnit) (err error) { return err } - if _, err = sess.Insert(units); err != nil { - errRollback := sess.Rollback() - if errRollback != nil { - log.Error("UpdateTeamUnits sess.Rollback: %v", errRollback) + if len(units) > 0 { + if _, err = sess.Insert(units); err != nil { + errRollback := sess.Rollback() + if errRollback != nil { + log.Error("UpdateTeamUnits sess.Rollback: %v", errRollback) + } + return err } - return err } return sess.Commit() diff --git a/models/repo.go b/models/repo.go index 672507819..dd50db9a0 100644 --- a/models/repo.go +++ b/models/repo.go @@ -1417,8 +1417,10 @@ func UpdateRepositoryUnits(repo *Repository, units []RepoUnit, deleteUnitTypes [ return err } - if _, err = sess.Insert(units); err != nil { - return err + if len(units) > 0 { + if _, err = sess.Insert(units); err != nil { + return err + } } return sess.Commit() diff --git a/models/repo_unit.go b/models/repo_unit.go index ec680c395..42ce8f6c8 100644 --- a/models/repo_unit.go +++ b/models/repo_unit.go @@ -10,8 +10,8 @@ import ( "code.gitea.io/gitea/modules/timeutil" "github.com/unknwon/com" - "xorm.io/core" 
"xorm.io/xorm" + "xorm.io/xorm/convert" ) // RepoUnit describes all units of a repository @@ -19,7 +19,7 @@ type RepoUnit struct { ID int64 RepoID int64 `xorm:"INDEX(s)"` Type UnitType `xorm:"INDEX(s)"` - Config core.Conversion `xorm:"TEXT"` + Config convert.Conversion `xorm:"TEXT"` CreatedUnix timeutil.TimeStamp `xorm:"INDEX CREATED"` } diff --git a/models/ssh_key.go b/models/ssh_key.go index d3e9de577..5d9f5afef 100644 --- a/models/ssh_key.go +++ b/models/ssh_key.go @@ -524,7 +524,7 @@ func AddPublicKey(ownerID int64, name, content string, loginSourceID int64) (*Pu func GetPublicKeyByID(keyID int64) (*PublicKey, error) { key := new(PublicKey) has, err := x. - Id(keyID). + ID(keyID). Get(key) if err != nil { return nil, err diff --git a/models/test_fixtures.go b/models/test_fixtures.go index fe6a790b0..6c160742b 100644 --- a/models/test_fixtures.go +++ b/models/test_fixtures.go @@ -9,6 +9,7 @@ import ( "time" "gopkg.in/testfixtures.v2" + "xorm.io/xorm/schemas" ) var fixtures *testfixtures.Context @@ -36,7 +37,7 @@ func LoadFixtures() error { fmt.Printf("LoadFixtures failed after retries: %v\n", err) } // Now if we're running postgres we need to tell it to update the sequences - if x.Dialect().DriverName() == "postgres" { + if x.Dialect().URI().DBType == schemas.POSTGRES { results, err := x.QueryString(`SELECT 'SELECT SETVAL(' || quote_literal(quote_ident(PGT.schemaname) || '.' || quote_ident(S.relname)) || ', COALESCE(MAX(' ||quote_ident(C.attname)|| '), 1) ) FROM ' || diff --git a/models/unit_tests.go b/models/unit_tests.go index b2c487ddc..1b27eebcd 100644 --- a/models/unit_tests.go +++ b/models/unit_tests.go @@ -20,8 +20,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/unknwon/com" "gopkg.in/testfixtures.v2" - "xorm.io/core" "xorm.io/xorm" + "xorm.io/xorm/names" ) // NonexistentID an ID that will never exist @@ -92,7 +92,7 @@ func CreateTestEngine(fixturesDir string) error { if err != nil { return err } - x.SetMapper(core.GonicMapper{}) + x.SetMapper(names.GonicMapper{}) if err = x.StoreEngine("InnoDB").Sync2(tables...); err != nil { return err } diff --git a/routers/admin/auths.go b/routers/admin/auths.go index 9b96f0803..a4fd5290b 100644 --- a/routers/admin/auths.go +++ b/routers/admin/auths.go @@ -20,7 +20,7 @@ import ( "code.gitea.io/gitea/modules/util" "github.com/unknwon/com" - "xorm.io/core" + "xorm.io/xorm/convert" ) const ( @@ -214,7 +214,7 @@ func NewAuthSourcePost(ctx *context.Context, form auth.AuthenticationForm) { ctx.Data["SSPIDefaultLanguage"] = "" hasTLS := false - var config core.Conversion + var config convert.Conversion switch models.LoginType(form.Type) { case models.LoginLDAP, models.LoginDLDAP: config = parseLDAPConfig(form) @@ -322,7 +322,7 @@ func EditAuthSourcePost(ctx *context.Context, form auth.AuthenticationForm) { return } - var config core.Conversion + var config convert.Conversion switch models.LoginType(form.Type) { case models.LoginLDAP, models.LoginDLDAP: config = parseLDAPConfig(form) diff --git a/vendor/modules.txt b/vendor/modules.txt index 0af9b75cb..429b8d3ca 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -766,12 +766,21 @@ mvdan.cc/xurls/v2 # strk.kbt.io/projects/go/libravatar v0.0.0-20191008002943-06d1c002b251 ## explicit strk.kbt.io/projects/go/libravatar -# xorm.io/builder v0.3.6 +# xorm.io/builder v0.3.7 ## explicit xorm.io/builder -# xorm.io/core v0.7.3 -## explicit -xorm.io/core -# xorm.io/xorm v0.8.2-0.20200120024500-c37aff9b3a4a +# xorm.io/xorm v1.0.0 ## explicit xorm.io/xorm +xorm.io/xorm/caches 
+xorm.io/xorm/contexts +xorm.io/xorm/convert +xorm.io/xorm/core +xorm.io/xorm/dialects +xorm.io/xorm/internal/json +xorm.io/xorm/internal/statements +xorm.io/xorm/internal/utils +xorm.io/xorm/log +xorm.io/xorm/names +xorm.io/xorm/schemas +xorm.io/xorm/tags diff --git a/vendor/xorm.io/builder/.drone.yml b/vendor/xorm.io/builder/.drone.yml index 557dbf66f..61d323d5f 100644 --- a/vendor/xorm.io/builder/.drone.yml +++ b/vendor/xorm.io/builder/.drone.yml @@ -1,31 +1,6 @@ --- kind: pipeline -name: go1.10 - -workspace: - base: /go - path: src/xorm.io/builder - -steps: -- name: test - pull: default - image: golang:1.10 - commands: - - go get -u golang.org/x/lint/golint - - go get -u github.com/stretchr/testify/assert - - go get -u github.com/go-xorm/sqlfiddle - - golint ./... - - go vet - - go test -v -race -coverprofile=coverage.txt -covermode=atomic - when: - event: - - push - - tag - - pull_request - ---- -kind: pipeline -name: go1.11 +name: testing steps: - name: test @@ -36,50 +11,6 @@ steps: - golint ./... - go vet - go test -v -race -coverprofile=coverage.txt -covermode=atomic - environment: - GOPROXY: https://goproxy.cn - GO111MODULE: "on" - when: - event: - - push - - tag - - pull_request - ---- -kind: pipeline -name: go1.12 - -steps: -- name: test - pull: default - image: golang:1.12 - commands: - - go get -u golang.org/x/lint/golint - - golint ./... - - go vet - - go test -v -race -coverprofile=coverage.txt -covermode=atomic - environment: - GOPROXY: https://goproxy.cn - GO111MODULE: "on" - when: - event: - - push - - tag - - pull_request - ---- -kind: pipeline -name: go1.13 - -steps: -- name: test - pull: default - image: golang:1.13 - commands: - - go get -u golang.org/x/lint/golint - - golint ./... - - go vet - - go test -v -race -coverprofile=coverage.txt -covermode=atomic environment: GOPROXY: https://goproxy.cn GO111MODULE: "on" diff --git a/vendor/xorm.io/builder/.gitignore b/vendor/xorm.io/builder/.gitignore new file mode 100644 index 000000000..723ef36f4 --- /dev/null +++ b/vendor/xorm.io/builder/.gitignore @@ -0,0 +1 @@ +.idea \ No newline at end of file diff --git a/vendor/xorm.io/builder/builder.go b/vendor/xorm.io/builder/builder.go index 4f1422284..cccc8a7fd 100644 --- a/vendor/xorm.io/builder/builder.go +++ b/vendor/xorm.io/builder/builder.go @@ -17,7 +17,7 @@ const ( insertType // insert updateType // update deleteType // delete - unionType // union + setOpType // set operation ) // all databasees @@ -27,6 +27,10 @@ const ( MYSQL = "mysql" MSSQL = "mssql" ORACLE = "oracle" + + UNION = "union" + INTERSECT = "intersect" + EXCEPT = "except" ) type join struct { @@ -35,9 +39,10 @@ type join struct { joinCond Cond } -type union struct { - unionType string - builder *Builder +type setOp struct { + opType string + distinctType string + builder *Builder } type limit struct { @@ -56,7 +61,7 @@ type Builder struct { cond Cond selects []string joins []join - unions []union + setOps []setOp limitation *limit insertCols []string insertVals []interface{} @@ -144,33 +149,48 @@ func (b *Builder) Into(tableName string) *Builder { } // Union sets union conditions -func (b *Builder) Union(unionTp string, unionCond *Builder) *Builder { +func (b *Builder) Union(distinctType string, cond *Builder) *Builder { + return b.setOperation(UNION, distinctType, cond) +} + +// Intersect sets intersect conditions +func (b *Builder) Intersect(distinctType string, cond *Builder) *Builder { + return b.setOperation(INTERSECT, distinctType, cond) +} + +// Except sets except conditions +func (b *Builder) 
Except(distinctType string, cond *Builder) *Builder { + return b.setOperation(EXCEPT, distinctType, cond) +} + +func (b *Builder) setOperation(opType, distinctType string, cond *Builder) *Builder { + var builder *Builder - if b.optype != unionType { + if b.optype != setOpType { builder = &Builder{cond: NewCond()} - builder.optype = unionType + builder.optype = setOpType builder.dialect = b.dialect builder.selects = b.selects - currentUnions := b.unions - // erase sub unions (actually append to new Builder.unions) - b.unions = nil + currentSetOps := b.setOps + // erase sub setOps (actually append to new Builder.unions) + b.setOps = nil - for e := range currentUnions { - currentUnions[e].builder.dialect = b.dialect + for e := range currentSetOps { + currentSetOps[e].builder.dialect = b.dialect } - builder.unions = append(append(builder.unions, union{"", b}), currentUnions...) + builder.setOps = append(append(builder.setOps, setOp{opType, "", b}), currentSetOps...) } else { builder = b } - if unionCond != nil { - if unionCond.dialect == "" && builder.dialect != "" { - unionCond.dialect = builder.dialect + if cond != nil { + if cond.dialect == "" && builder.dialect != "" { + cond.dialect = builder.dialect } - builder.unions = append(builder.unions, union{unionTp, unionCond}) + builder.setOps = append(builder.setOps, setOp{opType, distinctType, cond}) } return builder @@ -240,8 +260,8 @@ func (b *Builder) WriteTo(w Writer) error { return b.updateWriteTo(w) case deleteType: return b.deleteWriteTo(w) - case unionType: - return b.unionWriteTo(w) + case setOpType: + return b.setOpWriteTo(w) } return ErrNotSupportType diff --git a/vendor/xorm.io/builder/builder_insert.go b/vendor/xorm.io/builder/builder_insert.go index 9558a8aca..8cef5c56c 100644 --- a/vendor/xorm.io/builder/builder_insert.go +++ b/vendor/xorm.io/builder/builder_insert.go @@ -58,6 +58,8 @@ func (b *Builder) insertWriteTo(w Writer) error { if e, ok := value.(expr); ok { fmt.Fprintf(valBuffer, "(%s)", e.sql) args = append(args, e.args...) + } else if value == nil { + fmt.Fprintf(valBuffer, `null`) } else { fmt.Fprint(valBuffer, "?") args = append(args, value) diff --git a/vendor/xorm.io/builder/builder_limit.go b/vendor/xorm.io/builder/builder_limit.go index 82435dacb..82e117936 100644 --- a/vendor/xorm.io/builder/builder_limit.go +++ b/vendor/xorm.io/builder/builder_limit.go @@ -21,6 +21,9 @@ func (b *Builder) limitWriteTo(w Writer) error { } // erase limit condition b.limitation = nil + defer func() { + b.limitation = limit + }() ow := w.(*BytesWriter) switch strings.ToLower(strings.TrimSpace(b.dialect)) { @@ -34,7 +37,7 @@ func (b *Builder) limitWriteTo(w Writer) error { b.selects = append(selects, "ROWNUM RN") var wb *Builder - if b.optype == unionType { + if b.optype == setOpType { wb = Dialect(b.dialect).Select("at.*", "ROWNUM RN"). From(b, "at") } else { @@ -55,7 +58,7 @@ func (b *Builder) limitWriteTo(w Writer) error { return final.WriteTo(ow) case SQLITE, MYSQL, POSTGRES: // if type UNION, we need to write previous content back to current writer - if b.optype == unionType { + if b.optype == setOpType { if err := b.WriteTo(ow); err != nil { return err } @@ -77,7 +80,7 @@ func (b *Builder) limitWriteTo(w Writer) error { b.selects[1:]...), "ROW_NUMBER() OVER (ORDER BY (SELECT 1)) AS RN") var wb *Builder - if b.optype == unionType { + if b.optype == setOpType { wb = Dialect(b.dialect).Select("*", "ROW_NUMBER() OVER (ORDER BY (SELECT 1)) AS RN"). 
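Editorial note: the builder changes above generalize UNION into set operations, so Union, Intersect and Except each take a distinct/all modifier plus a sub-builder. A small usage sketch under assumed table and column names:

package main

import (
	"fmt"

	"xorm.io/builder"
)

func main() {
	// UNION ALL of two selects, then intersect the result with a third.
	b := builder.Select("repo_id").From("access").
		Union("all", builder.Select("repo_id").From("watch")).
		Intersect("", builder.Select("repo_id").From("star"))
	sql, args, err := builder.ToSQL(b)
	fmt.Println(sql, args, err)
}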
From(b, "at") } else { diff --git a/vendor/xorm.io/builder/builder_select.go b/vendor/xorm.io/builder/builder_select.go index 814d1b5af..087a71d87 100644 --- a/vendor/xorm.io/builder/builder_select.go +++ b/vendor/xorm.io/builder/builder_select.go @@ -63,7 +63,7 @@ func (b *Builder) selectWriteTo(w Writer) error { } switch b.subQuery.optype { - case selectType, unionType: + case selectType, setOpType: fmt.Fprint(w, " FROM (") if err := b.subQuery.WriteTo(w); err != nil { return err diff --git a/vendor/xorm.io/builder/builder_union.go b/vendor/xorm.io/builder/builder_set_operations.go similarity index 69% rename from vendor/xorm.io/builder/builder_union.go rename to vendor/xorm.io/builder/builder_set_operations.go index 4ba921617..b2b4a3daf 100644 --- a/vendor/xorm.io/builder/builder_union.go +++ b/vendor/xorm.io/builder/builder_set_operations.go @@ -9,19 +9,19 @@ import ( "strings" ) -func (b *Builder) unionWriteTo(w Writer) error { +func (b *Builder) setOpWriteTo(w Writer) error { if b.limitation != nil || b.cond.IsValid() || b.orderBy != "" || b.having != "" || b.groupBy != "" { return ErrNotUnexpectedUnionConditions } - for idx, u := range b.unions { - current := u.builder + for idx, o := range b.setOps { + current := o.builder if current.optype != selectType { return ErrUnsupportedUnionMembers } - if len(b.unions) == 1 { + if len(b.setOps) == 1 { if err := current.selectWriteTo(w); err != nil { return err } @@ -31,7 +31,11 @@ func (b *Builder) unionWriteTo(w Writer) error { } if idx != 0 { - fmt.Fprint(w, fmt.Sprintf(" UNION %v ", strings.ToUpper(u.unionType))) + if o.distinctType == "" { + fmt.Fprint(w, fmt.Sprintf(" %s ", strings.ToUpper(o.opType))) + } else { + fmt.Fprint(w, fmt.Sprintf(" %s %s ", strings.ToUpper(o.opType), strings.ToUpper(o.distinctType))) + } } fmt.Fprint(w, "(") diff --git a/vendor/xorm.io/builder/builder_update.go b/vendor/xorm.io/builder/builder_update.go index 9b6e10bc9..5fffbe349 100644 --- a/vendor/xorm.io/builder/builder_update.go +++ b/vendor/xorm.io/builder/builder_update.go @@ -45,6 +45,10 @@ func (b *Builder) updateWriteTo(w Writer) error { } } + if !b.cond.IsValid() { + return nil + } + if _, err := fmt.Fprint(w, " WHERE "); err != nil { return err } diff --git a/vendor/xorm.io/builder/cond_eq.go b/vendor/xorm.io/builder/cond_eq.go index 32f04d5d4..9976d1804 100644 --- a/vendor/xorm.io/builder/cond_eq.go +++ b/vendor/xorm.io/builder/cond_eq.go @@ -64,6 +64,10 @@ func (eq Eq) OpWriteTo(op string, w Writer) error { return err } w.Append(int(v.(Decr))) + case nil: + if _, err := fmt.Fprintf(w, "%s=null", k); err != nil { + return err + } default: if _, err := fmt.Fprintf(w, "%s=?", k); err != nil { return err diff --git a/vendor/xorm.io/builder/go.mod b/vendor/xorm.io/builder/go.mod index 9c730113f..620f943bd 100644 --- a/vendor/xorm.io/builder/go.mod +++ b/vendor/xorm.io/builder/go.mod @@ -3,6 +3,6 @@ module xorm.io/builder go 1.11 require ( - github.com/go-xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a + gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a github.com/stretchr/testify v1.3.0 ) diff --git a/vendor/xorm.io/builder/go.sum b/vendor/xorm.io/builder/go.sum index 468ba4a2d..a5727cab6 100644 --- a/vendor/xorm.io/builder/go.sum +++ b/vendor/xorm.io/builder/go.sum @@ -1,7 +1,7 @@ +gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:lSA0F4e9A2NcQSqGqTOXqu2aRi/XEQxDCBwM8yJtE6s= +gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:EXuID2Zs0pAQhH8yz+DNjUbjppKQzKFAn28TMYPB6IU= github.com/davecgh/go-spew v1.1.0 
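Editorial note: with the cond_eq and insert changes above, a nil value in an Eq is rendered as a literal null rather than a bound parameter, and updateWriteTo now stops before emitting a dangling WHERE when no condition is set. A quick sketch; the SQL shown in the comment is an expectation, not asserted by the diff:

package main

import (
	"fmt"

	"xorm.io/builder"
)

func main() {
	// A nil value is written as null instead of "?".
	sql, args, err := builder.ToSQL(builder.Eq{"owner_id": 1, "description": nil})
	// expected along the lines of: description=null AND owner_id=?  [1]
	fmt.Println(sql, args, err)
}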
h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:9wScpmSP5A3Bk8V3XHWUcJmYTh+ZnlHVyc+A4oZYS3Y= -github.com/go-xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:56xuuqnHyryaerycW3BfssRdxQstACi0Epw/yC5E2xM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= diff --git a/vendor/xorm.io/builder/sql.go b/vendor/xorm.io/builder/sql.go index 4250fea18..a6d1066b8 100644 --- a/vendor/xorm.io/builder/sql.go +++ b/vendor/xorm.io/builder/sql.go @@ -75,6 +75,7 @@ func noSQLQuoteNeeded(a interface{}) bool { } t := reflect.TypeOf(a) + switch t.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return true @@ -133,12 +134,16 @@ func ConvertToBoundSQL(sql string, args []interface{}) (string, error) { return buf.String(), nil } -// ConvertPlaceholder replaces ? to $1, $2 ... or :1, :2 ... according prefix +// ConvertPlaceholder replaces the place holder ? to $1, $2 ... or :1, :2 ... according prefix func ConvertPlaceholder(sql, prefix string) (string, error) { buf := strings.Builder{} var i, j, start int + var ready = true for ; i < len(sql); i++ { - if sql[i] == '?' { + if sql[i] == '\'' && i > 0 && sql[i-1] != '\\' { + ready = !ready + } + if ready && sql[i] == '?' { if _, err := buf.WriteString(sql[start:i]); err != nil { return "", err } diff --git a/vendor/xorm.io/core/.drone.yml b/vendor/xorm.io/core/.drone.yml deleted file mode 100644 index 3c118d4c0..000000000 --- a/vendor/xorm.io/core/.drone.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -kind: pipeline -name: go1.12 - -steps: - -- name: test - pull: default - image: golang:1.12 - commands: - - go vet - - "go test -v -race -coverprofile=coverage.txt -covermode=atomic -dbConn=\"root:@tcp(mysql:3306)/core_test?charset=utf8mb4\"" - environment: - GO111MODULE: "on" - GOPROXY: https://goproxy.cn - when: - event: - - push - - tag - - pull_request - -services: -- name: mysql - pull: default - image: mysql:5.7 - environment: - MYSQL_ALLOW_EMPTY_PASSWORD: yes - MYSQL_DATABASE: core_test - when: - event: - - push - - tag - - pull_request \ No newline at end of file diff --git a/vendor/xorm.io/core/.gitignore b/vendor/xorm.io/core/.gitignore deleted file mode 100644 index 98e6ef67f..000000000 --- a/vendor/xorm.io/core/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.db diff --git a/vendor/xorm.io/core/LICENSE b/vendor/xorm.io/core/LICENSE deleted file mode 100644 index 113079780..000000000 --- a/vendor/xorm.io/core/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013 - 2015 Lunny Xiao -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. 
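Editorial note: ConvertPlaceholder is now quote-aware, so a ? inside a string literal is no longer rewritten. For example (the query text is arbitrary):

package main

import (
	"fmt"

	"xorm.io/builder"
)

func main() {
	sql, err := builder.ConvertPlaceholder(
		"SELECT id FROM issue WHERE poster_id = ? AND content = 'why?'", "$")
	// The ? inside the quoted literal is left untouched, so this should yield:
	// SELECT id FROM issue WHERE poster_id = $1 AND content = 'why?'
	fmt.Println(sql, err)
}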
- -* Neither the name of the {organization} nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/xorm.io/core/README.md b/vendor/xorm.io/core/README.md deleted file mode 100644 index 54436b689..000000000 --- a/vendor/xorm.io/core/README.md +++ /dev/null @@ -1,118 +0,0 @@ -Core is a lightweight wrapper of sql.DB. - -[![Build Status](https://drone.gitea.com/api/badges/xorm/core/status.svg)](https://drone.gitea.com/xorm/core) -[![Test Coverage](https://gocover.io/_badge/xorm.io/core)](https://gocover.io/xorm.io/core) -[![Go Report Card](https://goreportcard.com/badge/code.gitea.io/gitea)](https://goreportcard.com/report/xorm.io/core) - -# Open -```Go -db, _ := core.Open(db, connstr) -``` - -# SetMapper -```Go -db.SetMapper(SameMapper()) -``` - -## Scan usage - -### Scan -```Go -rows, _ := db.Query() -for rows.Next() { - rows.Scan() -} -``` - -### ScanMap -```Go -rows, _ := db.Query() -for rows.Next() { - rows.ScanMap() -``` - -### ScanSlice - -You can use `[]string`, `[][]byte`, `[]interface{}`, `[]*string`, `[]sql.NullString` to ScanSclice. Notice, slice's length should be equal or less than select columns. - -```Go -rows, _ := db.Query() -cols, _ := rows.Columns() -for rows.Next() { - var s = make([]string, len(cols)) - rows.ScanSlice(&s) -} -``` - -```Go -rows, _ := db.Query() -cols, _ := rows.Columns() -for rows.Next() { - var s = make([]*string, len(cols)) - rows.ScanSlice(&s) -} -``` - -### ScanStruct -```Go -rows, _ := db.Query() -for rows.Next() { - rows.ScanStructByName() - rows.ScanStructByIndex() -} -``` - -## Query usage -```Go -rows, err := db.Query("select * from table where name = ?", name) - -user = User{ - Name:"lunny", -} -rows, err := db.QueryStruct("select * from table where name = ?Name", - &user) - -var user = map[string]interface{}{ - "name": "lunny", -} -rows, err = db.QueryMap("select * from table where name = ?name", - &user) -``` - -## QueryRow usage -```Go -row := db.QueryRow("select * from table where name = ?", name) - -user = User{ - Name:"lunny", -} -row := db.QueryRowStruct("select * from table where name = ?Name", - &user) - -var user = map[string]interface{}{ - "name": "lunny", -} -row = db.QueryRowMap("select * from table where name = ?name", - &user) -``` - -## Exec usage -```Go -db.Exec("insert into user (`name`, title, age, alias, nick_name,created) values (?,?,?,?,?,?)", name, title, age, alias...) 
- -user = User{ - Name:"lunny", - Title:"test", - Age: 18, -} -result, err = db.ExecStruct("insert into user (`name`, title, age, alias, nick_name,created) values (?Name,?Title,?Age,?Alias,?NickName,?Created)", - &user) - -var user = map[string]interface{}{ - "Name": "lunny", - "Title": "test", - "Age": 18, -} -result, err = db.ExecMap("insert into user (`name`, title, age, alias, nick_name,created) values (?Name,?Title,?Age,?Alias,?NickName,?Created)", - &user) -``` \ No newline at end of file diff --git a/vendor/xorm.io/core/benchmark.sh b/vendor/xorm.io/core/benchmark.sh deleted file mode 100644 index eab9e57e9..000000000 --- a/vendor/xorm.io/core/benchmark.sh +++ /dev/null @@ -1 +0,0 @@ -go test -v -bench=. -run=XXX diff --git a/vendor/xorm.io/core/dialect.go b/vendor/xorm.io/core/dialect.go deleted file mode 100644 index c166596c6..000000000 --- a/vendor/xorm.io/core/dialect.go +++ /dev/null @@ -1,327 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package core - -import ( - "fmt" - "strings" - "time" -) - -type DbType string - -type Uri struct { - DbType DbType - Proto string - Host string - Port string - DbName string - User string - Passwd string - Charset string - Laddr string - Raddr string - Timeout time.Duration - Schema string -} - -// a dialect is a driver's wrapper -type Dialect interface { - SetLogger(logger ILogger) - Init(*DB, *Uri, string, string) error - URI() *Uri - DB() *DB - DBType() DbType - SqlType(*Column) string - FormatBytes(b []byte) string - - DriverName() string - DataSourceName() string - - IsReserved(string) bool - Quote(string) string - - AndStr() string - OrStr() string - EqStr() string - RollBackStr() string - AutoIncrStr() string - - SupportInsertMany() bool - SupportEngine() bool - SupportCharset() bool - SupportDropIfExists() bool - IndexOnTable() bool - ShowCreateNull() bool - - IndexCheckSql(tableName, idxName string) (string, []interface{}) - TableCheckSql(tableName string) (string, []interface{}) - - IsColumnExist(tableName string, colName string) (bool, error) - - CreateTableSql(table *Table, tableName, storeEngine, charset string) string - DropTableSql(tableName string) string - CreateIndexSql(tableName string, index *Index) string - DropIndexSql(tableName string, index *Index) string - - ModifyColumnSql(tableName string, col *Column) string - - ForUpdateSql(query string) string - - // CreateTableIfNotExists(table *Table, tableName, storeEngine, charset string) error - // MustDropTable(tableName string) error - - GetColumns(tableName string) ([]string, map[string]*Column, error) - GetTables() ([]*Table, error) - GetIndexes(tableName string) (map[string]*Index, error) - - Filters() []Filter - SetParams(params map[string]string) -} - -func OpenDialect(dialect Dialect) (*DB, error) { - return Open(dialect.DriverName(), dialect.DataSourceName()) -} - -// Base represents a basic dialect and all real dialects could embed this struct -type Base struct { - db *DB - dialect Dialect - driverName string - dataSourceName string - logger ILogger - *Uri -} - -func (b *Base) DB() *DB { - return b.db -} - -func (b *Base) SetLogger(logger ILogger) { - b.logger = logger -} - -func (b *Base) Init(db *DB, dialect Dialect, uri *Uri, drivername, dataSourceName string) error { - b.db, b.dialect, b.Uri = db, dialect, uri - b.driverName, b.dataSourceName = drivername, dataSourceName - return nil -} - -func (b *Base) URI() *Uri { - return b.Uri 
-} - -func (b *Base) DBType() DbType { - return b.Uri.DbType -} - -func (b *Base) FormatBytes(bs []byte) string { - return fmt.Sprintf("0x%x", bs) -} - -func (b *Base) DriverName() string { - return b.driverName -} - -func (b *Base) ShowCreateNull() bool { - return true -} - -func (b *Base) DataSourceName() string { - return b.dataSourceName -} - -func (b *Base) AndStr() string { - return "AND" -} - -func (b *Base) OrStr() string { - return "OR" -} - -func (b *Base) EqStr() string { - return "=" -} - -func (db *Base) RollBackStr() string { - return "ROLL BACK" -} - -func (db *Base) SupportDropIfExists() bool { - return true -} - -func (db *Base) DropTableSql(tableName string) string { - quote := db.dialect.Quote - return fmt.Sprintf("DROP TABLE IF EXISTS %s", quote(tableName)) -} - -func (db *Base) HasRecords(query string, args ...interface{}) (bool, error) { - db.LogSQL(query, args) - rows, err := db.DB().Query(query, args...) - if err != nil { - return false, err - } - defer rows.Close() - - if rows.Next() { - return true, nil - } - return false, nil -} - -func (db *Base) IsColumnExist(tableName, colName string) (bool, error) { - query := fmt.Sprintf( - "SELECT %v FROM %v.%v WHERE %v = ? AND %v = ? AND %v = ?", - db.dialect.Quote("COLUMN_NAME"), - db.dialect.Quote("INFORMATION_SCHEMA"), - db.dialect.Quote("COLUMNS"), - db.dialect.Quote("TABLE_SCHEMA"), - db.dialect.Quote("TABLE_NAME"), - db.dialect.Quote("COLUMN_NAME"), - ) - return db.HasRecords(query, db.DbName, tableName, colName) -} - -/* -func (db *Base) CreateTableIfNotExists(table *Table, tableName, storeEngine, charset string) error { - sql, args := db.dialect.TableCheckSql(tableName) - rows, err := db.DB().Query(sql, args...) - if db.Logger != nil { - db.Logger.Info("[sql]", sql, args) - } - if err != nil { - return err - } - defer rows.Close() - - if rows.Next() { - return nil - } - - sql = db.dialect.CreateTableSql(table, tableName, storeEngine, charset) - _, err = db.DB().Exec(sql) - if db.Logger != nil { - db.Logger.Info("[sql]", sql) - } - return err -}*/ - -func (db *Base) CreateIndexSql(tableName string, index *Index) string { - quote := db.dialect.Quote - var unique string - var idxName string - if index.Type == UniqueType { - unique = " UNIQUE" - } - idxName = index.XName(tableName) - return fmt.Sprintf("CREATE%s INDEX %v ON %v (%v)", unique, - quote(idxName), quote(tableName), - quote(strings.Join(index.Cols, quote(",")))) -} - -func (db *Base) DropIndexSql(tableName string, index *Index) string { - quote := db.dialect.Quote - var name string - if index.IsRegular { - name = index.XName(tableName) - } else { - name = index.Name - } - return fmt.Sprintf("DROP INDEX %v ON %s", quote(name), quote(tableName)) -} - -func (db *Base) ModifyColumnSql(tableName string, col *Column) string { - return fmt.Sprintf("alter table %s MODIFY COLUMN %s", tableName, col.StringNoPk(db.dialect)) -} - -func (b *Base) CreateTableSql(table *Table, tableName, storeEngine, charset string) string { - var sql string - sql = "CREATE TABLE IF NOT EXISTS " - if tableName == "" { - tableName = table.Name - } - - sql += b.dialect.Quote(tableName) - sql += " (" - - if len(table.ColumnsSeq()) > 0 { - pkList := table.PrimaryKeys - - for _, colName := range table.ColumnsSeq() { - col := table.GetColumn(colName) - if col.IsPrimaryKey && len(pkList) == 1 { - sql += col.String(b.dialect) - } else { - sql += col.StringNoPk(b.dialect) - } - sql = strings.TrimSpace(sql) - if b.DriverName() == MYSQL && len(col.Comment) > 0 { - sql += " COMMENT '" + col.Comment + 
"'" - } - sql += ", " - } - - if len(pkList) > 1 { - sql += "PRIMARY KEY ( " - sql += b.dialect.Quote(strings.Join(pkList, b.dialect.Quote(","))) - sql += " ), " - } - - sql = sql[:len(sql)-2] - } - sql += ")" - - if b.dialect.SupportEngine() && storeEngine != "" { - sql += " ENGINE=" + storeEngine - } - if b.dialect.SupportCharset() { - if len(charset) == 0 { - charset = b.dialect.URI().Charset - } - if len(charset) > 0 { - sql += " DEFAULT CHARSET " + charset - } - } - - return sql -} - -func (b *Base) ForUpdateSql(query string) string { - return query + " FOR UPDATE" -} - -func (b *Base) LogSQL(sql string, args []interface{}) { - if b.logger != nil && b.logger.IsShowSQL() { - if len(args) > 0 { - b.logger.Infof("[SQL] %v %v", sql, args) - } else { - b.logger.Infof("[SQL] %v", sql) - } - } -} - -func (b *Base) SetParams(params map[string]string) { -} - -var ( - dialects = map[string]func() Dialect{} -) - -// RegisterDialect register database dialect -func RegisterDialect(dbName DbType, dialectFunc func() Dialect) { - if dialectFunc == nil { - panic("core: Register dialect is nil") - } - dialects[strings.ToLower(string(dbName))] = dialectFunc // !nashtsai! allow override dialect -} - -// QueryDialect query if registered database dialect -func QueryDialect(dbName DbType) Dialect { - if d, ok := dialects[strings.ToLower(string(dbName))]; ok { - return d() - } - return nil -} diff --git a/vendor/xorm.io/core/driver.go b/vendor/xorm.io/core/driver.go deleted file mode 100644 index ceef4ba61..000000000 --- a/vendor/xorm.io/core/driver.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package core - -type Driver interface { - Parse(string, string) (*Uri, error) -} - -var ( - drivers = map[string]Driver{} -) - -func RegisterDriver(driverName string, driver Driver) { - if driver == nil { - panic("core: Register driver is nil") - } - if _, dup := drivers[driverName]; dup { - panic("core: Register called twice for driver " + driverName) - } - drivers[driverName] = driver -} - -func QueryDriver(driverName string) Driver { - return drivers[driverName] -} - -func RegisteredDriverSize() int { - return len(drivers) -} diff --git a/vendor/xorm.io/core/filter.go b/vendor/xorm.io/core/filter.go deleted file mode 100644 index 55be9562b..000000000 --- a/vendor/xorm.io/core/filter.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package core - -import ( - "fmt" - "strings" -) - -// Filter is an interface to filter SQL -type Filter interface { - Do(sql string, dialect Dialect, table *Table) string -} - -// QuoteFilter filter SQL replace ` to database's own quote character -type QuoteFilter struct { -} - -func (s *QuoteFilter) Do(sql string, dialect Dialect, table *Table) string { - dummy := dialect.Quote("") - if len(dummy) != 2 { - return sql - } - prefix, suffix := dummy[0], dummy[1] - raw := []byte(sql) - for i, cnt := 0, 0; i < len(raw); i = i + 1 { - if raw[i] == '`' { - if cnt%2 == 0 { - raw[i] = prefix - } else { - raw[i] = suffix - } - cnt++ - } - } - return string(raw) -} - -// IdFilter filter SQL replace (id) to primary key column name -type IdFilter struct { -} - -type Quoter struct { - dialect Dialect -} - -func NewQuoter(dialect Dialect) *Quoter { - return &Quoter{dialect} -} - -func (q *Quoter) Quote(content string) string { - return q.dialect.Quote(content) -} - -func (i *IdFilter) Do(sql string, dialect Dialect, table *Table) string { - quoter := NewQuoter(dialect) - if table != nil && len(table.PrimaryKeys) == 1 { - sql = strings.Replace(sql, " `(id)` ", " "+quoter.Quote(table.PrimaryKeys[0])+" ", -1) - sql = strings.Replace(sql, " "+quoter.Quote("(id)")+" ", " "+quoter.Quote(table.PrimaryKeys[0])+" ", -1) - return strings.Replace(sql, " (id) ", " "+quoter.Quote(table.PrimaryKeys[0])+" ", -1) - } - return sql -} - -// SeqFilter filter SQL replace ?, ? ... to $1, $2 ... -type SeqFilter struct { - Prefix string - Start int -} - -func convertQuestionMark(sql, prefix string, start int) string { - var buf strings.Builder - var beginSingleQuote bool - var index = start - for _, c := range sql { - if !beginSingleQuote && c == '?' { - buf.WriteString(fmt.Sprintf("%s%v", prefix, index)) - index++ - } else { - if c == '\'' { - beginSingleQuote = !beginSingleQuote - } - buf.WriteRune(c) - } - } - return buf.String() -} - -func (s *SeqFilter) Do(sql string, dialect Dialect, table *Table) string { - return convertQuestionMark(sql, s.Prefix, s.Start) -} diff --git a/vendor/xorm.io/core/go.mod b/vendor/xorm.io/core/go.mod deleted file mode 100644 index 9dc9c1f2e..000000000 --- a/vendor/xorm.io/core/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module xorm.io/core - -require ( - github.com/go-sql-driver/mysql v1.4.1 - github.com/mattn/go-sqlite3 v1.10.0 - github.com/stretchr/testify v1.4.0 - google.golang.org/appengine v1.6.0 // indirect -) diff --git a/vendor/xorm.io/core/go.sum b/vendor/xorm.io/core/go.sum deleted file mode 100644 index 172009ed4..000000000 --- a/vendor/xorm.io/core/go.sum +++ /dev/null @@ -1,20 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o= -github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -google.golang.org/appengine v1.6.0 h1:Tfd7cKwKbFRsI8RMAD3oqqw7JPFRrvFlOsfbgVkjOOw= -google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/xorm.io/core/ilogger.go b/vendor/xorm.io/core/ilogger.go deleted file mode 100644 index 0c17750c2..000000000 --- a/vendor/xorm.io/core/ilogger.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package core - -// LogLevel defines a log level -type LogLevel int - -// enumerate all LogLevels -const ( - // !nashtsai! following level also match syslog.Priority value - LOG_DEBUG LogLevel = iota - LOG_INFO - LOG_WARNING - LOG_ERR - LOG_OFF - LOG_UNKNOWN -) - -// ILogger is a logger interface -type ILogger interface { - Debug(v ...interface{}) - Debugf(format string, v ...interface{}) - Error(v ...interface{}) - Errorf(format string, v ...interface{}) - Info(v ...interface{}) - Infof(format string, v ...interface{}) - Warn(v ...interface{}) - Warnf(format string, v ...interface{}) - - Level() LogLevel - SetLevel(l LogLevel) - - ShowSQL(show ...bool) - IsShowSQL() bool -} diff --git a/vendor/xorm.io/xorm/.changelog.yml b/vendor/xorm.io/xorm/.changelog.yml new file mode 100644 index 000000000..1303c9cc9 --- /dev/null +++ b/vendor/xorm.io/xorm/.changelog.yml @@ -0,0 +1,53 @@ +# The full repository name +repo: xorm/xorm + +# Service type (gitea or github) +service: gitea + +# Base URL for Gitea instance if using gitea service type (optional) +# Default: https://gitea.com +base-url: + +# Changelog groups and which labeled PRs to add to each group +groups: + - + name: BREAKING + labels: + - kind/breaking + - + name: FEATURES + labels: + - kind/feature + - + name: SECURITY + labels: + - kind/security + - + name: BUGFIXES + labels: + - kind/bug + - + name: ENHANCEMENTS + labels: + - kind/enhancement + - kind/refactor + - kind/ui + - + name: TESTING + labels: + - kind/testing + - + name: BUILD + labels: + - kind/build + - kind/lint + - + name: DOCS + labels: + - kind/docs + - + name: MISC + default: true + +# regex indicating which labels to skip for the changelog +skip-labels: skip-changelog|backport\/.+ diff --git a/vendor/xorm.io/xorm/.drone.yml b/vendor/xorm.io/xorm/.drone.yml index e9dae7889..0863cce26 100644 --- a/vendor/xorm.io/xorm/.drone.yml +++ b/vendor/xorm.io/xorm/.drone.yml @@ -3,8 +3,7 @@ kind: pipeline name: testing steps: - name: test-vet - pull: default - image: golang:1.12 + image: golang:1.11 environment: GO111MODULE: "on" GOPROXY: "https://goproxy.cn" @@ -16,44 +15,77 @@ steps: - pull_request - name: test-sqlite - pull: default image: golang:1.12 environment: GO111MODULE: "on" GOPROXY: "https://goproxy.cn" commands: - - "go test -v 
-race -db=\"sqlite3\" -conn_str=\"./test.db\" -coverprofile=coverage1-1.txt -covermode=atomic" - - "go test -v -race -db=\"sqlite3\" -conn_str=\"./test.db\" -cache=true -coverprofile=coverage1-2.txt -covermode=atomic" + - make test-sqlite + - TEST_CACHE_ENABLE=true make test-sqlite + - TEST_QUOTE_POLICY=reserved make test-sqlite + - go test ./caches/... ./contexts/... ./convert/... ./core/... ./dialects/... \ + ./log/... ./migrate/... ./names/... ./schemas/... ./tags/... \ + ./internal/json/... ./internal/statements/... ./internal/utils/... \ + when: event: - push - pull_request - name: test-mysql - pull: default image: golang:1.12 environment: GO111MODULE: "on" GOPROXY: "https://goproxy.cn" + TEST_MYSQL_HOST: mysql + TEST_MYSQL_CHARSET: utf8 + TEST_MYSQL_DBNAME: xorm_test + TEST_MYSQL_USERNAME: root + TEST_MYSQL_PASSWORD: + commands: + - make test-mysql + - TEST_CACHE_ENABLE=true make test-mysql + - TEST_QUOTE_POLICY=reserved make test-mysql + when: + event: + - push + - pull_request + +- name: test-mysql8 + image: golang:1.12 + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + TEST_MYSQL_HOST: mysql8 + TEST_MYSQL_CHARSET: utf8mb4 + TEST_MYSQL_DBNAME: xorm_test + TEST_MYSQL_USERNAME: root + TEST_MYSQL_PASSWORD: commands: - - "go test -v -race -db=\"mysql\" -conn_str=\"root:@tcp(mysql)/xorm_test\" -coverprofile=coverage2-1.txt -covermode=atomic" - - "go test -v -race -db=\"mysql\" -conn_str=\"root:@tcp(mysql)/xorm_test\" -cache=true -coverprofile=coverage2-2.txt -covermode=atomic" + - make test-mysql + - TEST_CACHE_ENABLE=true make test-mysql + - TEST_QUOTE_POLICY=reserved make test-mysql when: event: - push - pull_request - name: test-mysql-utf8mb4 - pull: default image: golang:1.12 depends_on: - - test-mysql + - test-mysql environment: GO111MODULE: "on" GOPROXY: "https://goproxy.cn" + TEST_MYSQL_HOST: mysql + TEST_MYSQL_CHARSET: utf8mb4 + TEST_MYSQL_DBNAME: xorm_test + TEST_MYSQL_USERNAME: root + TEST_MYSQL_PASSWORD: commands: - - "go test -v -race -db=\"mysql\" -conn_str=\"root:@tcp(mysql)/xorm_test?charset=utf8mb4\" -coverprofile=coverage2.1-1.txt -covermode=atomic" - - "go test -v -race -db=\"mysql\" -conn_str=\"root:@tcp(mysql)/xorm_test?charset=utf8mb4\" -cache=true -coverprofile=coverage2.1-2.txt -covermode=atomic" + - make test-mysql + - TEST_CACHE_ENABLE=true make test-mysql + - TEST_QUOTE_POLICY=reserved make test-mysql when: event: - push @@ -67,9 +99,14 @@ steps: environment: GO111MODULE: "on" GOPROXY: "https://goproxy.cn" + TEST_MYSQL_HOST: mysql:3306 + TEST_MYSQL_DBNAME: xorm_test + TEST_MYSQL_USERNAME: root + TEST_MYSQL_PASSWORD: commands: - - "go test -v -race -db=\"mymysql\" -conn_str=\"tcp:mysql:3306*xorm_test/root/\" -coverprofile=coverage3-1.txt -covermode=atomic" - - "go test -v -race -db=\"mymysql\" -conn_str=\"tcp:mysql:3306*xorm_test/root/\" -cache=true -coverprofile=coverage3-2.txt -covermode=atomic" + - make test-mymysql + - TEST_CACHE_ENABLE=true make test-mymysql + - TEST_QUOTE_POLICY=reserved make test-mymysql when: event: - push @@ -81,9 +118,14 @@ steps: environment: GO111MODULE: "on" GOPROXY: "https://goproxy.cn" + TEST_PGSQL_HOST: pgsql + TEST_PGSQL_DBNAME: xorm_test + TEST_PGSQL_USERNAME: postgres + TEST_PGSQL_PASSWORD: postgres commands: - - "go test -v -race -db=\"postgres\" -conn_str=\"postgres://postgres:@pgsql/xorm_test?sslmode=disable\" -coverprofile=coverage4-1.txt -covermode=atomic" - - "go test -v -race -db=\"postgres\" -conn_str=\"postgres://postgres:@pgsql/xorm_test?sslmode=disable\" -cache=true -coverprofile=coverage4-2.txt 
-covermode=atomic" + - make test-postgres + - TEST_CACHE_ENABLE=true make test-postgres + - TEST_QUOTE_POLICY=reserved make test-postgres when: event: - push @@ -92,12 +134,20 @@ steps: - name: test-postgres-schema pull: default image: golang:1.12 + depends_on: + - test-postgres environment: GO111MODULE: "on" GOPROXY: "https://goproxy.cn" + TEST_PGSQL_HOST: pgsql + TEST_PGSQL_SCHEMA: xorm + TEST_PGSQL_DBNAME: xorm_test + TEST_PGSQL_USERNAME: postgres + TEST_PGSQL_PASSWORD: postgres commands: - - "go test -v -race -db=\"postgres\" -conn_str=\"postgres://postgres:@pgsql/xorm_test?sslmode=disable\" -schema=xorm -coverprofile=coverage5-1.txt -covermode=atomic" - - "go test -v -race -db=\"postgres\" -conn_str=\"postgres://postgres:@pgsql/xorm_test?sslmode=disable\" -schema=xorm -cache=true -coverprofile=coverage5-2.txt -covermode=atomic" + - make test-postgres + - TEST_CACHE_ENABLE=true make test-postgres + - TEST_QUOTE_POLICY=reserved make test-postgres when: event: - push @@ -109,9 +159,14 @@ steps: environment: GO111MODULE: "on" GOPROXY: "https://goproxy.cn" + TEST_MSSQL_HOST: mssql + TEST_MSSQL_DBNAME: xorm_test + TEST_MSSQL_USERNAME: sa + TEST_MSSQL_PASSWORD: "yourStrong(!)Password" commands: - - "go test -v -race -db=\"mssql\" -conn_str=\"server=mssql;user id=sa;password=yourStrong(!)Password;database=xorm_test\" -coverprofile=coverage6-1.txt -covermode=atomic" - - "go test -v -race -db=\"mssql\" -conn_str=\"server=mssql;user id=sa;password=yourStrong(!)Password;database=xorm_test\" -cache=true -coverprofile=coverage6-2.txt -covermode=atomic" + - make test-mssql + - TEST_CACHE_ENABLE=true make test-mssql + - TEST_QUOTE_POLICY=reserved make test-mssql when: event: - push @@ -123,9 +178,33 @@ steps: environment: GO111MODULE: "on" GOPROXY: "https://goproxy.cn" + TEST_TIDB_HOST: "tidb:4000" + TEST_TIDB_DBNAME: xorm_test + TEST_TIDB_USERNAME: root + TEST_TIDB_PASSWORD: + commands: + - make test-tidb + - TEST_CACHE_ENABLE=true make test-tidb + - TEST_QUOTE_POLICY=reserved make test-tidb + when: + event: + - push + - pull_request + +- name: test-cockroach + pull: default + image: golang:1.13 + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + TEST_COCKROACH_HOST: "cockroach:26257" + TEST_COCKROACH_DBNAME: xorm_test + TEST_COCKROACH_USERNAME: root + TEST_COCKROACH_PASSWORD: commands: - - "go test -v -race -db=\"mysql\" -conn_str=\"root:@tcp(tidb:4000)/xorm_test\" -ignore_select_update=true -coverprofile=coverage7-1.txt -covermode=atomic" - - "go test -v -race -db=\"mysql\" -conn_str=\"root:@tcp(tidb:4000)/xorm_test\" -ignore_select_update=true -cache=true -coverprofile=coverage7-2.txt -covermode=atomic" + - sleep 10 + - make test-cockroach + - TEST_CACHE_ENABLE=true make test-cockroach when: event: - push @@ -141,15 +220,15 @@ steps: - test-vet - test-sqlite - test-mysql - - test-mysql-utf8mb4 + - test-mysql8 - test-mymysql - test-postgres - test-postgres-schema - test-mssql - test-tidb + - test-cockroach commands: - - go get github.com/wadey/gocovmerge - - gocovmerge coverage1-1.txt coverage1-2.txt coverage2-1.txt coverage2-2.txt coverage2.1-1.txt coverage2.1-2.txt coverage3-1.txt coverage3-2.txt coverage4-1.txt coverage4-2.txt coverage5-1.txt coverage5-2.txt coverage6-1.txt coverage6-2.txt coverage7-1.txt coverage7-2.txt > coverage.txt + - make coverage when: event: - push @@ -169,12 +248,25 @@ services: - tag - pull_request +- name: mysql8 + pull: default + image: mysql:8.0 + environment: + MYSQL_ALLOW_EMPTY_PASSWORD: yes + MYSQL_DATABASE: xorm_test + when: + event: + - push + 
- tag + - pull_request + - name: pgsql pull: default image: postgres:9.5 environment: POSTGRES_DB: xorm_test POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres when: event: - push @@ -201,4 +293,15 @@ services: event: - push - tag - - pull_request \ No newline at end of file + - pull_request + +- name: cockroach + pull: default + image: cockroachdb/cockroach:v19.2.4 + commands: + - /cockroach/cockroach start --insecure + when: + event: + - push + - tag + - pull_request diff --git a/vendor/xorm.io/xorm/.gitignore b/vendor/xorm.io/xorm/.gitignore index f1757b983..0d321a6be 100644 --- a/vendor/xorm.io/xorm/.gitignore +++ b/vendor/xorm.io/xorm/.gitignore @@ -7,6 +7,7 @@ # Folders _obj _test +vendor/ # Architecture specific extensions/prefixes *.[568vq] @@ -31,3 +32,5 @@ xorm.test test.db.sql .idea/ + +*coverage.out diff --git a/vendor/xorm.io/xorm/.revive.toml b/vendor/xorm.io/xorm/.revive.toml new file mode 100644 index 000000000..64e223bbf --- /dev/null +++ b/vendor/xorm.io/xorm/.revive.toml @@ -0,0 +1,25 @@ +ignoreGeneratedHeader = false +severity = "warning" +confidence = 0.8 +errorCode = 1 +warningCode = 1 + +[rule.blank-imports] +[rule.context-as-argument] +[rule.context-keys-type] +[rule.dot-imports] +[rule.error-return] +[rule.error-strings] +[rule.error-naming] +[rule.exported] +[rule.if-return] +[rule.increment-decrement] +[rule.var-naming] +[rule.var-declaration] +[rule.package-comments] +[rule.range] +[rule.receiver-naming] +[rule.time-naming] +[rule.unexported-return] +[rule.indent-error-flow] +[rule.errorf] \ No newline at end of file diff --git a/vendor/xorm.io/xorm/CHANGELOG.md b/vendor/xorm.io/xorm/CHANGELOG.md new file mode 100644 index 000000000..85c59d57f --- /dev/null +++ b/vendor/xorm.io/xorm/CHANGELOG.md @@ -0,0 +1,173 @@ +# Changelog + +This changelog goes through all the changes that have been made in each release +without substantial changes to our git log. + +## [1.0.0](https://gitea.com/xorm/xorm/pulls?q=&type=all&state=closed&milestone=1242) - 2020-03-22 + +* BREAKING + * Add context for dialects (#1558) + * Move zero functions to a standalone package (#1548) + * Merge core package back into the main repository and split into serval sub packages. (#1543) +* FEATURES + * Use a new ContextLogger interface to implement logger (#1557) +* BUGFIXES + * Fix setschema (#1606) + * Fix dump/import bug (#1603) + * Fix pk bug (#1602) + * Fix master/slave bug (#1601) + * Fix bug when dump (#1597) + * Ignore schema when dbtype is not postgres (#1593) + * Fix table name (#1590) + * Fix find alias bug (#1581) + * Fix rows bug (#1576) + * Fix map with cols (#1575) + * Fix bug on deleted with join (#1570) + * Improve quote policy (#1567) + * Fix break session sql enable feature (#1566) + * Fix mssql quote (#1535) + * Fix join table name quote bug (#1534) + * Fix mssql issue with duplicate columns. 
(#1225) + * Fix mysql8.0 sync failed (#808) +* ENHANCEMENTS + * Fix batch insert interface slice be panic (#1598) + * Move some codes to statement sub package (#1574) + * Remove circle file (#1569) + * Move statement as a sub package (#1564) + * Move maptype to tag parser (#1561) + * Move caches to manager (#1553) + * Improve code (#1552) + * Improve some codes (#1551) + * Improve statement (#1549) + * Move tag parser related codes as a standalone sub package (#1547) + * Move reserve words related files into dialects sub package (#1544) + * Fix `Conversion` method `ToDB() ([]byte, error)` return type is nil (#1296) + * Check driver.Valuer response, and skip the column if nil (#1167) + * Add cockroach support and tests (#896) +* TESTING + * Improve tests (#1572) +* BUILD + * Add changelog file and tool configuration (#1546) +* DOCS + * Fix outdate changelog (#1565) + +## old changelog + +* **v0.6.5** + * Postgres schema support + * vgo support + * Add FindAndCount + * Database special params support via NewEngineWithParams + * Some bugs fixed + +* **v0.6.4** + * Automatical Read/Write seperatelly + * Query/QueryString/QueryInterface and action with Where/And + * Get support non-struct variables + * BufferSize on Iterate + * fix some other bugs. + +* **v0.6.3** + * merge tests to main project + * add `Exist` function + * add `SumInt` function + * Mysql now support read and create column comment. + * fix time related bugs. + * fix some other bugs. + +* **v0.6.2** + * refactor tag parse methods + * add Scan features to Get + * add QueryString method + +* **v0.4.5** + * many bugs fixed + * extends support unlimited deep + * Delete Limit support + +* **v0.4.4** + * ql database expriment support + * tidb database expriment support + * sql.NullString and etc. field support + * select ForUpdate support + * many bugs fixed + +* **v0.4.3** + * Json column type support + * oracle expirement support + * bug fixed + +* **v0.4.2** + * Transaction will auto rollback if not Rollback or Commit be called. 
+ * Gonic Mapper support + * bug fixed + +* **v0.4.1** + * deleted tag support for soft delete + * bug fixed + +* **v0.4.0 RC1** + Changes: + * moved xorm cmd to [github.com/go-xorm/cmd](github.com/go-xorm/cmd) + * refactored general DB operation a core lib at [github.com/go-xorm/core](https://github.com/go-xorm/core) + * moved tests to github.com/go-xorm/tests [github.com/go-xorm/tests](github.com/go-xorm/tests) + + Improvements: + * Prepared statement cache + * Add Incr API + * Specify Timezone Location + +* **v0.3.2** + Improvements: + * Add AllCols & MustCols function + * Add TableName for custom table name + + Bug Fixes: + * #46 + * #51 + * #53 + * #89 + * #86 + * #92 + +* **v0.3.1** + + Features: + * Support MSSQL DB via ODBC driver ([github.com/lunny/godbc](https://github.com/lunny/godbc)); + * Composite Key, using multiple pk xorm tag + * Added Row() API as alternative to Iterate() API for traversing result set, provide similar usages to sql.Rows type + * ORM struct allowed declaration of pointer builtin type as members to allow null DB fields + * Before and After Event processors + + Improvements: + * Allowed int/int32/int64/uint/uint32/uint64/string as Primary Key type + * Performance improvement for Get()/Find()/Iterate() + + +* **v0.2.3** : Improved documents; Optimistic Locking support; Timestamp with time zone support; Mapper change to tableMapper and columnMapper & added PrefixMapper & SuffixMapper support custom table or column name's prefix and suffix;Insert now return affected, err instead of id, err; Added UseBool & Distinct; + +* **v0.2.2** : Postgres drivers now support lib/pq; Added method Iterate for record by record to handler;Added SetMaxConns(go1.2+) support; some bugs fixed. + +* **v0.2.1** : Added database reverse tool, now support generate go & c++ codes, see [Xorm Tool README](https://github.com/go-xorm/xorm/blob/master/xorm/README.md); some bug fixed. + +* **v0.2.0** : Added Cache supported, select is speeder up 3~5x; Added SameMapper for same name between struct and table; Added Sync method for auto added tables, columns, indexes; + +* **v0.1.9** : Added postgres and mymysql supported; Added ` and ? supported on Raw SQL even if postgres; Added Cols, StoreEngine, Charset function, Added many column data type supported, please see [Mapping Rules](#mapping). + +* **v0.1.8** : Added union index and union unique supported, please see [Mapping Rules](#mapping). + +* **v0.1.7** : Added IConnectPool interface and NoneConnectPool, SysConnectPool, SimpleConnectPool the three implements. You can choose one of them and the default is SysConnectPool. You can customrize your own connection pool. struct Engine added Close method, It should be invoked before system exit. + +* **v0.1.6** : Added conversion interface support; added struct derive support; added single mapping support + +* **v0.1.5** : Added multi threads support; added Sql() function for struct query; Get function changed return inteface; MakeSession and Create are instead with NewSession and NewEngine. + +* **v0.1.4** : Added simple cascade load support; added more data type supports. + +* **v0.1.3** : Find function now supports both slice and map; Add Table function for multi tables and temperory tables support + +* **v0.1.2** : Insert function now supports both struct and slice pointer parameters, batch inserting and auto transaction + +* **v0.1.1** : Add Id, In functions and improved README + +* **v0.1.0** : Initial release. 
\ No newline at end of file diff --git a/vendor/xorm.io/xorm/CONTRIBUTING.md b/vendor/xorm.io/xorm/CONTRIBUTING.md index 442aa4d31..a6925a5c0 100644 --- a/vendor/xorm.io/xorm/CONTRIBUTING.md +++ b/vendor/xorm.io/xorm/CONTRIBUTING.md @@ -22,6 +22,47 @@ e.g., // !lunny! this is comments made by lunny ``` +### Build xorm and test it locally + +Once you write some codes on your feature branch, you could build and test locally at first. Just + +``` +make build +``` +and +``` +make test +``` + +The `make test` is an alias of `make test-sqlite`, it will run the tests on a sqlite database file. No extra thing needed to do except you need to cgo compile enviroment. + +If you write a new test method, you could run + +``` +make test-sqlite#TestMyNewMethod +``` + +that will only run the special test method. + +If you want to run another datase, you have to prepare a running database at first, and then, you could + +``` +TEST_MYSQL_HOST= TEST_MYSQL_CHARSET= TEST_MYSQL_DBNAME= TEST_MYSQL_USERNAME= TEST_MYSQL_PASSWORD= make test-mysql +``` + +or other databases: +``` +TEST_MSSQL_HOST= TEST_MSSQL_DBNAME= TEST_MSSQL_USERNAME= TEST_MSSQL_PASSWORD= make test-mssql +``` +``` +TEST_PGSQL_HOST= TEST_PGSQL_SCHEMA= TEST_PGSQL_DBNAME= TEST_PGSQL_USERNAME= TEST_PGSQL_PASSWORD= make test-postgres +``` +``` +TEST_TIDB_HOST= TEST_TIDB_DBNAME= TEST_TIDB_USERNAME= TEST_TIDB_PASSWORD= make test-tidb +``` + +And if your branch is related with cache, you could also enable it via `TEST_CACHE_ENABLE=true`. + ### Patch review Help review existing open [pull requests](https://help.github.com/articles/using-pull-requests) by commenting on the code or diff --git a/vendor/xorm.io/xorm/Makefile b/vendor/xorm.io/xorm/Makefile new file mode 100644 index 000000000..88364f541 --- /dev/null +++ b/vendor/xorm.io/xorm/Makefile @@ -0,0 +1,214 @@ +IMPORT := xorm.io/xorm +export GO111MODULE=on + +GO ?= go +GOFMT ?= gofmt -s +TAGS ?= +SED_INPLACE := sed -i + +GOFILES := $(shell find . -name "*.go" -type f) + +PACKAGES ?= $(shell GO111MODULE=on $(GO) list ./...) + +TEST_COCKROACH_HOST ?= cockroach:26257 +TEST_COCKROACH_SCHEMA ?= +TEST_COCKROACH_DBNAME ?= xorm_test +TEST_COCKROACH_USERNAME ?= postgres +TEST_COCKROACH_PASSWORD ?= + +TEST_MSSQL_HOST ?= mssql:1433 +TEST_MSSQL_DBNAME ?= gitea +TEST_MSSQL_USERNAME ?= sa +TEST_MSSQL_PASSWORD ?= MwantsaSecurePassword1 + +TEST_MYSQL_HOST ?= mysql:3306 +TEST_MYSQL_CHARSET ?= utf8 +TEST_MYSQL_DBNAME ?= xorm_test +TEST_MYSQL_USERNAME ?= root +TEST_MYSQL_PASSWORD ?= + +TEST_PGSQL_HOST ?= pgsql:5432 +TEST_PGSQL_SCHEMA ?= +TEST_PGSQL_DBNAME ?= xorm_test +TEST_PGSQL_USERNAME ?= postgres +TEST_PGSQL_PASSWORD ?= mysecretpassword + +TEST_TIDB_HOST ?= tidb:4000 +TEST_TIDB_DBNAME ?= xorm_test +TEST_TIDB_USERNAME ?= root +TEST_TIDB_PASSWORD ?= + +TEST_CACHE_ENABLE ?= false +TEST_QUOTE_POLICY ?= always + +.PHONY: all +all: build + +.PHONY: build +build: go-check $(GO_SOURCES) + $(GO) build + +.PHONY: clean +clean: + $(GO) clean -i ./... + rm -rf *.sql *.log test.db *coverage.out coverage.all + +.PHONY: coverage +coverage: + @hash gocovmerge > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + $(GO) get -u github.com/wadey/gocovmerge; \ + fi + gocovmerge $(shell find . 
-type f -name "coverage.out") > coverage.all;\ + +.PHONY: fmt +fmt: + $(GOFMT) -w $(GOFILES) + +.PHONY: fmt-check +fmt-check: + # get all go files and run go fmt on them + @diff=$$($(GOFMT) -d $(GOFILES)); \ + if [ -n "$$diff" ]; then \ + echo "Please run 'make fmt' and commit the result:"; \ + echo "$${diff}"; \ + exit 1; \ + fi; + +.PHONY: go-check +go-check: + $(eval GO_VERSION := $(shell printf "%03d%03d%03d" $(shell go version | grep -Eo '[0-9]+\.?[0-9]+?\.?[0-9]?\s' | tr '.' ' ');)) + @if [ "$(GO_VERSION)" -lt "001011000" ]; then \ + echo "Gitea requires Go 1.11.0 or greater to build. You can get it at https://golang.org/dl/"; \ + exit 1; \ + fi + +.PHONY: help +help: + @echo "Make Routines:" + @echo " - equivalent to \"build\"" + @echo " - build creates the entire project" + @echo " - clean delete integration files and build files but not css and js files" + @echo " - fmt format the code" + @echo " - lint run code linter revive" + @echo " - misspell check if a word is written wrong" + @echo " - test run default unit test" + @echo " - test-sqlite run unit test for sqlite" + @echo " - vet examines Go source code and reports suspicious constructs" + +.PHONY: lint +lint: revive + +.PHONY: revive +revive: + @hash revive > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + $(GO) get -u github.com/mgechev/revive; \ + fi + revive -config .revive.toml -exclude=./vendor/... ./... || exit 1 + +.PHONY: misspell +misspell: + @hash misspell > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + $(GO) get -u github.com/client9/misspell/cmd/misspell; \ + fi + misspell -w -i unknwon $(GOFILES) + +.PHONY: misspell-check +misspell-check: + @hash misspell > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + $(GO) get -u github.com/client9/misspell/cmd/misspell; \ + fi + misspell -error -i unknwon,destory $(GOFILES) + +.PHONY: test +test: test-sqlite + +.PNONY: test-cockroach +test-cockroach: go-check + $(GO) test -race -db=postgres -schema='$(TEST_COCKROACH_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \ + -conn_str="postgres://$(TEST_COCKROACH_USERNAME):$(TEST_COCKROACH_PASSWORD)@$(TEST_COCKROACH_HOST)/$(TEST_COCKROACH_DBNAME)?sslmode=disable&experimental_serial_normalization=sql_sequence" \ + -ignore_update_limit=true -coverprofile=cockroach.$(TEST_COCKROACH_SCHEMA).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: test-cockroach\#% +test-cockroach\#%: go-check + $(GO) test -race -run $* -db=postgres -schema='$(TEST_COCKROACH_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \ + -conn_str="postgres://$(TEST_COCKROACH_USERNAME):$(TEST_COCKROACH_PASSWORD)@$(TEST_COCKROACH_HOST)/$(TEST_COCKROACH_DBNAME)?sslmode=disable&experimental_serial_normalization=sql_sequence" \ + -ignore_update_limit=true -coverprofile=cockroach.$(TEST_COCKROACH_SCHEMA).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PNONY: test-mssql +test-mssql: go-check + $(GO) test -v -race -db=mssql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ + -conn_str="server=$(TEST_MSSQL_HOST);user id=$(TEST_MSSQL_USERNAME);password=$(TEST_MSSQL_PASSWORD);database=$(TEST_MSSQL_DBNAME)" \ + -coverprofile=mssql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PNONY: test-mssql\#% +test-mssql\#%: go-check + $(GO) test -v -race -run $* -db=mssql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ + -conn_str="server=$(TEST_MSSQL_HOST);user id=$(TEST_MSSQL_USERNAME);password=$(TEST_MSSQL_PASSWORD);database=$(TEST_MSSQL_DBNAME)" \ + -coverprofile=mssql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PNONY: 
test-mymysql +test-mymysql: go-check + $(GO) test -v -race -db=mymysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ + -conn_str="tcp:$(TEST_MYSQL_HOST)*$(TEST_MYSQL_DBNAME)/$(TEST_MYSQL_USERNAME)/$(TEST_MYSQL_PASSWORD)" \ + -coverprofile=mymysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PNONY: test-mymysql\#% +test-mymysql\#%: go-check + $(GO) test -v -race -run $* -db=mymysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ + -conn_str="tcp:$(TEST_MYSQL_HOST)*$(TEST_MYSQL_DBNAME)/$(TEST_MYSQL_USERNAME)/$(TEST_MYSQL_PASSWORD)" \ + -coverprofile=mymysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PNONY: test-mysql +test-mysql: go-check + $(GO) test -v -race -db=mysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ + -conn_str="$(TEST_MYSQL_USERNAME):$(TEST_MYSQL_PASSWORD)@tcp($(TEST_MYSQL_HOST))/$(TEST_MYSQL_DBNAME)?charset=$(TEST_MYSQL_CHARSET)" \ + -coverprofile=mysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: test-mysql\#% +test-mysql\#%: go-check + $(GO) test -v -race -run $* -db=mysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ + -conn_str="$(TEST_MYSQL_USERNAME):$(TEST_MYSQL_PASSWORD)@tcp($(TEST_MYSQL_HOST))/$(TEST_MYSQL_DBNAME)?charset=$(TEST_MYSQL_CHARSET)" \ + -coverprofile=mysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PNONY: test-postgres +test-postgres: go-check + $(GO) test -v -race -db=postgres -schema='$(TEST_PGSQL_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \ + -conn_str="postgres://$(TEST_PGSQL_USERNAME):$(TEST_PGSQL_PASSWORD)@$(TEST_PGSQL_HOST)/$(TEST_PGSQL_DBNAME)?sslmode=disable" \ + -quote=$(TEST_QUOTE_POLICY) -coverprofile=postgres.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: test-postgres\#% +test-postgres\#%: go-check + $(GO) test -v -race -run $* -db=postgres -schema='$(TEST_PGSQL_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \ + -conn_str="postgres://$(TEST_PGSQL_USERNAME):$(TEST_PGSQL_PASSWORD)@$(TEST_PGSQL_HOST)/$(TEST_PGSQL_DBNAME)?sslmode=disable" \ + -quote=$(TEST_QUOTE_POLICY) -coverprofile=postgres.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: test-sqlite +test-sqlite: go-check + $(GO) test -v -race -cache=$(TEST_CACHE_ENABLE) -db=sqlite3 -conn_str="./test.db?cache=shared&mode=rwc" \ + -quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: test-sqlite-schema +test-sqlite-schema: go-check + $(GO) test -v -race -schema=xorm -cache=$(TEST_CACHE_ENABLE) -db=sqlite3 -conn_str="./test.db?cache=shared&mode=rwc" \ + -quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: test-sqlite\#% +test-sqlite\#%: go-check + $(GO) test -v -race -run $* -cache=$(TEST_CACHE_ENABLE) -db=sqlite3 -conn_str="./test.db?cache=shared&mode=rwc" \ + -quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PNONY: test-tidb +test-tidb: go-check + $(GO) test -v -race -db=mysql -cache=$(TEST_CACHE_ENABLE) -ignore_select_update=true \ + -conn_str="$(TEST_TIDB_USERNAME):$(TEST_TIDB_PASSWORD)@tcp($(TEST_TIDB_HOST))/$(TEST_TIDB_DBNAME)" \ + -quote=$(TEST_QUOTE_POLICY) -coverprofile=tidb.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: test-tidb\#% +test-tidb\#%: go-check + $(GO) test -v -race 
-run $* -db=mysql -cache=$(TEST_CACHE_ENABLE) -ignore_select_update=true \ + -conn_str="$(TEST_TIDB_USERNAME):$(TEST_TIDB_PASSWORD)@tcp($(TEST_TIDB_HOST))/$(TEST_TIDB_DBNAME)" \ + -quote=$(TEST_QUOTE_POLICY) -coverprofile=tidb.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: vet +vet: + $(GO) vet $(PACKAGES) \ No newline at end of file diff --git a/vendor/xorm.io/xorm/README.md b/vendor/xorm.io/xorm/README.md index 17a6ed37f..2dc4d6eb3 100644 --- a/vendor/xorm.io/xorm/README.md +++ b/vendor/xorm.io/xorm/README.md @@ -8,51 +8,50 @@ Xorm is a simple and powerful ORM for Go. [![](https://goreportcard.com/badge/xorm.io/xorm)](https://goreportcard.com/report/xorm.io/xorm) [![Join the chat at https://img.shields.io/discord/323460943201959939.svg](https://img.shields.io/discord/323460943201959939.svg)](https://discord.gg/HuR2CF3) +## Notice + +v1.0.0 has some break changes from v0.8.2. + +- Removed some non gonic function name `Id`, `Sql`, please use `ID`, `SQL` instead. +- Removed the dependent from `xorm.io/core` and moved the codes to `xorm.io/xorm/core`, `xorm.io/xorm/names`, `xorm.io/xorm/schemas` and others. +- Renamed some interface names. i.e. `core.IMapper` -> `names.Mapper`, `core.ILogger` -> `log.Logger`. + ## Features * Struct <-> Table Mapping Support - * Chainable APIs - * Transaction Support - * Both ORM and raw SQL operation Support - * Sync database schema Support - * Query Cache speed up - -* Database Reverse support, See [Xorm Tool README](https://github.com/go-xorm/cmd/blob/master/README.md) - +* Database Reverse support via [xorm.io/reverse](https://xorm.io/reverse) * Simple cascade loading support - * Optimistic Locking support - * SQL Builder support via [xorm.io/builder](https://xorm.io/builder) - * Automatical Read/Write seperatelly - * Postgres schema support - * Context Cache support +* Support log/SQLLog context ## Drivers Support Drivers for Go's sql package which currently support database/sql includes: -* Mysql: [github.com/go-sql-driver/mysql](https://github.com/go-sql-driver/mysql) - -* MyMysql: [github.com/ziutek/mymysql/godrv](https://github.com/ziutek/mymysql/tree/master/godrv) +* [Mysql5.*](https://github.com/mysql/mysql-server/tree/5.7) / [Mysql8.*](https://github.com/mysql/mysql-server) / [Mariadb](https://github.com/MariaDB/server) / [Tidb](https://github.com/pingcap/tidb) + - [github.com/go-sql-driver/mysql](https://github.com/go-sql-driver/mysql) + - [github.com/ziutek/mymysql/godrv](https://github.com/ziutek/mymysql/godrv) -* Postgres: [github.com/lib/pq](https://github.com/lib/pq) +* [Postgres](https://github.com/postgres/postgres) / [Cockroach](https://github.com/cockroachdb/cockroach) + - [github.com/lib/pq](https://github.com/lib/pq) -* Tidb: [github.com/pingcap/tidb](https://github.com/pingcap/tidb) +* [SQLite](https://sqlite.org) + - [github.com/mattn/go-sqlite3](https://github.com/mattn/go-sqlite3) -* SQLite: [github.com/mattn/go-sqlite3](https://github.com/mattn/go-sqlite3) +* MsSql + - [github.com/denisenkom/go-mssqldb](https://github.com/denisenkom/go-mssqldb) -* MsSql: [github.com/denisenkom/go-mssqldb](https://github.com/denisenkom/go-mssqldb) - -* Oracle: [github.com/mattn/go-oci8](https://github.com/mattn/go-oci8) (experiment) +* Oracle + - [github.com/mattn/go-oci8](https://github.com/mattn/go-oci8) (experiment) ## Installation @@ -62,7 +61,7 @@ Drivers for Go's sql package which currently support database/sql includes: * [Manual](http://xorm.io/docs) -* [GoDoc](http://godoc.org/xorm.io/xorm) +* 
[GoDoc](http://pkg.go.dev/xorm.io/xorm) ## Quick Start @@ -440,27 +439,7 @@ Support this project by becoming a sponsor. Your logo will show up here with a l ## Changelog -* **v0.7.0** - * Some bugs fixed - -* **v0.6.6** - * Some bugs fixed - -* **v0.6.5** - * Postgres schema support - * vgo support - * Add FindAndCount - * Database special params support via NewEngineWithParams - * Some bugs fixed - -* **v0.6.4** - * Automatical Read/Write seperatelly - * Query/QueryString/QueryInterface and action with Where/And - * Get support non-struct variables - * BufferSize on Iterate - * fix some other bugs. - -[More changes ...](https://github.com/go-xorm/manual-en-US/tree/master/chapter-16) +You can find all the changelog [here](CHANGELOG.md) ## Cases diff --git a/vendor/xorm.io/xorm/README_CN.md b/vendor/xorm.io/xorm/README_CN.md index 644bdc0b6..f6f883101 100644 --- a/vendor/xorm.io/xorm/README_CN.md +++ b/vendor/xorm.io/xorm/README_CN.md @@ -2,57 +2,55 @@ [English](https://gitea.com/xorm/xorm/src/branch/master/README.md) -xorm是一个简单而强大的Go语言ORM库. 通过它可以使数据库操作非常简便。 +xorm 是一个简单而强大的Go语言ORM库. 通过它可以使数据库操作非常简便。 -[![Build Status](https://drone.gitea.com/api/badges/xorm/builder/status.svg)](https://drone.gitea.com/xorm/builder) [![](http://gocover.io/_badge/xorm.io/xorm)](https://gocover.io/xorm.io/xorm) +[![Build Status](https://drone.gitea.com/api/badges/xorm/xorm/status.svg)](https://drone.gitea.com/xorm/xorm) [![](http://gocover.io/_badge/xorm.io/xorm)](https://gocover.io/xorm.io/xorm) [![](https://goreportcard.com/badge/xorm.io/xorm)](https://goreportcard.com/report/xorm.io/xorm) [![Join the chat at https://img.shields.io/discord/323460943201959939.svg](https://img.shields.io/discord/323460943201959939.svg)](https://discord.gg/HuR2CF3) -## 特性 +## Notice -* 支持Struct和数据库表之间的灵活映射,并支持自动同步 +v1.0.0 相对于 v0.8.2 有以下不兼容的变更: -* 事务支持 +- 移除了部分不符合Go语言命名的函数,如 `Id`, `Sql`,请使用 `ID`, `SQL` 替代。 +- 删除了对 `xorm.io/core` 的依赖。大部分代码迁移到了 `xorm.io/xorm/core`, `xorm.io/xorm/names`, `xorm.io/xorm/schemas` 等等几个包中. +- 重命名了几个结构体,如: `core.IMapper` -> `names.Mapper`, `core.ILogger` -> `log.Logger`. 
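To make the breaking renames in the notice above (listed in both the English and Chinese READMEs) concrete, here is a rough before/after sketch of caller code. The `User` model and the queries are illustrative only; the method and package names follow the notice (`Id`/`Sql` dropped in favour of `ID`/`SQL`, mappers moved to `xorm.io/xorm/names`):

```go
package main

import (
	"xorm.io/xorm"
	"xorm.io/xorm/names"
)

type User struct {
	Id   int64
	Name string
}

func example(engine *xorm.Engine) error {
	// xorm v0.8.x (no longer compiles against v1.0.0):
	//   engine.SetMapper(core.GonicMapper{})
	//   has, err := engine.Id(1).Get(&user)
	//   rows, err := engine.Sql("select * from user").Query()

	// xorm v1.0.0: mappers live in xorm.io/xorm/names and the
	// non-gonic method names were replaced by ID and SQL.
	engine.SetMapper(names.GonicMapper{})

	var user User
	if _, err := engine.ID(1).Get(&user); err != nil {
		return err
	}
	_, err := engine.SQL("select * from user").Query()
	return err
}

func main() {}
```

Most of the migration is mechanical renaming; the rest is updating import paths from `xorm.io/core` to the new `xorm.io/xorm/...` sub packages, as the notice describes.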
-* 同时支持原始SQL语句和ORM操作的混合执行 +## 特性 +* 支持 Struct 和数据库表之间的灵活映射,并支持自动同步 +* 事务支持 +* 同时支持原始SQL语句和ORM操作的混合执行 * 使用连写来简化调用 - -* 支持使用Id, In, Where, Limit, Join, Having, Table, Sql, Cols等函数和结构体等方式作为条件 - +* 支持使用ID, In, Where, Limit, Join, Having, Table, SQL, Cols等函数和结构体等方式作为条件 * 支持级联加载Struct - * Schema支持(仅Postgres) - * 支持缓存 - -* 支持根据数据库自动生成xorm的结构体 - +* 通过 [xorm.io/reverse](https://xorm.io/reverse) 支持根据数据库自动生成 xorm 结构体 * 支持记录版本(即乐观锁) - -* 内置SQL Builder支持 - +* 通过 [xorm.io/builder](https://xorm.io/builder) 内置 SQL Builder 支持 * 上下文缓存支持 +* 支持日志上下文 ## 驱动支持 目前支持的Go数据库驱动和对应的数据库如下: -* Mysql: [github.com/go-sql-driver/mysql](https://github.com/go-sql-driver/mysql) - -* MyMysql: [github.com/ziutek/mymysql/godrv](https://github.com/ziutek/mymysql/godrv) - -* Postgres: [github.com/lib/pq](https://github.com/lib/pq) +* [Mysql5.*](https://github.com/mysql/mysql-server/tree/5.7) / [Mysql8.*](https://github.com/mysql/mysql-server) / [Mariadb](https://github.com/MariaDB/server) / [Tidb](https://github.com/pingcap/tidb) + - [github.com/go-sql-driver/mysql](https://github.com/go-sql-driver/mysql) + - [github.com/ziutek/mymysql/godrv](https://github.com/ziutek/mymysql/godrv) -* Tidb: [github.com/pingcap/tidb](https://github.com/pingcap/tidb) +* [Postgres](https://github.com/postgres/postgres) / [Cockroach](https://github.com/cockroachdb/cockroach) + - [github.com/lib/pq](https://github.com/lib/pq) -* SQLite: [github.com/mattn/go-sqlite3](https://github.com/mattn/go-sqlite3) +* [SQLite](https://sqlite.org) + - [github.com/mattn/go-sqlite3](https://github.com/mattn/go-sqlite3) -* MsSql: [github.com/denisenkom/go-mssqldb](https://github.com/denisenkom/go-mssqldb) +* MsSql + - [github.com/denisenkom/go-mssqldb](https://github.com/denisenkom/go-mssqldb) -* MsSql: [github.com/lunny/godbc](https://github.com/lunny/godbc) - -* Oracle: [github.com/mattn/go-oci8](https://github.com/mattn/go-oci8) (试验性支持) +* Oracle + - [github.com/mattn/go-oci8](https://github.com/mattn/go-oci8) (试验性支持) ## 安装 @@ -62,7 +60,7 @@ xorm是一个简单而强大的Go语言ORM库. 
通过它可以使数据库操作 * [操作指南](http://xorm.io/docs) -* [Godoc代码文档](http://godoc.org/xorm.io/xorm) +* [Godoc代码文档](http://pkg.go.dev/xorm.io/xorm) # 快速开始 @@ -435,14 +433,14 @@ res, err := engine.Transaction(func(session *xorm.Session) (interface{}, error) # 案例 -* [Go语言中文网](http://studygolang.com/) - [github.com/studygolang/studygolang](https://github.com/studygolang/studygolang) - * [Gitea](http://gitea.io) - [github.com/go-gitea/gitea](http://github.com/go-gitea/gitea) * [Gogs](http://try.gogits.org) - [github.com/gogits/gogs](http://github.com/gogits/gogs) * [grafana](https://grafana.com/) - [github.com/grafana/grafana](http://github.com/grafana/grafana) +* [Go语言中文网](http://studygolang.com/) - [github.com/studygolang/studygolang](https://github.com/studygolang/studygolang) + * [github.com/m3ng9i/qreader](https://github.com/m3ng9i/qreader) * [Wego](http://github.com/go-tango/wego) @@ -470,27 +468,7 @@ res, err := engine.Transaction(func(session *xorm.Session) (interface{}, error) ## 更新日志 -* **v0.7.0** - * 修正部分Bug - -* **v0.6.6** - * 修正部分Bug - -* **v0.6.5** - * 通过 engine.SetSchema 来支持 schema,当前仅支持Postgres - * vgo 支持 - * 新增 `FindAndCount` 函数 - * 通过 `NewEngineWithParams` 支持数据库特别参数 - * 修正部分Bug - -* **v0.6.4** - * 自动读写分离支持 - * Query/QueryString/QueryInterface 支持与 Where/And 合用 - * `Get` 支持获取非结构体变量 - * `Iterate` 支持 `BufferSize` - * 修正部分Bug - -[更多更新日志...](https://github.com/go-xorm/manual-zh-CN/tree/master/chapter-16) +请访问 [CHANGELOG.md](CHANGELOG.md) 获得更新日志。 ## LICENSE diff --git a/vendor/xorm.io/core/cache.go b/vendor/xorm.io/xorm/caches/cache.go similarity index 85% rename from vendor/xorm.io/core/cache.go rename to vendor/xorm.io/xorm/caches/cache.go index 982abe6a5..7b80eb88d 100644 --- a/vendor/xorm.io/core/cache.go +++ b/vendor/xorm.io/xorm/caches/cache.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-package core +package caches import ( "bytes" @@ -11,6 +11,8 @@ import ( "fmt" "strings" "time" + + "xorm.io/xorm/schemas" ) const ( @@ -28,6 +30,8 @@ const ( var ( ErrCacheMiss = errors.New("xorm/cache: key not found") ErrNotStored = errors.New("xorm/cache: not stored") + // ErrNotExist record does not exist error + ErrNotExist = errors.New("Record does not exist") ) // CacheStore is a interface to store cache @@ -53,7 +57,7 @@ type Cacher interface { ClearBeans(tableName string) } -func encodeIds(ids []PK) (string, error) { +func encodeIds(ids []schemas.PK) (string, error) { buf := new(bytes.Buffer) enc := gob.NewEncoder(buf) err := enc.Encode(ids) @@ -61,8 +65,8 @@ func encodeIds(ids []PK) (string, error) { return buf.String(), err } -func decodeIds(s string) ([]PK, error) { - pks := make([]PK, 0) +func decodeIds(s string) ([]schemas.PK, error) { + pks := make([]schemas.PK, 0) dec := gob.NewDecoder(strings.NewReader(s)) err := dec.Decode(&pks) @@ -71,7 +75,7 @@ func decodeIds(s string) ([]PK, error) { } // GetCacheSql returns cacher PKs via SQL -func GetCacheSql(m Cacher, tableName, sql string, args interface{}) ([]PK, error) { +func GetCacheSql(m Cacher, tableName, sql string, args interface{}) ([]schemas.PK, error) { bytes := m.GetIds(tableName, GenSqlKey(sql, args)) if bytes == nil { return nil, errors.New("Not Exist") @@ -80,7 +84,7 @@ func GetCacheSql(m Cacher, tableName, sql string, args interface{}) ([]PK, error } // PutCacheSql puts cacher SQL and PKs -func PutCacheSql(m Cacher, ids []PK, tableName, sql string, args interface{}) error { +func PutCacheSql(m Cacher, ids []schemas.PK, tableName, sql string, args interface{}) error { bytes, err := encodeIds(ids) if err != nil { return err diff --git a/vendor/xorm.io/xorm/caches/encode.go b/vendor/xorm.io/xorm/caches/encode.go new file mode 100644 index 000000000..4ba39924a --- /dev/null +++ b/vendor/xorm.io/xorm/caches/encode.go @@ -0,0 +1,58 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package caches + +import ( + "bytes" + "crypto/md5" + "encoding/gob" + "encoding/json" + "fmt" + "io" +) + +// md5 hash string +func Md5(str string) string { + m := md5.New() + io.WriteString(m, str) + return fmt.Sprintf("%x", m.Sum(nil)) +} +func Encode(data interface{}) ([]byte, error) { + //return JsonEncode(data) + return GobEncode(data) +} + +func Decode(data []byte, to interface{}) error { + //return JsonDecode(data, to) + return GobDecode(data, to) +} + +func GobEncode(data interface{}) ([]byte, error) { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + err := enc.Encode(&data) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func GobDecode(data []byte, to interface{}) error { + buf := bytes.NewBuffer(data) + dec := gob.NewDecoder(buf) + return dec.Decode(to) +} + +func JsonEncode(data interface{}) ([]byte, error) { + val, err := json.Marshal(data) + if err != nil { + return nil, err + } + return val, nil +} + +func JsonDecode(data []byte, to interface{}) error { + return json.Unmarshal(data, to) +} diff --git a/vendor/xorm.io/xorm/caches/leveldb.go b/vendor/xorm.io/xorm/caches/leveldb.go new file mode 100644 index 000000000..d1a177ad0 --- /dev/null +++ b/vendor/xorm.io/xorm/caches/leveldb.go @@ -0,0 +1,94 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
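The new `caches/encode.go` above standardises cached values on gob (`Encode`/`Decode` delegate to `GobEncode`/`GobDecode`; the JSON helpers are kept but unused by default). Because the value travels through an `interface{}`, struct types generally have to be registered with `encoding/gob` first. A small round-trip sketch under that assumption, with an illustrative `CachedUser` type:

```go
package main

import (
	"encoding/gob"
	"fmt"

	"xorm.io/xorm/caches"
)

type CachedUser struct {
	ID   int64
	Name string
}

func main() {
	// Encode wraps the value in an interface before gob-encoding it,
	// so concrete struct types should be registered up front.
	gob.Register(CachedUser{})

	data, err := caches.Encode(CachedUser{ID: 1, Name: "lunny"})
	if err != nil {
		panic(err)
	}

	// Decode into an interface and type-assert the concrete value back.
	var out interface{}
	if err := caches.Decode(data, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out.(CachedUser)) // {ID:1 Name:lunny}
}
```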
+ +package caches + +import ( + "log" + + "github.com/syndtr/goleveldb/leveldb" +) + +// LevelDBStore implements CacheStore provide local machine +type LevelDBStore struct { + store *leveldb.DB + Debug bool + v interface{} +} + +var _ CacheStore = &LevelDBStore{} + +func NewLevelDBStore(dbfile string) (*LevelDBStore, error) { + db := &LevelDBStore{} + h, err := leveldb.OpenFile(dbfile, nil) + if err != nil { + return nil, err + } + db.store = h + return db, nil +} + +func (s *LevelDBStore) Put(key string, value interface{}) error { + val, err := Encode(value) + if err != nil { + if s.Debug { + log.Println("[LevelDB]EncodeErr: ", err, "Key:", key) + } + return err + } + err = s.store.Put([]byte(key), val, nil) + if err != nil { + if s.Debug { + log.Println("[LevelDB]PutErr: ", err, "Key:", key) + } + return err + } + if s.Debug { + log.Println("[LevelDB]Put: ", key) + } + return err +} + +func (s *LevelDBStore) Get(key string) (interface{}, error) { + data, err := s.store.Get([]byte(key), nil) + if err != nil { + if s.Debug { + log.Println("[LevelDB]GetErr: ", err, "Key:", key) + } + if err == leveldb.ErrNotFound { + return nil, ErrNotExist + } + return nil, err + } + + err = Decode(data, &s.v) + if err != nil { + if s.Debug { + log.Println("[LevelDB]DecodeErr: ", err, "Key:", key) + } + return nil, err + } + if s.Debug { + log.Println("[LevelDB]Get: ", key, s.v) + } + return s.v, err +} + +func (s *LevelDBStore) Del(key string) error { + err := s.store.Delete([]byte(key), nil) + if err != nil { + if s.Debug { + log.Println("[LevelDB]DelErr: ", err, "Key:", key) + } + return err + } + if s.Debug { + log.Println("[LevelDB]Del: ", key) + } + return err +} + +func (s *LevelDBStore) Close() { + s.store.Close() +} diff --git a/vendor/xorm.io/xorm/cache_lru.go b/vendor/xorm.io/xorm/caches/lru.go similarity index 93% rename from vendor/xorm.io/xorm/cache_lru.go rename to vendor/xorm.io/xorm/caches/lru.go index ab948bd28..6b45ac944 100644 --- a/vendor/xorm.io/xorm/cache_lru.go +++ b/vendor/xorm.io/xorm/caches/lru.go @@ -2,15 +2,13 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
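The new `caches/leveldb.go` above adds a persistent `CacheStore` backed by goleveldb. A minimal usage sketch; the on-disk path is illustrative, and a plain string value keeps gob happy without extra type registration:

```go
package main

import (
	"fmt"

	"xorm.io/xorm/caches"
)

func main() {
	// Open (or create) the LevelDB directory used as the cache store.
	store, err := caches.NewLevelDBStore("/tmp/xorm-cache")
	if err != nil {
		panic(err)
	}
	defer store.Close()

	// Put gob-encodes the value; Get decodes it back as an interface{}.
	if err := store.Put("user:1", "lunny"); err != nil {
		panic(err)
	}
	v, err := store.Get("user:1")
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // lunny

	_ = store.Del("user:1")
}
```

The same store can be handed to `NewLRUCacher` in `caches/lru.go` (moved just below) to cache query results per table.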
-package xorm +package caches import ( "container/list" "fmt" "sync" "time" - - "xorm.io/core" ) // LRUCacher implments cache object facilities @@ -19,7 +17,7 @@ type LRUCacher struct { sqlList *list.List idIndex map[string]map[string]*list.Element sqlIndex map[string]map[string]*list.Element - store core.CacheStore + store CacheStore mutex sync.Mutex MaxElementSize int Expired time.Duration @@ -27,15 +25,15 @@ type LRUCacher struct { } // NewLRUCacher creates a cacher -func NewLRUCacher(store core.CacheStore, maxElementSize int) *LRUCacher { +func NewLRUCacher(store CacheStore, maxElementSize int) *LRUCacher { return NewLRUCacher2(store, 3600*time.Second, maxElementSize) } // NewLRUCacher2 creates a cache include different params -func NewLRUCacher2(store core.CacheStore, expired time.Duration, maxElementSize int) *LRUCacher { +func NewLRUCacher2(store CacheStore, expired time.Duration, maxElementSize int) *LRUCacher { cacher := &LRUCacher{store: store, idList: list.New(), sqlList: list.New(), Expired: expired, - GcInterval: core.CacheGcInterval, MaxElementSize: maxElementSize, + GcInterval: CacheGcInterval, MaxElementSize: maxElementSize, sqlIndex: make(map[string]map[string]*list.Element), idIndex: make(map[string]map[string]*list.Element), } @@ -57,7 +55,7 @@ func (m *LRUCacher) GC() { defer m.mutex.Unlock() var removedNum int for e := m.idList.Front(); e != nil; { - if removedNum <= core.CacheGcMaxRemoved && + if removedNum <= CacheGcMaxRemoved && time.Now().Sub(e.Value.(*idNode).lastVisit) > m.Expired { removedNum++ next := e.Next() @@ -71,7 +69,7 @@ func (m *LRUCacher) GC() { removedNum = 0 for e := m.sqlList.Front(); e != nil; { - if removedNum <= core.CacheGcMaxRemoved && + if removedNum <= CacheGcMaxRemoved && time.Now().Sub(e.Value.(*sqlNode).lastVisit) > m.Expired { removedNum++ next := e.Next() @@ -268,11 +266,11 @@ type sqlNode struct { } func genSQLKey(sql string, args interface{}) string { - return fmt.Sprintf("%v-%v", sql, args) + return fmt.Sprintf("%s-%v", sql, args) } func genID(prefix string, id string) string { - return fmt.Sprintf("%v-%v", prefix, id) + return fmt.Sprintf("%s-%s", prefix, id) } func newIDNode(tbName string, id string) *idNode { diff --git a/vendor/xorm.io/xorm/caches/manager.go b/vendor/xorm.io/xorm/caches/manager.go new file mode 100644 index 000000000..05045210d --- /dev/null +++ b/vendor/xorm.io/xorm/caches/manager.go @@ -0,0 +1,56 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package caches + +import "sync" + +type Manager struct { + cacher Cacher + disableGlobalCache bool + + cachers map[string]Cacher + cacherLock sync.RWMutex +} + +func NewManager() *Manager { + return &Manager{ + cachers: make(map[string]Cacher), + } +} + +// SetDisableGlobalCache disable global cache or not +func (mgr *Manager) SetDisableGlobalCache(disable bool) { + if mgr.disableGlobalCache != disable { + mgr.disableGlobalCache = disable + } +} + +func (mgr *Manager) SetCacher(tableName string, cacher Cacher) { + mgr.cacherLock.Lock() + mgr.cachers[tableName] = cacher + mgr.cacherLock.Unlock() +} + +func (mgr *Manager) GetCacher(tableName string) Cacher { + var cacher Cacher + var ok bool + mgr.cacherLock.RLock() + cacher, ok = mgr.cachers[tableName] + mgr.cacherLock.RUnlock() + if !ok && !mgr.disableGlobalCache { + cacher = mgr.cacher + } + return cacher +} + +// SetDefaultCacher set the default cacher. Xorm's default not enable cacher. 
+func (mgr *Manager) SetDefaultCacher(cacher Cacher) { + mgr.cacher = cacher +} + +// GetDefaultCacher returns the default cacher +func (mgr *Manager) GetDefaultCacher() Cacher { + return mgr.cacher +} diff --git a/vendor/xorm.io/xorm/cache_memory_store.go b/vendor/xorm.io/xorm/caches/memory_store.go similarity index 93% rename from vendor/xorm.io/xorm/cache_memory_store.go rename to vendor/xorm.io/xorm/caches/memory_store.go index 0c483f458..f16254d82 100644 --- a/vendor/xorm.io/xorm/cache_memory_store.go +++ b/vendor/xorm.io/xorm/caches/memory_store.go @@ -2,15 +2,13 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package xorm +package caches import ( "sync" - - "xorm.io/core" ) -var _ core.CacheStore = NewMemoryStore() +var _ CacheStore = NewMemoryStore() // MemoryStore represents in-memory store type MemoryStore struct { diff --git a/vendor/xorm.io/xorm/context_cache.go b/vendor/xorm.io/xorm/contexts/context_cache.go similarity index 97% rename from vendor/xorm.io/xorm/context_cache.go rename to vendor/xorm.io/xorm/contexts/context_cache.go index 1bc228849..0d0f0f02b 100644 --- a/vendor/xorm.io/xorm/context_cache.go +++ b/vendor/xorm.io/xorm/contexts/context_cache.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package xorm +package contexts // ContextCache is the interface that operates the cache data. type ContextCache interface { diff --git a/vendor/xorm.io/xorm/convert.go b/vendor/xorm.io/xorm/convert.go index 2316ca0b4..05db27048 100644 --- a/vendor/xorm.io/xorm/convert.go +++ b/vendor/xorm.io/xorm/convert.go @@ -346,3 +346,128 @@ func asBool(bs []byte) (bool, error) { } return strconv.ParseBool(string(bs)) } + +// str2PK convert string value to primary key value according to tp +func str2PKValue(s string, tp reflect.Type) (reflect.Value, error) { + var err error + var result interface{} + var defReturn = reflect.Zero(tp) + + switch tp.Kind() { + case reflect.Int: + result, err = strconv.Atoi(s) + if err != nil { + return defReturn, fmt.Errorf("convert %s as int: %s", s, err.Error()) + } + case reflect.Int8: + x, err := strconv.Atoi(s) + if err != nil { + return defReturn, fmt.Errorf("convert %s as int8: %s", s, err.Error()) + } + result = int8(x) + case reflect.Int16: + x, err := strconv.Atoi(s) + if err != nil { + return defReturn, fmt.Errorf("convert %s as int16: %s", s, err.Error()) + } + result = int16(x) + case reflect.Int32: + x, err := strconv.Atoi(s) + if err != nil { + return defReturn, fmt.Errorf("convert %s as int32: %s", s, err.Error()) + } + result = int32(x) + case reflect.Int64: + result, err = strconv.ParseInt(s, 10, 64) + if err != nil { + return defReturn, fmt.Errorf("convert %s as int64: %s", s, err.Error()) + } + case reflect.Uint: + x, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return defReturn, fmt.Errorf("convert %s as uint: %s", s, err.Error()) + } + result = uint(x) + case reflect.Uint8: + x, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return defReturn, fmt.Errorf("convert %s as uint8: %s", s, err.Error()) + } + result = uint8(x) + case reflect.Uint16: + x, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return defReturn, fmt.Errorf("convert %s as uint16: %s", s, err.Error()) + } + result = uint16(x) + case reflect.Uint32: + x, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return defReturn, fmt.Errorf("convert %s as uint32: %s", s, err.Error()) + } + result = uint32(x) + case 
reflect.Uint64: + result, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return defReturn, fmt.Errorf("convert %s as uint64: %s", s, err.Error()) + } + case reflect.String: + result = s + default: + return defReturn, errors.New("unsupported convert type") + } + return reflect.ValueOf(result).Convert(tp), nil +} + +func str2PK(s string, tp reflect.Type) (interface{}, error) { + v, err := str2PKValue(s, tp) + if err != nil { + return nil, err + } + return v.Interface(), nil +} + +func int64ToIntValue(id int64, tp reflect.Type) reflect.Value { + var v interface{} + kind := tp.Kind() + + if kind == reflect.Ptr { + kind = tp.Elem().Kind() + } + + switch kind { + case reflect.Int16: + temp := int16(id) + v = &temp + case reflect.Int32: + temp := int32(id) + v = &temp + case reflect.Int: + temp := int(id) + v = &temp + case reflect.Int64: + temp := id + v = &temp + case reflect.Uint16: + temp := uint16(id) + v = &temp + case reflect.Uint32: + temp := uint32(id) + v = &temp + case reflect.Uint64: + temp := uint64(id) + v = &temp + case reflect.Uint: + temp := uint(id) + v = &temp + } + + if tp.Kind() == reflect.Ptr { + return reflect.ValueOf(v).Convert(tp) + } + return reflect.ValueOf(v).Elem().Convert(tp) +} + +func int64ToInt(id int64, tp reflect.Type) interface{} { + return int64ToIntValue(id, tp).Interface() +} diff --git a/vendor/xorm.io/core/converstion.go b/vendor/xorm.io/xorm/convert/conversion.go similarity index 81% rename from vendor/xorm.io/core/converstion.go rename to vendor/xorm.io/xorm/convert/conversion.go index 9703c36e0..16f1a92a1 100644 --- a/vendor/xorm.io/core/converstion.go +++ b/vendor/xorm.io/xorm/convert/conversion.go @@ -1,8 +1,8 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. +// Copyright 2017 The Xorm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package core +package convert // Conversion is an interface. A type implements Conversion will according // the custom method to fill into database and retrieve from database. 
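The `Conversion` interface moves from `xorm.io/core` to the new `xorm.io/xorm/convert` package. Assuming it keeps its two methods, `FromDB([]byte) error` and `ToDB() ([]byte, error)` (the latter referenced in changelog fix #1296 above), a custom column type could implement it roughly like this; the `Money` type is illustrative:

```go
package main

import (
	"encoding/json"

	"xorm.io/xorm/convert"
)

// Money stores an amount of cents as a JSON number in a text column.
// xorm calls ToDB when writing the field and FromDB when scanning it back.
type Money struct {
	Cents int64
}

func (m Money) ToDB() ([]byte, error) {
	return json.Marshal(m.Cents)
}

func (m *Money) FromDB(data []byte) error {
	return json.Unmarshal(data, &m.Cents)
}

// Compile-time check against the relocated interface.
var _ convert.Conversion = (*Money)(nil)

func main() {}
```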
diff --git a/vendor/xorm.io/core/db.go b/vendor/xorm.io/xorm/core/db.go similarity index 80% rename from vendor/xorm.io/core/db.go rename to vendor/xorm.io/xorm/core/db.go index 4847937c7..9aa771ba3 100644 --- a/vendor/xorm.io/core/db.go +++ b/vendor/xorm.io/xorm/core/db.go @@ -12,6 +12,10 @@ import ( "reflect" "regexp" "sync" + "time" + + "xorm.io/xorm/log" + "xorm.io/xorm/names" ) var ( @@ -76,9 +80,10 @@ type cacheStruct struct { // DB is a wrap of sql.DB with extra contents type DB struct { *sql.DB - Mapper IMapper + Mapper names.Mapper reflectCache map[reflect.Type]*cacheStruct reflectCacheMutex sync.RWMutex + Logger log.ContextLogger } // Open opens a database @@ -89,7 +94,7 @@ func Open(driverName, dataSourceName string) (*DB, error) { } return &DB{ DB: db, - Mapper: NewCacheMapper(&SnakeMapper{}), + Mapper: names.NewCacheMapper(&names.SnakeMapper{}), reflectCache: make(map[reflect.Type]*cacheStruct), }, nil } @@ -98,11 +103,24 @@ func Open(driverName, dataSourceName string) (*DB, error) { func FromDB(db *sql.DB) *DB { return &DB{ DB: db, - Mapper: NewCacheMapper(&SnakeMapper{}), + Mapper: names.NewCacheMapper(&names.SnakeMapper{}), reflectCache: make(map[reflect.Type]*cacheStruct), } } +// NeedLogSQL returns true if need to log SQL +func (db *DB) NeedLogSQL(ctx context.Context) bool { + if db.Logger == nil { + return false + } + + v := ctx.Value("__xorm_show_sql") + if showSQL, ok := v.(bool); ok { + return showSQL + } + return db.Logger.IsShowSQL() +} + func (db *DB) reflectNew(typ reflect.Type) reflect.Value { db.reflectCacheMutex.Lock() defer db.reflectCacheMutex.Unlock() @@ -118,7 +136,25 @@ func (db *DB) reflectNew(typ reflect.Type) reflect.Value { // QueryContext overwrites sql.DB.QueryContext func (db *DB) QueryContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + start := time.Now() + showSQL := db.NeedLogSQL(ctx) + if showSQL { + db.Logger.BeforeSQL(log.LogContext{ + Ctx: ctx, + SQL: query, + Args: args, + }) + } rows, err := db.DB.QueryContext(ctx, query, args...) + if showSQL { + db.Logger.AfterSQL(log.LogContext{ + Ctx: ctx, + SQL: query, + Args: args, + ExecuteTime: time.Now().Sub(start), + Err: err, + }) + } if err != nil { if rows != nil { rows.Close() @@ -207,7 +243,7 @@ func (db *DB) ExecMapContext(ctx context.Context, query string, mp interface{}) if err != nil { return nil, err } - return db.DB.ExecContext(ctx, query, args...) + return db.ExecContext(ctx, query, args...) } func (db *DB) ExecMap(query string, mp interface{}) (sql.Result, error) { @@ -219,7 +255,30 @@ func (db *DB) ExecStructContext(ctx context.Context, query string, st interface{ if err != nil { return nil, err } - return db.DB.ExecContext(ctx, query, args...) + return db.ExecContext(ctx, query, args...) +} + +func (db *DB) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + start := time.Now() + showSQL := db.NeedLogSQL(ctx) + if showSQL { + db.Logger.BeforeSQL(log.LogContext{ + Ctx: ctx, + SQL: query, + Args: args, + }) + } + res, err := db.DB.ExecContext(ctx, query, args...) 
+ if showSQL { + db.Logger.AfterSQL(log.LogContext{ + Ctx: ctx, + SQL: query, + Args: args, + ExecuteTime: time.Now().Sub(start), + Err: err, + }) + } + return res, err } func (db *DB) ExecStruct(query string, st interface{}) (sql.Result, error) { diff --git a/vendor/xorm.io/core/error.go b/vendor/xorm.io/xorm/core/error.go similarity index 100% rename from vendor/xorm.io/core/error.go rename to vendor/xorm.io/xorm/core/error.go diff --git a/vendor/xorm.io/core/rows.go b/vendor/xorm.io/xorm/core/rows.go similarity index 100% rename from vendor/xorm.io/core/rows.go rename to vendor/xorm.io/xorm/core/rows.go diff --git a/vendor/xorm.io/core/scan.go b/vendor/xorm.io/xorm/core/scan.go similarity index 100% rename from vendor/xorm.io/core/scan.go rename to vendor/xorm.io/xorm/core/scan.go diff --git a/vendor/xorm.io/core/stmt.go b/vendor/xorm.io/xorm/core/stmt.go similarity index 77% rename from vendor/xorm.io/core/stmt.go rename to vendor/xorm.io/xorm/core/stmt.go index 8a21541a7..9d5954bd3 100644 --- a/vendor/xorm.io/core/stmt.go +++ b/vendor/xorm.io/xorm/core/stmt.go @@ -9,6 +9,9 @@ import ( "database/sql" "errors" "reflect" + "time" + + "xorm.io/xorm/log" ) // Stmt reprents a stmt objects @@ -16,6 +19,7 @@ type Stmt struct { *sql.Stmt db *DB names map[string]int + query string } func (db *DB) PrepareContext(ctx context.Context, query string) (*Stmt, error) { @@ -27,11 +31,28 @@ func (db *DB) PrepareContext(ctx context.Context, query string) (*Stmt, error) { return "?" }) + start := time.Now() + showSQL := db.NeedLogSQL(ctx) + if showSQL { + db.Logger.BeforeSQL(log.LogContext{ + Ctx: ctx, + SQL: "PREPARE", + }) + } stmt, err := db.DB.PrepareContext(ctx, query) + if showSQL { + db.Logger.AfterSQL(log.LogContext{ + Ctx: ctx, + SQL: "PREPARE", + ExecuteTime: time.Now().Sub(start), + Err: err, + }) + } if err != nil { return nil, err } - return &Stmt{stmt, db, names}, nil + + return &Stmt{stmt, db, names, query}, nil } func (db *DB) Prepare(query string) (*Stmt, error) { @@ -48,7 +69,7 @@ func (s *Stmt) ExecMapContext(ctx context.Context, mp interface{}) (sql.Result, for k, i := range s.names { args[i] = vv.Elem().MapIndex(reflect.ValueOf(k)).Interface() } - return s.Stmt.ExecContext(ctx, args...) + return s.ExecContext(ctx, args...) } func (s *Stmt) ExecMap(mp interface{}) (sql.Result, error) { @@ -65,15 +86,56 @@ func (s *Stmt) ExecStructContext(ctx context.Context, st interface{}) (sql.Resul for k, i := range s.names { args[i] = vv.Elem().FieldByName(k).Interface() } - return s.Stmt.ExecContext(ctx, args...) + return s.ExecContext(ctx, args...) } func (s *Stmt) ExecStruct(st interface{}) (sql.Result, error) { return s.ExecStructContext(context.Background(), st) } +func (s *Stmt) ExecContext(ctx context.Context, args ...interface{}) (sql.Result, error) { + start := time.Now() + showSQL := s.db.NeedLogSQL(ctx) + if showSQL { + s.db.Logger.BeforeSQL(log.LogContext{ + Ctx: ctx, + SQL: s.query, + Args: args, + }) + } + res, err := s.Stmt.ExecContext(ctx, args) + if showSQL { + s.db.Logger.AfterSQL(log.LogContext{ + Ctx: ctx, + SQL: s.query, + Args: args, + ExecuteTime: time.Now().Sub(start), + Err: err, + }) + } + return res, err +} + func (s *Stmt) QueryContext(ctx context.Context, args ...interface{}) (*Rows, error) { + start := time.Now() + showSQL := s.db.NeedLogSQL(ctx) + if showSQL { + s.db.Logger.BeforeSQL(log.LogContext{ + Ctx: ctx, + SQL: s.query, + Args: args, + }) + } rows, err := s.Stmt.QueryContext(ctx, args...) 
+ if showSQL { + s.db.Logger.AfterSQL(log.LogContext{ + Ctx: ctx, + SQL: s.query, + Args: args, + ExecuteTime: time.Now().Sub(start), + Err: err, + }) + } if err != nil { return nil, err } diff --git a/vendor/xorm.io/core/tx.go b/vendor/xorm.io/xorm/core/tx.go similarity index 68% rename from vendor/xorm.io/core/tx.go rename to vendor/xorm.io/xorm/core/tx.go index a56b70063..077132679 100644 --- a/vendor/xorm.io/core/tx.go +++ b/vendor/xorm.io/xorm/core/tx.go @@ -7,6 +7,9 @@ package core import ( "context" "database/sql" + "time" + + "xorm.io/xorm/log" ) type Tx struct { @@ -15,7 +18,23 @@ type Tx struct { } func (db *DB) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + start := time.Now() + showSQL := db.NeedLogSQL(ctx) + if showSQL { + db.Logger.BeforeSQL(log.LogContext{ + Ctx: ctx, + SQL: "BEGIN TRANSACTION", + }) + } tx, err := db.DB.BeginTx(ctx, opts) + if showSQL { + db.Logger.AfterSQL(log.LogContext{ + Ctx: ctx, + SQL: "BEGIN TRANSACTION", + ExecuteTime: time.Now().Sub(start), + Err: err, + }) + } if err != nil { return nil, err } @@ -23,11 +42,7 @@ func (db *DB) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { } func (db *DB) Begin() (*Tx, error) { - tx, err := db.DB.Begin() - if err != nil { - return nil, err - } - return &Tx{tx, db}, nil + return db.BeginTx(context.Background(), nil) } func (tx *Tx) PrepareContext(ctx context.Context, query string) (*Stmt, error) { @@ -39,11 +54,27 @@ func (tx *Tx) PrepareContext(ctx context.Context, query string) (*Stmt, error) { return "?" }) + start := time.Now() + showSQL := tx.db.NeedLogSQL(ctx) + if showSQL { + tx.db.Logger.BeforeSQL(log.LogContext{ + Ctx: ctx, + SQL: "PREPARE", + }) + } stmt, err := tx.Tx.PrepareContext(ctx, query) + if showSQL { + tx.db.Logger.AfterSQL(log.LogContext{ + Ctx: ctx, + SQL: "PREPARE", + ExecuteTime: time.Now().Sub(start), + Err: err, + }) + } if err != nil { return nil, err } - return &Stmt{stmt, tx.db, names}, nil + return &Stmt{stmt, tx.db, names, query}, nil } func (tx *Tx) Prepare(query string) (*Stmt, error) { @@ -64,7 +95,7 @@ func (tx *Tx) ExecMapContext(ctx context.Context, query string, mp interface{}) if err != nil { return nil, err } - return tx.Tx.ExecContext(ctx, query, args...) + return tx.ExecContext(ctx, query, args...) } func (tx *Tx) ExecMap(query string, mp interface{}) (sql.Result, error) { @@ -76,7 +107,30 @@ func (tx *Tx) ExecStructContext(ctx context.Context, query string, st interface{ if err != nil { return nil, err } - return tx.Tx.ExecContext(ctx, query, args...) + return tx.ExecContext(ctx, query, args...) +} + +func (tx *Tx) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + start := time.Now() + showSQL := tx.db.NeedLogSQL(ctx) + if showSQL { + tx.db.Logger.BeforeSQL(log.LogContext{ + Ctx: ctx, + SQL: query, + Args: args, + }) + } + res, err := tx.Tx.ExecContext(ctx, query, args...) 
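// [Editor's sketch, not part of the upstream patch.] Begin now delegates to
// BeginTx, so the "BEGIN TRANSACTION" and every statement issued through the
// wrapped Tx methods produce BeforeSQL/AfterSQL pairs. A hypothetical
// caller-side view, assuming a *DB with a logger configured:
func exampleLoggedTx(db *DB) error {
	tx, err := db.Begin() // logged as "BEGIN TRANSACTION"
	if err != nil {
		return err
	}
	if _, err := tx.ExecContext(context.Background(), "UPDATE repo SET is_private = ?", true); err != nil {
		tx.Rollback() // Rollback is not wrapped here, so it is not logged
		return err
	}
	return tx.Commit()
}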
+ if showSQL { + tx.db.Logger.AfterSQL(log.LogContext{ + Ctx: ctx, + SQL: query, + Args: args, + ExecuteTime: time.Now().Sub(start), + Err: err, + }) + } + return res, err } func (tx *Tx) ExecStruct(query string, st interface{}) (sql.Result, error) { @@ -84,8 +138,29 @@ func (tx *Tx) ExecStruct(query string, st interface{}) (sql.Result, error) { } func (tx *Tx) QueryContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + start := time.Now() + showSQL := tx.db.NeedLogSQL(ctx) + if showSQL { + tx.db.Logger.BeforeSQL(log.LogContext{ + Ctx: ctx, + SQL: query, + Args: args, + }) + } rows, err := tx.Tx.QueryContext(ctx, query, args...) + if showSQL { + tx.db.Logger.AfterSQL(log.LogContext{ + Ctx: ctx, + SQL: query, + Args: args, + ExecuteTime: time.Now().Sub(start), + Err: err, + }) + } if err != nil { + if rows != nil { + rows.Close() + } return nil, err } return &Rows{rows, tx.db}, nil diff --git a/vendor/xorm.io/xorm/dialects/dialect.go b/vendor/xorm.io/xorm/dialects/dialect.go new file mode 100644 index 000000000..4fdf35e94 --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/dialect.go @@ -0,0 +1,278 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dialects + +import ( + "context" + "fmt" + "strings" + "time" + + "xorm.io/xorm/core" + "xorm.io/xorm/schemas" +) + +// URI represents an uri to visit database +type URI struct { + DBType schemas.DBType + Proto string + Host string + Port string + DBName string + User string + Passwd string + Charset string + Laddr string + Raddr string + Timeout time.Duration + Schema string +} + +// SetSchema set schema +func (uri *URI) SetSchema(schema string) { + if uri.DBType == schemas.POSTGRES { + uri.Schema = schema + } +} + +// Dialect represents a kind of database +type Dialect interface { + Init(*core.DB, *URI) error + URI() *URI + DB() *core.DB + SQLType(*schemas.Column) string + FormatBytes(b []byte) string + DefaultSchema() string + + IsReserved(string) bool + Quoter() schemas.Quoter + SetQuotePolicy(quotePolicy QuotePolicy) + + AutoIncrStr() string + + GetIndexes(ctx context.Context, tableName string) (map[string]*schemas.Index, error) + IndexCheckSQL(tableName, idxName string) (string, []interface{}) + CreateIndexSQL(tableName string, index *schemas.Index) string + DropIndexSQL(tableName string, index *schemas.Index) string + + GetTables(ctx context.Context) ([]*schemas.Table, error) + IsTableExist(ctx context.Context, tableName string) (bool, error) + CreateTableSQL(table *schemas.Table, tableName string) ([]string, bool) + DropTableSQL(tableName string) (string, bool) + + GetColumns(ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) + IsColumnExist(ctx context.Context, tableName string, colName string) (bool, error) + AddColumnSQL(tableName string, col *schemas.Column) string + ModifyColumnSQL(tableName string, col *schemas.Column) string + + ForUpdateSQL(query string) string + + Filters() []Filter + SetParams(params map[string]string) +} + +// Base represents a basic dialect and all real dialects could embed this struct +type Base struct { + db *core.DB + dialect Dialect + uri *URI + quoter schemas.Quoter +} + +func (b *Base) Quoter() schemas.Quoter { + return b.quoter +} + +func (b *Base) DB() *core.DB { + return b.db +} + +func (b *Base) DefaultSchema() string { + return "" +} + +func (b *Base) Init(db *core.DB, dialect Dialect, uri *URI) error { + b.db, 
b.dialect, b.uri = db, dialect, uri + return nil +} + +func (b *Base) URI() *URI { + return b.uri +} + +func (b *Base) DBType() schemas.DBType { + return b.uri.DBType +} + +// String generate column description string according dialect +func (b *Base) String(col *schemas.Column) string { + sql := b.dialect.Quoter().Quote(col.Name) + " " + + sql += b.dialect.SQLType(col) + " " + + if col.IsPrimaryKey { + sql += "PRIMARY KEY " + if col.IsAutoIncrement { + sql += b.dialect.AutoIncrStr() + " " + } + } + + if col.Default != "" { + sql += "DEFAULT " + col.Default + " " + } + + if col.Nullable { + sql += "NULL " + } else { + sql += "NOT NULL " + } + + return sql +} + +// StringNoPk generate column description string according dialect without primary keys +func (b *Base) StringNoPk(col *schemas.Column) string { + sql := b.dialect.Quoter().Quote(col.Name) + " " + + sql += b.dialect.SQLType(col) + " " + + if col.Default != "" { + sql += "DEFAULT " + col.Default + " " + } + + if col.Nullable { + sql += "NULL " + } else { + sql += "NOT NULL " + } + + return sql +} + +func (b *Base) FormatBytes(bs []byte) string { + return fmt.Sprintf("0x%x", bs) +} + +func (db *Base) DropTableSQL(tableName string) (string, bool) { + quote := db.dialect.Quoter().Quote + return fmt.Sprintf("DROP TABLE IF EXISTS %s", quote(tableName)), true +} + +func (db *Base) HasRecords(ctx context.Context, query string, args ...interface{}) (bool, error) { + rows, err := db.DB().QueryContext(ctx, query, args...) + if err != nil { + return false, err + } + defer rows.Close() + + if rows.Next() { + return true, nil + } + return false, nil +} + +func (db *Base) IsColumnExist(ctx context.Context, tableName, colName string) (bool, error) { + quote := db.dialect.Quoter().Quote + query := fmt.Sprintf( + "SELECT %v FROM %v.%v WHERE %v = ? AND %v = ? 
AND %v = ?", + quote("COLUMN_NAME"), + quote("INFORMATION_SCHEMA"), + quote("COLUMNS"), + quote("TABLE_SCHEMA"), + quote("TABLE_NAME"), + quote("COLUMN_NAME"), + ) + return db.HasRecords(ctx, query, db.uri.DBName, tableName, colName) +} + +func (db *Base) AddColumnSQL(tableName string, col *schemas.Column) string { + return fmt.Sprintf("ALTER TABLE %v ADD %v", db.dialect.Quoter().Quote(tableName), + db.String(col)) +} + +func (db *Base) CreateIndexSQL(tableName string, index *schemas.Index) string { + quoter := db.dialect.Quoter() + var unique string + var idxName string + if index.Type == schemas.UniqueType { + unique = " UNIQUE" + } + idxName = index.XName(tableName) + return fmt.Sprintf("CREATE%s INDEX %v ON %v (%v)", unique, + quoter.Quote(idxName), quoter.Quote(tableName), + quoter.Join(index.Cols, ",")) +} + +func (db *Base) DropIndexSQL(tableName string, index *schemas.Index) string { + quote := db.dialect.Quoter().Quote + var name string + if index.IsRegular { + name = index.XName(tableName) + } else { + name = index.Name + } + return fmt.Sprintf("DROP INDEX %v ON %s", quote(name), quote(tableName)) +} + +func (db *Base) ModifyColumnSQL(tableName string, col *schemas.Column) string { + return fmt.Sprintf("alter table %s MODIFY COLUMN %s", tableName, db.StringNoPk(col)) +} + +func (b *Base) ForUpdateSQL(query string) string { + return query + " FOR UPDATE" +} + +func (b *Base) SetParams(params map[string]string) { +} + +var ( + dialects = map[string]func() Dialect{} +) + +// RegisterDialect register database dialect +func RegisterDialect(dbName schemas.DBType, dialectFunc func() Dialect) { + if dialectFunc == nil { + panic("core: Register dialect is nil") + } + dialects[strings.ToLower(string(dbName))] = dialectFunc // !nashtsai! allow override dialect +} + +// QueryDialect query if registered database dialect +func QueryDialect(dbName schemas.DBType) Dialect { + if d, ok := dialects[strings.ToLower(string(dbName))]; ok { + return d() + } + return nil +} + +func regDrvsNDialects() bool { + providedDrvsNDialects := map[string]struct { + dbType schemas.DBType + getDriver func() Driver + getDialect func() Dialect + }{ + "mssql": {"mssql", func() Driver { return &odbcDriver{} }, func() Dialect { return &mssql{} }}, + "odbc": {"mssql", func() Driver { return &odbcDriver{} }, func() Dialect { return &mssql{} }}, // !nashtsai! 
TODO change this when supporting MS Access + "mysql": {"mysql", func() Driver { return &mysqlDriver{} }, func() Dialect { return &mysql{} }}, + "mymysql": {"mysql", func() Driver { return &mymysqlDriver{} }, func() Dialect { return &mysql{} }}, + "postgres": {"postgres", func() Driver { return &pqDriver{} }, func() Dialect { return &postgres{} }}, + "pgx": {"postgres", func() Driver { return &pqDriverPgx{} }, func() Dialect { return &postgres{} }}, + "sqlite3": {"sqlite3", func() Driver { return &sqlite3Driver{} }, func() Dialect { return &sqlite3{} }}, + "oci8": {"oracle", func() Driver { return &oci8Driver{} }, func() Dialect { return &oracle{} }}, + "goracle": {"oracle", func() Driver { return &goracleDriver{} }, func() Dialect { return &oracle{} }}, + } + + for driverName, v := range providedDrvsNDialects { + if driver := QueryDriver(driverName); driver == nil { + RegisterDriver(driverName, v.getDriver()) + RegisterDialect(v.dbType, v.getDialect) + } + } + return true +} + +func init() { + regDrvsNDialects() +} diff --git a/vendor/xorm.io/xorm/dialects/driver.go b/vendor/xorm.io/xorm/dialects/driver.go new file mode 100644 index 000000000..89d21bfc3 --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/driver.go @@ -0,0 +1,63 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dialects + +import ( + "fmt" + + "xorm.io/xorm/core" +) + +type Driver interface { + Parse(string, string) (*URI, error) +} + +var ( + drivers = map[string]Driver{} +) + +func RegisterDriver(driverName string, driver Driver) { + if driver == nil { + panic("core: Register driver is nil") + } + if _, dup := drivers[driverName]; dup { + panic("core: Register called twice for driver " + driverName) + } + drivers[driverName] = driver +} + +func QueryDriver(driverName string) Driver { + return drivers[driverName] +} + +func RegisteredDriverSize() int { + return len(drivers) +} + +// OpenDialect opens a dialect via driver name and connection string +func OpenDialect(driverName, connstr string) (Dialect, error) { + driver := QueryDriver(driverName) + if driver == nil { + return nil, fmt.Errorf("Unsupported driver name: %v", driverName) + } + + uri, err := driver.Parse(driverName, connstr) + if err != nil { + return nil, err + } + + dialect := QueryDialect(uri.DBType) + if dialect == nil { + return nil, fmt.Errorf("Unsupported dialect type: %v", uri.DBType) + } + + db, err := core.Open(driverName, connstr) + if err != nil { + return nil, err + } + dialect.Init(db, uri) + + return dialect, nil +} diff --git a/vendor/xorm.io/xorm/dialects/filter.go b/vendor/xorm.io/xorm/dialects/filter.go new file mode 100644 index 000000000..6968b6ce8 --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/filter.go @@ -0,0 +1,43 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dialects + +import ( + "fmt" + "strings" +) + +// Filter is an interface to filter SQL +type Filter interface { + Do(sql string) string +} + +// SeqFilter filter SQL replace ?, ? ... to $1, $2 ... +type SeqFilter struct { + Prefix string + Start int +} + +func convertQuestionMark(sql, prefix string, start int) string { + var buf strings.Builder + var beginSingleQuote bool + var index = start + for _, c := range sql { + if !beginSingleQuote && c == '?' 
{ + buf.WriteString(fmt.Sprintf("%s%v", prefix, index)) + index++ + } else { + if c == '\'' { + beginSingleQuote = !beginSingleQuote + } + buf.WriteRune(c) + } + } + return buf.String() +} + +func (s *SeqFilter) Do(sql string) string { + return convertQuestionMark(sql, s.Prefix, s.Start) +} diff --git a/vendor/xorm.io/xorm/gen_reserved.sh b/vendor/xorm.io/xorm/dialects/gen_reserved.sh similarity index 100% rename from vendor/xorm.io/xorm/gen_reserved.sh rename to vendor/xorm.io/xorm/dialects/gen_reserved.sh diff --git a/vendor/xorm.io/xorm/dialect_mssql.go b/vendor/xorm.io/xorm/dialects/mssql.go similarity index 78% rename from vendor/xorm.io/xorm/dialect_mssql.go rename to vendor/xorm.io/xorm/dialects/mssql.go index 29070da2f..dd3f4247e 100644 --- a/vendor/xorm.io/xorm/dialect_mssql.go +++ b/vendor/xorm.io/xorm/dialects/mssql.go @@ -2,16 +2,18 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package xorm +package dialects import ( + "context" "errors" "fmt" "net/url" "strconv" "strings" - "xorm.io/core" + "xorm.io/xorm/core" + "xorm.io/xorm/schemas" ) var ( @@ -202,67 +204,70 @@ var ( "EXIT": true, "PROC": true, } + + mssqlQuoter = schemas.Quoter{'[', ']', schemas.AlwaysReserve} ) type mssql struct { - core.Base + Base } -func (db *mssql) Init(d *core.DB, uri *core.Uri, drivername, dataSourceName string) error { - return db.Base.Init(d, db, uri, drivername, dataSourceName) +func (db *mssql) Init(d *core.DB, uri *URI) error { + db.quoter = mssqlQuoter + return db.Base.Init(d, db, uri) } -func (db *mssql) SqlType(c *core.Column) string { +func (db *mssql) SQLType(c *schemas.Column) string { var res string switch t := c.SQLType.Name; t { - case core.Bool: - res = core.Bit + case schemas.Bool: + res = schemas.Bit if strings.EqualFold(c.Default, "true") { c.Default = "1" } else if strings.EqualFold(c.Default, "false") { c.Default = "0" } - case core.Serial: + case schemas.Serial: c.IsAutoIncrement = true c.IsPrimaryKey = true c.Nullable = false - res = core.Int - case core.BigSerial: + res = schemas.Int + case schemas.BigSerial: c.IsAutoIncrement = true c.IsPrimaryKey = true c.Nullable = false - res = core.BigInt - case core.Bytea, core.Blob, core.Binary, core.TinyBlob, core.MediumBlob, core.LongBlob: - res = core.VarBinary + res = schemas.BigInt + case schemas.Bytea, schemas.Blob, schemas.Binary, schemas.TinyBlob, schemas.MediumBlob, schemas.LongBlob: + res = schemas.VarBinary if c.Length == 0 { c.Length = 50 } - case core.TimeStamp: - res = core.DateTime - case core.TimeStampz: + case schemas.TimeStamp: + res = schemas.DateTime + case schemas.TimeStampz: res = "DATETIMEOFFSET" c.Length = 7 - case core.MediumInt: - res = core.Int - case core.Text, core.MediumText, core.TinyText, core.LongText, core.Json: - res = core.Varchar + "(MAX)" - case core.Double: - res = core.Real - case core.Uuid: - res = core.Varchar + case schemas.MediumInt: + res = schemas.Int + case schemas.Text, schemas.MediumText, schemas.TinyText, schemas.LongText, schemas.Json: + res = schemas.Varchar + "(MAX)" + case schemas.Double: + res = schemas.Real + case schemas.Uuid: + res = schemas.Varchar c.Length = 40 - case core.TinyInt: - res = core.TinyInt + case schemas.TinyInt: + res = schemas.TinyInt c.Length = 0 - case core.BigInt: - res = core.BigInt + case schemas.BigInt: + res = schemas.BigInt c.Length = 0 default: res = t } - if res == core.Int { - return core.Int + if res == schemas.Int { + return schemas.Int } hasLen1 := (c.Length > 0) @@ -276,88 +281,78 @@ 
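// [Editor's note, not part of the upstream patch.] The SeqFilter added in
// dialects/filter.go above turns '?' placeholders into numbered ones while
// leaving quoted literals untouched; the oracle dialect registers it with
// Prefix ":" and Start 1, so for example:
//
//	(&SeqFilter{Prefix: ":", Start: 1}).Do("a=? AND name='?' AND b=?")
//	// => "a=:1 AND name='?' AND b=:2"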
func (db *mssql) SqlType(c *core.Column) string { return res } -func (db *mssql) SupportInsertMany() bool { - return true -} - func (db *mssql) IsReserved(name string) bool { - _, ok := mssqlReservedWords[name] + _, ok := mssqlReservedWords[strings.ToUpper(name)] return ok } -func (db *mssql) Quote(name string) string { - return "\"" + name + "\"" -} - -func (db *mssql) SupportEngine() bool { - return false +func (db *mssql) SetQuotePolicy(quotePolicy QuotePolicy) { + switch quotePolicy { + case QuotePolicyNone: + var q = mssqlQuoter + q.IsReserved = schemas.AlwaysNoReserve + db.quoter = q + case QuotePolicyReserved: + var q = mssqlQuoter + q.IsReserved = db.IsReserved + db.quoter = q + case QuotePolicyAlways: + fallthrough + default: + db.quoter = mssqlQuoter + } } func (db *mssql) AutoIncrStr() string { return "IDENTITY" } -func (db *mssql) DropTableSql(tableName string) string { +func (db *mssql) DropTableSQL(tableName string) (string, bool) { return fmt.Sprintf("IF EXISTS (SELECT * FROM sysobjects WHERE id = "+ "object_id(N'%s') and OBJECTPROPERTY(id, N'IsUserTable') = 1) "+ - "DROP TABLE \"%s\"", tableName, tableName) + "DROP TABLE \"%s\"", tableName, tableName), true } -func (db *mssql) SupportCharset() bool { - return false -} - -func (db *mssql) IndexOnTable() bool { - return true -} - -func (db *mssql) IndexCheckSql(tableName, idxName string) (string, []interface{}) { +func (db *mssql) IndexCheckSQL(tableName, idxName string) (string, []interface{}) { args := []interface{}{idxName} sql := "select name from sysindexes where id=object_id('" + tableName + "') and name=?" return sql, args } -/*func (db *mssql) ColumnCheckSql(tableName, colName string) (string, []interface{}) { - args := []interface{}{tableName, colName} - sql := `SELECT "COLUMN_NAME" FROM "INFORMATION_SCHEMA"."COLUMNS" WHERE "TABLE_NAME" = ? AND "COLUMN_NAME" = ?` - return sql, args -}*/ - -func (db *mssql) IsColumnExist(tableName, colName string) (bool, error) { +func (db *mssql) IsColumnExist(ctx context.Context, tableName, colName string) (bool, error) { query := `SELECT "COLUMN_NAME" FROM "INFORMATION_SCHEMA"."COLUMNS" WHERE "TABLE_NAME" = ? 
AND "COLUMN_NAME" = ?` - return db.HasRecords(query, tableName, colName) + return db.HasRecords(ctx, query, tableName, colName) } -func (db *mssql) TableCheckSql(tableName string) (string, []interface{}) { - args := []interface{}{} +func (db *mssql) IsTableExist(ctx context.Context, tableName string) (bool, error) { sql := "select * from sysobjects where id = object_id(N'" + tableName + "') and OBJECTPROPERTY(id, N'IsUserTable') = 1" - return sql, args + return db.HasRecords(ctx, sql) } -func (db *mssql) GetColumns(tableName string) ([]string, map[string]*core.Column, error) { +func (db *mssql) GetColumns(ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) { args := []interface{}{} s := `select a.name as name, b.name as ctype,a.max_length,a.precision,a.scale,a.is_nullable as nullable, "default_is_null" = (CASE WHEN c.text is null THEN 1 ELSE 0 END), replace(replace(isnull(c.text,''),'(',''),')','') as vdefault, - ISNULL(i.is_primary_key, 0), a.is_identity as is_identity + ISNULL(p.is_primary_key, 0), a.is_identity as is_identity from sys.columns a left join sys.types b on a.user_type_id=b.user_type_id left join sys.syscomments c on a.default_object_id=c.id - LEFT OUTER JOIN - sys.index_columns ic ON ic.object_id = a.object_id AND ic.column_id = a.column_id - LEFT OUTER JOIN - sys.indexes i ON ic.object_id = i.object_id AND ic.index_id = i.index_id + LEFT OUTER JOIN (SELECT i.object_id, ic.column_id, i.is_primary_key + FROM sys.indexes i + LEFT JOIN sys.index_columns ic ON ic.object_id = i.object_id AND ic.index_id = i.index_id + WHERE i.is_primary_key = 1 + ) as p on p.object_id = a.object_id AND p.column_id = a.column_id where a.object_id=object_id('` + tableName + `')` - db.LogSQL(s, args) - rows, err := db.DB().Query(s, args...) + rows, err := db.DB().QueryContext(ctx, s, args...) 
if err != nil { return nil, nil, err } defer rows.Close() - cols := make(map[string]*core.Column) + cols := make(map[string]*schemas.Column) colSeq := make([]string, 0) for rows.Next() { var name, ctype, vdefault string @@ -368,7 +363,7 @@ func (db *mssql) GetColumns(tableName string) ([]string, map[string]*core.Column return nil, nil, err } - col := new(core.Column) + col := new(schemas.Column) col.Indexes = make(map[string]int) col.Name = strings.Trim(name, "` ") col.Nullable = nullable @@ -387,14 +382,14 @@ func (db *mssql) GetColumns(tableName string) ([]string, map[string]*core.Column } switch ct { case "DATETIMEOFFSET": - col.SQLType = core.SQLType{Name: core.TimeStampz, DefaultLength: 0, DefaultLength2: 0} + col.SQLType = schemas.SQLType{Name: schemas.TimeStampz, DefaultLength: 0, DefaultLength2: 0} case "NVARCHAR": - col.SQLType = core.SQLType{Name: core.NVarchar, DefaultLength: 0, DefaultLength2: 0} + col.SQLType = schemas.SQLType{Name: schemas.NVarchar, DefaultLength: 0, DefaultLength2: 0} case "IMAGE": - col.SQLType = core.SQLType{Name: core.VarBinary, DefaultLength: 0, DefaultLength2: 0} + col.SQLType = schemas.SQLType{Name: schemas.VarBinary, DefaultLength: 0, DefaultLength2: 0} default: - if _, ok := core.SqlTypes[ct]; ok { - col.SQLType = core.SQLType{Name: ct, DefaultLength: 0, DefaultLength2: 0} + if _, ok := schemas.SqlTypes[ct]; ok { + col.SQLType = schemas.SQLType{Name: ct, DefaultLength: 0, DefaultLength2: 0} } else { return nil, nil, fmt.Errorf("Unknown colType %v for %v - %v", ct, tableName, col.Name) } @@ -406,20 +401,19 @@ func (db *mssql) GetColumns(tableName string) ([]string, map[string]*core.Column return colSeq, cols, nil } -func (db *mssql) GetTables() ([]*core.Table, error) { +func (db *mssql) GetTables(ctx context.Context) ([]*schemas.Table, error) { args := []interface{}{} s := `select name from sysobjects where xtype ='U'` - db.LogSQL(s, args) - rows, err := db.DB().Query(s, args...) + rows, err := db.DB().QueryContext(ctx, s, args...) if err != nil { return nil, err } defer rows.Close() - tables := make([]*core.Table, 0) + tables := make([]*schemas.Table, 0) for rows.Next() { - table := core.NewEmptyTable() + table := schemas.NewEmptyTable() var name string err = rows.Scan(&name) if err != nil { @@ -431,7 +425,7 @@ func (db *mssql) GetTables() ([]*core.Table, error) { return tables, nil } -func (db *mssql) GetIndexes(tableName string) (map[string]*core.Index, error) { +func (db *mssql) GetIndexes(ctx context.Context, tableName string) (map[string]*schemas.Index, error) { args := []interface{}{tableName} s := `SELECT IXS.NAME AS [INDEX_NAME], @@ -444,15 +438,14 @@ INNER JOIN SYS.COLUMNS C ON IXS.OBJECT_ID=C.OBJECT_ID AND IXCS.COLUMN_ID=C.COLUMN_ID WHERE IXS.TYPE_DESC='NONCLUSTERED' and OBJECT_NAME(IXS.OBJECT_ID) =? ` - db.LogSQL(s, args) - rows, err := db.DB().Query(s, args...) + rows, err := db.DB().QueryContext(ctx, s, args...) if err != nil { return nil, err } defer rows.Close() - indexes := make(map[string]*core.Index, 0) + indexes := make(map[string]*schemas.Index, 0) for rows.Next() { var indexType int var indexName, colName, isUnique string @@ -468,9 +461,9 @@ WHERE IXS.TYPE_DESC='NONCLUSTERED' and OBJECT_NAME(IXS.OBJECT_ID) =? } if i { - indexType = core.UniqueType + indexType = schemas.UniqueType } else { - indexType = core.IndexType + indexType = schemas.IndexType } colName = strings.Trim(colName, "` ") @@ -480,10 +473,10 @@ WHERE IXS.TYPE_DESC='NONCLUSTERED' and OBJECT_NAME(IXS.OBJECT_ID) =? 
isRegular = true } - var index *core.Index + var index *schemas.Index var ok bool if index, ok = indexes[indexName]; !ok { - index = new(core.Index) + index = new(schemas.Index) index.Type = indexType index.Name = indexName index.IsRegular = isRegular @@ -494,7 +487,7 @@ WHERE IXS.TYPE_DESC='NONCLUSTERED' and OBJECT_NAME(IXS.OBJECT_ID) =? return indexes, nil } -func (db *mssql) CreateTableSql(table *core.Table, tableName, storeEngine, charset string) string { +func (db *mssql) CreateTableSQL(table *schemas.Table, tableName string) ([]string, bool) { var sql string if tableName == "" { tableName = table.Name @@ -502,16 +495,16 @@ func (db *mssql) CreateTableSql(table *core.Table, tableName, storeEngine, chars sql = "IF NOT EXISTS (SELECT [name] FROM sys.tables WHERE [name] = '" + tableName + "' ) CREATE TABLE " - sql += db.Quote(tableName) + " (" + sql += db.Quoter().Quote(tableName) + " (" pkList := table.PrimaryKeys for _, colName := range table.ColumnsSeq() { col := table.GetColumn(colName) if col.IsPrimaryKey && len(pkList) == 1 { - sql += col.String(db) + sql += db.String(col) } else { - sql += col.StringNoPk(db) + sql += db.StringNoPk(col) } sql = strings.TrimSpace(sql) sql += ", " @@ -525,21 +518,21 @@ func (db *mssql) CreateTableSql(table *core.Table, tableName, storeEngine, chars sql = sql[:len(sql)-2] + ")" sql += ";" - return sql + return []string{sql}, true } -func (db *mssql) ForUpdateSql(query string) string { +func (db *mssql) ForUpdateSQL(query string) string { return query } -func (db *mssql) Filters() []core.Filter { - return []core.Filter{&core.IdFilter{}, &core.QuoteFilter{}} +func (db *mssql) Filters() []Filter { + return []Filter{} } type odbcDriver struct { } -func (p *odbcDriver) Parse(driverName, dataSourceName string) (*core.Uri, error) { +func (p *odbcDriver) Parse(driverName, dataSourceName string) (*URI, error) { var dbName string if strings.HasPrefix(dataSourceName, "sqlserver://") { @@ -563,5 +556,5 @@ func (p *odbcDriver) Parse(driverName, dataSourceName string) (*core.Uri, error) if dbName == "" { return nil, errors.New("no db name provided") } - return &core.Uri{DbName: dbName, DbType: core.MSSQL}, nil + return &URI{DBName: dbName, DBType: schemas.MSSQL}, nil } diff --git a/vendor/xorm.io/xorm/dialect_mysql.go b/vendor/xorm.io/xorm/dialects/mysql.go similarity index 77% rename from vendor/xorm.io/xorm/dialect_mysql.go rename to vendor/xorm.io/xorm/dialects/mysql.go index cf1dbb6f2..b75986809 100644 --- a/vendor/xorm.io/xorm/dialect_mysql.go +++ b/vendor/xorm.io/xorm/dialects/mysql.go @@ -2,9 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
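// [Editor's sketch, not part of the upstream patch.] Quoting is no longer a
// per-dialect Quote(name) string method; each dialect carries a
// schemas.Quoter and exposes SetQuotePolicy. The three policies referenced in
// these hunks behave roughly like this for MySQL (backtick quoter):
//
//	d.SetQuotePolicy(dialects.QuotePolicyAlways)   // default: always `name`
//	d.SetQuotePolicy(dialects.QuotePolicyReserved) // quote only reserved words
//	d.SetQuotePolicy(dialects.QuotePolicyNone)     // never quote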
-package xorm +package dialects import ( + "context" "crypto/tls" "errors" "fmt" @@ -13,7 +14,8 @@ import ( "strings" "time" - "xorm.io/core" + "xorm.io/xorm/core" + "xorm.io/xorm/schemas" ) var ( @@ -159,10 +161,12 @@ var ( "YEAR_MONTH": true, "ZEROFILL": true, } + + mysqlQuoter = schemas.Quoter{'`', '`', schemas.AlwaysReserve} ) type mysql struct { - core.Base + Base net string addr string params map[string]string @@ -175,8 +179,9 @@ type mysql struct { rowFormat string } -func (db *mysql) Init(d *core.DB, uri *core.Uri, drivername, dataSourceName string) error { - return db.Base.Init(d, db, uri, drivername, dataSourceName) +func (db *mysql) Init(d *core.DB, uri *URI) error { + db.quoter = mysqlQuoter + return db.Base.Init(d, db, uri) } func (db *mysql) SetParams(params map[string]string) { @@ -199,29 +204,29 @@ func (db *mysql) SetParams(params map[string]string) { } } -func (db *mysql) SqlType(c *core.Column) string { +func (db *mysql) SQLType(c *schemas.Column) string { var res string switch t := c.SQLType.Name; t { - case core.Bool: - res = core.TinyInt + case schemas.Bool: + res = schemas.TinyInt c.Length = 1 - case core.Serial: + case schemas.Serial: c.IsAutoIncrement = true c.IsPrimaryKey = true c.Nullable = false - res = core.Int - case core.BigSerial: + res = schemas.Int + case schemas.BigSerial: c.IsAutoIncrement = true c.IsPrimaryKey = true c.Nullable = false - res = core.BigInt - case core.Bytea: - res = core.Blob - case core.TimeStampz: - res = core.Char + res = schemas.BigInt + case schemas.Bytea: + res = schemas.Blob + case schemas.TimeStampz: + res = schemas.Char c.Length = 64 - case core.Enum: // mysql enum - res = core.Enum + case schemas.Enum: // mysql enum + res = schemas.Enum res += "(" opts := "" for v := range c.EnumOptions { @@ -229,8 +234,8 @@ func (db *mysql) SqlType(c *core.Column) string { } res += strings.TrimLeft(opts, ",") res += ")" - case core.Set: // mysql set - res = core.Set + case schemas.Set: // mysql set + res = schemas.Set res += "(" opts := "" for v := range c.SetOptions { @@ -238,13 +243,13 @@ func (db *mysql) SqlType(c *core.Column) string { } res += strings.TrimLeft(opts, ",") res += ")" - case core.NVarchar: - res = core.Varchar - case core.Uuid: - res = core.Varchar + case schemas.NVarchar: + res = schemas.Varchar + case schemas.Uuid: + res = schemas.Varchar c.Length = 40 - case core.Json: - res = core.Text + case schemas.Json: + res = schemas.Text default: res = t } @@ -252,7 +257,7 @@ func (db *mysql) SqlType(c *core.Column) string { hasLen1 := (c.Length > 0) hasLen2 := (c.Length2 > 0) - if res == core.BigInt && !hasLen1 && !hasLen2 { + if res == schemas.BigInt && !hasLen1 && !hasLen2 { c.Length = 20 hasLen1 = true } @@ -265,70 +270,52 @@ func (db *mysql) SqlType(c *core.Column) string { return res } -func (db *mysql) SupportInsertMany() bool { - return true -} - func (db *mysql) IsReserved(name string) bool { - _, ok := mysqlReservedWords[name] + _, ok := mysqlReservedWords[strings.ToUpper(name)] return ok } -func (db *mysql) Quote(name string) string { - return "`" + name + "`" -} - -func (db *mysql) SupportEngine() bool { - return true -} - func (db *mysql) AutoIncrStr() string { return "AUTO_INCREMENT" } -func (db *mysql) SupportCharset() bool { - return true -} - -func (db *mysql) IndexOnTable() bool { - return true -} - -func (db *mysql) IndexCheckSql(tableName, idxName string) (string, []interface{}) { - args := []interface{}{db.DbName, tableName, idxName} +func (db *mysql) IndexCheckSQL(tableName, idxName string) (string, 
[]interface{}) { + args := []interface{}{db.uri.DBName, tableName, idxName} sql := "SELECT `INDEX_NAME` FROM `INFORMATION_SCHEMA`.`STATISTICS`" sql += " WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ? AND `INDEX_NAME`=?" return sql, args } -/*func (db *mysql) ColumnCheckSql(tableName, colName string) (string, []interface{}) { - args := []interface{}{db.DbName, tableName, colName} - sql := "SELECT `COLUMN_NAME` FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ? AND `COLUMN_NAME` = ?" - return sql, args -}*/ - -func (db *mysql) TableCheckSql(tableName string) (string, []interface{}) { - args := []interface{}{db.DbName, tableName} +func (db *mysql) IsTableExist(ctx context.Context, tableName string) (bool, error) { sql := "SELECT `TABLE_NAME` from `INFORMATION_SCHEMA`.`TABLES` WHERE `TABLE_SCHEMA`=? and `TABLE_NAME`=?" - return sql, args + return db.HasRecords(ctx, sql, db.uri.DBName, tableName) } -func (db *mysql) GetColumns(tableName string) ([]string, map[string]*core.Column, error) { - args := []interface{}{db.DbName, tableName} +func (db *mysql) AddColumnSQL(tableName string, col *schemas.Column) string { + quoter := db.dialect.Quoter() + sql := fmt.Sprintf("ALTER TABLE %v ADD %v", quoter.Quote(tableName), + db.String(col)) + if len(col.Comment) > 0 { + sql += " COMMENT '" + col.Comment + "'" + } + return sql +} + +func (db *mysql) GetColumns(ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) { + args := []interface{}{db.uri.DBName, tableName} s := "SELECT `COLUMN_NAME`, `IS_NULLABLE`, `COLUMN_DEFAULT`, `COLUMN_TYPE`," + " `COLUMN_KEY`, `EXTRA`,`COLUMN_COMMENT` FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ?" - db.LogSQL(s, args) - rows, err := db.DB().Query(s, args...) + rows, err := db.DB().QueryContext(ctx, s, args...) 
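// [Editor's sketch, not part of the upstream patch.] mysql now overrides
// AddColumnSQL so a column comment survives "ALTER TABLE ... ADD"; with a
// hypothetical column whose Comment field is set, the generated DDL looks
// roughly like:
//
//	ALTER TABLE `user` ADD `num_stars` INT NOT NULL COMMENT 'cached star count'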
if err != nil { return nil, nil, err } defer rows.Close() - cols := make(map[string]*core.Column) + cols := make(map[string]*schemas.Column) colSeq := make([]string, 0) for rows.Next() { - col := new(core.Column) + col := new(schemas.Column) col.Indexes = make(map[string]int) var columnName, isNullable, colType, colKey, extra, comment string @@ -356,7 +343,7 @@ func (db *mysql) GetColumns(tableName string) ([]string, map[string]*core.Column var len1, len2 int if len(cts) == 2 { idx := strings.Index(cts[1], ")") - if colType == core.Enum && cts[1][0] == '\'' { // enum + if colType == schemas.Enum && cts[1][0] == '\'' { // enum options := strings.Split(cts[1][0:idx], ",") col.EnumOptions = make(map[string]int) for k, v := range options { @@ -364,7 +351,7 @@ func (db *mysql) GetColumns(tableName string) ([]string, map[string]*core.Column v = strings.Trim(v, "'") col.EnumOptions[v] = k } - } else if colType == core.Set && cts[1][0] == '\'' { + } else if colType == schemas.Set && cts[1][0] == '\'' { options := strings.Split(cts[1][0:idx], ",") col.SetOptions = make(map[string]int) for k, v := range options { @@ -394,8 +381,8 @@ func (db *mysql) GetColumns(tableName string) ([]string, map[string]*core.Column } col.Length = len1 col.Length2 = len2 - if _, ok := core.SqlTypes[colType]; ok { - col.SQLType = core.SQLType{Name: colType, DefaultLength: len1, DefaultLength2: len2} + if _, ok := schemas.SqlTypes[colType]; ok { + col.SQLType = schemas.SQLType{Name: colType, DefaultLength: len1, DefaultLength2: len2} } else { return nil, nil, fmt.Errorf("Unknown colType %v", colType) } @@ -424,48 +411,65 @@ func (db *mysql) GetColumns(tableName string) ([]string, map[string]*core.Column return colSeq, cols, nil } -func (db *mysql) GetTables() ([]*core.Table, error) { - args := []interface{}{db.DbName} - s := "SELECT `TABLE_NAME`, `ENGINE`, `TABLE_ROWS`, `AUTO_INCREMENT`, `TABLE_COMMENT` from " + +func (db *mysql) GetTables(ctx context.Context) ([]*schemas.Table, error) { + args := []interface{}{db.uri.DBName} + s := "SELECT `TABLE_NAME`, `ENGINE`, `AUTO_INCREMENT`, `TABLE_COMMENT` from " + "`INFORMATION_SCHEMA`.`TABLES` WHERE `TABLE_SCHEMA`=? AND (`ENGINE`='MyISAM' OR `ENGINE` = 'InnoDB' OR `ENGINE` = 'TokuDB')" - db.LogSQL(s, args) - rows, err := db.DB().Query(s, args...) + rows, err := db.DB().QueryContext(ctx, s, args...) 
if err != nil { return nil, err } defer rows.Close() - tables := make([]*core.Table, 0) + tables := make([]*schemas.Table, 0) for rows.Next() { - table := core.NewEmptyTable() - var name, engine, tableRows, comment string - var autoIncr *string - err = rows.Scan(&name, &engine, &tableRows, &autoIncr, &comment) + table := schemas.NewEmptyTable() + var name, engine string + var autoIncr, comment *string + err = rows.Scan(&name, &engine, &autoIncr, &comment) if err != nil { return nil, err } table.Name = name - table.Comment = comment + if comment != nil { + table.Comment = *comment + } table.StoreEngine = engine tables = append(tables, table) } return tables, nil } -func (db *mysql) GetIndexes(tableName string) (map[string]*core.Index, error) { - args := []interface{}{db.DbName, tableName} +func (db *mysql) SetQuotePolicy(quotePolicy QuotePolicy) { + switch quotePolicy { + case QuotePolicyNone: + var q = mysqlQuoter + q.IsReserved = schemas.AlwaysNoReserve + db.quoter = q + case QuotePolicyReserved: + var q = mysqlQuoter + q.IsReserved = db.IsReserved + db.quoter = q + case QuotePolicyAlways: + fallthrough + default: + db.quoter = mysqlQuoter + } +} + +func (db *mysql) GetIndexes(ctx context.Context, tableName string) (map[string]*schemas.Index, error) { + args := []interface{}{db.uri.DBName, tableName} s := "SELECT `INDEX_NAME`, `NON_UNIQUE`, `COLUMN_NAME` FROM `INFORMATION_SCHEMA`.`STATISTICS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ?" - db.LogSQL(s, args) - rows, err := db.DB().Query(s, args...) + rows, err := db.DB().QueryContext(ctx, s, args...) if err != nil { return nil, err } defer rows.Close() - indexes := make(map[string]*core.Index, 0) + indexes := make(map[string]*schemas.Index, 0) for rows.Next() { var indexType int var indexName, colName, nonUnique string @@ -479,9 +483,9 @@ func (db *mysql) GetIndexes(tableName string) (map[string]*core.Index, error) { } if "YES" == nonUnique || nonUnique == "1" { - indexType = core.IndexType + indexType = schemas.IndexType } else { - indexType = core.UniqueType + indexType = schemas.UniqueType } colName = strings.Trim(colName, "` ") @@ -491,10 +495,10 @@ func (db *mysql) GetIndexes(tableName string) (map[string]*core.Index, error) { isRegular = true } - var index *core.Index + var index *schemas.Index var ok bool if index, ok = indexes[indexName]; !ok { - index = new(core.Index) + index = new(schemas.Index) index.IsRegular = isRegular index.Type = indexType index.Name = indexName @@ -505,14 +509,15 @@ func (db *mysql) GetIndexes(tableName string) (map[string]*core.Index, error) { return indexes, nil } -func (db *mysql) CreateTableSql(table *core.Table, tableName, storeEngine, charset string) string { - var sql string - sql = "CREATE TABLE IF NOT EXISTS " +func (db *mysql) CreateTableSQL(table *schemas.Table, tableName string) ([]string, bool) { + var sql = "CREATE TABLE IF NOT EXISTS " if tableName == "" { tableName = table.Name } - sql += db.Quote(tableName) + quoter := db.Quoter() + + sql += quoter.Quote(tableName) sql += " (" if len(table.ColumnsSeq()) > 0 { @@ -521,9 +526,9 @@ func (db *mysql) CreateTableSql(table *core.Table, tableName, storeEngine, chars for _, colName := range table.ColumnsSeq() { col := table.GetColumn(colName) if col.IsPrimaryKey && len(pkList) == 1 { - sql += col.String(db) + sql += db.String(col) } else { - sql += col.StringNoPk(db) + sql += db.StringNoPk(col) } sql = strings.TrimSpace(sql) if len(col.Comment) > 0 { @@ -534,7 +539,7 @@ func (db *mysql) CreateTableSql(table *core.Table, tableName, storeEngine, 
chars if len(pkList) > 1 { sql += "PRIMARY KEY ( " - sql += db.Quote(strings.Join(pkList, db.Quote(","))) + sql += quoter.Join(pkList, ",") sql += " ), " } @@ -542,10 +547,11 @@ func (db *mysql) CreateTableSql(table *core.Table, tableName, storeEngine, chars } sql += ")" - if storeEngine != "" { - sql += " ENGINE=" + storeEngine + if table.StoreEngine != "" { + sql += " ENGINE=" + table.StoreEngine } + var charset = table.Charset if len(charset) == 0 { charset = db.URI().Charset } @@ -556,18 +562,18 @@ func (db *mysql) CreateTableSql(table *core.Table, tableName, storeEngine, chars if db.rowFormat != "" { sql += " ROW_FORMAT=" + db.rowFormat } - return sql + return []string{sql}, true } -func (db *mysql) Filters() []core.Filter { - return []core.Filter{&core.IdFilter{}} +func (db *mysql) Filters() []Filter { + return []Filter{} } type mymysqlDriver struct { } -func (p *mymysqlDriver) Parse(driverName, dataSourceName string) (*core.Uri, error) { - db := &core.Uri{DbType: core.MYSQL} +func (p *mymysqlDriver) Parse(driverName, dataSourceName string) (*URI, error) { + uri := &URI{DBType: schemas.MYSQL} pd := strings.SplitN(dataSourceName, "*", 2) if len(pd) == 2 { @@ -576,9 +582,9 @@ func (p *mymysqlDriver) Parse(driverName, dataSourceName string) (*core.Uri, err if len(p) != 2 { return nil, errors.New("Wrong protocol part of URI") } - db.Proto = p[0] + uri.Proto = p[0] options := strings.Split(p[1], ",") - db.Raddr = options[0] + uri.Raddr = options[0] for _, o := range options[1:] { kv := strings.SplitN(o, "=", 2) var k, v string @@ -589,13 +595,13 @@ func (p *mymysqlDriver) Parse(driverName, dataSourceName string) (*core.Uri, err } switch k { case "laddr": - db.Laddr = v + uri.Laddr = v case "timeout": to, err := time.ParseDuration(v) if err != nil { return nil, err } - db.Timeout = to + uri.Timeout = to default: return nil, errors.New("Unknown option: " + k) } @@ -608,17 +614,17 @@ func (p *mymysqlDriver) Parse(driverName, dataSourceName string) (*core.Uri, err if len(dup) != 3 { return nil, errors.New("Wrong database part of URI") } - db.DbName = dup[0] - db.User = dup[1] - db.Passwd = dup[2] + uri.DBName = dup[0] + uri.User = dup[1] + uri.Passwd = dup[2] - return db, nil + return uri, nil } type mysqlDriver struct { } -func (p *mysqlDriver) Parse(driverName, dataSourceName string) (*core.Uri, error) { +func (p *mysqlDriver) Parse(driverName, dataSourceName string) (*URI, error) { dsnPattern := regexp.MustCompile( `^(?:(?P.*?)(?::(?P.*))?@)?` + // [user[:password]@] `(?:(?P[^\(]*)(?:\((?P[^\)]*)\))?)?` + // [net[(addr)]] @@ -628,12 +634,12 @@ func (p *mysqlDriver) Parse(driverName, dataSourceName string) (*core.Uri, error // tlsConfigRegister := make(map[string]*tls.Config) names := dsnPattern.SubexpNames() - uri := &core.Uri{DbType: core.MYSQL} + uri := &URI{DBType: schemas.MYSQL} for i, match := range matches { switch names[i] { case "dbname": - uri.DbName = match + uri.DBName = match case "params": if len(match) > 0 { kvs := strings.Split(match, "&") diff --git a/vendor/xorm.io/xorm/dialect_oracle.go b/vendor/xorm.io/xorm/dialects/oracle.go similarity index 83% rename from vendor/xorm.io/xorm/dialect_oracle.go rename to vendor/xorm.io/xorm/dialects/oracle.go index 15010ca5a..c48d32b97 100644 --- a/vendor/xorm.io/xorm/dialect_oracle.go +++ b/vendor/xorm.io/xorm/dialects/oracle.go @@ -2,16 +2,18 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
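// [Editor's sketch, not part of the upstream patch.] CreateTableSQL now
// returns ([]string, bool) and takes only the table plus an optional name
// override; the storage engine and charset travel on schemas.Table instead of
// being extra parameters. A hypothetical caller:
//
//	t := schemas.NewEmptyTable()
//	t.Name, t.StoreEngine, t.Charset = "action", "InnoDB", "utf8mb4"
//	stmts, _ := d.CreateTableSQL(t, "")
//	// mysql: stmts[0] gains " ENGINE=InnoDB DEFAULT CHARSET utf8mb4"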
-package xorm +package dialects import ( + "context" "errors" "fmt" "regexp" "strconv" "strings" - "xorm.io/core" + "xorm.io/xorm/core" + "xorm.io/xorm/schemas" ) var ( @@ -496,32 +498,35 @@ var ( "YEAR": true, "ZONE": true, } + + oracleQuoter = schemas.Quoter{'[', ']', schemas.AlwaysReserve} ) type oracle struct { - core.Base + Base } -func (db *oracle) Init(d *core.DB, uri *core.Uri, drivername, dataSourceName string) error { - return db.Base.Init(d, db, uri, drivername, dataSourceName) +func (db *oracle) Init(d *core.DB, uri *URI) error { + db.quoter = oracleQuoter + return db.Base.Init(d, db, uri) } -func (db *oracle) SqlType(c *core.Column) string { +func (db *oracle) SQLType(c *schemas.Column) string { var res string switch t := c.SQLType.Name; t { - case core.Bit, core.TinyInt, core.SmallInt, core.MediumInt, core.Int, core.Integer, core.BigInt, core.Bool, core.Serial, core.BigSerial: + case schemas.Bit, schemas.TinyInt, schemas.SmallInt, schemas.MediumInt, schemas.Int, schemas.Integer, schemas.BigInt, schemas.Bool, schemas.Serial, schemas.BigSerial: res = "NUMBER" - case core.Binary, core.VarBinary, core.Blob, core.TinyBlob, core.MediumBlob, core.LongBlob, core.Bytea: - return core.Blob - case core.Time, core.DateTime, core.TimeStamp: - res = core.TimeStamp - case core.TimeStampz: + case schemas.Binary, schemas.VarBinary, schemas.Blob, schemas.TinyBlob, schemas.MediumBlob, schemas.LongBlob, schemas.Bytea: + return schemas.Blob + case schemas.Time, schemas.DateTime, schemas.TimeStamp: + res = schemas.TimeStamp + case schemas.TimeStampz: res = "TIMESTAMP WITH TIME ZONE" - case core.Float, core.Double, core.Numeric, core.Decimal: + case schemas.Float, schemas.Double, schemas.Numeric, schemas.Decimal: res = "NUMBER" - case core.Text, core.MediumText, core.LongText, core.Json: + case schemas.Text, schemas.MediumText, schemas.LongText, schemas.Json: res = "CLOB" - case core.Char, core.Varchar, core.TinyText: + case schemas.Char, schemas.Varchar, schemas.TinyText: res = "VARCHAR2" default: res = t @@ -542,47 +547,23 @@ func (db *oracle) AutoIncrStr() string { return "AUTO_INCREMENT" } -func (db *oracle) SupportInsertMany() bool { - return true -} - func (db *oracle) IsReserved(name string) bool { - _, ok := oracleReservedWords[name] + _, ok := oracleReservedWords[strings.ToUpper(name)] return ok } -func (db *oracle) Quote(name string) string { - return "[" + name + "]" -} - -func (db *oracle) SupportEngine() bool { - return false +func (db *oracle) DropTableSQL(tableName string) (string, bool) { + return fmt.Sprintf("DROP TABLE `%s`", tableName), false } -func (db *oracle) SupportCharset() bool { - return false -} - -func (db *oracle) SupportDropIfExists() bool { - return false -} - -func (db *oracle) IndexOnTable() bool { - return false -} - -func (db *oracle) DropTableSql(tableName string) string { - return fmt.Sprintf("DROP TABLE `%s`", tableName) -} - -func (db *oracle) CreateTableSql(table *core.Table, tableName, storeEngine, charset string) string { - var sql string - sql = "CREATE TABLE " +func (db *oracle) CreateTableSQL(table *schemas.Table, tableName string) ([]string, bool) { + var sql = "CREATE TABLE " if tableName == "" { tableName = table.Name } - sql += db.Quote(tableName) + " (" + quoter := db.Quoter() + sql += quoter.Quote(tableName) + " (" pkList := table.PrimaryKeys @@ -591,7 +572,7 @@ func (db *oracle) CreateTableSql(table *core.Table, tableName, storeEngine, char /*if col.IsPrimaryKey && len(pkList) == 1 { sql += col.String(b.dialect) } else {*/ - sql += 
col.StringNoPk(db) + sql += db.StringNoPk(col) // } sql = strings.TrimSpace(sql) sql += ", " @@ -599,97 +580,63 @@ func (db *oracle) CreateTableSql(table *core.Table, tableName, storeEngine, char if len(pkList) > 0 { sql += "PRIMARY KEY ( " - sql += db.Quote(strings.Join(pkList, db.Quote(","))) + sql += quoter.Join(pkList, ",") sql += " ), " } sql = sql[:len(sql)-2] + ")" - if db.SupportEngine() && storeEngine != "" { - sql += " ENGINE=" + storeEngine - } - if db.SupportCharset() { - if len(charset) == 0 { - charset = db.URI().Charset - } - if len(charset) > 0 { - sql += " DEFAULT CHARSET " + charset - } + return []string{sql}, false +} + +func (db *oracle) SetQuotePolicy(quotePolicy QuotePolicy) { + switch quotePolicy { + case QuotePolicyNone: + var q = oracleQuoter + q.IsReserved = schemas.AlwaysNoReserve + db.quoter = q + case QuotePolicyReserved: + var q = oracleQuoter + q.IsReserved = db.IsReserved + db.quoter = q + case QuotePolicyAlways: + fallthrough + default: + db.quoter = oracleQuoter } - return sql } -func (db *oracle) IndexCheckSql(tableName, idxName string) (string, []interface{}) { +func (db *oracle) IndexCheckSQL(tableName, idxName string) (string, []interface{}) { args := []interface{}{tableName, idxName} return `SELECT INDEX_NAME FROM USER_INDEXES ` + `WHERE TABLE_NAME = :1 AND INDEX_NAME = :2`, args } -func (db *oracle) TableCheckSql(tableName string) (string, []interface{}) { - args := []interface{}{tableName} - return `SELECT table_name FROM user_tables WHERE table_name = :1`, args +func (db *oracle) IsTableExist(ctx context.Context, tableName string) (bool, error) { + return db.HasRecords(ctx, `SELECT table_name FROM user_tables WHERE table_name = :1`, tableName) } -func (db *oracle) MustDropTable(tableName string) error { - sql, args := db.TableCheckSql(tableName) - db.LogSQL(sql, args) - - rows, err := db.DB().Query(sql, args...) - if err != nil { - return err - } - defer rows.Close() - - if !rows.Next() { - return nil - } - - sql = "Drop Table \"" + tableName + "\"" - db.LogSQL(sql, args) - - _, err = db.DB().Exec(sql) - return err -} - -/*func (db *oracle) ColumnCheckSql(tableName, colName string) (string, []interface{}) { - args := []interface{}{strings.ToUpper(tableName), strings.ToUpper(colName)} - return "SELECT column_name FROM USER_TAB_COLUMNS WHERE table_name = ?" + - " AND column_name = ?", args -}*/ - -func (db *oracle) IsColumnExist(tableName, colName string) (bool, error) { +func (db *oracle) IsColumnExist(ctx context.Context, tableName, colName string) (bool, error) { args := []interface{}{tableName, colName} query := "SELECT column_name FROM USER_TAB_COLUMNS WHERE table_name = :1" + " AND column_name = :2" - db.LogSQL(query, args) - - rows, err := db.DB().Query(query, args...) - if err != nil { - return false, err - } - defer rows.Close() - - if rows.Next() { - return true, nil - } - return false, nil + return db.HasRecords(ctx, query, args...) } -func (db *oracle) GetColumns(tableName string) ([]string, map[string]*core.Column, error) { +func (db *oracle) GetColumns(ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) { args := []interface{}{tableName} s := "SELECT column_name,data_default,data_type,data_length,data_precision,data_scale," + "nullable FROM USER_TAB_COLUMNS WHERE table_name = :1" - db.LogSQL(s, args) - rows, err := db.DB().Query(s, args...) + rows, err := db.DB().QueryContext(ctx, s, args...) 
if err != nil { return nil, nil, err } defer rows.Close() - cols := make(map[string]*core.Column) + cols := make(map[string]*schemas.Column) colSeq := make([]string, 0) for rows.Next() { - col := new(core.Column) + col := new(schemas.Column) col.Indexes = make(map[string]int) var colName, colDefault, nullable, dataType, dataPrecision, dataScale *string @@ -731,30 +678,30 @@ func (db *oracle) GetColumns(tableName string) ([]string, map[string]*core.Colum switch dt { case "VARCHAR2": - col.SQLType = core.SQLType{Name: core.Varchar, DefaultLength: len1, DefaultLength2: len2} + col.SQLType = schemas.SQLType{Name: schemas.Varchar, DefaultLength: len1, DefaultLength2: len2} case "NVARCHAR2": - col.SQLType = core.SQLType{Name: core.NVarchar, DefaultLength: len1, DefaultLength2: len2} + col.SQLType = schemas.SQLType{Name: schemas.NVarchar, DefaultLength: len1, DefaultLength2: len2} case "TIMESTAMP WITH TIME ZONE": - col.SQLType = core.SQLType{Name: core.TimeStampz, DefaultLength: 0, DefaultLength2: 0} + col.SQLType = schemas.SQLType{Name: schemas.TimeStampz, DefaultLength: 0, DefaultLength2: 0} case "NUMBER": - col.SQLType = core.SQLType{Name: core.Double, DefaultLength: len1, DefaultLength2: len2} + col.SQLType = schemas.SQLType{Name: schemas.Double, DefaultLength: len1, DefaultLength2: len2} case "LONG", "LONG RAW": - col.SQLType = core.SQLType{Name: core.Text, DefaultLength: 0, DefaultLength2: 0} + col.SQLType = schemas.SQLType{Name: schemas.Text, DefaultLength: 0, DefaultLength2: 0} case "RAW": - col.SQLType = core.SQLType{Name: core.Binary, DefaultLength: 0, DefaultLength2: 0} + col.SQLType = schemas.SQLType{Name: schemas.Binary, DefaultLength: 0, DefaultLength2: 0} case "ROWID": - col.SQLType = core.SQLType{Name: core.Varchar, DefaultLength: 18, DefaultLength2: 0} + col.SQLType = schemas.SQLType{Name: schemas.Varchar, DefaultLength: 18, DefaultLength2: 0} case "AQ$_SUBSCRIBERS": ignore = true default: - col.SQLType = core.SQLType{Name: strings.ToUpper(dt), DefaultLength: len1, DefaultLength2: len2} + col.SQLType = schemas.SQLType{Name: strings.ToUpper(dt), DefaultLength: len1, DefaultLength2: len2} } if ignore { continue } - if _, ok := core.SqlTypes[col.SQLType.Name]; !ok { + if _, ok := schemas.SqlTypes[col.SQLType.Name]; !ok { return nil, nil, fmt.Errorf("Unknown colType %v %v", *dataType, col.SQLType) } @@ -772,20 +719,19 @@ func (db *oracle) GetColumns(tableName string) ([]string, map[string]*core.Colum return colSeq, cols, nil } -func (db *oracle) GetTables() ([]*core.Table, error) { +func (db *oracle) GetTables(ctx context.Context) ([]*schemas.Table, error) { args := []interface{}{} s := "SELECT table_name FROM user_tables" - db.LogSQL(s, args) - rows, err := db.DB().Query(s, args...) + rows, err := db.DB().QueryContext(ctx, s, args...) 
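// [Editor's note, not part of the upstream patch.] The recurring change in
// these dialect methods, dropping db.LogSQL(s, args) and switching
// db.DB().Query(...) to db.DB().QueryContext(ctx, ...), is the same refactor
// each time: SQL logging now happens once inside core.DB (the
// BeforeSQL/AfterSQL hooks in core/db.go above), so dialects only need to
// pass the context through:
//
//	rows, err := db.DB().QueryContext(ctx, s, args...) // hooks fire inside core.DB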
if err != nil { return nil, err } defer rows.Close() - tables := make([]*core.Table, 0) + tables := make([]*schemas.Table, 0) for rows.Next() { - table := core.NewEmptyTable() + table := schemas.NewEmptyTable() err = rows.Scan(&table.Name) if err != nil { return nil, err @@ -796,19 +742,18 @@ func (db *oracle) GetTables() ([]*core.Table, error) { return tables, nil } -func (db *oracle) GetIndexes(tableName string) (map[string]*core.Index, error) { +func (db *oracle) GetIndexes(ctx context.Context, tableName string) (map[string]*schemas.Index, error) { args := []interface{}{tableName} s := "SELECT t.column_name,i.uniqueness,i.index_name FROM user_ind_columns t,user_indexes i " + "WHERE t.index_name = i.index_name and t.table_name = i.table_name and t.table_name =:1" - db.LogSQL(s, args) - rows, err := db.DB().Query(s, args...) + rows, err := db.DB().QueryContext(ctx, s, args...) if err != nil { return nil, err } defer rows.Close() - indexes := make(map[string]*core.Index, 0) + indexes := make(map[string]*schemas.Index, 0) for rows.Next() { var indexType int var indexName, colName, uniqueness string @@ -827,15 +772,15 @@ func (db *oracle) GetIndexes(tableName string) (map[string]*core.Index, error) { } if uniqueness == "UNIQUE" { - indexType = core.UniqueType + indexType = schemas.UniqueType } else { - indexType = core.IndexType + indexType = schemas.IndexType } - var index *core.Index + var index *schemas.Index var ok bool if index, ok = indexes[indexName]; !ok { - index = new(core.Index) + index = new(schemas.Index) index.Type = indexType index.Name = indexName index.IsRegular = isRegular @@ -846,15 +791,17 @@ func (db *oracle) GetIndexes(tableName string) (map[string]*core.Index, error) { return indexes, nil } -func (db *oracle) Filters() []core.Filter { - return []core.Filter{&core.QuoteFilter{}, &core.SeqFilter{Prefix: ":", Start: 1}, &core.IdFilter{}} +func (db *oracle) Filters() []Filter { + return []Filter{ + &SeqFilter{Prefix: ":", Start: 1}, + } } type goracleDriver struct { } -func (cfg *goracleDriver) Parse(driverName, dataSourceName string) (*core.Uri, error) { - db := &core.Uri{DbType: core.ORACLE} +func (cfg *goracleDriver) Parse(driverName, dataSourceName string) (*URI, error) { + db := &URI{DBType: schemas.ORACLE} dsnPattern := regexp.MustCompile( `^(?:(?P.*?)(?::(?P.*))?@)?` + // [user[:password]@] `(?:(?P[^\(]*)(?:\((?P[^\)]*)\))?)?` + // [net[(addr)]] @@ -867,10 +814,10 @@ func (cfg *goracleDriver) Parse(driverName, dataSourceName string) (*core.Uri, e for i, match := range matches { switch names[i] { case "dbname": - db.DbName = match + db.DBName = match } } - if db.DbName == "" { + if db.DBName == "" { return nil, errors.New("dbname is empty") } return db, nil @@ -881,8 +828,8 @@ type oci8Driver struct { // dataSourceName=user/password@ipv4:port/dbname // dataSourceName=user/password@[ipv6]:port/dbname -func (p *oci8Driver) Parse(driverName, dataSourceName string) (*core.Uri, error) { - db := &core.Uri{DbType: core.ORACLE} +func (p *oci8Driver) Parse(driverName, dataSourceName string) (*URI, error) { + db := &URI{DBType: schemas.ORACLE} dsnPattern := regexp.MustCompile( `^(?P.*)\/(?P.*)@` + // user:password@ `(?P.*)` + // ip:port @@ -892,10 +839,10 @@ func (p *oci8Driver) Parse(driverName, dataSourceName string) (*core.Uri, error) for i, match := range matches { switch names[i] { case "dbname": - db.DbName = match + db.DBName = match } } - if db.DbName == "" { + if db.DBName == "" { return nil, errors.New("dbname is empty") } return db, nil diff --git 
a/vendor/xorm.io/xorm/pg_reserved.txt b/vendor/xorm.io/xorm/dialects/pg_reserved.txt similarity index 100% rename from vendor/xorm.io/xorm/pg_reserved.txt rename to vendor/xorm.io/xorm/dialects/pg_reserved.txt diff --git a/vendor/xorm.io/xorm/dialect_postgres.go b/vendor/xorm.io/xorm/dialects/postgres.go similarity index 84% rename from vendor/xorm.io/xorm/dialect_postgres.go rename to vendor/xorm.io/xorm/dialects/postgres.go index ac6d4fe89..0a851fe26 100644 --- a/vendor/xorm.io/xorm/dialect_postgres.go +++ b/vendor/xorm.io/xorm/dialects/postgres.go @@ -2,16 +2,18 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package xorm +package dialects import ( + "context" "errors" "fmt" "net/url" "strconv" "strings" - "xorm.io/core" + "xorm.io/xorm/core" + "xorm.io/xorm/schemas" ) // from http://www.postgresql.org/docs/current/static/sql-keywords-appendix.html @@ -765,71 +767,104 @@ var ( "ZONE": true, } - // DefaultPostgresSchema default postgres schema - DefaultPostgresSchema = "public" + postgresQuoter = schemas.Quoter{'"', '"', schemas.AlwaysReserve} ) const postgresPublicSchema = "public" type postgres struct { - core.Base + Base } -func (db *postgres) Init(d *core.DB, uri *core.Uri, drivername, dataSourceName string) error { - err := db.Base.Init(d, db, uri, drivername, dataSourceName) +func (db *postgres) Init(d *core.DB, uri *URI) error { + db.quoter = postgresQuoter + err := db.Base.Init(d, db, uri) if err != nil { return err } - if db.Schema == "" { - db.Schema = DefaultPostgresSchema + if db.uri.Schema == "" { + db.uri.Schema = postgresPublicSchema } return nil } -func (db *postgres) SqlType(c *core.Column) string { +func (db *postgres) needQuote(name string) bool { + if db.IsReserved(name) { + return true + } + for _, c := range name { + if c >= 'A' && c <= 'Z' { + return true + } + } + return false +} + +func (db *postgres) SetQuotePolicy(quotePolicy QuotePolicy) { + switch quotePolicy { + case QuotePolicyNone: + var q = postgresQuoter + q.IsReserved = schemas.AlwaysNoReserve + db.quoter = q + case QuotePolicyReserved: + var q = postgresQuoter + q.IsReserved = db.needQuote + db.quoter = q + case QuotePolicyAlways: + fallthrough + default: + db.quoter = postgresQuoter + } +} + +func (db *postgres) DefaultSchema() string { + return postgresPublicSchema +} + +func (db *postgres) SQLType(c *schemas.Column) string { var res string switch t := c.SQLType.Name; t { - case core.TinyInt: - res = core.SmallInt + case schemas.TinyInt: + res = schemas.SmallInt return res - case core.Bit: - res = core.Boolean + case schemas.Bit: + res = schemas.Boolean return res - case core.MediumInt, core.Int, core.Integer: + case schemas.MediumInt, schemas.Int, schemas.Integer: if c.IsAutoIncrement { - return core.Serial + return schemas.Serial } - return core.Integer - case core.BigInt: + return schemas.Integer + case schemas.BigInt: if c.IsAutoIncrement { - return core.BigSerial + return schemas.BigSerial } - return core.BigInt - case core.Serial, core.BigSerial: + return schemas.BigInt + case schemas.Serial, schemas.BigSerial: c.IsAutoIncrement = true c.Nullable = false res = t - case core.Binary, core.VarBinary: - return core.Bytea - case core.DateTime: - res = core.TimeStamp - case core.TimeStampz: + case schemas.Binary, schemas.VarBinary: + return schemas.Bytea + case schemas.DateTime: + res = schemas.TimeStamp + case schemas.TimeStampz: return "timestamp with time zone" - case core.Float: - res = core.Real - case core.TinyText, core.MediumText, 
core.LongText: - res = core.Text - case core.NVarchar: - res = core.Varchar - case core.Uuid: - return core.Uuid - case core.Blob, core.TinyBlob, core.MediumBlob, core.LongBlob: - return core.Bytea - case core.Double: + case schemas.Float: + res = schemas.Real + case schemas.TinyText, schemas.MediumText, schemas.LongText: + res = schemas.Text + case schemas.NVarchar: + res = schemas.Varchar + case schemas.Uuid: + return schemas.Uuid + case schemas.Blob, schemas.TinyBlob, schemas.MediumBlob, schemas.LongBlob: + return schemas.Bytea + case schemas.Double: return "DOUBLE PRECISION" default: if c.IsAutoIncrement { - return core.Serial + return schemas.Serial } res = t } @@ -849,68 +884,83 @@ func (db *postgres) SqlType(c *core.Column) string { return res } -func (db *postgres) SupportInsertMany() bool { - return true -} - func (db *postgres) IsReserved(name string) bool { - _, ok := postgresReservedWords[name] + _, ok := postgresReservedWords[strings.ToUpper(name)] return ok } -func (db *postgres) Quote(name string) string { - name = strings.Replace(name, ".", `"."`, -1) - return "\"" + name + "\"" -} - func (db *postgres) AutoIncrStr() string { return "" } -func (db *postgres) SupportEngine() bool { - return false -} +func (db *postgres) CreateTableSQL(table *schemas.Table, tableName string) ([]string, bool) { + var sql string + sql = "CREATE TABLE IF NOT EXISTS " + if tableName == "" { + tableName = table.Name + } -func (db *postgres) SupportCharset() bool { - return false -} + quoter := db.Quoter() + sql += quoter.Quote(tableName) + sql += " (" -func (db *postgres) IndexOnTable() bool { - return false + if len(table.ColumnsSeq()) > 0 { + pkList := table.PrimaryKeys + + for _, colName := range table.ColumnsSeq() { + col := table.GetColumn(colName) + if col.IsPrimaryKey && len(pkList) == 1 { + sql += db.String(col) + } else { + sql += db.StringNoPk(col) + } + sql = strings.TrimSpace(sql) + sql += ", " + } + + if len(pkList) > 1 { + sql += "PRIMARY KEY ( " + sql += quoter.Join(pkList, ",") + sql += " ), " + } + + sql = sql[:len(sql)-2] + } + sql += ")" + + return []string{sql}, true } -func (db *postgres) IndexCheckSql(tableName, idxName string) (string, []interface{}) { - if len(db.Schema) == 0 { +func (db *postgres) IndexCheckSQL(tableName, idxName string) (string, []interface{}) { + if len(db.uri.Schema) == 0 { args := []interface{}{tableName, idxName} return `SELECT indexname FROM pg_indexes WHERE tablename = ? AND indexname = ?`, args } - args := []interface{}{db.Schema, tableName, idxName} + args := []interface{}{db.uri.Schema, tableName, idxName} return `SELECT indexname FROM pg_indexes ` + `WHERE schemaname = ? AND tablename = ? AND indexname = ?`, args } -func (db *postgres) TableCheckSql(tableName string) (string, []interface{}) { - if len(db.Schema) == 0 { - args := []interface{}{tableName} - return `SELECT tablename FROM pg_tables WHERE tablename = ?`, args +func (db *postgres) IsTableExist(ctx context.Context, tableName string) (bool, error) { + if len(db.uri.Schema) == 0 { + return db.HasRecords(ctx, `SELECT tablename FROM pg_tables WHERE tablename = $1`, tableName) } - args := []interface{}{db.Schema, tableName} - return `SELECT tablename FROM pg_tables WHERE schemaname = ? 
AND tablename = ?`, args + return db.HasRecords(ctx, `SELECT tablename FROM pg_tables WHERE schemaname = $1 AND tablename = $2`, + db.uri.Schema, tableName) } -func (db *postgres) ModifyColumnSql(tableName string, col *core.Column) string { - if len(db.Schema) == 0 || strings.Contains(tableName, ".") { +func (db *postgres) ModifyColumnSQL(tableName string, col *schemas.Column) string { + if len(db.uri.Schema) == 0 || strings.Contains(tableName, ".") { return fmt.Sprintf("alter table %s ALTER COLUMN %s TYPE %s", - tableName, col.Name, db.SqlType(col)) + tableName, col.Name, db.SQLType(col)) } return fmt.Sprintf("alter table %s.%s ALTER COLUMN %s TYPE %s", - db.Schema, tableName, col.Name, db.SqlType(col)) + db.uri.Schema, tableName, col.Name, db.SQLType(col)) } -func (db *postgres) DropIndexSql(tableName string, index *core.Index) string { - quote := db.Quote +func (db *postgres) DropIndexSQL(tableName string, index *schemas.Index) string { idxName := index.Name tableParts := strings.Split(strings.Replace(tableName, `"`, "", -1), ".") @@ -918,30 +968,29 @@ func (db *postgres) DropIndexSql(tableName string, index *core.Index) string { if !strings.HasPrefix(idxName, "UQE_") && !strings.HasPrefix(idxName, "IDX_") { - if index.Type == core.UniqueType { + if index.Type == schemas.UniqueType { idxName = fmt.Sprintf("UQE_%v_%v", tableName, index.Name) } else { idxName = fmt.Sprintf("IDX_%v_%v", tableName, index.Name) } } - if db.Uri.Schema != "" { - idxName = db.Uri.Schema + "." + idxName + if db.uri.Schema != "" { + idxName = db.uri.Schema + "." + idxName } - return fmt.Sprintf("DROP INDEX %v", quote(idxName)) + return fmt.Sprintf("DROP INDEX %v", db.Quoter().Quote(idxName)) } -func (db *postgres) IsColumnExist(tableName, colName string) (bool, error) { - args := []interface{}{db.Schema, tableName, colName} +func (db *postgres) IsColumnExist(ctx context.Context, tableName, colName string) (bool, error) { + args := []interface{}{db.uri.Schema, tableName, colName} query := "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = $1 AND table_name = $2" + " AND column_name = $3" - if len(db.Schema) == 0 { + if len(db.uri.Schema) == 0 { args = []interface{}{tableName, colName} query = "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = $1" + " AND column_name = $2" } - db.LogSQL(query, args) - rows, err := db.DB().Query(query, args...) + rows, err := db.DB().QueryContext(ctx, query, args...) 
if err != nil { return false, err } @@ -950,8 +999,8 @@ func (db *postgres) IsColumnExist(tableName, colName string) (bool, error) { return rows.Next(), nil } -func (db *postgres) GetColumns(tableName string) ([]string, map[string]*core.Column, error) { - args := []interface{}{tableName} +func (db *postgres) GetColumns(ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) { + args := []interface{}{db.uri.Schema, tableName, db.uri.Schema} s := `SELECT column_name, column_default, is_nullable, data_type, character_maximum_length, CASE WHEN p.contype = 'p' THEN true ELSE false END AS primarykey, CASE WHEN p.contype = 'u' THEN true ELSE false END AS uniquekey @@ -962,28 +1011,19 @@ FROM pg_attribute f LEFT JOIN pg_constraint p ON p.conrelid = c.oid AND f.attnum = ANY (p.conkey) LEFT JOIN pg_class AS g ON p.confrelid = g.oid LEFT JOIN INFORMATION_SCHEMA.COLUMNS s ON s.column_name=f.attname AND c.relname=s.table_name -WHERE c.relkind = 'r'::char AND c.relname = $1%s AND f.attnum > 0 ORDER BY f.attnum;` - - var f string - if len(db.Schema) != 0 { - args = append(args, db.Schema) - f = " AND s.table_schema = $2" - } - s = fmt.Sprintf(s, f) +WHERE n.nspname= $1 AND c.relkind = 'r'::char AND c.relname = $2 AND s.table_schema = $3 AND f.attnum > 0 ORDER BY f.attnum;` - db.LogSQL(s, args) - - rows, err := db.DB().Query(s, args...) + rows, err := db.DB().QueryContext(ctx, s, args...) if err != nil { return nil, nil, err } defer rows.Close() - cols := make(map[string]*core.Column) + cols := make(map[string]*schemas.Column) colSeq := make([]string, 0) for rows.Next() { - col := new(core.Column) + col := new(schemas.Column) col.Indexes = make(map[string]int) var colName, isNullable, dataType string @@ -994,7 +1034,6 @@ WHERE c.relkind = 'r'::char AND c.relname = $1%s AND f.attnum > 0 ORDER BY f.att return nil, nil, err } - // fmt.Println(args, colName, isNullable, dataType, maxLenStr, colDefault, isPK, isUnique) var maxLen int if maxLenStr != nil { maxLen, err = strconv.Atoi(*maxLenStr) @@ -1006,10 +1045,27 @@ WHERE c.relkind = 'r'::char AND c.relname = $1%s AND f.attnum > 0 ORDER BY f.att col.Name = strings.Trim(colName, `" `) if colDefault != nil { - col.Default = *colDefault + var theDefault = *colDefault + // cockroach has type with the default value with ::: + // and postgres with ::, we should remove them before store them + idx := strings.Index(theDefault, ":::") + if idx == -1 { + idx = strings.Index(theDefault, "::") + } + if idx > -1 { + theDefault = theDefault[:idx] + } + + if strings.HasSuffix(theDefault, "+00:00'") { + theDefault = theDefault[:len(theDefault)-7] + "'" + } + + col.Default = theDefault col.DefaultIsEmpty = false if strings.HasPrefix(col.Default, "nextval(") { col.IsAutoIncrement = true + col.Default = "" + col.DefaultIsEmpty = true } } else { col.DefaultIsEmpty = true @@ -1021,26 +1077,37 @@ WHERE c.relkind = 'r'::char AND c.relname = $1%s AND f.attnum > 0 ORDER BY f.att col.Nullable = (isNullable == "YES") - switch dataType { - case "character varying", "character": - col.SQLType = core.SQLType{Name: core.Varchar, DefaultLength: 0, DefaultLength2: 0} + switch strings.ToLower(dataType) { + case "character varying", "character", "string": + col.SQLType = schemas.SQLType{Name: schemas.Varchar, DefaultLength: 0, DefaultLength2: 0} case "timestamp without time zone": - col.SQLType = core.SQLType{Name: core.DateTime, DefaultLength: 0, DefaultLength2: 0} + col.SQLType = schemas.SQLType{Name: schemas.DateTime, DefaultLength: 0, DefaultLength2: 0} case 
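Editor's note: the rewritten postgres GetColumns above strips the type-cast suffix that PostgreSQL ("::") and CockroachDB (":::") append to column defaults, and clears the default entirely for nextval(...) sequences. A minimal standalone sketch of that trimming step, with made-up sample values, looks like this:

package main

import (
	"fmt"
	"strings"
)

// trimCast mirrors the default-value clean-up in the new postgres.GetColumns:
// cut everything from the first ":::" (CockroachDB) or "::" (PostgreSQL) cast marker onward.
func trimCast(def string) string {
	idx := strings.Index(def, ":::")
	if idx == -1 {
		idx = strings.Index(def, "::")
	}
	if idx > -1 {
		def = def[:idx]
	}
	return def
}

func main() {
	// Hypothetical values as they come back from the catalog query.
	fmt.Println(trimCast("'pending'::character varying")) // 'pending'
	fmt.Println(trimCast("0:::INT8"))                     // 0
	fmt.Println(trimCast("42"))                           // 42 (unchanged)
}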
"timestamp with time zone": - col.SQLType = core.SQLType{Name: core.TimeStampz, DefaultLength: 0, DefaultLength2: 0} + col.SQLType = schemas.SQLType{Name: schemas.TimeStampz, DefaultLength: 0, DefaultLength2: 0} case "double precision": - col.SQLType = core.SQLType{Name: core.Double, DefaultLength: 0, DefaultLength2: 0} + col.SQLType = schemas.SQLType{Name: schemas.Double, DefaultLength: 0, DefaultLength2: 0} case "boolean": - col.SQLType = core.SQLType{Name: core.Bool, DefaultLength: 0, DefaultLength2: 0} + col.SQLType = schemas.SQLType{Name: schemas.Bool, DefaultLength: 0, DefaultLength2: 0} case "time without time zone": - col.SQLType = core.SQLType{Name: core.Time, DefaultLength: 0, DefaultLength2: 0} + col.SQLType = schemas.SQLType{Name: schemas.Time, DefaultLength: 0, DefaultLength2: 0} + case "bytes": + col.SQLType = schemas.SQLType{Name: schemas.Binary, DefaultLength: 0, DefaultLength2: 0} case "oid": - col.SQLType = core.SQLType{Name: core.BigInt, DefaultLength: 0, DefaultLength2: 0} + col.SQLType = schemas.SQLType{Name: schemas.BigInt, DefaultLength: 0, DefaultLength2: 0} + case "array": + col.SQLType = schemas.SQLType{Name: schemas.Array, DefaultLength: 0, DefaultLength2: 0} default: - col.SQLType = core.SQLType{Name: strings.ToUpper(dataType), DefaultLength: 0, DefaultLength2: 0} + startIdx := strings.Index(strings.ToLower(dataType), "string(") + if startIdx != -1 && strings.HasSuffix(dataType, ")") { + length := dataType[startIdx+8 : len(dataType)-1] + l, _ := strconv.Atoi(length) + col.SQLType = schemas.SQLType{Name: "STRING", DefaultLength: l, DefaultLength2: 0} + } else { + col.SQLType = schemas.SQLType{Name: strings.ToUpper(dataType), DefaultLength: 0, DefaultLength2: 0} + } } - if _, ok := core.SqlTypes[col.SQLType.Name]; !ok { - return nil, nil, fmt.Errorf("Unknown colType: %v", dataType) + if _, ok := schemas.SqlTypes[col.SQLType.Name]; !ok { + return nil, nil, fmt.Errorf("Unknown colType: %s - %s", dataType, col.SQLType.Name) } col.Length = maxLen @@ -1065,25 +1132,23 @@ WHERE c.relkind = 'r'::char AND c.relname = $1%s AND f.attnum > 0 ORDER BY f.att return colSeq, cols, nil } -func (db *postgres) GetTables() ([]*core.Table, error) { +func (db *postgres) GetTables(ctx context.Context) ([]*schemas.Table, error) { args := []interface{}{} s := "SELECT tablename FROM pg_tables" - if len(db.Schema) != 0 { - args = append(args, db.Schema) + if len(db.uri.Schema) != 0 { + args = append(args, db.uri.Schema) s = s + " WHERE schemaname = $1" } - db.LogSQL(s, args) - - rows, err := db.DB().Query(s, args...) + rows, err := db.DB().QueryContext(ctx, s, args...) if err != nil { return nil, err } defer rows.Close() - tables := make([]*core.Table, 0) + tables := make([]*schemas.Table, 0) for rows.Next() { - table := core.NewEmptyTable() + table := schemas.NewEmptyTable() var name string err = rows.Scan(&name) if err != nil { @@ -1106,22 +1171,21 @@ func getIndexColName(indexdef string) []string { return colNames } -func (db *postgres) GetIndexes(tableName string) (map[string]*core.Index, error) { +func (db *postgres) GetIndexes(ctx context.Context, tableName string) (map[string]*schemas.Index, error) { args := []interface{}{tableName} s := fmt.Sprintf("SELECT indexname, indexdef FROM pg_indexes WHERE tablename=$1") - if len(db.Schema) != 0 { - args = append(args, db.Schema) + if len(db.uri.Schema) != 0 { + args = append(args, db.uri.Schema) s = s + " AND schemaname=$2" } - db.LogSQL(s, args) - rows, err := db.DB().Query(s, args...) + rows, err := db.DB().QueryContext(ctx, s, args...) 
if err != nil { return nil, err } defer rows.Close() - indexes := make(map[string]*core.Index, 0) + indexes := make(map[string]*schemas.Index, 0) for rows.Next() { var indexType int var indexName, indexdef string @@ -1130,14 +1194,18 @@ func (db *postgres) GetIndexes(tableName string) (map[string]*core.Index, error) if err != nil { return nil, err } + + if indexName == "primary" { + continue + } indexName = strings.Trim(indexName, `" `) if strings.HasSuffix(indexName, "_pkey") { continue } if strings.HasPrefix(indexdef, "CREATE UNIQUE INDEX") { - indexType = core.UniqueType + indexType = schemas.UniqueType } else { - indexType = core.IndexType + indexType = schemas.IndexType } colNames = getIndexColName(indexdef) var isRegular bool @@ -1149,9 +1217,9 @@ func (db *postgres) GetIndexes(tableName string) (map[string]*core.Index, error) } } - index := &core.Index{Name: indexName, Type: indexType, Cols: make([]string, 0)} + index := &schemas.Index{Name: indexName, Type: indexType, Cols: make([]string, 0)} for _, colName := range colNames { - index.Cols = append(index.Cols, strings.Trim(colName, `" `)) + index.Cols = append(index.Cols, strings.TrimSpace(strings.Replace(colName, `"`, "", -1))) } index.IsRegular = isRegular indexes[index.Name] = index @@ -1159,8 +1227,8 @@ func (db *postgres) GetIndexes(tableName string) (map[string]*core.Index, error) return indexes, nil } -func (db *postgres) Filters() []core.Filter { - return []core.Filter{&core.IdFilter{}, &core.QuoteFilter{}, &core.SeqFilter{Prefix: "$", Start: 1}} +func (db *postgres) Filters() []Filter { + return []Filter{&SeqFilter{Prefix: "$", Start: 1}} } type pqDriver struct { @@ -1214,12 +1282,12 @@ func parseOpts(name string, o values) error { return nil } -func (p *pqDriver) Parse(driverName, dataSourceName string) (*core.Uri, error) { - db := &core.Uri{DbType: core.POSTGRES} +func (p *pqDriver) Parse(driverName, dataSourceName string) (*URI, error) { + db := &URI{DBType: schemas.POSTGRES} var err error if strings.HasPrefix(dataSourceName, "postgresql://") || strings.HasPrefix(dataSourceName, "postgres://") { - db.DbName, err = parseURL(dataSourceName) + db.DBName, err = parseURL(dataSourceName) if err != nil { return nil, err } @@ -1230,10 +1298,10 @@ func (p *pqDriver) Parse(driverName, dataSourceName string) (*core.Uri, error) { return nil, err } - db.DbName = o.Get("dbname") + db.DBName = o.Get("dbname") } - if db.DbName == "" { + if db.DBName == "" { return nil, errors.New("dbname is empty") } @@ -1244,7 +1312,7 @@ type pqDriverPgx struct { pqDriver } -func (pgx *pqDriverPgx) Parse(driverName, dataSourceName string) (*core.Uri, error) { +func (pgx *pqDriverPgx) Parse(driverName, dataSourceName string) (*URI, error) { // Remove the leading characters for driver to work if len(dataSourceName) >= 9 && dataSourceName[0] == 0 { dataSourceName = dataSourceName[9:] diff --git a/vendor/xorm.io/xorm/dialects/quote.go b/vendor/xorm.io/xorm/dialects/quote.go new file mode 100644 index 000000000..da4e0dd60 --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/quote.go @@ -0,0 +1,15 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package dialects + +// QuotePolicy describes quote handle policy +type QuotePolicy int + +// All QuotePolicies +const ( + QuotePolicyAlways QuotePolicy = iota + QuotePolicyNone + QuotePolicyReserved +) diff --git a/vendor/xorm.io/xorm/dialect_sqlite3.go b/vendor/xorm.io/xorm/dialects/sqlite3.go similarity index 68% rename from vendor/xorm.io/xorm/dialect_sqlite3.go rename to vendor/xorm.io/xorm/dialects/sqlite3.go index 0a290f3c4..710babe6d 100644 --- a/vendor/xorm.io/xorm/dialect_sqlite3.go +++ b/vendor/xorm.io/xorm/dialects/sqlite3.go @@ -2,16 +2,18 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package xorm +package dialects import ( + "context" "database/sql" "errors" "fmt" "regexp" "strings" - "xorm.io/core" + "xorm.io/xorm/core" + "xorm.io/xorm/schemas" ) var ( @@ -141,45 +143,65 @@ var ( "WITH": true, "WITHOUT": true, } + + sqlite3Quoter = schemas.Quoter{'`', '`', schemas.AlwaysReserve} ) type sqlite3 struct { - core.Base + Base +} + +func (db *sqlite3) Init(d *core.DB, uri *URI) error { + db.quoter = sqlite3Quoter + return db.Base.Init(d, db, uri) } -func (db *sqlite3) Init(d *core.DB, uri *core.Uri, drivername, dataSourceName string) error { - return db.Base.Init(d, db, uri, drivername, dataSourceName) +func (db *sqlite3) SetQuotePolicy(quotePolicy QuotePolicy) { + switch quotePolicy { + case QuotePolicyNone: + var q = sqlite3Quoter + q.IsReserved = schemas.AlwaysNoReserve + db.quoter = q + case QuotePolicyReserved: + var q = sqlite3Quoter + q.IsReserved = db.IsReserved + db.quoter = q + case QuotePolicyAlways: + fallthrough + default: + db.quoter = sqlite3Quoter + } } -func (db *sqlite3) SqlType(c *core.Column) string { +func (db *sqlite3) SQLType(c *schemas.Column) string { switch t := c.SQLType.Name; t { - case core.Bool: + case schemas.Bool: if c.Default == "true" { c.Default = "1" } else if c.Default == "false" { c.Default = "0" } - return core.Integer - case core.Date, core.DateTime, core.TimeStamp, core.Time: - return core.DateTime - case core.TimeStampz: - return core.Text - case core.Char, core.Varchar, core.NVarchar, core.TinyText, - core.Text, core.MediumText, core.LongText, core.Json: - return core.Text - case core.Bit, core.TinyInt, core.SmallInt, core.MediumInt, core.Int, core.Integer, core.BigInt: - return core.Integer - case core.Float, core.Double, core.Real: - return core.Real - case core.Decimal, core.Numeric: - return core.Numeric - case core.TinyBlob, core.Blob, core.MediumBlob, core.LongBlob, core.Bytea, core.Binary, core.VarBinary: - return core.Blob - case core.Serial, core.BigSerial: + return schemas.Integer + case schemas.Date, schemas.DateTime, schemas.TimeStamp, schemas.Time: + return schemas.DateTime + case schemas.TimeStampz: + return schemas.Text + case schemas.Char, schemas.Varchar, schemas.NVarchar, schemas.TinyText, + schemas.Text, schemas.MediumText, schemas.LongText, schemas.Json: + return schemas.Text + case schemas.Bit, schemas.TinyInt, schemas.SmallInt, schemas.MediumInt, schemas.Int, schemas.Integer, schemas.BigInt: + return schemas.Integer + case schemas.Float, schemas.Double, schemas.Real: + return schemas.Real + case schemas.Decimal, schemas.Numeric: + return schemas.Numeric + case schemas.TinyBlob, schemas.Blob, schemas.MediumBlob, schemas.LongBlob, schemas.Bytea, schemas.Binary, schemas.VarBinary: + return schemas.Blob + case schemas.Serial, schemas.BigSerial: c.IsPrimaryKey = true c.IsAutoIncrement = true c.Nullable = false - return core.Integer + return schemas.Integer 
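Editor's note: the new dialects/quote.go above only declares the policy values; the quoting itself lives in the schemas.Quoter value each dialect now carries (postgresQuoter, sqlite3Quoter). A rough caller-side sketch, using the same positional literal the dialects use and assuming Quote/Join behave as they are used elsewhere in this patch:

package main

import (
	"fmt"

	"xorm.io/xorm/schemas"
)

func main() {
	// Same construction as sqlite3Quoter above: prefix rune, suffix rune, reserve policy.
	q := schemas.Quoter{'`', '`', schemas.AlwaysReserve}

	// With AlwaysReserve every identifier should come back wrapped, e.g. `user`.
	fmt.Println(q.Quote("user"))

	// Join quotes each element and joins them, as CreateTableSQL does for composite primary keys.
	fmt.Println(q.Join([]string{"repo_id", "user_id"}, ","))
}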
default: return t } @@ -189,84 +211,100 @@ func (db *sqlite3) FormatBytes(bs []byte) string { return fmt.Sprintf("X'%x'", bs) } -func (db *sqlite3) SupportInsertMany() bool { - return true -} - func (db *sqlite3) IsReserved(name string) bool { - _, ok := sqlite3ReservedWords[name] + _, ok := sqlite3ReservedWords[strings.ToUpper(name)] return ok } -func (db *sqlite3) Quote(name string) string { - return "`" + name + "`" -} - func (db *sqlite3) AutoIncrStr() string { return "AUTOINCREMENT" } -func (db *sqlite3) SupportEngine() bool { - return false -} - -func (db *sqlite3) SupportCharset() bool { - return false -} - -func (db *sqlite3) IndexOnTable() bool { - return false -} - -func (db *sqlite3) IndexCheckSql(tableName, idxName string) (string, []interface{}) { +func (db *sqlite3) IndexCheckSQL(tableName, idxName string) (string, []interface{}) { args := []interface{}{idxName} return "SELECT name FROM sqlite_master WHERE type='index' and name = ?", args } -func (db *sqlite3) TableCheckSql(tableName string) (string, []interface{}) { - args := []interface{}{tableName} - return "SELECT name FROM sqlite_master WHERE type='table' and name = ?", args +func (db *sqlite3) IsTableExist(ctx context.Context, tableName string) (bool, error) { + return db.HasRecords(ctx, "SELECT name FROM sqlite_master WHERE type='table' and name = ?", tableName) } -func (db *sqlite3) DropIndexSql(tableName string, index *core.Index) string { +func (db *sqlite3) DropIndexSQL(tableName string, index *schemas.Index) string { // var unique string - quote := db.Quote idxName := index.Name if !strings.HasPrefix(idxName, "UQE_") && !strings.HasPrefix(idxName, "IDX_") { - if index.Type == core.UniqueType { + if index.Type == schemas.UniqueType { idxName = fmt.Sprintf("UQE_%v_%v", tableName, index.Name) } else { idxName = fmt.Sprintf("IDX_%v_%v", tableName, index.Name) } } - return fmt.Sprintf("DROP INDEX %v", quote(idxName)) + return fmt.Sprintf("DROP INDEX %v", db.Quoter().Quote(idxName)) } -func (db *sqlite3) ForUpdateSql(query string) string { - return query +func (db *sqlite3) CreateTableSQL(table *schemas.Table, tableName string) ([]string, bool) { + var sql string + sql = "CREATE TABLE IF NOT EXISTS " + if tableName == "" { + tableName = table.Name + } + + quoter := db.Quoter() + sql += quoter.Quote(tableName) + sql += " (" + + if len(table.ColumnsSeq()) > 0 { + pkList := table.PrimaryKeys + + for _, colName := range table.ColumnsSeq() { + col := table.GetColumn(colName) + if col.IsPrimaryKey && len(pkList) == 1 { + sql += db.String(col) + } else { + sql += db.StringNoPk(col) + } + sql = strings.TrimSpace(sql) + sql += ", " + } + + if len(pkList) > 1 { + sql += "PRIMARY KEY ( " + sql += quoter.Join(pkList, ",") + sql += " ), " + } + + sql = sql[:len(sql)-2] + } + sql += ")" + + return []string{sql}, true } -/*func (db *sqlite3) ColumnCheckSql(tableName, colName string) (string, []interface{}) { - args := []interface{}{tableName} - sql := "SELECT name FROM sqlite_master WHERE type='table' and name = ? and ((sql like '%`" + colName + "`%') or (sql like '%[" + colName + "]%'))" - return sql, args -}*/ +func (db *sqlite3) ForUpdateSQL(query string) string { + return query +} -func (db *sqlite3) IsColumnExist(tableName, colName string) (bool, error) { - args := []interface{}{tableName} - query := "SELECT name FROM sqlite_master WHERE type='table' and name = ? and ((sql like '%`" + colName + "`%') or (sql like '%[" + colName + "]%'))" - db.LogSQL(query, args) - rows, err := db.DB().Query(query, args...) 
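Editor's note: the rewritten sqlite3 IsColumnExist above no longer greps the CREATE TABLE text in sqlite_master; it runs a zero-row SELECT and inspects the result set's column list. The same pattern works with database/sql directly; a hedged sketch (table and column names are placeholders):

package main

import (
	"context"
	"database/sql"
	"fmt"
	"strings"

	_ "github.com/mattn/go-sqlite3"
)

// columnExists reports whether colName is a column of tableName by selecting
// zero rows and checking the returned column metadata, like the new dialect code.
func columnExists(ctx context.Context, db *sql.DB, tableName, colName string) (bool, error) {
	rows, err := db.QueryContext(ctx, "SELECT * FROM "+tableName+" LIMIT 0")
	if err != nil {
		return false, err
	}
	defer rows.Close()

	cols, err := rows.Columns()
	if err != nil {
		return false, err
	}
	for _, col := range cols {
		if strings.EqualFold(col, colName) {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	if _, err := db.Exec("CREATE TABLE issue (id INTEGER PRIMARY KEY, title TEXT)"); err != nil {
		panic(err)
	}
	ok, err := columnExists(context.Background(), db, "issue", "title")
	fmt.Println(ok, err) // true <nil>
}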
+func (db *sqlite3) IsColumnExist(ctx context.Context, tableName, colName string) (bool, error) { + query := "SELECT * FROM " + tableName + " LIMIT 0" + rows, err := db.DB().QueryContext(ctx, query) if err != nil { return false, err } defer rows.Close() - if rows.Next() { - return true, nil + cols, err := rows.Columns() + if err != nil { + return false, err + } + + for _, col := range cols { + if strings.EqualFold(col, colName) { + return true, nil + } } + return false, nil } @@ -298,9 +336,9 @@ func splitColStr(colStr string) []string { return results } -func parseString(colStr string) (*core.Column, error) { +func parseString(colStr string) (*schemas.Column, error) { fields := splitColStr(colStr) - col := new(core.Column) + col := new(schemas.Column) col.Indexes = make(map[string]int) col.Nullable = true col.DefaultIsEmpty = true @@ -310,7 +348,7 @@ func parseString(colStr string) (*core.Column, error) { col.Name = strings.Trim(strings.Trim(field, "`[] "), `"`) continue } else if idx == 1 { - col.SQLType = core.SQLType{Name: field, DefaultLength: 0, DefaultLength2: 0} + col.SQLType = schemas.SQLType{Name: field, DefaultLength: 0, DefaultLength2: 0} continue } switch field { @@ -332,11 +370,11 @@ func parseString(colStr string) (*core.Column, error) { return col, nil } -func (db *sqlite3) GetColumns(tableName string) ([]string, map[string]*core.Column, error) { +func (db *sqlite3) GetColumns(ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) { args := []interface{}{tableName} s := "SELECT sql FROM sqlite_master WHERE type='table' and name = ?" - db.LogSQL(s, args) - rows, err := db.DB().Query(s, args...) + + rows, err := db.DB().QueryContext(ctx, s, args...) if err != nil { return nil, nil, err } @@ -359,7 +397,7 @@ func (db *sqlite3) GetColumns(tableName string) ([]string, map[string]*core.Colu nEnd := strings.LastIndex(name, ")") reg := regexp.MustCompile(`[^\(,\)]*(\([^\(]*\))?`) colCreates := reg.FindAllString(name[nStart+1:nEnd], -1) - cols := make(map[string]*core.Column) + cols := make(map[string]*schemas.Column) colSeq := make([]string, 0) for _, colStr := range colCreates { @@ -389,20 +427,19 @@ func (db *sqlite3) GetColumns(tableName string) ([]string, map[string]*core.Colu return colSeq, cols, nil } -func (db *sqlite3) GetTables() ([]*core.Table, error) { +func (db *sqlite3) GetTables(ctx context.Context) ([]*schemas.Table, error) { args := []interface{}{} s := "SELECT name FROM sqlite_master WHERE type='table'" - db.LogSQL(s, args) - rows, err := db.DB().Query(s, args...) + rows, err := db.DB().QueryContext(ctx, s, args...) if err != nil { return nil, err } defer rows.Close() - tables := make([]*core.Table, 0) + tables := make([]*schemas.Table, 0) for rows.Next() { - table := core.NewEmptyTable() + table := schemas.NewEmptyTable() err = rows.Scan(&table.Name) if err != nil { return nil, err @@ -415,18 +452,17 @@ func (db *sqlite3) GetTables() ([]*core.Table, error) { return tables, nil } -func (db *sqlite3) GetIndexes(tableName string) (map[string]*core.Index, error) { +func (db *sqlite3) GetIndexes(ctx context.Context, tableName string) (map[string]*schemas.Index, error) { args := []interface{}{tableName} s := "SELECT sql FROM sqlite_master WHERE type='index' and tbl_name = ?" - db.LogSQL(s, args) - rows, err := db.DB().Query(s, args...) + rows, err := db.DB().QueryContext(ctx, s, args...) 
if err != nil { return nil, err } defer rows.Close() - indexes := make(map[string]*core.Index, 0) + indexes := make(map[string]*schemas.Index, 0) for rows.Next() { var tmpSQL sql.NullString err = rows.Scan(&tmpSQL) @@ -439,7 +475,7 @@ func (db *sqlite3) GetIndexes(tableName string) (map[string]*core.Index, error) } sql := tmpSQL.String - index := new(core.Index) + index := new(schemas.Index) nNStart := strings.Index(sql, "INDEX") nNEnd := strings.Index(sql, "ON") if nNStart == -1 || nNEnd == -1 { @@ -456,9 +492,9 @@ func (db *sqlite3) GetIndexes(tableName string) (map[string]*core.Index, error) } if strings.HasPrefix(sql, "CREATE UNIQUE INDEX") { - index.Type = core.UniqueType + index.Type = schemas.UniqueType } else { - index.Type = core.IndexType + index.Type = schemas.IndexType } nStart := strings.Index(sql, "(") @@ -476,17 +512,17 @@ func (db *sqlite3) GetIndexes(tableName string) (map[string]*core.Index, error) return indexes, nil } -func (db *sqlite3) Filters() []core.Filter { - return []core.Filter{&core.IdFilter{}} +func (db *sqlite3) Filters() []Filter { + return []Filter{} } type sqlite3Driver struct { } -func (p *sqlite3Driver) Parse(driverName, dataSourceName string) (*core.Uri, error) { +func (p *sqlite3Driver) Parse(driverName, dataSourceName string) (*URI, error) { if strings.Contains(dataSourceName, "?") { dataSourceName = dataSourceName[:strings.Index(dataSourceName, "?")] } - return &core.Uri{DbType: core.SQLITE, DbName: dataSourceName}, nil + return &URI{DBType: schemas.SQLITE, DBName: dataSourceName}, nil } diff --git a/vendor/xorm.io/xorm/dialects/table_name.go b/vendor/xorm.io/xorm/dialects/table_name.go new file mode 100644 index 000000000..a989b3863 --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/table_name.go @@ -0,0 +1,90 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dialects + +import ( + "fmt" + "reflect" + "strings" + + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/names" +) + +// TableNameWithSchema will add schema prefix on table name if possible +func TableNameWithSchema(dialect Dialect, tableName string) string { + // Add schema name as prefix of table name. + // Only for postgres database. 
+ if dialect.URI().Schema != "" && + dialect.URI().Schema != dialect.DefaultSchema() && + strings.Index(tableName, ".") == -1 { + return fmt.Sprintf("%s.%s", dialect.URI().Schema, tableName) + } + return tableName +} + +// TableNameNoSchema returns table name with given tableName +func TableNameNoSchema(dialect Dialect, mapper names.Mapper, tableName interface{}) string { + quote := dialect.Quoter().Quote + switch tableName.(type) { + case []string: + t := tableName.([]string) + if len(t) > 1 { + return fmt.Sprintf("%v AS %v", quote(t[0]), quote(t[1])) + } else if len(t) == 1 { + return quote(t[0]) + } + case []interface{}: + t := tableName.([]interface{}) + l := len(t) + var table string + if l > 0 { + f := t[0] + switch f.(type) { + case string: + table = f.(string) + case names.TableName: + table = f.(names.TableName).TableName() + default: + v := utils.ReflectValue(f) + t := v.Type() + if t.Kind() == reflect.Struct { + table = names.GetTableName(mapper, v) + } else { + table = quote(fmt.Sprintf("%v", f)) + } + } + } + if l > 1 { + return fmt.Sprintf("%v AS %v", quote(table), quote(fmt.Sprintf("%v", t[1]))) + } else if l == 1 { + return quote(table) + } + case names.TableName: + return tableName.(names.TableName).TableName() + case string: + return tableName.(string) + case reflect.Value: + v := tableName.(reflect.Value) + return names.GetTableName(mapper, v) + default: + v := utils.ReflectValue(tableName) + t := v.Type() + if t.Kind() == reflect.Struct { + return names.GetTableName(mapper, v) + } + return quote(fmt.Sprintf("%v", tableName)) + } + return "" +} + +// FullTableName returns table name with quote and schema according parameter +func FullTableName(dialect Dialect, mapper names.Mapper, bean interface{}, includeSchema ...bool) string { + tbName := TableNameNoSchema(dialect, mapper, bean) + if len(includeSchema) > 0 && includeSchema[0] && !utils.IsSubQuery(tbName) { + tbName = TableNameWithSchema(dialect, tbName) + } + return tbName +} diff --git a/vendor/xorm.io/xorm/dialects/time.go b/vendor/xorm.io/xorm/dialects/time.go new file mode 100644 index 000000000..b0394745d --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/time.go @@ -0,0 +1,49 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dialects + +import ( + "time" + + "xorm.io/xorm/schemas" +) + +// FormatTime format time as column type +func FormatTime(dialect Dialect, sqlTypeName string, t time.Time) (v interface{}) { + switch sqlTypeName { + case schemas.Time: + s := t.Format("2006-01-02 15:04:05") // time.RFC3339 + v = s[11:19] + case schemas.Date: + v = t.Format("2006-01-02") + case schemas.DateTime, schemas.TimeStamp, schemas.Varchar: // !DarthPestilane! format time when sqlTypeName is schemas.Varchar. 
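Editor's note: dialects.FullTableName above is what the engine now calls wherever it previously resolved table names itself (see the cacher and Sync changes later in this patch). A hedged caller-side sketch; the User bean, driver and DSN are placeholders:

package main

import (
	"fmt"

	_ "github.com/mattn/go-sqlite3"
	"xorm.io/xorm"
	"xorm.io/xorm/dialects"
)

// User is a placeholder bean for illustration.
type User struct {
	Id   int64
	Name string
}

func main() {
	engine, err := xorm.NewEngine("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}
	// Same call the engine itself now makes for cachers and Sync.
	fmt.Println(dialects.FullTableName(engine.Dialect(), engine.GetTableMapper(), new(User)))
	// Passing true adds the schema prefix when the dialect has a non-default schema (PostgreSQL).
	fmt.Println(dialects.FullTableName(engine.Dialect(), engine.GetTableMapper(), new(User), true))
}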
+ v = t.Format("2006-01-02 15:04:05") + case schemas.TimeStampz: + if dialect.URI().DBType == schemas.MSSQL { + v = t.Format("2006-01-02T15:04:05.9999999Z07:00") + } else { + v = t.Format(time.RFC3339Nano) + } + case schemas.BigInt, schemas.Int: + v = t.Unix() + default: + v = t + } + return +} + +func FormatColumnTime(dialect Dialect, defaultTimeZone *time.Location, col *schemas.Column, t time.Time) (v interface{}) { + if t.IsZero() { + if col.Nullable { + return nil + } + return "" + } + + if col.TimeZone != nil { + return FormatTime(dialect, col.SQLType.Name, t.In(col.TimeZone)) + } + return FormatTime(dialect, col.SQLType.Name, t.In(defaultTimeZone)) +} diff --git a/vendor/xorm.io/xorm/doc.go b/vendor/xorm.io/xorm/doc.go index 9620bca19..8df4fb308 100644 --- a/vendor/xorm.io/xorm/doc.go +++ b/vendor/xorm.io/xorm/doc.go @@ -126,7 +126,7 @@ Attention: the above 8 methods should be the last chainable method. engine.ID(1).Get(&user) // for single primary key // SELECT * FROM user WHERE id = 1 - engine.ID(core.PK{1, 2}).Get(&user) // for composite primary keys + engine.ID(schemas.PK{1, 2}).Get(&user) // for composite primary keys // SELECT * FROM user WHERE id1 = 1 AND id2 = 2 engine.In("id", 1, 2, 3).Find(&users) // SELECT * FROM user WHERE id IN (1, 2, 3) diff --git a/vendor/xorm.io/xorm/engine.go b/vendor/xorm.io/xorm/engine.go index a7e52ea48..4694e1c0b 100644 --- a/vendor/xorm.io/xorm/engine.go +++ b/vendor/xorm.io/xorm/engine.go @@ -5,11 +5,8 @@ package xorm import ( - "bufio" - "bytes" "context" "database/sql" - "encoding/gob" "errors" "fmt" "io" @@ -17,70 +14,45 @@ import ( "reflect" "strconv" "strings" - "sync" "time" - "xorm.io/builder" - "xorm.io/core" + "xorm.io/xorm/caches" + "xorm.io/xorm/core" + "xorm.io/xorm/dialects" + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/log" + "xorm.io/xorm/names" + "xorm.io/xorm/schemas" + "xorm.io/xorm/tags" ) // Engine is the major struct of xorm, it means a database manager. 
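Editor's note: dialects.FormatTime above decides how a time.Time is rendered for each SQL type (and per target database for TimeStampz). A hedged sketch of calling it through an engine's dialect; the engine setup is illustrative only and the TimeStampz branch, which needs the dialect's URI, is not exercised:

package main

import (
	"fmt"
	"time"

	_ "github.com/mattn/go-sqlite3"
	"xorm.io/xorm"
	"xorm.io/xorm/dialects"
	"xorm.io/xorm/schemas"
)

func main() {
	engine, err := xorm.NewEngine("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}
	now := time.Date(2020, 3, 22, 23, 12, 55, 0, time.UTC)

	// "2020-03-22 23:12:55" for DATETIME columns.
	fmt.Println(dialects.FormatTime(engine.Dialect(), schemas.DateTime, now))
	// "23:12:55" for TIME columns, "2020-03-22" for DATE columns.
	fmt.Println(dialects.FormatTime(engine.Dialect(), schemas.Time, now))
	fmt.Println(dialects.FormatTime(engine.Dialect(), schemas.Date, now))
	// Unix seconds when the column type is an integer.
	fmt.Println(dialects.FormatTime(engine.Dialect(), schemas.BigInt, now))
}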
// Commonly, an application only need one engine type Engine struct { - db *core.DB - dialect core.Dialect - - ColumnMapper core.IMapper - TableMapper core.IMapper - TagIdentifier string - Tables map[reflect.Type]*core.Table - - mutex *sync.RWMutex - Cacher core.Cacher + cacherMgr *caches.Manager + defaultContext context.Context + dialect dialects.Dialect + engineGroup *EngineGroup + logger log.ContextLogger + tagParser *tags.Parser - showSQL bool - showExecTime bool + driverName string + dataSourceName string - logger core.ILogger TZLocation *time.Location // The timezone of the application DatabaseTZ *time.Location // The timezone of the database - - disableGlobalCache bool - - tagHandlers map[string]tagHandler - - engineGroup *EngineGroup - - cachers map[string]core.Cacher - cacherLock sync.RWMutex - - defaultContext context.Context } -func (engine *Engine) setCacher(tableName string, cacher core.Cacher) { - engine.cacherLock.Lock() - engine.cachers[tableName] = cacher - engine.cacherLock.Unlock() +func (engine *Engine) SetCacher(tableName string, cacher caches.Cacher) { + engine.cacherMgr.SetCacher(tableName, cacher) } -func (engine *Engine) SetCacher(tableName string, cacher core.Cacher) { - engine.setCacher(tableName, cacher) +func (engine *Engine) GetCacher(tableName string) caches.Cacher { + return engine.cacherMgr.GetCacher(tableName) } -func (engine *Engine) getCacher(tableName string) core.Cacher { - var cacher core.Cacher - var ok bool - engine.cacherLock.RLock() - cacher, ok = engine.cachers[tableName] - engine.cacherLock.RUnlock() - if !ok && !engine.disableGlobalCache { - cacher = engine.Cacher - } - return cacher -} - -func (engine *Engine) GetCacher(tableName string) core.Cacher { - return engine.getCacher(tableName) +func (engine *Engine) SetQuotePolicy(quotePolicy dialects.QuotePolicy) { + engine.dialect.SetQuotePolicy(quotePolicy) } // BufferSize sets buffer size for iterate @@ -90,97 +62,64 @@ func (engine *Engine) BufferSize(size int) *Session { return session.BufferSize(size) } -// CondDeleted returns the conditions whether a record is soft deleted. -func (engine *Engine) CondDeleted(colName string) builder.Cond { - if engine.dialect.DBType() == core.MSSQL { - return builder.IsNull{colName} - } - return builder.IsNull{colName}.Or(builder.Eq{colName: zeroTime1}) -} - // ShowSQL show SQL statement or not on logger if log level is great than INFO func (engine *Engine) ShowSQL(show ...bool) { engine.logger.ShowSQL(show...) 
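Editor's note: with the Engine struct above, cachers no longer live in a mutex-guarded map on the Engine but in a caches.Manager, and the quote policy is forwarded to the dialect. A hedged usage sketch; NewLRUCacher2 and NewMemoryStore are assumed to have moved from the root package into xorm.io/xorm/caches along with the files renamed in this patch:

package main

import (
	"time"

	_ "github.com/mattn/go-sqlite3"
	"xorm.io/xorm"
	"xorm.io/xorm/caches"
	"xorm.io/xorm/dialects"
)

func main() {
	engine, err := xorm.NewEngine("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}

	// Only quote identifiers that are reserved words (alternatives: Always / None).
	engine.SetQuotePolicy(dialects.QuotePolicyReserved)

	// Per-table cacher, now stored in the caches.Manager behind SetCacher.
	// Constructor names assumed from the pre-refactor root-package API.
	cacher := caches.NewLRUCacher2(caches.NewMemoryStore(), time.Hour, 1000)
	engine.SetCacher("user", cacher)
	_ = engine.GetCacher("user") // returns the cacher registered above
}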
- if len(show) == 0 { - engine.showSQL = true - } else { - engine.showSQL = show[0] - } -} - -// ShowExecTime show SQL statement and execute time or not on logger if log level is great than INFO -func (engine *Engine) ShowExecTime(show ...bool) { - if len(show) == 0 { - engine.showExecTime = true - } else { - engine.showExecTime = show[0] - } + engine.DB().Logger = engine.logger } // Logger return the logger interface -func (engine *Engine) Logger() core.ILogger { +func (engine *Engine) Logger() log.ContextLogger { return engine.logger } // SetLogger set the new logger -func (engine *Engine) SetLogger(logger core.ILogger) { - engine.logger = logger - engine.showSQL = logger.IsShowSQL() - engine.dialect.SetLogger(logger) +func (engine *Engine) SetLogger(logger interface{}) { + var realLogger log.ContextLogger + switch t := logger.(type) { + case log.Logger: + realLogger = log.NewLoggerAdapter(t) + case log.ContextLogger: + realLogger = t + } + engine.logger = realLogger + engine.DB().Logger = realLogger } // SetLogLevel sets the logger level -func (engine *Engine) SetLogLevel(level core.LogLevel) { +func (engine *Engine) SetLogLevel(level log.LogLevel) { engine.logger.SetLevel(level) } // SetDisableGlobalCache disable global cache or not func (engine *Engine) SetDisableGlobalCache(disable bool) { - if engine.disableGlobalCache != disable { - engine.disableGlobalCache = disable - } + engine.cacherMgr.SetDisableGlobalCache(disable) } // DriverName return the current sql driver's name func (engine *Engine) DriverName() string { - return engine.dialect.DriverName() + return engine.driverName } // DataSourceName return the current connection string func (engine *Engine) DataSourceName() string { - return engine.dialect.DataSourceName() + return engine.dataSourceName } // SetMapper set the name mapping rules -func (engine *Engine) SetMapper(mapper core.IMapper) { +func (engine *Engine) SetMapper(mapper names.Mapper) { engine.SetTableMapper(mapper) engine.SetColumnMapper(mapper) } // SetTableMapper set the table name mapping rule -func (engine *Engine) SetTableMapper(mapper core.IMapper) { - engine.TableMapper = mapper +func (engine *Engine) SetTableMapper(mapper names.Mapper) { + engine.tagParser.SetTableMapper(mapper) } // SetColumnMapper set the column name mapping rule -func (engine *Engine) SetColumnMapper(mapper core.IMapper) { - engine.ColumnMapper = mapper -} - -// SupportInsertMany If engine's database support batch insert records like -// "insert into user values (name, age), (name, age)". -// When the return is ture, then engine.Insert(&users) will -// generate batch sql and exeute. 
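Editor's note: SetLogger above now takes an interface{} and wraps a plain log.Logger in a context-aware adapter, so both the old and new logger interfaces keep working. A hedged sketch; log.NewSimpleLogger and the LOG_INFO constant are assumed to live in the new xorm.io/xorm/log package (they previously sat in the root package and xorm.io/core):

package main

import (
	"os"

	_ "github.com/mattn/go-sqlite3"
	"xorm.io/xorm"
	"xorm.io/xorm/log"
)

func main() {
	engine, err := xorm.NewEngine("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}

	// A plain log.Logger is wrapped by log.NewLoggerAdapter inside SetLogger;
	// a log.ContextLogger is used as-is.
	engine.SetLogger(log.NewSimpleLogger(os.Stdout))
	engine.SetLogLevel(log.LOG_INFO)
	engine.ShowSQL(true)
}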
-func (engine *Engine) SupportInsertMany() bool { - return engine.dialect.SupportInsertMany() -} - -func (engine *Engine) quoteColumns(columnStr string) string { - columns := strings.Split(columnStr, ",") - for i := 0; i < len(columns); i++ { - columns[i] = engine.Quote(strings.TrimSpace(columns[i])) - } - return strings.Join(columns, ",") +func (engine *Engine) SetColumnMapper(mapper names.Mapper) { + engine.tagParser.SetColumnMapper(mapper) } // Quote Use QuoteStr quote the string sql @@ -206,64 +145,12 @@ func (engine *Engine) QuoteTo(buf *strings.Builder, value string) { if value == "" { return } - - quoteTo(buf, engine.dialect.Quote(""), value) -} - -func quoteTo(buf *strings.Builder, quotePair string, value string) { - if len(quotePair) < 2 { // no quote - _, _ = buf.WriteString(value) - return - } - - prefix, suffix := quotePair[0], quotePair[1] - - i := 0 - for i < len(value) { - // start of a token; might be already quoted - if value[i] == '.' { - _ = buf.WriteByte('.') - i++ - } else if value[i] == prefix || value[i] == '`' { - // Has quotes; skip/normalize `name` to prefix+name+sufix - var ch byte - if value[i] == prefix { - ch = suffix - } else { - ch = '`' - } - i++ - _ = buf.WriteByte(prefix) - for ; i < len(value) && value[i] != ch; i++ { - _ = buf.WriteByte(value[i]) - } - _ = buf.WriteByte(suffix) - i++ - } else { - // Requires quotes - _ = buf.WriteByte(prefix) - for ; i < len(value) && value[i] != '.'; i++ { - _ = buf.WriteByte(value[i]) - } - _ = buf.WriteByte(suffix) - } - } -} - -func (engine *Engine) quote(sql string) string { - return engine.dialect.Quote(sql) -} - -// SqlType will be deprecated, please use SQLType instead -// -// Deprecated: use SQLType instead -func (engine *Engine) SqlType(c *core.Column) string { - return engine.SQLType(c) + engine.dialect.Quoter().QuoteTo(buf, value) } // SQLType A simple wrapper to dialect's core.SqlType method -func (engine *Engine) SQLType(c *core.Column) string { - return engine.dialect.SqlType(c) +func (engine *Engine) SQLType(c *schemas.Column) string { + return engine.dialect.SQLType(c) } // AutoIncrStr Database's autoincrement statement @@ -273,27 +160,27 @@ func (engine *Engine) AutoIncrStr() string { // SetConnMaxLifetime sets the maximum amount of time a connection may be reused. func (engine *Engine) SetConnMaxLifetime(d time.Duration) { - engine.db.SetConnMaxLifetime(d) + engine.DB().SetConnMaxLifetime(d) } // SetMaxOpenConns is only available for go 1.2+ func (engine *Engine) SetMaxOpenConns(conns int) { - engine.db.SetMaxOpenConns(conns) + engine.DB().SetMaxOpenConns(conns) } // SetMaxIdleConns set the max idle connections on pool, default is 2 func (engine *Engine) SetMaxIdleConns(conns int) { - engine.db.SetMaxIdleConns(conns) + engine.DB().SetMaxIdleConns(conns) } // SetDefaultCacher set the default cacher. Xorm's default not enable cacher. 
-func (engine *Engine) SetDefaultCacher(cacher core.Cacher) { - engine.Cacher = cacher +func (engine *Engine) SetDefaultCacher(cacher caches.Cacher) { + engine.cacherMgr.SetDefaultCacher(cacher) } // GetDefaultCacher returns the default cacher -func (engine *Engine) GetDefaultCacher() core.Cacher { - return engine.Cacher +func (engine *Engine) GetDefaultCacher() caches.Cacher { + return engine.cacherMgr.GetDefaultCacher() } // NoCache If you has set default cacher, and you want temporilly stop use cache, @@ -312,23 +199,23 @@ func (engine *Engine) NoCascade() *Session { } // MapCacher Set a table use a special cacher -func (engine *Engine) MapCacher(bean interface{}, cacher core.Cacher) error { - engine.setCacher(engine.TableName(bean, true), cacher) +func (engine *Engine) MapCacher(bean interface{}, cacher caches.Cacher) error { + engine.SetCacher(dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean, true), cacher) return nil } // NewDB provides an interface to operate database directly func (engine *Engine) NewDB() (*core.DB, error) { - return core.OpenDialect(engine.dialect) + return core.Open(engine.driverName, engine.dataSourceName) } // DB return the wrapper of sql.DB func (engine *Engine) DB() *core.DB { - return engine.db + return engine.dialect.DB() } // Dialect return database dialect -func (engine *Engine) Dialect() core.Dialect { +func (engine *Engine) Dialect() dialects.Dialect { return engine.dialect } @@ -341,7 +228,7 @@ func (engine *Engine) NewSession() *Session { // Close the engine func (engine *Engine) Close() error { - return engine.db.Close() + return engine.DB().Close() } // Ping tests if database is alive @@ -351,25 +238,6 @@ func (engine *Engine) Ping() error { return session.Ping() } -// logSQL save sql -func (engine *Engine) logSQL(sqlStr string, sqlArgs ...interface{}) { - if engine.showSQL && !engine.showExecTime { - if len(sqlArgs) > 0 { - engine.logger.Infof("[SQL] %v %#v", sqlStr, sqlArgs) - } else { - engine.logger.Infof("[SQL] %v", sqlStr) - } - } -} - -// Sql provides raw sql input parameter. When you have a complex SQL statement -// and cannot use Where, Id, In and etc. Methods to describe, you can use SQL. -// -// Deprecated: use SQL instead. -func (engine *Engine) Sql(querystring string, args ...interface{}) *Session { - return engine.SQL(querystring, args...) -} - // SQL method let's you manually write raw SQL and operate // For example: // @@ -398,26 +266,33 @@ func (engine *Engine) NoAutoCondition(no ...bool) *Session { return session.NoAutoCondition(no...) 
} -func (engine *Engine) loadTableInfo(table *core.Table) error { - colSeq, cols, err := engine.dialect.GetColumns(table.Name) +func (engine *Engine) loadTableInfo(table *schemas.Table) error { + colSeq, cols, err := engine.dialect.GetColumns(engine.defaultContext, table.Name) if err != nil { return err } for _, name := range colSeq { table.AddColumn(cols[name]) } - indexes, err := engine.dialect.GetIndexes(table.Name) + indexes, err := engine.dialect.GetIndexes(engine.defaultContext, table.Name) if err != nil { return err } table.Indexes = indexes + var seq int for _, index := range indexes { for _, name := range index.Cols { - if col := table.GetColumn(name); col != nil { + parts := strings.Split(name, " ") + if len(parts) > 1 { + if parts[1] == "DESC" { + seq = 1 + } + } + if col := table.GetColumn(parts[0]); col != nil { col.Indexes[index.Name] = index.Type } else { - return fmt.Errorf("Unknown col %s in index %v of table %v, columns %v", name, index.Name, table.Name, table.ColumnsSeq()) + return fmt.Errorf("Unknown col %s seq %d, in index %v of table %v, columns %v", name, seq, index.Name, table.Name, table.ColumnsSeq()) } } } @@ -425,8 +300,8 @@ func (engine *Engine) loadTableInfo(table *core.Table) error { } // DBMetas Retrieve all tables, columns, indexes' informations from database. -func (engine *Engine) DBMetas() ([]*core.Table, error) { - tables, err := engine.dialect.GetTables() +func (engine *Engine) DBMetas() ([]*schemas.Table, error) { + tables, err := engine.dialect.GetTables(engine.defaultContext) if err != nil { return nil, err } @@ -440,7 +315,7 @@ func (engine *Engine) DBMetas() ([]*core.Table, error) { } // DumpAllToFile dump database all table structs and data to a file -func (engine *Engine) DumpAllToFile(fp string, tp ...core.DbType) error { +func (engine *Engine) DumpAllToFile(fp string, tp ...schemas.DBType) error { f, err := os.Create(fp) if err != nil { return err @@ -450,7 +325,7 @@ func (engine *Engine) DumpAllToFile(fp string, tp ...core.DbType) error { } // DumpAll dump database all table structs and data to w -func (engine *Engine) DumpAll(w io.Writer, tp ...core.DbType) error { +func (engine *Engine) DumpAll(w io.Writer, tp ...schemas.DBType) error { tables, err := engine.DBMetas() if err != nil { return err @@ -459,7 +334,7 @@ func (engine *Engine) DumpAll(w io.Writer, tp ...core.DbType) error { } // DumpTablesToFile dump specified tables to SQL file. -func (engine *Engine) DumpTablesToFile(tables []*core.Table, fp string, tp ...core.DbType) error { +func (engine *Engine) DumpTablesToFile(tables []*schemas.Table, fp string, tp ...schemas.DBType) error { f, err := os.Create(fp) if err != nil { return err @@ -469,55 +344,70 @@ func (engine *Engine) DumpTablesToFile(tables []*core.Table, fp string, tp ...co } // DumpTables dump specify tables to io.Writer -func (engine *Engine) DumpTables(tables []*core.Table, w io.Writer, tp ...core.DbType) error { +func (engine *Engine) DumpTables(tables []*schemas.Table, w io.Writer, tp ...schemas.DBType) error { return engine.dumpTables(tables, w, tp...) 
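Editor's note: loadTableInfo and DBMetas above now pass the engine's default context down to the dialect and return the new schemas.Table type. A small hedged sketch of inspecting the result; the Comment bean and DSN are placeholders:

package main

import (
	"fmt"

	_ "github.com/mattn/go-sqlite3"
	"xorm.io/xorm"
)

// Comment is a placeholder bean for illustration.
type Comment struct {
	Id      int64
	Content string `xorm:"TEXT"`
}

func main() {
	engine, err := xorm.NewEngine("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}
	if err := engine.Sync(new(Comment)); err != nil {
		panic(err)
	}

	// DBMetas now returns []*schemas.Table built from the context-aware dialect calls.
	tables, err := engine.DBMetas()
	if err != nil {
		panic(err)
	}
	for _, table := range tables {
		fmt.Println(table.Name, table.ColumnsSeq())
		for name, index := range table.Indexes {
			fmt.Println("  index:", name, index.Cols)
		}
	}
}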
} // dumpTables dump database all table structs and data to w with specify db type -func (engine *Engine) dumpTables(tables []*core.Table, w io.Writer, tp ...core.DbType) error { - var dialect core.Dialect - var distDBName string +func (engine *Engine) dumpTables(tables []*schemas.Table, w io.Writer, tp ...schemas.DBType) error { + var dstDialect dialects.Dialect if len(tp) == 0 { - dialect = engine.dialect - distDBName = string(engine.dialect.DBType()) + dstDialect = engine.dialect } else { - dialect = core.QueryDialect(tp[0]) - if dialect == nil { + dstDialect = dialects.QueryDialect(tp[0]) + if dstDialect == nil { return errors.New("Unsupported database type") } - dialect.Init(nil, engine.dialect.URI(), "", "") - distDBName = string(tp[0]) + + uri := engine.dialect.URI() + destURI := *uri + dstDialect.Init(nil, &destURI) } - _, err := io.WriteString(w, fmt.Sprintf("/*Generated by xorm v%s %s, from %s to %s*/\n\n", - Version, time.Now().In(engine.TZLocation).Format("2006-01-02 15:04:05"), engine.dialect.DBType(), strings.ToUpper(distDBName))) + _, err := io.WriteString(w, fmt.Sprintf("/*Generated by xorm %s, from %s to %s*/\n\n", + time.Now().In(engine.TZLocation).Format("2006-01-02 15:04:05"), engine.dialect.URI().DBType, dstDialect.URI().DBType)) if err != nil { return err } for i, table := range tables { + tableName := table.Name + if dstDialect.URI().Schema != "" { + tableName = fmt.Sprintf("%s.%s", dstDialect.URI().Schema, table.Name) + } + originalTableName := table.Name + if engine.dialect.URI().Schema != "" { + originalTableName = fmt.Sprintf("%s.%s", engine.dialect.URI().Schema, table.Name) + } if i > 0 { _, err = io.WriteString(w, "\n") if err != nil { return err } } - _, err = io.WriteString(w, dialect.CreateTableSql(table, "", table.StoreEngine, "")+";\n") - if err != nil { - return err + sqls, _ := dstDialect.CreateTableSQL(table, tableName) + for _, s := range sqls { + _, err = io.WriteString(w, s+";\n") + if err != nil { + return err + } } + if len(table.PKColumns()) > 0 && dstDialect.URI().DBType == schemas.MSSQL { + fmt.Fprintf(w, "SET IDENTITY_INSERT [%s] ON;\n", table.Name) + } + for _, index := range table.Indexes { - _, err = io.WriteString(w, dialect.CreateIndexSql(table.Name, index)+";\n") + _, err = io.WriteString(w, dstDialect.CreateIndexSQL(table.Name, index)+";\n") if err != nil { return err } } cols := table.ColumnsSeq() - colNames := engine.dialect.Quote(strings.Join(cols, engine.dialect.Quote(", "))) - destColNames := dialect.Quote(strings.Join(cols, dialect.Quote(", "))) + colNames := engine.dialect.Quoter().Join(cols, ", ") + destColNames := dstDialect.Quoter().Join(cols, ", ") - rows, err := engine.DB().Query("SELECT " + colNames + " FROM " + engine.Quote(table.Name)) + rows, err := engine.DB().QueryContext(engine.defaultContext, "SELECT "+colNames+" FROM "+engine.Quote(originalTableName)) if err != nil { return err } @@ -530,7 +420,7 @@ func (engine *Engine) dumpTables(tables []*core.Table, w io.Writer, tp ...core.D return err } - _, err = io.WriteString(w, "INSERT INTO "+dialect.Quote(table.Name)+" ("+destColNames+") VALUES (") + _, err = io.WriteString(w, "INSERT INTO "+dstDialect.Quoter().Quote(tableName)+" ("+destColNames+") VALUES (") if err != nil { return err } @@ -553,26 +443,26 @@ func (engine *Engine) dumpTables(tables []*core.Table, w io.Writer, tp ...core.D } } else if col.SQLType.IsBlob() { if reflect.TypeOf(d).Kind() == reflect.Slice { - temp += fmt.Sprintf(", %s", dialect.FormatBytes(d.([]byte))) + temp += fmt.Sprintf(", %s", 
dstDialect.FormatBytes(d.([]byte))) } else if reflect.TypeOf(d).Kind() == reflect.String { temp += fmt.Sprintf(", '%s'", d.(string)) } } else if col.SQLType.IsNumeric() { switch reflect.TypeOf(d).Kind() { case reflect.Slice: - if col.SQLType.Name == core.Bool { + if col.SQLType.Name == schemas.Bool { temp += fmt.Sprintf(", %v", strconv.FormatBool(d.([]byte)[0] != byte('0'))) } else { temp += fmt.Sprintf(", %s", string(d.([]byte))) } case reflect.Int16, reflect.Int8, reflect.Int32, reflect.Int64, reflect.Int: - if col.SQLType.Name == core.Bool { + if col.SQLType.Name == schemas.Bool { temp += fmt.Sprintf(", %v", strconv.FormatBool(reflect.ValueOf(d).Int() > 0)) } else { temp += fmt.Sprintf(", %v", d) } case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - if col.SQLType.Name == core.Bool { + if col.SQLType.Name == schemas.Bool { temp += fmt.Sprintf(", %v", strconv.FormatBool(reflect.ValueOf(d).Uint() > 0)) } else { temp += fmt.Sprintf(", %v", d) @@ -600,8 +490,8 @@ func (engine *Engine) dumpTables(tables []*core.Table, w io.Writer, tp ...core.D } // FIXME: Hack for postgres - if string(dialect.DBType()) == core.POSTGRES && table.AutoIncrColumn() != nil { - _, err = io.WriteString(w, "SELECT setval('"+table.Name+"_id_seq', COALESCE((SELECT MAX("+table.AutoIncrColumn().Name+") + 1 FROM "+dialect.Quote(table.Name)+"), 1), false);\n") + if dstDialect.URI().DBType == schemas.POSTGRES && table.AutoIncrColumn() != nil { + _, err = io.WriteString(w, "SELECT setval('"+tableName+"_id_seq', COALESCE((SELECT MAX("+table.AutoIncrColumn().Name+") + 1 FROM "+dstDialect.Quoter().Quote(tableName)+"), 1), false);\n") if err != nil { return err } @@ -624,13 +514,6 @@ func (engine *Engine) Where(query interface{}, args ...interface{}) *Session { return session.Where(query, args...) } -// Id will be deprecated, please use ID instead -func (engine *Engine) Id(id interface{}) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Id(id) -} - // ID method provoide a condition as (id) = ? 
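Editor's note: the dump rewrite above resolves a destination dialect, prefixes schema-qualified table names, and emits SET IDENTITY_INSERT for MSSQL targets. From the caller's side the only visible change is that the variadic target type is now schemas.DBType; a hedged sketch with placeholder DSN and file names:

package main

import (
	_ "github.com/mattn/go-sqlite3"
	"xorm.io/xorm"
	"xorm.io/xorm/schemas"
)

func main() {
	engine, err := xorm.NewEngine("sqlite3", "./gitea.db") // placeholder DSN
	if err != nil {
		panic(err)
	}

	// Dump using the engine's own dialect.
	if err := engine.DumpAllToFile("dump-sqlite.sql"); err != nil {
		panic(err)
	}
	// Dump translated for a PostgreSQL target (types, quoting, setval fix-up).
	if err := engine.DumpAllToFile("dump-postgres.sql", schemas.POSTGRES); err != nil {
		panic(err)
	}
}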
func (engine *Engine) ID(id interface{}) *Session { session := engine.NewSession() @@ -838,46 +721,9 @@ func (engine *Engine) Having(conditions string) *Session { return session.Having(conditions) } -// UnMapType removes the datbase mapper of a type -func (engine *Engine) UnMapType(t reflect.Type) { - engine.mutex.Lock() - defer engine.mutex.Unlock() - delete(engine.Tables, t) -} - -func (engine *Engine) autoMapType(v reflect.Value) (*core.Table, error) { - t := v.Type() - engine.mutex.Lock() - defer engine.mutex.Unlock() - table, ok := engine.Tables[t] - if !ok { - var err error - table, err = engine.mapType(v) - if err != nil { - return nil, err - } - - engine.Tables[t] = table - if engine.Cacher != nil { - if v.CanAddr() { - engine.GobRegister(v.Addr().Interface()) - } else { - engine.GobRegister(v.Interface()) - } - } - } - return table, nil -} - -// GobRegister register one struct to gob for cache use -func (engine *Engine) GobRegister(v interface{}) *Engine { - gob.Register(v) - return engine -} - // Table table struct type Table struct { - *core.Table + *schemas.Table Name string } @@ -887,222 +733,9 @@ func (t *Table) IsValid() bool { } // TableInfo get table info according to bean's content -func (engine *Engine) TableInfo(bean interface{}) *Table { - v := rValue(bean) - tb, err := engine.autoMapType(v) - if err != nil { - engine.logger.Error(err) - } - return &Table{tb, engine.TableName(bean)} -} - -func addIndex(indexName string, table *core.Table, col *core.Column, indexType int) { - if index, ok := table.Indexes[indexName]; ok { - index.AddColumn(col.Name) - col.Indexes[index.Name] = indexType - } else { - index := core.NewIndex(indexName, indexType) - index.AddColumn(col.Name) - table.AddIndex(index) - col.Indexes[index.Name] = indexType - } -} - -// TableName table name interface to define customerize table name -type TableName interface { - TableName() string -} - -var ( - tpTableName = reflect.TypeOf((*TableName)(nil)).Elem() -) - -func (engine *Engine) mapType(v reflect.Value) (*core.Table, error) { - t := v.Type() - table := core.NewEmptyTable() - table.Type = t - table.Name = engine.tbNameForMap(v) - - var idFieldColName string - var hasCacheTag, hasNoCacheTag bool - - for i := 0; i < t.NumField(); i++ { - tag := t.Field(i).Tag - - ormTagStr := tag.Get(engine.TagIdentifier) - var col *core.Column - fieldValue := v.Field(i) - fieldType := fieldValue.Type() - - if ormTagStr != "" { - col = &core.Column{ - FieldName: t.Field(i).Name, - Nullable: true, - IsPrimaryKey: false, - IsAutoIncrement: false, - MapType: core.TWOSIDES, - Indexes: make(map[string]int), - DefaultIsEmpty: true, - } - tags := splitTag(ormTagStr) - - if len(tags) > 0 { - if tags[0] == "-" { - continue - } - - var ctx = tagContext{ - table: table, - col: col, - fieldValue: fieldValue, - indexNames: make(map[string]int), - engine: engine, - } - - if strings.HasPrefix(strings.ToUpper(tags[0]), "EXTENDS") { - pStart := strings.Index(tags[0], "(") - if pStart > -1 && strings.HasSuffix(tags[0], ")") { - var tagPrefix = strings.TrimFunc(tags[0][pStart+1:len(tags[0])-1], func(r rune) bool { - return r == '\'' || r == '"' - }) - - ctx.params = []string{tagPrefix} - } - - if err := ExtendsTagHandler(&ctx); err != nil { - return nil, err - } - continue - } - - for j, key := range tags { - if ctx.ignoreNext { - ctx.ignoreNext = false - continue - } - - k := strings.ToUpper(key) - ctx.tagName = k - ctx.params = []string{} - - pStart := strings.Index(k, "(") - if pStart == 0 { - return nil, errors.New("( could not be 
the first charactor") - } - if pStart > -1 { - if !strings.HasSuffix(k, ")") { - return nil, fmt.Errorf("field %s tag %s cannot match ) charactor", col.FieldName, key) - } - - ctx.tagName = k[:pStart] - ctx.params = strings.Split(key[pStart+1:len(k)-1], ",") - } - - if j > 0 { - ctx.preTag = strings.ToUpper(tags[j-1]) - } - if j < len(tags)-1 { - ctx.nextTag = tags[j+1] - } else { - ctx.nextTag = "" - } - - if h, ok := engine.tagHandlers[ctx.tagName]; ok { - if err := h(&ctx); err != nil { - return nil, err - } - } else { - if strings.HasPrefix(key, "'") && strings.HasSuffix(key, "'") { - col.Name = key[1 : len(key)-1] - } else { - col.Name = key - } - } - - if ctx.hasCacheTag { - hasCacheTag = true - } - if ctx.hasNoCacheTag { - hasNoCacheTag = true - } - } - - if col.SQLType.Name == "" { - col.SQLType = core.Type2SQLType(fieldType) - } - engine.dialect.SqlType(col) - if col.Length == 0 { - col.Length = col.SQLType.DefaultLength - } - if col.Length2 == 0 { - col.Length2 = col.SQLType.DefaultLength2 - } - if col.Name == "" { - col.Name = engine.ColumnMapper.Obj2Table(t.Field(i).Name) - } - - if ctx.isUnique { - ctx.indexNames[col.Name] = core.UniqueType - } else if ctx.isIndex { - ctx.indexNames[col.Name] = core.IndexType - } - - for indexName, indexType := range ctx.indexNames { - addIndex(indexName, table, col, indexType) - } - } - } else { - var sqlType core.SQLType - if fieldValue.CanAddr() { - if _, ok := fieldValue.Addr().Interface().(core.Conversion); ok { - sqlType = core.SQLType{Name: core.Text} - } - } - if _, ok := fieldValue.Interface().(core.Conversion); ok { - sqlType = core.SQLType{Name: core.Text} - } else { - sqlType = core.Type2SQLType(fieldType) - } - col = core.NewColumn(engine.ColumnMapper.Obj2Table(t.Field(i).Name), - t.Field(i).Name, sqlType, sqlType.DefaultLength, - sqlType.DefaultLength2, true) - - if fieldType.Kind() == reflect.Int64 && (strings.ToUpper(col.FieldName) == "ID" || strings.HasSuffix(strings.ToUpper(col.FieldName), ".ID")) { - idFieldColName = col.Name - } - } - if col.IsAutoIncrement { - col.Nullable = false - } - - table.AddColumn(col) - - } // end for - - if idFieldColName != "" && len(table.PrimaryKeys) == 0 { - col := table.GetColumn(idFieldColName) - col.IsPrimaryKey = true - col.IsAutoIncrement = true - col.Nullable = false - table.PrimaryKeys = append(table.PrimaryKeys, col.Name) - table.AutoIncrement = col.Name - } - - if hasCacheTag { - if engine.Cacher != nil { // !nash! use engine's cacher if provided - engine.logger.Info("enable cache on table:", table.Name) - engine.setCacher(table.Name, engine.Cacher) - } else { - engine.logger.Info("enable LRU cache on table:", table.Name) - engine.setCacher(table.Name, NewLRUCacher2(NewMemoryStore(), time.Hour, 10000)) - } - } - if hasNoCacheTag { - engine.logger.Info("disable cache on table:", table.Name) - engine.setCacher(table.Name, nil) - } - - return table, nil +func (engine *Engine) TableInfo(bean interface{}) (*schemas.Table, error) { + v := utils.ReflectValue(bean) + return engine.tagParser.ParseWithCache(v) } // IsTableEmpty if a table has any reocrd @@ -1119,38 +752,24 @@ func (engine *Engine) IsTableExist(beanOrTableName interface{}) (bool, error) { return session.IsTableExist(beanOrTableName) } -// IdOf get id from one struct -// -// Deprecated: use IDOf instead. 
-func (engine *Engine) IdOf(bean interface{}) core.PK { - return engine.IDOf(bean) -} - // IDOf get id from one struct -func (engine *Engine) IDOf(bean interface{}) core.PK { - return engine.IdOfV(reflect.ValueOf(bean)) +func (engine *Engine) IDOf(bean interface{}) (schemas.PK, error) { + return engine.IDOfV(reflect.ValueOf(bean)) } -// IdOfV get id from one value of struct -// -// Deprecated: use IDOfV instead. -func (engine *Engine) IdOfV(rv reflect.Value) core.PK { - return engine.IDOfV(rv) +// TableName returns table name with schema prefix if has +func (engine *Engine) TableName(bean interface{}, includeSchema ...bool) string { + return dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean, includeSchema...) } // IDOfV get id from one value of struct -func (engine *Engine) IDOfV(rv reflect.Value) core.PK { - pk, err := engine.idOfV(rv) - if err != nil { - engine.logger.Error(err) - return nil - } - return pk +func (engine *Engine) IDOfV(rv reflect.Value) (schemas.PK, error) { + return engine.idOfV(rv) } -func (engine *Engine) idOfV(rv reflect.Value) (core.PK, error) { +func (engine *Engine) idOfV(rv reflect.Value) (schemas.PK, error) { v := reflect.Indirect(rv) - table, err := engine.autoMapType(v) + table, err := engine.tagParser.ParseWithCache(v) if err != nil { return nil, err } @@ -1191,10 +810,10 @@ func (engine *Engine) idOfV(rv reflect.Value) (core.PK, error) { return nil, err } } - return core.PK(pk), nil + return schemas.PK(pk), nil } -func (engine *Engine) idTypeAssertion(col *core.Column, sid string) (interface{}, error) { +func (engine *Engine) idTypeAssertion(col *schemas.Column, sid string) (interface{}, error) { if col.SQLType.IsNumeric() { n, err := strconv.ParseInt(sid, 10, 64) if err != nil { @@ -1224,8 +843,8 @@ func (engine *Engine) CreateUniques(bean interface{}) error { // ClearCacheBean if enabled cache, clear the cache bean func (engine *Engine) ClearCacheBean(bean interface{}, id string) error { - tableName := engine.TableName(bean) - cacher := engine.getCacher(tableName) + tableName := dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean) + cacher := engine.GetCacher(tableName) if cacher != nil { cacher.ClearIds(tableName) cacher.DelBean(tableName, id) @@ -1236,8 +855,8 @@ func (engine *Engine) ClearCacheBean(bean interface{}, id string) error { // ClearCache if enabled cache, clear some tables' cache func (engine *Engine) ClearCache(beans ...interface{}) error { for _, bean := range beans { - tableName := engine.TableName(bean) - cacher := engine.getCacher(tableName) + tableName := dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean) + cacher := engine.GetCacher(tableName) if cacher != nil { cacher.ClearIds(tableName) cacher.ClearBeans(tableName) @@ -1246,6 +865,11 @@ func (engine *Engine) ClearCache(beans ...interface{}) error { return nil } +// UnMapType remove table from tables cache +func (engine *Engine) UnMapType(t reflect.Type) { + engine.tagParser.ClearCacheTable(t) +} + // Sync the new struct changes to database, this method will automatically add // table, column, index, unique. but will not delete or change anything. // If you change some field, you should change the database manually. 
@@ -1254,9 +878,9 @@ func (engine *Engine) Sync(beans ...interface{}) error { defer session.Close() for _, bean := range beans { - v := rValue(bean) - tableNameNoSchema := engine.TableName(bean) - table, err := engine.autoMapType(v) + v := utils.ReflectValue(bean) + tableNameNoSchema := dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean) + table, err := engine.tagParser.ParseWithCache(v) if err != nil { return err } @@ -1287,12 +911,12 @@ func (engine *Engine) Sync(beans ...interface{}) error { } } else { for _, col := range table.Columns() { - isExist, err := engine.dialect.IsColumnExist(tableNameNoSchema, col.Name) + isExist, err := engine.dialect.IsColumnExist(session.ctx, tableNameNoSchema, col.Name) if err != nil { return err } if !isExist { - if err := session.statement.setRefBean(bean); err != nil { + if err := session.statement.SetRefBean(bean); err != nil { return err } err = session.addColumn(col.Name) @@ -1303,16 +927,16 @@ func (engine *Engine) Sync(beans ...interface{}) error { } for name, index := range table.Indexes { - if err := session.statement.setRefBean(bean); err != nil { + if err := session.statement.SetRefBean(bean); err != nil { return err } - if index.Type == core.UniqueType { + if index.Type == schemas.UniqueType { isExist, err := session.isIndexExist2(tableNameNoSchema, index.Cols, true) if err != nil { return err } if !isExist { - if err := session.statement.setRefBean(bean); err != nil { + if err := session.statement.SetRefBean(bean); err != nil { return err } @@ -1321,13 +945,13 @@ func (engine *Engine) Sync(beans ...interface{}) error { return err } } - } else if index.Type == core.IndexType { + } else if index.Type == schemas.IndexType { isExist, err := session.isIndexExist2(tableNameNoSchema, index.Cols, false) if err != nil { return err } if !isExist { - if err := session.statement.setRefBean(bean); err != nil { + if err := session.statement.SetRefBean(bean); err != nil { return err } @@ -1543,108 +1167,36 @@ func (engine *Engine) SumsInt(bean interface{}, colNames ...string) ([]int64, er // ImportFile SQL DDL file func (engine *Engine) ImportFile(ddlPath string) ([]sql.Result, error) { - file, err := os.Open(ddlPath) - if err != nil { - return nil, err - } - defer file.Close() - return engine.Import(file) + session := engine.NewSession() + defer session.Close() + return session.ImportFile(ddlPath) } // Import SQL DDL from io.Reader func (engine *Engine) Import(r io.Reader) ([]sql.Result, error) { - var results []sql.Result - var lastError error - scanner := bufio.NewScanner(r) - - semiColSpliter := func(data []byte, atEOF bool) (advance int, token []byte, err error) { - if atEOF && len(data) == 0 { - return 0, nil, nil - } - if i := bytes.IndexByte(data, ';'); i >= 0 { - return i + 1, data[0:i], nil - } - // If we're at EOF, we have a final, non-terminated line. Return it. - if atEOF { - return len(data), data, nil - } - // Request more data. 
- return 0, nil, nil - } - - scanner.Split(semiColSpliter) - - for scanner.Scan() { - query := strings.Trim(scanner.Text(), " \t\n\r") - if len(query) > 0 { - engine.logSQL(query) - result, err := engine.DB().Exec(query) - results = append(results, result) - if err != nil { - return nil, err - } - } - } - - return results, lastError + session := engine.NewSession() + defer session.Close() + return session.Import(r) } // nowTime return current time -func (engine *Engine) nowTime(col *core.Column) (interface{}, time.Time) { +func (engine *Engine) nowTime(col *schemas.Column) (interface{}, time.Time) { t := time.Now() var tz = engine.DatabaseTZ if !col.DisableTimeZone && col.TimeZone != nil { tz = col.TimeZone } - return engine.formatTime(col.SQLType.Name, t.In(tz)), t.In(engine.TZLocation) -} - -func (engine *Engine) formatColTime(col *core.Column, t time.Time) (v interface{}) { - if t.IsZero() { - if col.Nullable { - return nil - } - return "" - } - - if col.TimeZone != nil { - return engine.formatTime(col.SQLType.Name, t.In(col.TimeZone)) - } - return engine.formatTime(col.SQLType.Name, t.In(engine.DatabaseTZ)) -} - -// formatTime format time as column type -func (engine *Engine) formatTime(sqlTypeName string, t time.Time) (v interface{}) { - switch sqlTypeName { - case core.Time: - s := t.Format("2006-01-02 15:04:05") // time.RFC3339 - v = s[11:19] - case core.Date: - v = t.Format("2006-01-02") - case core.DateTime, core.TimeStamp: - v = t.Format("2006-01-02 15:04:05") - case core.TimeStampz: - if engine.dialect.DBType() == core.MSSQL { - v = t.Format("2006-01-02T15:04:05.9999999Z07:00") - } else { - v = t.Format(time.RFC3339Nano) - } - case core.BigInt, core.Int: - v = t.Unix() - default: - v = t - } - return + return dialects.FormatTime(engine.dialect, col.SQLType.Name, t.In(tz)), t.In(engine.TZLocation) } // GetColumnMapper returns the column name mapper -func (engine *Engine) GetColumnMapper() core.IMapper { - return engine.ColumnMapper +func (engine *Engine) GetColumnMapper() names.Mapper { + return engine.tagParser.GetColumnMapper() } // GetTableMapper returns the table name mapper -func (engine *Engine) GetTableMapper() core.IMapper { - return engine.TableMapper +func (engine *Engine) GetTableMapper() names.Mapper { + return engine.tagParser.GetTableMapper() } // GetTZLocation returns time zone of the application @@ -1669,7 +1221,7 @@ func (engine *Engine) SetTZDatabase(tz *time.Location) { // SetSchema sets the schema of database func (engine *Engine) SetSchema(schema string) { - engine.dialect.URI().Schema = schema + engine.dialect.URI().SetSchema(schema) } // Unscoped always disable struct tag "deleted" @@ -1678,3 +1230,47 @@ func (engine *Engine) Unscoped() *Session { session.isAutoClose = true return session.Unscoped() } + +func (engine *Engine) tbNameWithSchema(v string) string { + return dialects.TableNameWithSchema(engine.dialect, v) +} + +// Context creates a session with the context +func (engine *Engine) Context(ctx context.Context) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Context(ctx) +} + +// SetDefaultContext set the default context +func (engine *Engine) SetDefaultContext(ctx context.Context) { + engine.defaultContext = ctx +} + +// PingContext tests if database is alive +func (engine *Engine) PingContext(ctx context.Context) error { + session := engine.NewSession() + defer session.Close() + return session.PingContext(ctx) +} + +// Transaction Execute sql wrapped in a transaction(abbr as tx), tx will automatic commit 
if no errors occurred +func (engine *Engine) Transaction(f func(*Session) (interface{}, error)) (interface{}, error) { + session := engine.NewSession() + defer session.Close() + + if err := session.Begin(); err != nil { + return nil, err + } + + result, err := f(session) + if err != nil { + return nil, err + } + + if err := session.Commit(); err != nil { + return nil, err + } + + return result, nil +} diff --git a/vendor/xorm.io/xorm/engine_cond.go b/vendor/xorm.io/xorm/engine_cond.go deleted file mode 100644 index 702ac8043..000000000 --- a/vendor/xorm.io/xorm/engine_cond.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2017 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -import ( - "database/sql/driver" - "fmt" - "reflect" - "strings" - "time" - - "xorm.io/builder" - "xorm.io/core" -) - -func (engine *Engine) buildConds(table *core.Table, bean interface{}, - includeVersion bool, includeUpdated bool, includeNil bool, - includeAutoIncr bool, allUseBool bool, useAllCols bool, unscoped bool, - mustColumnMap map[string]bool, tableName, aliasName string, addedTableName bool) (builder.Cond, error) { - var conds []builder.Cond - for _, col := range table.Columns() { - if !includeVersion && col.IsVersion { - continue - } - if !includeUpdated && col.IsUpdated { - continue - } - if !includeAutoIncr && col.IsAutoIncrement { - continue - } - - if engine.dialect.DBType() == core.MSSQL && (col.SQLType.Name == core.Text || col.SQLType.IsBlob() || col.SQLType.Name == core.TimeStampz) { - continue - } - if col.SQLType.IsJson() { - continue - } - - var colName string - if addedTableName { - var nm = tableName - if len(aliasName) > 0 { - nm = aliasName - } - colName = engine.Quote(nm) + "." 
+ engine.Quote(col.Name) - } else { - colName = engine.Quote(col.Name) - } - - fieldValuePtr, err := col.ValueOf(bean) - if err != nil { - if !strings.Contains(err.Error(), "is not valid") { - engine.logger.Warn(err) - } - continue - } - - if col.IsDeleted && !unscoped { // tag "deleted" is enabled - conds = append(conds, engine.CondDeleted(colName)) - } - - fieldValue := *fieldValuePtr - if fieldValue.Interface() == nil { - continue - } - - fieldType := reflect.TypeOf(fieldValue.Interface()) - requiredField := useAllCols - - if b, ok := getFlagForColumn(mustColumnMap, col); ok { - if b { - requiredField = true - } else { - continue - } - } - - if fieldType.Kind() == reflect.Ptr { - if fieldValue.IsNil() { - if includeNil { - conds = append(conds, builder.Eq{colName: nil}) - } - continue - } else if !fieldValue.IsValid() { - continue - } else { - // dereference ptr type to instance type - fieldValue = fieldValue.Elem() - fieldType = reflect.TypeOf(fieldValue.Interface()) - requiredField = true - } - } - - var val interface{} - switch fieldType.Kind() { - case reflect.Bool: - if allUseBool || requiredField { - val = fieldValue.Interface() - } else { - // if a bool in a struct, it will not be as a condition because it default is false, - // please use Where() instead - continue - } - case reflect.String: - if !requiredField && fieldValue.String() == "" { - continue - } - // for MyString, should convert to string or panic - if fieldType.String() != reflect.String.String() { - val = fieldValue.String() - } else { - val = fieldValue.Interface() - } - case reflect.Int8, reflect.Int16, reflect.Int, reflect.Int32, reflect.Int64: - if !requiredField && fieldValue.Int() == 0 { - continue - } - val = fieldValue.Interface() - case reflect.Float32, reflect.Float64: - if !requiredField && fieldValue.Float() == 0.0 { - continue - } - val = fieldValue.Interface() - case reflect.Uint8, reflect.Uint16, reflect.Uint, reflect.Uint32, reflect.Uint64: - if !requiredField && fieldValue.Uint() == 0 { - continue - } - t := int64(fieldValue.Uint()) - val = reflect.ValueOf(&t).Interface() - case reflect.Struct: - if fieldType.ConvertibleTo(core.TimeType) { - t := fieldValue.Convert(core.TimeType).Interface().(time.Time) - if !requiredField && (t.IsZero() || !fieldValue.IsValid()) { - continue - } - val = engine.formatColTime(col, t) - } else if _, ok := reflect.New(fieldType).Interface().(core.Conversion); ok { - continue - } else if valNul, ok := fieldValue.Interface().(driver.Valuer); ok { - val, _ = valNul.Value() - if val == nil { - continue - } - } else { - if col.SQLType.IsJson() { - if col.SQLType.IsText() { - bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - engine.logger.Error(err) - continue - } - val = string(bytes) - } else if col.SQLType.IsBlob() { - var bytes []byte - var err error - bytes, err = DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - engine.logger.Error(err) - continue - } - val = bytes - } - } else { - engine.autoMapType(fieldValue) - if table, ok := engine.Tables[fieldValue.Type()]; ok { - if len(table.PrimaryKeys) == 1 { - pkField := reflect.Indirect(fieldValue).FieldByName(table.PKColumns()[0].FieldName) - // fix non-int pk issues - //if pkField.Int() != 0 { - if pkField.IsValid() && !isZero(pkField.Interface()) { - val = pkField.Interface() - } else { - continue - } - } else { - //TODO: how to handler? 
- return nil, fmt.Errorf("not supported %v as %v", fieldValue.Interface(), table.PrimaryKeys) - } - } else { - val = fieldValue.Interface() - } - } - } - case reflect.Array: - continue - case reflect.Slice, reflect.Map: - if fieldValue == reflect.Zero(fieldType) { - continue - } - if fieldValue.IsNil() || !fieldValue.IsValid() || fieldValue.Len() == 0 { - continue - } - - if col.SQLType.IsText() { - bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - engine.logger.Error(err) - continue - } - val = string(bytes) - } else if col.SQLType.IsBlob() { - var bytes []byte - var err error - if (fieldType.Kind() == reflect.Array || fieldType.Kind() == reflect.Slice) && - fieldType.Elem().Kind() == reflect.Uint8 { - if fieldValue.Len() > 0 { - val = fieldValue.Bytes() - } else { - continue - } - } else { - bytes, err = DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - engine.logger.Error(err) - continue - } - val = bytes - } - } else { - continue - } - default: - val = fieldValue.Interface() - } - - conds = append(conds, builder.Eq{colName: val}) - } - - return builder.And(conds...), nil -} diff --git a/vendor/xorm.io/xorm/engine_context.go b/vendor/xorm.io/xorm/engine_context.go deleted file mode 100644 index c6cbb76c1..000000000 --- a/vendor/xorm.io/xorm/engine_context.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.8 - -package xorm - -import "context" - -// Context creates a session with the context -func (engine *Engine) Context(ctx context.Context) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Context(ctx) -} - -// SetDefaultContext set the default context -func (engine *Engine) SetDefaultContext(ctx context.Context) { - engine.defaultContext = ctx -} - -// PingContext tests if database is alive -func (engine *Engine) PingContext(ctx context.Context) error { - session := engine.NewSession() - defer session.Close() - return session.PingContext(ctx) -} diff --git a/vendor/xorm.io/xorm/engine_group.go b/vendor/xorm.io/xorm/engine_group.go index 42d49eca9..868d4dc96 100644 --- a/vendor/xorm.io/xorm/engine_group.go +++ b/vendor/xorm.io/xorm/engine_group.go @@ -8,7 +8,10 @@ import ( "context" "time" - "xorm.io/core" + "xorm.io/xorm/caches" + "xorm.io/xorm/dialects" + "xorm.io/xorm/log" + "xorm.io/xorm/names" ) // EngineGroup defines an engine group @@ -109,10 +112,10 @@ func (eg *EngineGroup) Ping() error { } // SetColumnMapper set the column name mapping rule -func (eg *EngineGroup) SetColumnMapper(mapper core.IMapper) { - eg.Engine.ColumnMapper = mapper +func (eg *EngineGroup) SetColumnMapper(mapper names.Mapper) { + eg.Engine.SetColumnMapper(mapper) for i := 0; i < len(eg.slaves); i++ { - eg.slaves[i].ColumnMapper = mapper + eg.slaves[i].SetColumnMapper(mapper) } } @@ -125,7 +128,7 @@ func (eg *EngineGroup) SetConnMaxLifetime(d time.Duration) { } // SetDefaultCacher set the default cacher -func (eg *EngineGroup) SetDefaultCacher(cacher core.Cacher) { +func (eg *EngineGroup) SetDefaultCacher(cacher caches.Cacher) { eg.Engine.SetDefaultCacher(cacher) for i := 0; i < len(eg.slaves); i++ { eg.slaves[i].SetDefaultCacher(cacher) @@ -133,7 +136,7 @@ func (eg *EngineGroup) SetDefaultCacher(cacher core.Cacher) { } // SetLogger set the new logger -func (eg *EngineGroup) SetLogger(logger core.ILogger) { +func (eg *EngineGroup) SetLogger(logger 
interface{}) { eg.Engine.SetLogger(logger) for i := 0; i < len(eg.slaves); i++ { eg.slaves[i].SetLogger(logger) @@ -141,7 +144,7 @@ func (eg *EngineGroup) SetLogger(logger core.ILogger) { } // SetLogLevel sets the logger level -func (eg *EngineGroup) SetLogLevel(level core.LogLevel) { +func (eg *EngineGroup) SetLogLevel(level log.LogLevel) { eg.Engine.SetLogLevel(level) for i := 0; i < len(eg.slaves); i++ { eg.slaves[i].SetLogLevel(level) @@ -149,7 +152,7 @@ func (eg *EngineGroup) SetLogLevel(level core.LogLevel) { } // SetMapper set the name mapping rules -func (eg *EngineGroup) SetMapper(mapper core.IMapper) { +func (eg *EngineGroup) SetMapper(mapper names.Mapper) { eg.Engine.SetMapper(mapper) for i := 0; i < len(eg.slaves); i++ { eg.slaves[i].SetMapper(mapper) @@ -158,17 +161,17 @@ func (eg *EngineGroup) SetMapper(mapper core.IMapper) { // SetMaxIdleConns set the max idle connections on pool, default is 2 func (eg *EngineGroup) SetMaxIdleConns(conns int) { - eg.Engine.db.SetMaxIdleConns(conns) + eg.Engine.dialect.DB().SetMaxIdleConns(conns) for i := 0; i < len(eg.slaves); i++ { - eg.slaves[i].db.SetMaxIdleConns(conns) + eg.slaves[i].dialect.DB().SetMaxIdleConns(conns) } } // SetMaxOpenConns is only available for go 1.2+ func (eg *EngineGroup) SetMaxOpenConns(conns int) { - eg.Engine.db.SetMaxOpenConns(conns) + eg.Engine.dialect.DB().SetMaxOpenConns(conns) for i := 0; i < len(eg.slaves); i++ { - eg.slaves[i].db.SetMaxOpenConns(conns) + eg.slaves[i].dialect.DB().SetMaxOpenConns(conns) } } @@ -178,19 +181,18 @@ func (eg *EngineGroup) SetPolicy(policy GroupPolicy) *EngineGroup { return eg } -// SetTableMapper set the table name mapping rule -func (eg *EngineGroup) SetTableMapper(mapper core.IMapper) { - eg.Engine.TableMapper = mapper +func (eg *EngineGroup) SetQuotePolicy(quotePolicy dialects.QuotePolicy) { + eg.Engine.SetQuotePolicy(quotePolicy) for i := 0; i < len(eg.slaves); i++ { - eg.slaves[i].TableMapper = mapper + eg.slaves[i].SetQuotePolicy(quotePolicy) } } -// ShowExecTime show SQL statement and execute time or not on logger if log level is great than INFO -func (eg *EngineGroup) ShowExecTime(show ...bool) { - eg.Engine.ShowExecTime(show...) +// SetTableMapper set the table name mapping rule +func (eg *EngineGroup) SetTableMapper(mapper names.Mapper) { + eg.Engine.SetTableMapper(mapper) for i := 0; i < len(eg.slaves); i++ { - eg.slaves[i].ShowExecTime(show...) + eg.slaves[i].SetTableMapper(mapper) } } diff --git a/vendor/xorm.io/xorm/engine_table.go b/vendor/xorm.io/xorm/engine_table.go deleted file mode 100644 index eb5aa850a..000000000 --- a/vendor/xorm.io/xorm/engine_table.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2018 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -import ( - "fmt" - "reflect" - "strings" - - "xorm.io/core" -) - -// tbNameWithSchema will automatically add schema prefix on table name -func (engine *Engine) tbNameWithSchema(v string) string { - // Add schema name as prefix of table name. - // Only for postgres database. - if engine.dialect.DBType() == core.POSTGRES && - engine.dialect.URI().Schema != "" && - engine.dialect.URI().Schema != postgresPublicSchema && - strings.Index(v, ".") == -1 { - return engine.dialect.URI().Schema + "." 
+ v - } - return v -} - -// TableName returns table name with schema prefix if has -func (engine *Engine) TableName(bean interface{}, includeSchema ...bool) string { - tbName := engine.tbNameNoSchema(bean) - if len(includeSchema) > 0 && includeSchema[0] { - tbName = engine.tbNameWithSchema(tbName) - } - - return tbName -} - -// tbName get some table's table name -func (session *Session) tbNameNoSchema(table *core.Table) string { - if len(session.statement.AltTableName) > 0 { - return session.statement.AltTableName - } - - return table.Name -} - -func (engine *Engine) tbNameForMap(v reflect.Value) string { - if v.Type().Implements(tpTableName) { - return v.Interface().(TableName).TableName() - } - if v.Kind() == reflect.Ptr { - v = v.Elem() - if v.Type().Implements(tpTableName) { - return v.Interface().(TableName).TableName() - } - } - - return engine.TableMapper.Obj2Table(v.Type().Name()) -} - -func (engine *Engine) tbNameNoSchema(tablename interface{}) string { - switch tablename.(type) { - case []string: - t := tablename.([]string) - if len(t) > 1 { - return fmt.Sprintf("%v AS %v", engine.Quote(t[0]), engine.Quote(t[1])) - } else if len(t) == 1 { - return engine.Quote(t[0]) - } - case []interface{}: - t := tablename.([]interface{}) - l := len(t) - var table string - if l > 0 { - f := t[0] - switch f.(type) { - case string: - table = f.(string) - case TableName: - table = f.(TableName).TableName() - default: - v := rValue(f) - t := v.Type() - if t.Kind() == reflect.Struct { - table = engine.tbNameForMap(v) - } else { - table = engine.Quote(fmt.Sprintf("%v", f)) - } - } - } - if l > 1 { - return fmt.Sprintf("%v AS %v", engine.Quote(table), - engine.Quote(fmt.Sprintf("%v", t[1]))) - } else if l == 1 { - return engine.Quote(table) - } - case TableName: - return tablename.(TableName).TableName() - case string: - return tablename.(string) - case reflect.Value: - v := tablename.(reflect.Value) - return engine.tbNameForMap(v) - default: - v := rValue(tablename) - t := v.Type() - if t.Kind() == reflect.Struct { - return engine.tbNameForMap(v) - } - return engine.Quote(fmt.Sprintf("%v", tablename)) - } - return "" -} diff --git a/vendor/xorm.io/xorm/error.go b/vendor/xorm.io/xorm/error.go index a67527acd..3708ce4f4 100644 --- a/vendor/xorm.io/xorm/error.go +++ b/vendor/xorm.io/xorm/error.go @@ -6,7 +6,6 @@ package xorm import ( "errors" - "fmt" ) var ( @@ -20,32 +19,6 @@ var ( ErrNotExist = errors.New("Record does not exist") // ErrCacheFailed cache failed error ErrCacheFailed = errors.New("Cache failed") - // ErrNeedDeletedCond delete needs less one condition error - ErrNeedDeletedCond = errors.New("Delete action needs at least one condition") - // ErrNotImplemented not implemented - ErrNotImplemented = errors.New("Not implemented") // ErrConditionType condition type unsupported ErrConditionType = errors.New("Unsupported condition type") - // ErrUnSupportedSQLType parameter of SQL is not supported - ErrUnSupportedSQLType = errors.New("unsupported sql type") ) - -// ErrFieldIsNotExist columns does not exist -type ErrFieldIsNotExist struct { - FieldName string - TableName string -} - -func (e ErrFieldIsNotExist) Error() string { - return fmt.Sprintf("field %s is not valid on table %s", e.FieldName, e.TableName) -} - -// ErrFieldIsNotValid is not valid -type ErrFieldIsNotValid struct { - FieldName string - TableName string -} - -func (e ErrFieldIsNotValid) Error() string { - return fmt.Sprintf("field %s is not valid on table %s", e.FieldName, e.TableName) -} diff --git a/vendor/xorm.io/xorm/go.mod 
b/vendor/xorm.io/xorm/go.mod index 6d8b58f41..ff42c0f7e 100644 --- a/vendor/xorm.io/xorm/go.mod +++ b/vendor/xorm.io/xorm/go.mod @@ -9,7 +9,8 @@ require ( github.com/lib/pq v1.0.0 github.com/mattn/go-sqlite3 v1.10.0 github.com/stretchr/testify v1.4.0 + github.com/syndtr/goleveldb v1.0.0 github.com/ziutek/mymysql v1.5.4 - xorm.io/builder v0.3.6 - xorm.io/core v0.7.2 + google.golang.org/appengine v1.6.0 // indirect + xorm.io/builder v0.3.7 ) diff --git a/vendor/xorm.io/xorm/go.sum b/vendor/xorm.io/xorm/go.sum index 2102cc5b7..33e5c456c 100644 --- a/vendor/xorm.io/xorm/go.sum +++ b/vendor/xorm.io/xorm/go.sum @@ -2,6 +2,7 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU= cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= +gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:EXuID2Zs0pAQhH8yz+DNjUbjppKQzKFAn28TMYPB6IU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= @@ -18,6 +19,7 @@ github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4/go.mod h1:zA github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -31,7 +33,9 @@ github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= @@ -42,6 +46,7 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail 
v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -60,7 +65,9 @@ github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -83,6 +90,8 @@ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -101,20 +110,24 @@ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -135,7 +148,9 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= @@ -145,5 +160,5 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= xorm.io/builder v0.3.6 h1:ha28mQ2M+TFx96Hxo+iq6tQgnkC9IZkM6D8w9sKHHF8= xorm.io/builder v0.3.6/go.mod h1:LEFAPISnRzG+zxaxj2vPicRwz67BdhFreKg8yv8/TgU= -xorm.io/core v0.7.2 h1:mEO22A2Z7a3fPaZMk6gKL/jMD80iiyNwRrX5HOv3XLw= -xorm.io/core v0.7.2/go.mod h1:jJfd0UAEzZ4t87nbQYtVjmqpIODugN6PD2D9E+dJvdM= +xorm.io/builder v0.3.7 h1:2pETdKRK+2QG4mLX4oODHEhn5Z8j1m8sXa7jfu+/SZI= +xorm.io/builder v0.3.7/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE= diff --git a/vendor/xorm.io/xorm/helpers.go b/vendor/xorm.io/xorm/helpers.go deleted file mode 100644 index a31e922c0..000000000 --- a/vendor/xorm.io/xorm/helpers.go +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright 2015 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package xorm - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - - "xorm.io/core" -) - -// str2PK convert string value to primary key value according to tp -func str2PKValue(s string, tp reflect.Type) (reflect.Value, error) { - var err error - var result interface{} - var defReturn = reflect.Zero(tp) - - switch tp.Kind() { - case reflect.Int: - result, err = strconv.Atoi(s) - if err != nil { - return defReturn, fmt.Errorf("convert %s as int: %s", s, err.Error()) - } - case reflect.Int8: - x, err := strconv.Atoi(s) - if err != nil { - return defReturn, fmt.Errorf("convert %s as int8: %s", s, err.Error()) - } - result = int8(x) - case reflect.Int16: - x, err := strconv.Atoi(s) - if err != nil { - return defReturn, fmt.Errorf("convert %s as int16: %s", s, err.Error()) - } - result = int16(x) - case reflect.Int32: - x, err := strconv.Atoi(s) - if err != nil { - return defReturn, fmt.Errorf("convert %s as int32: %s", s, err.Error()) - } - result = int32(x) - case reflect.Int64: - result, err = strconv.ParseInt(s, 10, 64) - if err != nil { - return defReturn, fmt.Errorf("convert %s as int64: %s", s, err.Error()) - } - case reflect.Uint: - x, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return defReturn, fmt.Errorf("convert %s as uint: %s", s, err.Error()) - } - result = uint(x) - case reflect.Uint8: - x, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return defReturn, fmt.Errorf("convert %s as uint8: %s", s, err.Error()) - } - result = uint8(x) - case reflect.Uint16: - x, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return defReturn, fmt.Errorf("convert %s as uint16: %s", s, err.Error()) - } - result = uint16(x) - case reflect.Uint32: - x, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return defReturn, fmt.Errorf("convert %s as uint32: %s", s, err.Error()) - } - result = uint32(x) - case reflect.Uint64: - result, err = strconv.ParseUint(s, 10, 64) - if err != nil { - return defReturn, fmt.Errorf("convert %s as uint64: %s", s, err.Error()) - } - case reflect.String: - result = s - default: - return defReturn, errors.New("unsupported convert type") - } - return reflect.ValueOf(result).Convert(tp), nil -} - -func str2PK(s string, tp reflect.Type) (interface{}, error) { - v, err := str2PKValue(s, tp) - if err != nil { - return nil, err - } - return v.Interface(), nil -} - -func splitTag(tag string) (tags []string) { - tag = strings.TrimSpace(tag) - var hasQuote = false - var lastIdx = 0 - for i, t := range tag { - if t == '\'' { - hasQuote = !hasQuote - } else if t == ' ' { - if lastIdx < i && !hasQuote { - tags = append(tags, strings.TrimSpace(tag[lastIdx:i])) - lastIdx = i + 1 - } - } - } - if lastIdx < len(tag) { - tags = append(tags, strings.TrimSpace(tag[lastIdx:])) - } - return -} - -type zeroable interface { - IsZero() bool -} - -func isZero(k interface{}) bool { - switch k.(type) { - case int: - return k.(int) == 0 - case int8: - return k.(int8) == 0 - case int16: - return k.(int16) == 0 - case int32: - return k.(int32) == 0 - case int64: - return k.(int64) == 0 - case uint: - return k.(uint) == 0 - case uint8: - return k.(uint8) == 0 - case uint16: - return k.(uint16) == 0 - case uint32: - return k.(uint32) == 0 - case uint64: - return k.(uint64) == 0 - case float32: - return k.(float32) == 0 - case float64: - return k.(float64) == 0 - case bool: - return k.(bool) == false - case string: - return k.(string) == "" - case zeroable: - return k.(zeroable).IsZero() - } - return false -} - -func isStructZero(v reflect.Value) 
bool { - if !v.IsValid() { - return true - } - - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - switch field.Kind() { - case reflect.Ptr: - field = field.Elem() - fallthrough - case reflect.Struct: - if !isStructZero(field) { - return false - } - default: - if field.CanInterface() && !isZero(field.Interface()) { - return false - } - } - } - return true -} - -func isArrayValueZero(v reflect.Value) bool { - if !v.IsValid() || v.Len() == 0 { - return true - } - - for i := 0; i < v.Len(); i++ { - if !isZero(v.Index(i).Interface()) { - return false - } - } - - return true -} - -func int64ToIntValue(id int64, tp reflect.Type) reflect.Value { - var v interface{} - kind := tp.Kind() - - if kind == reflect.Ptr { - kind = tp.Elem().Kind() - } - - switch kind { - case reflect.Int16: - temp := int16(id) - v = &temp - case reflect.Int32: - temp := int32(id) - v = &temp - case reflect.Int: - temp := int(id) - v = &temp - case reflect.Int64: - temp := id - v = &temp - case reflect.Uint16: - temp := uint16(id) - v = &temp - case reflect.Uint32: - temp := uint32(id) - v = &temp - case reflect.Uint64: - temp := uint64(id) - v = &temp - case reflect.Uint: - temp := uint(id) - v = &temp - } - - if tp.Kind() == reflect.Ptr { - return reflect.ValueOf(v).Convert(tp) - } - return reflect.ValueOf(v).Elem().Convert(tp) -} - -func int64ToInt(id int64, tp reflect.Type) interface{} { - return int64ToIntValue(id, tp).Interface() -} - -func isPKZero(pk core.PK) bool { - for _, k := range pk { - if isZero(k) { - return true - } - } - return false -} - -func indexNoCase(s, sep string) int { - return strings.Index(strings.ToLower(s), strings.ToLower(sep)) -} - -func splitNoCase(s, sep string) []string { - idx := indexNoCase(s, sep) - if idx < 0 { - return []string{s} - } - return strings.Split(s, s[idx:idx+len(sep)]) -} - -func splitNNoCase(s, sep string, n int) []string { - idx := indexNoCase(s, sep) - if idx < 0 { - return []string{s} - } - return strings.SplitN(s, s[idx:idx+len(sep)], n) -} - -func makeArray(elem string, count int) []string { - res := make([]string, count) - for i := 0; i < count; i++ { - res[i] = elem - } - return res -} - -func rValue(bean interface{}) reflect.Value { - return reflect.Indirect(reflect.ValueOf(bean)) -} - -func rType(bean interface{}) reflect.Type { - sliceValue := reflect.Indirect(reflect.ValueOf(bean)) - // return reflect.TypeOf(sliceValue.Interface()) - return sliceValue.Type() -} - -func structName(v reflect.Type) string { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - return v.Name() -} - -func sliceEq(left, right []string) bool { - if len(left) != len(right) { - return false - } - sort.Sort(sort.StringSlice(left)) - sort.Sort(sort.StringSlice(right)) - for i := 0; i < len(left); i++ { - if left[i] != right[i] { - return false - } - } - return true -} - -func indexName(tableName, idxName string) string { - return fmt.Sprintf("IDX_%v_%v", tableName, idxName) -} - -func eraseAny(value string, strToErase ...string) string { - if len(strToErase) == 0 { - return value - } - var replaceSeq []string - for _, s := range strToErase { - replaceSeq = append(replaceSeq, s, "") - } - - replacer := strings.NewReplacer(replaceSeq...) 
- - return replacer.Replace(value) -} - -func quoteColumns(cols []string, quoteFunc func(string) string, sep string) string { - for i := range cols { - cols[i] = quoteFunc(cols[i]) - } - return strings.Join(cols, sep+" ") -} diff --git a/vendor/xorm.io/xorm/helpler_time.go b/vendor/xorm.io/xorm/helpler_time.go deleted file mode 100644 index f4013e27e..000000000 --- a/vendor/xorm.io/xorm/helpler_time.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -import "time" - -const ( - zeroTime0 = "0000-00-00 00:00:00" - zeroTime1 = "0001-01-01 00:00:00" -) - -func formatTime(t time.Time) string { - return t.Format("2006-01-02 15:04:05") -} - -func isTimeZero(t time.Time) bool { - return t.IsZero() || formatTime(t) == zeroTime0 || - formatTime(t) == zeroTime1 -} diff --git a/vendor/xorm.io/xorm/interface.go b/vendor/xorm.io/xorm/interface.go index a564db126..262a2cfee 100644 --- a/vendor/xorm.io/xorm/interface.go +++ b/vendor/xorm.io/xorm/interface.go @@ -10,7 +10,11 @@ import ( "reflect" "time" - "xorm.io/core" + "xorm.io/xorm/caches" + "xorm.io/xorm/dialects" + "xorm.io/xorm/log" + "xorm.io/xorm/names" + "xorm.io/xorm/schemas" ) // Interface defines the interface which Engine, EngineGroup and Session will implementate. @@ -55,6 +59,7 @@ type Interface interface { QueryString(sqlOrArgs ...interface{}) ([]map[string]string, error) Rows(bean interface{}) (*Rows, error) SetExpr(string, interface{}) *Session + Select(string) *Session SQL(interface{}, ...interface{}) *Session Sum(bean interface{}, colName string) (float64, error) SumInt(bean interface{}, colName string) (int64, error) @@ -76,37 +81,41 @@ type EngineInterface interface { ClearCache(...interface{}) error Context(context.Context) *Session CreateTables(...interface{}) error - DBMetas() ([]*core.Table, error) - Dialect() core.Dialect + DBMetas() ([]*schemas.Table, error) + Dialect() dialects.Dialect + DriverName() string DropTables(...interface{}) error - DumpAllToFile(fp string, tp ...core.DbType) error - GetCacher(string) core.Cacher - GetColumnMapper() core.IMapper - GetDefaultCacher() core.Cacher - GetTableMapper() core.IMapper + DumpAllToFile(fp string, tp ...schemas.DBType) error + GetCacher(string) caches.Cacher + GetColumnMapper() names.Mapper + GetDefaultCacher() caches.Cacher + GetTableMapper() names.Mapper GetTZDatabase() *time.Location GetTZLocation() *time.Location - MapCacher(interface{}, core.Cacher) error + ImportFile(fp string) ([]sql.Result, error) + MapCacher(interface{}, caches.Cacher) error NewSession() *Session NoAutoTime() *Session Quote(string) string - SetCacher(string, core.Cacher) + SetCacher(string, caches.Cacher) SetConnMaxLifetime(time.Duration) - SetDefaultCacher(core.Cacher) - SetLogger(logger core.ILogger) - SetLogLevel(core.LogLevel) - SetMapper(core.IMapper) + SetColumnMapper(names.Mapper) + SetDefaultCacher(caches.Cacher) + SetLogger(logger interface{}) + SetLogLevel(log.LogLevel) + SetMapper(names.Mapper) SetMaxOpenConns(int) SetMaxIdleConns(int) + SetQuotePolicy(dialects.QuotePolicy) SetSchema(string) + SetTableMapper(names.Mapper) SetTZDatabase(tz *time.Location) SetTZLocation(tz *time.Location) - ShowExecTime(...bool) ShowSQL(show ...bool) Sync(...interface{}) error Sync2(...interface{}) error StoreEngine(storeEngine string) *Session - TableInfo(bean interface{}) *Table + TableInfo(bean interface{}) (*schemas.Table, error) 
TableName(interface{}, ...bool) string UnMapType(reflect.Type) } diff --git a/vendor/xorm.io/xorm/json.go b/vendor/xorm.io/xorm/internal/json/json.go similarity index 98% rename from vendor/xorm.io/xorm/json.go rename to vendor/xorm.io/xorm/internal/json/json.go index fdb6ce565..c9a2eb4e2 100644 --- a/vendor/xorm.io/xorm/json.go +++ b/vendor/xorm.io/xorm/internal/json/json.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package xorm +package json import "encoding/json" diff --git a/vendor/xorm.io/xorm/internal/statements/cache.go b/vendor/xorm.io/xorm/internal/statements/cache.go new file mode 100644 index 000000000..cb33df086 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/cache.go @@ -0,0 +1,79 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package statements + +import ( + "fmt" + "strings" + + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" +) + +func (statement *Statement) ConvertIDSQL(sqlStr string) string { + if statement.RefTable != nil { + cols := statement.RefTable.PKColumns() + if len(cols) == 0 { + return "" + } + + colstrs := statement.joinColumns(cols, false) + sqls := utils.SplitNNoCase(sqlStr, " from ", 2) + if len(sqls) != 2 { + return "" + } + + var top string + pLimitN := statement.LimitN + if pLimitN != nil && statement.dialect.URI().DBType == schemas.MSSQL { + top = fmt.Sprintf("TOP %d ", *pLimitN) + } + + newsql := fmt.Sprintf("SELECT %s%s FROM %v", top, colstrs, sqls[1]) + return newsql + } + return "" +} + +func (statement *Statement) ConvertUpdateSQL(sqlStr string) (string, string) { + if statement.RefTable == nil || len(statement.RefTable.PrimaryKeys) != 1 { + return "", "" + } + + colstrs := statement.joinColumns(statement.RefTable.PKColumns(), true) + sqls := utils.SplitNNoCase(sqlStr, "where", 2) + if len(sqls) != 2 { + if len(sqls) == 1 { + return sqls[0], fmt.Sprintf("SELECT %v FROM %v", + colstrs, statement.quote(statement.TableName())) + } + return "", "" + } + + var whereStr = sqls[1] + + // TODO: for postgres only, if any other database? + var paraStr string + if statement.dialect.URI().DBType == schemas.POSTGRES { + paraStr = "$" + } else if statement.dialect.URI().DBType == schemas.MSSQL { + paraStr = ":" + } + + if paraStr != "" { + if strings.Contains(sqls[1], paraStr) { + dollers := strings.Split(sqls[1], paraStr) + whereStr = dollers[0] + for i, c := range dollers[1:] { + ccs := strings.SplitN(c, " ", 2) + whereStr += fmt.Sprintf(paraStr+"%v %v", i+1, ccs[1]) + } + } + } + + return sqls[0], fmt.Sprintf("SELECT %v FROM %v WHERE %v", + colstrs, statement.quote(statement.TableName()), + whereStr) +} diff --git a/vendor/xorm.io/xorm/internal/statements/column_map.go b/vendor/xorm.io/xorm/internal/statements/column_map.go new file mode 100644 index 000000000..bb764b4e0 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/column_map.go @@ -0,0 +1,66 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package statements + +import ( + "strings" + + "xorm.io/xorm/schemas" +) + +type columnMap []string + +func (m columnMap) Contain(colName string) bool { + if len(m) == 0 { + return false + } + + n := len(colName) + for _, mk := range m { + if len(mk) != n { + continue + } + if strings.EqualFold(mk, colName) { + return true + } + } + + return false +} + +func (m columnMap) Len() int { + return len(m) +} + +func (m columnMap) IsEmpty() bool { + return len(m) == 0 +} + +func (m *columnMap) Add(colName string) bool { + if m.Contain(colName) { + return false + } + *m = append(*m, colName) + return true +} + +func getFlagForColumn(m map[string]bool, col *schemas.Column) (val bool, has bool) { + if len(m) == 0 { + return false, false + } + + n := len(col.Name) + + for mk := range m { + if len(mk) != n { + continue + } + if strings.EqualFold(mk, col.Name) { + return m[mk], true + } + } + + return false, false +} diff --git a/vendor/xorm.io/xorm/statement_exprparam.go b/vendor/xorm.io/xorm/internal/statements/expr_param.go similarity index 71% rename from vendor/xorm.io/xorm/statement_exprparam.go rename to vendor/xorm.io/xorm/internal/statements/expr_param.go index fc62e36f1..6657408e4 100644 --- a/vendor/xorm.io/xorm/statement_exprparam.go +++ b/vendor/xorm.io/xorm/internal/statements/expr_param.go @@ -2,13 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package xorm +package statements import ( "fmt" "strings" "xorm.io/builder" + "xorm.io/xorm/schemas" ) type ErrUnsupportedExprType struct { @@ -25,22 +26,22 @@ type exprParam struct { } type exprParams struct { - colNames []string - args []interface{} + ColNames []string + Args []interface{} } func (exprs *exprParams) Len() int { - return len(exprs.colNames) + return len(exprs.ColNames) } func (exprs *exprParams) addParam(colName string, arg interface{}) { - exprs.colNames = append(exprs.colNames, colName) - exprs.args = append(exprs.args, arg) + exprs.ColNames = append(exprs.ColNames, colName) + exprs.Args = append(exprs.Args, arg) } -func (exprs *exprParams) isColExist(colName string) bool { - for _, name := range exprs.colNames { - if strings.EqualFold(trimQuote(name), trimQuote(colName)) { +func (exprs *exprParams) IsColExist(colName string) bool { + for _, name := range exprs.ColNames { + if strings.EqualFold(schemas.CommonQuoter.Trim(name), schemas.CommonQuoter.Trim(colName)) { return true } } @@ -48,16 +49,16 @@ func (exprs *exprParams) isColExist(colName string) bool { } func (exprs *exprParams) getByName(colName string) (exprParam, bool) { - for i, name := range exprs.colNames { + for i, name := range exprs.ColNames { if strings.EqualFold(name, colName) { - return exprParam{name, exprs.args[i]}, true + return exprParam{name, exprs.Args[i]}, true } } return exprParam{}, false } -func (exprs *exprParams) writeArgs(w *builder.BytesWriter) error { - for i, expr := range exprs.args { +func (exprs *exprParams) WriteArgs(w *builder.BytesWriter) error { + for i, expr := range exprs.Args { switch arg := expr.(type) { case *builder.Builder: if _, err := w.WriteString("("); err != nil { @@ -82,7 +83,7 @@ func (exprs *exprParams) writeArgs(w *builder.BytesWriter) error { } w.Append(arg) } - if i != len(exprs.args)-1 { + if i != len(exprs.Args)-1 { if _, err := w.WriteString(","); err != nil { return err } @@ -92,7 +93,7 @@ func (exprs *exprParams) writeArgs(w *builder.BytesWriter) error { } func (exprs *exprParams) writeNameArgs(w *builder.BytesWriter) error { - for i, colName := 
range exprs.colNames { + for i, colName := range exprs.ColNames { if _, err := w.WriteString(colName); err != nil { return err } @@ -100,7 +101,7 @@ func (exprs *exprParams) writeNameArgs(w *builder.BytesWriter) error { return err } - switch arg := exprs.args[i].(type) { + switch arg := exprs.Args[i].(type) { case *builder.Builder: if _, err := w.WriteString("("); err != nil { return err @@ -112,10 +113,10 @@ func (exprs *exprParams) writeNameArgs(w *builder.BytesWriter) error { return err } default: - w.Append(exprs.args[i]) + w.Append(exprs.Args[i]) } - if i+1 != len(exprs.colNames) { + if i+1 != len(exprs.ColNames) { if _, err := w.WriteString(","); err != nil { return err } diff --git a/vendor/xorm.io/xorm/internal/statements/insert.go b/vendor/xorm.io/xorm/internal/statements/insert.go new file mode 100644 index 000000000..db2fc91c0 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/insert.go @@ -0,0 +1,143 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package statements + +import ( + "strings" + + "xorm.io/builder" + "xorm.io/xorm/schemas" +) + +func (statement *Statement) writeInsertOutput(buf *strings.Builder, table *schemas.Table) error { + if statement.dialect.URI().DBType == schemas.MSSQL && len(table.AutoIncrement) > 0 { + if _, err := buf.WriteString(" OUTPUT Inserted."); err != nil { + return err + } + if _, err := buf.WriteString(table.AutoIncrement); err != nil { + return err + } + } + return nil +} + +func (statement *Statement) GenInsertSQL(colNames []string, args []interface{}) (string, []interface{}, error) { + var ( + table = statement.RefTable + tableName = statement.TableName() + exprs = statement.ExprColumns + colPlaces = strings.Repeat("?, ", len(colNames)) + ) + if exprs.Len() <= 0 && len(colPlaces) > 0 { + colPlaces = colPlaces[0 : len(colPlaces)-2] + } + + var buf = builder.NewWriter() + if _, err := buf.WriteString("INSERT INTO "); err != nil { + return "", nil, err + } + + if err := statement.dialect.Quoter().QuoteTo(buf.Builder, tableName); err != nil { + return "", nil, err + } + + if len(colPlaces) <= 0 { + if statement.dialect.URI().DBType == schemas.MYSQL { + if _, err := buf.WriteString(" VALUES ()"); err != nil { + return "", nil, err + } + } else { + if err := statement.writeInsertOutput(buf.Builder, table); err != nil { + return "", nil, err + } + if _, err := buf.WriteString(" DEFAULT VALUES"); err != nil { + return "", nil, err + } + } + } else { + if _, err := buf.WriteString(" ("); err != nil { + return "", nil, err + } + + if err := statement.dialect.Quoter().JoinWrite(buf.Builder, append(colNames, exprs.ColNames...), ","); err != nil { + return "", nil, err + } + + if statement.Conds().IsValid() { + if _, err := buf.WriteString(")"); err != nil { + return "", nil, err + } + if err := statement.writeInsertOutput(buf.Builder, table); err != nil { + return "", nil, err + } + if _, err := buf.WriteString(" SELECT "); err != nil { + return "", nil, err + } + + if err := statement.WriteArgs(buf, args); err != nil { + return "", nil, err + } + + if len(exprs.Args) > 0 { + if _, err := buf.WriteString(","); err != nil { + return "", nil, err + } + } + if err := exprs.WriteArgs(buf); err != nil { + return "", nil, err + } + + if _, err := buf.WriteString(" FROM "); err != nil { + return "", nil, err + } + + if err := statement.dialect.Quoter().QuoteTo(buf.Builder, tableName); err != nil { + return "", nil, err + } + + if _, 
err := buf.WriteString(" WHERE "); err != nil { + return "", nil, err + } + + if err := statement.Conds().WriteTo(buf); err != nil { + return "", nil, err + } + } else { + buf.Append(args...) + + if _, err := buf.WriteString(")"); err != nil { + return "", nil, err + } + if err := statement.writeInsertOutput(buf.Builder, table); err != nil { + return "", nil, err + } + if _, err := buf.WriteString(" VALUES ("); err != nil { + return "", nil, err + } + if _, err := buf.WriteString(colPlaces); err != nil { + return "", nil, err + } + + if err := exprs.WriteArgs(buf); err != nil { + return "", nil, err + } + + if _, err := buf.WriteString(")"); err != nil { + return "", nil, err + } + } + } + + if len(table.AutoIncrement) > 0 && statement.dialect.URI().DBType == schemas.POSTGRES { + if _, err := buf.WriteString(" RETURNING "); err != nil { + return "", nil, err + } + if err := statement.dialect.Quoter().QuoteTo(buf.Builder, table.AutoIncrement); err != nil { + return "", nil, err + } + } + + return buf.String(), buf.Args(), nil +} diff --git a/vendor/xorm.io/xorm/internal/statements/pk.go b/vendor/xorm.io/xorm/internal/statements/pk.go new file mode 100644 index 000000000..b6ae0f232 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/pk.go @@ -0,0 +1,79 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package statements + +import ( + "fmt" + "reflect" + + "xorm.io/builder" + "xorm.io/xorm/schemas" +) + +var ( + ptrPkType = reflect.TypeOf(&schemas.PK{}) + pkType = reflect.TypeOf(schemas.PK{}) + stringType = reflect.TypeOf("") + intType = reflect.TypeOf(int64(0)) + uintType = reflect.TypeOf(uint64(0)) +) + +// ID generate "where id = ? " statement or for composite key "where key1 = ? and key2 = ?" 
+func (statement *Statement) ID(id interface{}) *Statement {
+ switch t := id.(type) {
+ case *schemas.PK:
+ statement.idParam = *t
+ case schemas.PK:
+ statement.idParam = t
+ case string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
+ statement.idParam = schemas.PK{id}
+ default:
+ idValue := reflect.ValueOf(id)
+ idType := idValue.Type()
+
+ switch idType.Kind() {
+ case reflect.String:
+ statement.idParam = schemas.PK{idValue.Convert(stringType).Interface()}
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ statement.idParam = schemas.PK{idValue.Convert(intType).Interface()}
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ statement.idParam = schemas.PK{idValue.Convert(uintType).Interface()}
+ case reflect.Slice:
+ if idType.ConvertibleTo(pkType) {
+ statement.idParam = idValue.Convert(pkType).Interface().(schemas.PK)
+ }
+ case reflect.Ptr:
+ if idType.ConvertibleTo(ptrPkType) {
+ statement.idParam = idValue.Convert(ptrPkType).Elem().Interface().(schemas.PK)
+ }
+ }
+ }
+
+ if statement.idParam == nil {
+ statement.LastError = fmt.Errorf("ID param %#v is not supported", id)
+ }
+
+ return statement
+}
+
+func (statement *Statement) ProcessIDParam() error {
+ if statement.idParam == nil || statement.RefTable == nil {
+ return nil
+ }
+
+ if len(statement.RefTable.PrimaryKeys) != len(statement.idParam) {
+ return fmt.Errorf("ID condition error: expected %d primary keys but got %d",
+ len(statement.RefTable.PrimaryKeys),
+ len(statement.idParam),
+ )
+ }
+
+ for i, col := range statement.RefTable.PKColumns() {
+ var colName = statement.colName(col, statement.TableName())
+ statement.cond = statement.cond.And(builder.Eq{colName: statement.idParam[i]})
+ }
+ return nil
+}
diff --git a/vendor/xorm.io/xorm/internal/statements/query.go b/vendor/xorm.io/xorm/internal/statements/query.go
new file mode 100644
index 000000000..ab3021bf6
--- /dev/null
+++ b/vendor/xorm.io/xorm/internal/statements/query.go
@@ -0,0 +1,441 @@
+// Copyright 2019 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package statements
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+
+ "xorm.io/builder"
+ "xorm.io/xorm/schemas"
+)
+
+func (statement *Statement) GenQuerySQL(sqlOrArgs ...interface{}) (string, []interface{}, error) {
+ if len(sqlOrArgs) > 0 {
+ return statement.ConvertSQLOrArgs(sqlOrArgs...)
+ } + + if statement.RawSQL != "" { + return statement.GenRawSQL(), statement.RawParams, nil + } + + if len(statement.TableName()) <= 0 { + return "", nil, ErrTableNotFound + } + + var columnStr = statement.ColumnStr() + if len(statement.SelectStr) > 0 { + columnStr = statement.SelectStr + } else { + if statement.JoinStr == "" { + if columnStr == "" { + if statement.GroupByStr != "" { + columnStr = statement.quoteColumnStr(statement.GroupByStr) + } else { + columnStr = statement.genColumnStr() + } + } + } else { + if columnStr == "" { + if statement.GroupByStr != "" { + columnStr = statement.quoteColumnStr(statement.GroupByStr) + } else { + columnStr = "*" + } + } + } + if columnStr == "" { + columnStr = "*" + } + } + + if err := statement.ProcessIDParam(); err != nil { + return "", nil, err + } + + sqlStr, condArgs, err := statement.genSelectSQL(columnStr, true, true) + if err != nil { + return "", nil, err + } + args := append(statement.joinArgs, condArgs...) + + // for mssql and use limit + qs := strings.Count(sqlStr, "?") + if len(args)*2 == qs { + args = append(args, args...) + } + + return sqlStr, args, nil +} + +func (statement *Statement) GenSumSQL(bean interface{}, columns ...string) (string, []interface{}, error) { + if statement.RawSQL != "" { + return statement.GenRawSQL(), statement.RawParams, nil + } + + statement.SetRefBean(bean) + + var sumStrs = make([]string, 0, len(columns)) + for _, colName := range columns { + if !strings.Contains(colName, " ") && !strings.Contains(colName, "(") { + colName = statement.quote(colName) + } else { + colName = statement.ReplaceQuote(colName) + } + sumStrs = append(sumStrs, fmt.Sprintf("COALESCE(sum(%s),0)", colName)) + } + sumSelect := strings.Join(sumStrs, ", ") + + if err := statement.mergeConds(bean); err != nil { + return "", nil, err + } + + sqlStr, condArgs, err := statement.genSelectSQL(sumSelect, true, true) + if err != nil { + return "", nil, err + } + + return sqlStr, append(statement.joinArgs, condArgs...), nil +} + +func (statement *Statement) GenGetSQL(bean interface{}) (string, []interface{}, error) { + v := rValue(bean) + isStruct := v.Kind() == reflect.Struct + if isStruct { + statement.SetRefBean(bean) + } + + var columnStr = statement.ColumnStr() + if len(statement.SelectStr) > 0 { + columnStr = statement.SelectStr + } else { + // TODO: always generate column names, not use * even if join + if len(statement.JoinStr) == 0 { + if len(columnStr) == 0 { + if len(statement.GroupByStr) > 0 { + columnStr = statement.quoteColumnStr(statement.GroupByStr) + } else { + columnStr = statement.genColumnStr() + } + } + } else { + if len(columnStr) == 0 { + if len(statement.GroupByStr) > 0 { + columnStr = statement.quoteColumnStr(statement.GroupByStr) + } + } + } + } + + if len(columnStr) == 0 { + columnStr = "*" + } + + if isStruct { + if err := statement.mergeConds(bean); err != nil { + return "", nil, err + } + } else { + if err := statement.ProcessIDParam(); err != nil { + return "", nil, err + } + } + + sqlStr, condArgs, err := statement.genSelectSQL(columnStr, true, true) + if err != nil { + return "", nil, err + } + + return sqlStr, append(statement.joinArgs, condArgs...), nil +} + +// GenCountSQL generates the SQL for counting +func (statement *Statement) GenCountSQL(beans ...interface{}) (string, []interface{}, error) { + if statement.RawSQL != "" { + return statement.GenRawSQL(), statement.RawParams, nil + } + + var condArgs []interface{} + var err error + if len(beans) > 0 { + statement.SetRefBean(beans[0]) + if err := 
statement.mergeConds(beans[0]); err != nil { + return "", nil, err + } + } + + var selectSQL = statement.SelectStr + if len(selectSQL) <= 0 { + if statement.IsDistinct { + selectSQL = fmt.Sprintf("count(DISTINCT %s)", statement.ColumnStr()) + } else if statement.ColumnStr() != "" { + selectSQL = fmt.Sprintf("count(%s)", statement.ColumnStr()) + } else { + selectSQL = "count(*)" + } + } + sqlStr, condArgs, err := statement.genSelectSQL(selectSQL, false, false) + if err != nil { + return "", nil, err + } + + return sqlStr, append(statement.joinArgs, condArgs...), nil +} + +func (statement *Statement) genSelectSQL(columnStr string, needLimit, needOrderBy bool) (string, []interface{}, error) { + var ( + distinct string + dialect = statement.dialect + quote = statement.quote + fromStr = " FROM " + top, mssqlCondi, whereStr string + ) + if statement.IsDistinct && !strings.HasPrefix(columnStr, "count") { + distinct = "DISTINCT " + } + + condSQL, condArgs, err := statement.GenCondSQL(statement.cond) + if err != nil { + return "", nil, err + } + if len(condSQL) > 0 { + whereStr = " WHERE " + condSQL + } + + if dialect.URI().DBType == schemas.MSSQL && strings.Contains(statement.TableName(), "..") { + fromStr += statement.TableName() + } else { + fromStr += quote(statement.TableName()) + } + + if statement.TableAlias != "" { + if dialect.URI().DBType == schemas.ORACLE { + fromStr += " " + quote(statement.TableAlias) + } else { + fromStr += " AS " + quote(statement.TableAlias) + } + } + if statement.JoinStr != "" { + fromStr = fmt.Sprintf("%v %v", fromStr, statement.JoinStr) + } + + pLimitN := statement.LimitN + if dialect.URI().DBType == schemas.MSSQL { + if pLimitN != nil { + LimitNValue := *pLimitN + top = fmt.Sprintf("TOP %d ", LimitNValue) + } + if statement.Start > 0 { + var column string + if len(statement.RefTable.PKColumns()) == 0 { + for _, index := range statement.RefTable.Indexes { + if len(index.Cols) == 1 { + column = index.Cols[0] + break + } + } + if len(column) == 0 { + column = statement.RefTable.ColumnsSeq()[0] + } + } else { + column = statement.RefTable.PKColumns()[0].Name + } + if statement.needTableName() { + if len(statement.TableAlias) > 0 { + column = statement.TableAlias + "." + column + } else { + column = statement.TableName() + "." 
+ column + } + } + + var orderStr string + if needOrderBy && len(statement.OrderStr) > 0 { + orderStr = " ORDER BY " + statement.OrderStr + } + + var groupStr string + if len(statement.GroupByStr) > 0 { + groupStr = " GROUP BY " + statement.GroupByStr + } + mssqlCondi = fmt.Sprintf("(%s NOT IN (SELECT TOP %d %s%s%s%s%s))", + column, statement.Start, column, fromStr, whereStr, orderStr, groupStr) + } + } + + var buf strings.Builder + fmt.Fprintf(&buf, "SELECT %v%v%v%v%v", distinct, top, columnStr, fromStr, whereStr) + if len(mssqlCondi) > 0 { + if len(whereStr) > 0 { + fmt.Fprint(&buf, " AND ", mssqlCondi) + } else { + fmt.Fprint(&buf, " WHERE ", mssqlCondi) + } + } + + if statement.GroupByStr != "" { + fmt.Fprint(&buf, " GROUP BY ", statement.GroupByStr) + } + if statement.HavingStr != "" { + fmt.Fprint(&buf, " ", statement.HavingStr) + } + if needOrderBy && statement.OrderStr != "" { + fmt.Fprint(&buf, " ORDER BY ", statement.OrderStr) + } + if needLimit { + if dialect.URI().DBType != schemas.MSSQL && dialect.URI().DBType != schemas.ORACLE { + if statement.Start > 0 { + if pLimitN != nil { + fmt.Fprintf(&buf, " LIMIT %v OFFSET %v", *pLimitN, statement.Start) + } else { + fmt.Fprintf(&buf, "LIMIT 0 OFFSET %v", statement.Start) + } + } else if pLimitN != nil { + fmt.Fprint(&buf, " LIMIT ", *pLimitN) + } + } else if dialect.URI().DBType == schemas.ORACLE { + if statement.Start != 0 || pLimitN != nil { + oldString := buf.String() + buf.Reset() + rawColStr := columnStr + if rawColStr == "*" { + rawColStr = "at.*" + } + fmt.Fprintf(&buf, "SELECT %v FROM (SELECT %v,ROWNUM RN FROM (%v) at WHERE ROWNUM <= %d) aat WHERE RN > %d", + columnStr, rawColStr, oldString, statement.Start+*pLimitN, statement.Start) + } + } + } + if statement.IsForUpdate { + return dialect.ForUpdateSQL(buf.String()), condArgs, nil + } + + return buf.String(), condArgs, nil +} + +func (statement *Statement) GenExistSQL(bean ...interface{}) (string, []interface{}, error) { + if statement.RawSQL != "" { + return statement.GenRawSQL(), statement.RawParams, nil + } + + var sqlStr string + var args []interface{} + var joinStr string + var err error + if len(bean) == 0 { + tableName := statement.TableName() + if len(tableName) <= 0 { + return "", nil, ErrTableNotFound + } + + tableName = statement.quote(tableName) + if len(statement.JoinStr) > 0 { + joinStr = statement.JoinStr + } + + if statement.Conds().IsValid() { + condSQL, condArgs, err := statement.GenCondSQL(statement.Conds()) + if err != nil { + return "", nil, err + } + + if statement.dialect.URI().DBType == schemas.MSSQL { + sqlStr = fmt.Sprintf("SELECT TOP 1 * FROM %s %s WHERE %s", tableName, joinStr, condSQL) + } else if statement.dialect.URI().DBType == schemas.ORACLE { + sqlStr = fmt.Sprintf("SELECT * FROM %s WHERE (%s) %s AND ROWNUM=1", tableName, joinStr, condSQL) + } else { + sqlStr = fmt.Sprintf("SELECT * FROM %s %s WHERE %s LIMIT 1", tableName, joinStr, condSQL) + } + args = condArgs + } else { + if statement.dialect.URI().DBType == schemas.MSSQL { + sqlStr = fmt.Sprintf("SELECT TOP 1 * FROM %s %s", tableName, joinStr) + } else if statement.dialect.URI().DBType == schemas.ORACLE { + sqlStr = fmt.Sprintf("SELECT * FROM %s %s WHERE ROWNUM=1", tableName, joinStr) + } else { + sqlStr = fmt.Sprintf("SELECT * FROM %s %s LIMIT 1", tableName, joinStr) + } + args = []interface{}{} + } + } else { + beanValue := reflect.ValueOf(bean[0]) + if beanValue.Kind() != reflect.Ptr { + return "", nil, errors.New("needs a pointer") + } + + if beanValue.Elem().Kind() == reflect.Struct 
{ + if err := statement.SetRefBean(bean[0]); err != nil { + return "", nil, err + } + } + + if len(statement.TableName()) <= 0 { + return "", nil, ErrTableNotFound + } + statement.Limit(1) + sqlStr, args, err = statement.GenGetSQL(bean[0]) + if err != nil { + return "", nil, err + } + } + + return sqlStr, args, nil +} + +func (statement *Statement) GenFindSQL(autoCond builder.Cond) (string, []interface{}, error) { + if statement.RawSQL != "" { + return statement.GenRawSQL(), statement.RawParams, nil + } + + var sqlStr string + var args []interface{} + var err error + + if len(statement.TableName()) <= 0 { + return "", nil, ErrTableNotFound + } + + var columnStr = statement.ColumnStr() + if len(statement.SelectStr) > 0 { + columnStr = statement.SelectStr + } else { + if statement.JoinStr == "" { + if columnStr == "" { + if statement.GroupByStr != "" { + columnStr = statement.quoteColumnStr(statement.GroupByStr) + } else { + columnStr = statement.genColumnStr() + } + } + } else { + if columnStr == "" { + if statement.GroupByStr != "" { + columnStr = statement.quoteColumnStr(statement.GroupByStr) + } else { + columnStr = "*" + } + } + } + if columnStr == "" { + columnStr = "*" + } + } + + statement.cond = statement.cond.And(autoCond) + + sqlStr, condArgs, err := statement.genSelectSQL(columnStr, true, true) + if err != nil { + return "", nil, err + } + args = append(statement.joinArgs, condArgs...) + // for mssql and use limit + qs := strings.Count(sqlStr, "?") + if len(args)*2 == qs { + args = append(args, args...) + } + + return sqlStr, args, nil +} diff --git a/vendor/xorm.io/xorm/internal/statements/statement.go b/vendor/xorm.io/xorm/internal/statements/statement.go new file mode 100644 index 000000000..af94a9d9e --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/statement.go @@ -0,0 +1,996 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
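+
+// statement.go defines Statement, which accumulates the SQL building state
+// (conditions, joins, limits and column maps) consumed by the query, insert,
+// update and delete generators in this package.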
+ +package statements + +import ( + "database/sql/driver" + "errors" + "fmt" + "reflect" + "strings" + "time" + + "xorm.io/builder" + "xorm.io/xorm/contexts" + "xorm.io/xorm/convert" + "xorm.io/xorm/dialects" + "xorm.io/xorm/internal/json" + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" + "xorm.io/xorm/tags" +) + +var ( + // ErrConditionType condition type unsupported + ErrConditionType = errors.New("Unsupported condition type") + // ErrUnSupportedSQLType parameter of SQL is not supported + ErrUnSupportedSQLType = errors.New("Unsupported sql type") + // ErrUnSupportedType unsupported error + ErrUnSupportedType = errors.New("Unsupported type error") + // ErrTableNotFound table not found error + ErrTableNotFound = errors.New("Table not found") +) + +// Statement save all the sql info for executing SQL +type Statement struct { + RefTable *schemas.Table + dialect dialects.Dialect + defaultTimeZone *time.Location + tagParser *tags.Parser + Start int + LimitN *int + idParam schemas.PK + OrderStr string + JoinStr string + joinArgs []interface{} + GroupByStr string + HavingStr string + SelectStr string + useAllCols bool + AltTableName string + tableName string + RawSQL string + RawParams []interface{} + UseCascade bool + UseAutoJoin bool + StoreEngine string + Charset string + UseCache bool + UseAutoTime bool + NoAutoCondition bool + IsDistinct bool + IsForUpdate bool + TableAlias string + allUseBool bool + CheckVersion bool + unscoped bool + ColumnMap columnMap + OmitColumnMap columnMap + MustColumnMap map[string]bool + NullableMap map[string]bool + IncrColumns exprParams + DecrColumns exprParams + ExprColumns exprParams + cond builder.Cond + BufferSize int + Context contexts.ContextCache + LastError error +} + +// NewStatement creates a new statement +func NewStatement(dialect dialects.Dialect, tagParser *tags.Parser, defaultTimeZone *time.Location) *Statement { + statement := &Statement{ + dialect: dialect, + tagParser: tagParser, + defaultTimeZone: defaultTimeZone, + } + statement.Reset() + return statement +} + +func (statement *Statement) SetTableName(tableName string) { + statement.tableName = tableName +} + +func (statement *Statement) omitStr() string { + return statement.dialect.Quoter().Join(statement.OmitColumnMap, " ,") +} + +// GenRawSQL generates correct raw sql +func (statement *Statement) GenRawSQL() string { + return statement.ReplaceQuote(statement.RawSQL) +} + +func (statement *Statement) GenCondSQL(condOrBuilder interface{}) (string, []interface{}, error) { + condSQL, condArgs, err := builder.ToSQL(condOrBuilder) + if err != nil { + return "", nil, err + } + return statement.ReplaceQuote(condSQL), condArgs, nil +} + +func (statement *Statement) ReplaceQuote(sql string) string { + if sql == "" || statement.dialect.URI().DBType == schemas.MYSQL || + statement.dialect.URI().DBType == schemas.SQLITE { + return sql + } + return statement.dialect.Quoter().Replace(sql) +} + +func (statement *Statement) SetContextCache(ctxCache contexts.ContextCache) { + statement.Context = ctxCache +} + +// Init reset all the statement's fields +func (statement *Statement) Reset() { + statement.RefTable = nil + statement.Start = 0 + statement.LimitN = nil + statement.OrderStr = "" + statement.UseCascade = true + statement.JoinStr = "" + statement.joinArgs = make([]interface{}, 0) + statement.GroupByStr = "" + statement.HavingStr = "" + statement.ColumnMap = columnMap{} + statement.OmitColumnMap = columnMap{} + statement.AltTableName = "" + statement.tableName = "" + statement.idParam = nil + 
statement.RawSQL = "" + statement.RawParams = make([]interface{}, 0) + statement.UseCache = true + statement.UseAutoTime = true + statement.NoAutoCondition = false + statement.IsDistinct = false + statement.IsForUpdate = false + statement.TableAlias = "" + statement.SelectStr = "" + statement.allUseBool = false + statement.useAllCols = false + statement.MustColumnMap = make(map[string]bool) + statement.NullableMap = make(map[string]bool) + statement.CheckVersion = true + statement.unscoped = false + statement.IncrColumns = exprParams{} + statement.DecrColumns = exprParams{} + statement.ExprColumns = exprParams{} + statement.cond = builder.NewCond() + statement.BufferSize = 0 + statement.Context = nil + statement.LastError = nil +} + +// NoAutoCondition if you do not want convert bean's field as query condition, then use this function +func (statement *Statement) SetNoAutoCondition(no ...bool) *Statement { + statement.NoAutoCondition = true + if len(no) > 0 { + statement.NoAutoCondition = no[0] + } + return statement +} + +// Alias set the table alias +func (statement *Statement) Alias(alias string) *Statement { + statement.TableAlias = alias + return statement +} + +// SQL adds raw sql statement +func (statement *Statement) SQL(query interface{}, args ...interface{}) *Statement { + switch query.(type) { + case (*builder.Builder): + var err error + statement.RawSQL, statement.RawParams, err = query.(*builder.Builder).ToSQL() + if err != nil { + statement.LastError = err + } + case string: + statement.RawSQL = query.(string) + statement.RawParams = args + default: + statement.LastError = ErrUnSupportedSQLType + } + + return statement +} + +// Where add Where statement +func (statement *Statement) Where(query interface{}, args ...interface{}) *Statement { + return statement.And(query, args...) +} + +func (statement *Statement) quote(s string) string { + return statement.dialect.Quoter().Quote(s) +} + +// And add Where & and statement +func (statement *Statement) And(query interface{}, args ...interface{}) *Statement { + switch query.(type) { + case string: + cond := builder.Expr(query.(string), args...) + statement.cond = statement.cond.And(cond) + case map[string]interface{}: + queryMap := query.(map[string]interface{}) + newMap := make(map[string]interface{}) + for k, v := range queryMap { + newMap[statement.quote(k)] = v + } + statement.cond = statement.cond.And(builder.Eq(newMap)) + case builder.Cond: + cond := query.(builder.Cond) + statement.cond = statement.cond.And(cond) + for _, v := range args { + if vv, ok := v.(builder.Cond); ok { + statement.cond = statement.cond.And(vv) + } + } + default: + statement.LastError = ErrConditionType + } + + return statement +} + +// Or add Where & Or statement +func (statement *Statement) Or(query interface{}, args ...interface{}) *Statement { + switch query.(type) { + case string: + cond := builder.Expr(query.(string), args...) + statement.cond = statement.cond.Or(cond) + case map[string]interface{}: + cond := builder.Eq(query.(map[string]interface{})) + statement.cond = statement.cond.Or(cond) + case builder.Cond: + cond := query.(builder.Cond) + statement.cond = statement.cond.Or(cond) + for _, v := range args { + if vv, ok := v.(builder.Cond); ok { + statement.cond = statement.cond.Or(vv) + } + } + default: + // TODO: not support condition type + } + return statement +} + +// In generate "Where column IN (?) 
" statement +func (statement *Statement) In(column string, args ...interface{}) *Statement { + in := builder.In(statement.quote(column), args...) + statement.cond = statement.cond.And(in) + return statement +} + +// NotIn generate "Where column NOT IN (?) " statement +func (statement *Statement) NotIn(column string, args ...interface{}) *Statement { + notIn := builder.NotIn(statement.quote(column), args...) + statement.cond = statement.cond.And(notIn) + return statement +} + +func (statement *Statement) SetRefValue(v reflect.Value) error { + var err error + statement.RefTable, err = statement.tagParser.ParseWithCache(reflect.Indirect(v)) + if err != nil { + return err + } + statement.tableName = dialects.FullTableName(statement.dialect, statement.tagParser.GetTableMapper(), v, true) + return nil +} + +func rValue(bean interface{}) reflect.Value { + return reflect.Indirect(reflect.ValueOf(bean)) +} + +func (statement *Statement) SetRefBean(bean interface{}) error { + var err error + statement.RefTable, err = statement.tagParser.ParseWithCache(rValue(bean)) + if err != nil { + return err + } + statement.tableName = dialects.FullTableName(statement.dialect, statement.tagParser.GetTableMapper(), bean, true) + return nil +} + +func (statement *Statement) needTableName() bool { + return len(statement.JoinStr) > 0 +} + +func (statement *Statement) colName(col *schemas.Column, tableName string) string { + if statement.needTableName() { + var nm = tableName + if len(statement.TableAlias) > 0 { + nm = statement.TableAlias + } + return statement.quote(nm) + "." + statement.quote(col.Name) + } + return statement.quote(col.Name) +} + +// TableName return current tableName +func (statement *Statement) TableName() string { + if statement.AltTableName != "" { + return statement.AltTableName + } + + return statement.tableName +} + +// Incr Generate "Update ... Set column = column + arg" statement +func (statement *Statement) Incr(column string, arg ...interface{}) *Statement { + if len(arg) > 0 { + statement.IncrColumns.addParam(column, arg[0]) + } else { + statement.IncrColumns.addParam(column, 1) + } + return statement +} + +// Decr Generate "Update ... Set column = column - arg" statement +func (statement *Statement) Decr(column string, arg ...interface{}) *Statement { + if len(arg) > 0 { + statement.DecrColumns.addParam(column, arg[0]) + } else { + statement.DecrColumns.addParam(column, 1) + } + return statement +} + +// SetExpr Generate "Update ... Set column = {expression}" statement +func (statement *Statement) SetExpr(column string, expression interface{}) *Statement { + if e, ok := expression.(string); ok { + statement.ExprColumns.addParam(column, statement.dialect.Quoter().Replace(e)) + } else { + statement.ExprColumns.addParam(column, expression) + } + return statement +} + +// Distinct generates "DISTINCT col1, col2 " statement +func (statement *Statement) Distinct(columns ...string) *Statement { + statement.IsDistinct = true + statement.Cols(columns...) + return statement +} + +// ForUpdate generates "SELECT ... 
FOR UPDATE" statement +func (statement *Statement) ForUpdate() *Statement { + statement.IsForUpdate = true + return statement +} + +// Select replace select +func (statement *Statement) Select(str string) *Statement { + statement.SelectStr = statement.ReplaceQuote(str) + return statement +} + +func col2NewCols(columns ...string) []string { + newColumns := make([]string, 0, len(columns)) + for _, col := range columns { + col = strings.Replace(col, "`", "", -1) + col = strings.Replace(col, `"`, "", -1) + ccols := strings.Split(col, ",") + for _, c := range ccols { + newColumns = append(newColumns, strings.TrimSpace(c)) + } + } + return newColumns +} + +// Cols generate "col1, col2" statement +func (statement *Statement) Cols(columns ...string) *Statement { + cols := col2NewCols(columns...) + for _, nc := range cols { + statement.ColumnMap.Add(nc) + } + return statement +} + +func (statement *Statement) ColumnStr() string { + return statement.dialect.Quoter().Join(statement.ColumnMap, ", ") +} + +// AllCols update use only: update all columns +func (statement *Statement) AllCols() *Statement { + statement.useAllCols = true + return statement +} + +// MustCols update use only: must update columns +func (statement *Statement) MustCols(columns ...string) *Statement { + newColumns := col2NewCols(columns...) + for _, nc := range newColumns { + statement.MustColumnMap[strings.ToLower(nc)] = true + } + return statement +} + +// UseBool indicates that use bool fields as update contents and query contiditions +func (statement *Statement) UseBool(columns ...string) *Statement { + if len(columns) > 0 { + statement.MustCols(columns...) + } else { + statement.allUseBool = true + } + return statement +} + +// Omit do not use the columns +func (statement *Statement) Omit(columns ...string) { + newColumns := col2NewCols(columns...) + for _, nc := range newColumns { + statement.OmitColumnMap = append(statement.OmitColumnMap, nc) + } +} + +// Nullable Update use only: update columns to null when value is nullable and zero-value +func (statement *Statement) Nullable(columns ...string) { + newColumns := col2NewCols(columns...) + for _, nc := range newColumns { + statement.NullableMap[strings.ToLower(nc)] = true + } +} + +// Top generate LIMIT limit statement +func (statement *Statement) Top(limit int) *Statement { + statement.Limit(limit) + return statement +} + +// Limit generate LIMIT start, limit statement +func (statement *Statement) Limit(limit int, start ...int) *Statement { + statement.LimitN = &limit + if len(start) > 0 { + statement.Start = start[0] + } + return statement +} + +// OrderBy generate "Order By order" statement +func (statement *Statement) OrderBy(order string) *Statement { + if len(statement.OrderStr) > 0 { + statement.OrderStr += ", " + } + statement.OrderStr += statement.ReplaceQuote(order) + return statement +} + +// Desc generate `ORDER BY xx DESC` +func (statement *Statement) Desc(colNames ...string) *Statement { + var buf strings.Builder + if len(statement.OrderStr) > 0 { + fmt.Fprint(&buf, statement.OrderStr, ", ") + } + for i, col := range colNames { + if i > 0 { + fmt.Fprint(&buf, ", ") + } + statement.dialect.Quoter().QuoteTo(&buf, col) + fmt.Fprint(&buf, " DESC") + } + statement.OrderStr = buf.String() + return statement +} + +// Asc provide asc order by query condition, the input parameters are columns. 
+func (statement *Statement) Asc(colNames ...string) *Statement { + var buf strings.Builder + if len(statement.OrderStr) > 0 { + fmt.Fprint(&buf, statement.OrderStr, ", ") + } + for i, col := range colNames { + if i > 0 { + fmt.Fprint(&buf, ", ") + } + statement.dialect.Quoter().QuoteTo(&buf, col) + fmt.Fprint(&buf, " ASC") + } + statement.OrderStr = buf.String() + return statement +} + +func (statement *Statement) Conds() builder.Cond { + return statement.cond +} + +// Table tempororily set table name, the parameter could be a string or a pointer of struct +func (statement *Statement) SetTable(tableNameOrBean interface{}) error { + v := rValue(tableNameOrBean) + t := v.Type() + if t.Kind() == reflect.Struct { + var err error + statement.RefTable, err = statement.tagParser.ParseWithCache(v) + if err != nil { + return err + } + } + + statement.AltTableName = dialects.FullTableName(statement.dialect, statement.tagParser.GetTableMapper(), tableNameOrBean, true) + return nil +} + +// Join The joinOP should be one of INNER, LEFT OUTER, CROSS etc - this will be prepended to JOIN +func (statement *Statement) Join(joinOP string, tablename interface{}, condition string, args ...interface{}) *Statement { + var buf strings.Builder + if len(statement.JoinStr) > 0 { + fmt.Fprintf(&buf, "%v %v JOIN ", statement.JoinStr, joinOP) + } else { + fmt.Fprintf(&buf, "%v JOIN ", joinOP) + } + + switch tp := tablename.(type) { + case builder.Builder: + subSQL, subQueryArgs, err := tp.ToSQL() + if err != nil { + statement.LastError = err + return statement + } + + fields := strings.Split(tp.TableName(), ".") + aliasName := statement.dialect.Quoter().Trim(fields[len(fields)-1]) + aliasName = schemas.CommonQuoter.Trim(aliasName) + + fmt.Fprintf(&buf, "(%s) %s ON %v", statement.ReplaceQuote(subSQL), aliasName, statement.ReplaceQuote(condition)) + statement.joinArgs = append(statement.joinArgs, subQueryArgs...) + case *builder.Builder: + subSQL, subQueryArgs, err := tp.ToSQL() + if err != nil { + statement.LastError = err + return statement + } + + fields := strings.Split(tp.TableName(), ".") + aliasName := statement.dialect.Quoter().Trim(fields[len(fields)-1]) + aliasName = schemas.CommonQuoter.Trim(aliasName) + + fmt.Fprintf(&buf, "(%s) %s ON %v", statement.ReplaceQuote(subSQL), aliasName, statement.ReplaceQuote(condition)) + statement.joinArgs = append(statement.joinArgs, subQueryArgs...) + default: + tbName := dialects.FullTableName(statement.dialect, statement.tagParser.GetTableMapper(), tablename, true) + if !utils.IsSubQuery(tbName) { + var buf strings.Builder + statement.dialect.Quoter().QuoteTo(&buf, tbName) + tbName = buf.String() + } + fmt.Fprintf(&buf, "%s ON %v", tbName, statement.ReplaceQuote(condition)) + } + + statement.JoinStr = buf.String() + statement.joinArgs = append(statement.joinArgs, args...) 
+ return statement +} + +// tbName get some table's table name +func (statement *Statement) tbNameNoSchema(table *schemas.Table) string { + if len(statement.AltTableName) > 0 { + return statement.AltTableName + } + + return table.Name +} + +// GroupBy generate "Group By keys" statement +func (statement *Statement) GroupBy(keys string) *Statement { + statement.GroupByStr = statement.ReplaceQuote(keys) + return statement +} + +// Having generate "Having conditions" statement +func (statement *Statement) Having(conditions string) *Statement { + statement.HavingStr = fmt.Sprintf("HAVING %v", statement.ReplaceQuote(conditions)) + return statement +} + +// Unscoped always disable struct tag "deleted" +func (statement *Statement) SetUnscoped() *Statement { + statement.unscoped = true + return statement +} + +func (statement *Statement) GetUnscoped() bool { + return statement.unscoped +} + +func (statement *Statement) genColumnStr() string { + if statement.RefTable == nil { + return "" + } + + var buf strings.Builder + columns := statement.RefTable.Columns() + + for _, col := range columns { + if statement.OmitColumnMap.Contain(col.Name) { + continue + } + + if len(statement.ColumnMap) > 0 && !statement.ColumnMap.Contain(col.Name) { + continue + } + + if col.MapType == schemas.ONLYTODB { + continue + } + + if buf.Len() != 0 { + buf.WriteString(", ") + } + + if statement.JoinStr != "" { + if statement.TableAlias != "" { + buf.WriteString(statement.TableAlias) + } else { + buf.WriteString(statement.TableName()) + } + + buf.WriteString(".") + } + + statement.dialect.Quoter().QuoteTo(&buf, col.Name) + } + + return buf.String() +} + +func (statement *Statement) GenCreateTableSQL() []string { + statement.RefTable.StoreEngine = statement.StoreEngine + statement.RefTable.Charset = statement.Charset + s, _ := statement.dialect.CreateTableSQL(statement.RefTable, statement.TableName()) + return s +} + +func (statement *Statement) GenIndexSQL() []string { + var sqls []string + tbName := statement.TableName() + for _, index := range statement.RefTable.Indexes { + if index.Type == schemas.IndexType { + sql := statement.dialect.CreateIndexSQL(tbName, index) + sqls = append(sqls, sql) + } + } + return sqls +} + +func uniqueName(tableName, uqeName string) string { + return fmt.Sprintf("UQE_%v_%v", tableName, uqeName) +} + +func (statement *Statement) GenUniqueSQL() []string { + var sqls []string + tbName := statement.TableName() + for _, index := range statement.RefTable.Indexes { + if index.Type == schemas.UniqueType { + sql := statement.dialect.CreateIndexSQL(tbName, index) + sqls = append(sqls, sql) + } + } + return sqls +} + +func (statement *Statement) GenDelIndexSQL() []string { + var sqls []string + tbName := statement.TableName() + idx := strings.Index(tbName, ".") + if idx > -1 { + tbName = tbName[idx+1:] + } + for _, index := range statement.RefTable.Indexes { + sqls = append(sqls, statement.dialect.DropIndexSQL(tbName, index)) + } + return sqls +} + +func (statement *Statement) buildConds2(table *schemas.Table, bean interface{}, + includeVersion bool, includeUpdated bool, includeNil bool, + includeAutoIncr bool, allUseBool bool, useAllCols bool, unscoped bool, + mustColumnMap map[string]bool, tableName, aliasName string, addedTableName bool) (builder.Cond, error) { + var conds []builder.Cond + for _, col := range table.Columns() { + if !includeVersion && col.IsVersion { + continue + } + if !includeUpdated && col.IsUpdated { + continue + } + if !includeAutoIncr && col.IsAutoIncrement { + continue + } + + 
if statement.dialect.URI().DBType == schemas.MSSQL && (col.SQLType.Name == schemas.Text || + col.SQLType.IsBlob() || col.SQLType.Name == schemas.TimeStampz) { + continue + } + if col.SQLType.IsJson() { + continue + } + + var colName string + if addedTableName { + var nm = tableName + if len(aliasName) > 0 { + nm = aliasName + } + colName = statement.quote(nm) + "." + statement.quote(col.Name) + } else { + colName = statement.quote(col.Name) + } + + fieldValuePtr, err := col.ValueOf(bean) + if err != nil { + if !strings.Contains(err.Error(), "is not valid") { + //engine.logger.Warn(err) + } + continue + } + + if col.IsDeleted && !unscoped { // tag "deleted" is enabled + conds = append(conds, statement.CondDeleted(col)) + } + + fieldValue := *fieldValuePtr + if fieldValue.Interface() == nil { + continue + } + + fieldType := reflect.TypeOf(fieldValue.Interface()) + requiredField := useAllCols + + if b, ok := getFlagForColumn(mustColumnMap, col); ok { + if b { + requiredField = true + } else { + continue + } + } + + if fieldType.Kind() == reflect.Ptr { + if fieldValue.IsNil() { + if includeNil { + conds = append(conds, builder.Eq{colName: nil}) + } + continue + } else if !fieldValue.IsValid() { + continue + } else { + // dereference ptr type to instance type + fieldValue = fieldValue.Elem() + fieldType = reflect.TypeOf(fieldValue.Interface()) + requiredField = true + } + } + + var val interface{} + switch fieldType.Kind() { + case reflect.Bool: + if allUseBool || requiredField { + val = fieldValue.Interface() + } else { + // if a bool in a struct, it will not be as a condition because it default is false, + // please use Where() instead + continue + } + case reflect.String: + if !requiredField && fieldValue.String() == "" { + continue + } + // for MyString, should convert to string or panic + if fieldType.String() != reflect.String.String() { + val = fieldValue.String() + } else { + val = fieldValue.Interface() + } + case reflect.Int8, reflect.Int16, reflect.Int, reflect.Int32, reflect.Int64: + if !requiredField && fieldValue.Int() == 0 { + continue + } + val = fieldValue.Interface() + case reflect.Float32, reflect.Float64: + if !requiredField && fieldValue.Float() == 0.0 { + continue + } + val = fieldValue.Interface() + case reflect.Uint8, reflect.Uint16, reflect.Uint, reflect.Uint32, reflect.Uint64: + if !requiredField && fieldValue.Uint() == 0 { + continue + } + t := int64(fieldValue.Uint()) + val = reflect.ValueOf(&t).Interface() + case reflect.Struct: + if fieldType.ConvertibleTo(schemas.TimeType) { + t := fieldValue.Convert(schemas.TimeType).Interface().(time.Time) + if !requiredField && (t.IsZero() || !fieldValue.IsValid()) { + continue + } + val = dialects.FormatColumnTime(statement.dialect, statement.defaultTimeZone, col, t) + } else if _, ok := reflect.New(fieldType).Interface().(convert.Conversion); ok { + continue + } else if valNul, ok := fieldValue.Interface().(driver.Valuer); ok { + val, _ = valNul.Value() + if val == nil && !requiredField { + continue + } + } else { + if col.SQLType.IsJson() { + if col.SQLType.IsText() { + bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface()) + if err != nil { + return nil, err + } + val = string(bytes) + } else if col.SQLType.IsBlob() { + var bytes []byte + var err error + bytes, err = json.DefaultJSONHandler.Marshal(fieldValue.Interface()) + if err != nil { + return nil, err + } + val = bytes + } + } else { + table, err := statement.tagParser.ParseWithCache(fieldValue) + if err != nil { + val = fieldValue.Interface() + } else 
{ + if len(table.PrimaryKeys) == 1 { + pkField := reflect.Indirect(fieldValue).FieldByName(table.PKColumns()[0].FieldName) + // fix non-int pk issues + //if pkField.Int() != 0 { + if pkField.IsValid() && !utils.IsZero(pkField.Interface()) { + val = pkField.Interface() + } else { + continue + } + } else { + //TODO: how to handler? + return nil, fmt.Errorf("not supported %v as %v", fieldValue.Interface(), table.PrimaryKeys) + } + } + } + } + case reflect.Array: + continue + case reflect.Slice, reflect.Map: + if fieldValue == reflect.Zero(fieldType) { + continue + } + if fieldValue.IsNil() || !fieldValue.IsValid() || fieldValue.Len() == 0 { + continue + } + + if col.SQLType.IsText() { + bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface()) + if err != nil { + return nil, err + } + val = string(bytes) + } else if col.SQLType.IsBlob() { + var bytes []byte + var err error + if (fieldType.Kind() == reflect.Array || fieldType.Kind() == reflect.Slice) && + fieldType.Elem().Kind() == reflect.Uint8 { + if fieldValue.Len() > 0 { + val = fieldValue.Bytes() + } else { + continue + } + } else { + bytes, err = json.DefaultJSONHandler.Marshal(fieldValue.Interface()) + if err != nil { + return nil, err + } + val = bytes + } + } else { + continue + } + default: + val = fieldValue.Interface() + } + + conds = append(conds, builder.Eq{colName: val}) + } + + return builder.And(conds...), nil +} + +func (statement *Statement) BuildConds(table *schemas.Table, bean interface{}, includeVersion bool, includeUpdated bool, includeNil bool, includeAutoIncr bool, addedTableName bool) (builder.Cond, error) { + return statement.buildConds2(table, bean, includeVersion, includeUpdated, includeNil, includeAutoIncr, statement.allUseBool, statement.useAllCols, + statement.unscoped, statement.MustColumnMap, statement.TableName(), statement.TableAlias, addedTableName) +} + +func (statement *Statement) mergeConds(bean interface{}) error { + if !statement.NoAutoCondition { + var addedTableName = (len(statement.JoinStr) > 0) + autoCond, err := statement.BuildConds(statement.RefTable, bean, true, true, false, true, addedTableName) + if err != nil { + return err + } + statement.cond = statement.cond.And(autoCond) + } + + if err := statement.ProcessIDParam(); err != nil { + return err + } + return nil +} + +func (statement *Statement) GenConds(bean interface{}) (string, []interface{}, error) { + if err := statement.mergeConds(bean); err != nil { + return "", nil, err + } + + return statement.GenCondSQL(statement.cond) +} + +func (statement *Statement) quoteColumnStr(columnStr string) string { + columns := strings.Split(columnStr, ",") + return statement.dialect.Quoter().Join(columns, ",") +} + +func (statement *Statement) ConvertSQLOrArgs(sqlOrArgs ...interface{}) (string, []interface{}, error) { + sql, args, err := convertSQLOrArgs(sqlOrArgs...) 
+ if err != nil { + return "", nil, err + } + return statement.ReplaceQuote(sql), args, nil +} + +func convertSQLOrArgs(sqlOrArgs ...interface{}) (string, []interface{}, error) { + switch sqlOrArgs[0].(type) { + case string: + return sqlOrArgs[0].(string), sqlOrArgs[1:], nil + case *builder.Builder: + return sqlOrArgs[0].(*builder.Builder).ToSQL() + case builder.Builder: + bd := sqlOrArgs[0].(builder.Builder) + return bd.ToSQL() + } + + return "", nil, ErrUnSupportedType +} + +func (statement *Statement) joinColumns(cols []*schemas.Column, includeTableName bool) string { + var colnames = make([]string, len(cols)) + for i, col := range cols { + if includeTableName { + colnames[i] = statement.quote(statement.TableName()) + + "." + statement.quote(col.Name) + } else { + colnames[i] = statement.quote(col.Name) + } + } + return strings.Join(colnames, ", ") +} + +// CondDeleted returns the conditions whether a record is soft deleted. +func (statement *Statement) CondDeleted(col *schemas.Column) builder.Cond { + var colName = col.Name + if statement.JoinStr != "" { + var prefix string + if statement.TableAlias != "" { + prefix = statement.TableAlias + } else { + prefix = statement.TableName() + } + colName = statement.quote(prefix) + "." + statement.quote(col.Name) + } + var cond = builder.NewCond() + if col.SQLType.IsNumeric() { + cond = builder.Eq{colName: 0} + } else { + // FIXME: mssql: The conversion of a nvarchar data type to a datetime data type resulted in an out-of-range value. + if statement.dialect.URI().DBType != schemas.MSSQL { + cond = builder.Eq{colName: utils.ZeroTime1} + } + } + + if col.Nullable { + cond = cond.Or(builder.IsNull{colName}) + } + + return cond +} diff --git a/vendor/xorm.io/xorm/statement_args.go b/vendor/xorm.io/xorm/internal/statements/statement_args.go similarity index 75% rename from vendor/xorm.io/xorm/statement_args.go rename to vendor/xorm.io/xorm/internal/statements/statement_args.go index 310f24d6b..7d1ef9eb6 100644 --- a/vendor/xorm.io/xorm/statement_args.go +++ b/vendor/xorm.io/xorm/internal/statements/statement_args.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-package xorm +package statements import ( "fmt" @@ -11,7 +11,7 @@ import ( "time" "xorm.io/builder" - "xorm.io/core" + "xorm.io/xorm/schemas" ) func quoteNeeded(a interface{}) bool { @@ -77,10 +77,10 @@ func convertArg(arg interface{}, convertFunc func(string) string) string { const insertSelectPlaceHolder = true -func (statement *Statement) writeArg(w *builder.BytesWriter, arg interface{}) error { +func (statement *Statement) WriteArg(w *builder.BytesWriter, arg interface{}) error { switch argv := arg.(type) { case bool: - if statement.Engine.dialect.DBType() == core.MSSQL { + if statement.dialect.URI().DBType == schemas.MSSQL { if argv { if _, err := w.WriteString("1"); err != nil { return err @@ -119,7 +119,7 @@ func (statement *Statement) writeArg(w *builder.BytesWriter, arg interface{}) er w.Append(arg) } else { var convertFunc = convertStringSingleQuote - if statement.Engine.dialect.DBType() == core.MYSQL { + if statement.dialect.URI().DBType == schemas.MYSQL { convertFunc = convertString } if _, err := w.WriteString(convertArg(arg, convertFunc)); err != nil { @@ -130,9 +130,9 @@ func (statement *Statement) writeArg(w *builder.BytesWriter, arg interface{}) er return nil } -func (statement *Statement) writeArgs(w *builder.BytesWriter, args []interface{}) error { +func (statement *Statement) WriteArgs(w *builder.BytesWriter, args []interface{}) error { for i, arg := range args { - if err := statement.writeArg(w, arg); err != nil { + if err := statement.WriteArg(w, arg); err != nil { return err } @@ -144,27 +144,3 @@ func (statement *Statement) writeArgs(w *builder.BytesWriter, args []interface{} } return nil } - -func writeStrings(w *builder.BytesWriter, cols []string, leftQuote, rightQuote string) error { - for i, colName := range cols { - if len(leftQuote) > 0 && colName[0] != '`' { - if _, err := w.WriteString(leftQuote); err != nil { - return err - } - } - if _, err := w.WriteString(colName); err != nil { - return err - } - if len(rightQuote) > 0 && colName[len(colName)-1] != '`' { - if _, err := w.WriteString(rightQuote); err != nil { - return err - } - } - if i+1 != len(cols) { - if _, err := w.WriteString(","); err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/xorm.io/xorm/internal/statements/update.go b/vendor/xorm.io/xorm/internal/statements/update.go new file mode 100644 index 000000000..2bd7ddd3f --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/update.go @@ -0,0 +1,295 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
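+
+// update.go builds the SET column list and the corresponding argument values
+// for UPDATE statements from a bean, honoring omit/column maps, nullable
+// columns and incr/decr/expr columns.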
+ +package statements + +import ( + "database/sql/driver" + "errors" + "fmt" + "reflect" + "time" + + "xorm.io/xorm/convert" + "xorm.io/xorm/dialects" + "xorm.io/xorm/internal/json" + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" +) + +func (statement *Statement) ifAddColUpdate(col *schemas.Column, includeVersion, includeUpdated, includeNil, + includeAutoIncr, update bool) (bool, error) { + columnMap := statement.ColumnMap + omitColumnMap := statement.OmitColumnMap + unscoped := statement.unscoped + + if !includeVersion && col.IsVersion { + return false, nil + } + if col.IsCreated && !columnMap.Contain(col.Name) { + return false, nil + } + if !includeUpdated && col.IsUpdated { + return false, nil + } + if !includeAutoIncr && col.IsAutoIncrement { + return false, nil + } + if col.IsDeleted && !unscoped { + return false, nil + } + if omitColumnMap.Contain(col.Name) { + return false, nil + } + if len(columnMap) > 0 && !columnMap.Contain(col.Name) { + return false, nil + } + + if col.MapType == schemas.ONLYFROMDB { + return false, nil + } + + if statement.IncrColumns.IsColExist(col.Name) { + return false, nil + } else if statement.DecrColumns.IsColExist(col.Name) { + return false, nil + } else if statement.ExprColumns.IsColExist(col.Name) { + return false, nil + } + + return true, nil +} + +// BuildUpdates auto generating update columnes and values according a struct +func (statement *Statement) BuildUpdates(tableValue reflect.Value, + includeVersion, includeUpdated, includeNil, + includeAutoIncr, update bool) ([]string, []interface{}, error) { + table := statement.RefTable + allUseBool := statement.allUseBool + useAllCols := statement.useAllCols + mustColumnMap := statement.MustColumnMap + nullableMap := statement.NullableMap + + var colNames = make([]string, 0) + var args = make([]interface{}, 0) + + for _, col := range table.Columns() { + ok, err := statement.ifAddColUpdate(col, includeVersion, includeUpdated, includeNil, + includeAutoIncr, update) + if err != nil { + return nil, nil, err + } + if !ok { + continue + } + + fieldValuePtr, err := col.ValueOfV(&tableValue) + if err != nil { + return nil, nil, err + } + + fieldValue := *fieldValuePtr + fieldType := reflect.TypeOf(fieldValue.Interface()) + if fieldType == nil { + continue + } + + requiredField := useAllCols + includeNil := useAllCols + + if b, ok := getFlagForColumn(mustColumnMap, col); ok { + if b { + requiredField = true + } else { + continue + } + } + + // !evalphobia! 
set fieldValue as nil when column is nullable and zero-value + if b, ok := getFlagForColumn(nullableMap, col); ok { + if b && col.Nullable && utils.IsZero(fieldValue.Interface()) { + var nilValue *int + fieldValue = reflect.ValueOf(nilValue) + fieldType = reflect.TypeOf(fieldValue.Interface()) + includeNil = true + } + } + + var val interface{} + + if fieldValue.CanAddr() { + if structConvert, ok := fieldValue.Addr().Interface().(convert.Conversion); ok { + data, err := structConvert.ToDB() + if err != nil { + return nil, nil, err + } + + val = data + goto APPEND + } + } + + if structConvert, ok := fieldValue.Interface().(convert.Conversion); ok { + data, err := structConvert.ToDB() + if err != nil { + return nil, nil, err + } + + val = data + goto APPEND + } + + if fieldType.Kind() == reflect.Ptr { + if fieldValue.IsNil() { + if includeNil { + args = append(args, nil) + colNames = append(colNames, fmt.Sprintf("%v=?", statement.quote(col.Name))) + } + continue + } else if !fieldValue.IsValid() { + continue + } else { + // dereference ptr type to instance type + fieldValue = fieldValue.Elem() + fieldType = reflect.TypeOf(fieldValue.Interface()) + requiredField = true + } + } + + switch fieldType.Kind() { + case reflect.Bool: + if allUseBool || requiredField { + val = fieldValue.Interface() + } else { + // if a bool in a struct, it will not be as a condition because it default is false, + // please use Where() instead + continue + } + case reflect.String: + if !requiredField && fieldValue.String() == "" { + continue + } + // for MyString, should convert to string or panic + if fieldType.String() != reflect.String.String() { + val = fieldValue.String() + } else { + val = fieldValue.Interface() + } + case reflect.Int8, reflect.Int16, reflect.Int, reflect.Int32, reflect.Int64: + if !requiredField && fieldValue.Int() == 0 { + continue + } + val = fieldValue.Interface() + case reflect.Float32, reflect.Float64: + if !requiredField && fieldValue.Float() == 0.0 { + continue + } + val = fieldValue.Interface() + case reflect.Uint8, reflect.Uint16, reflect.Uint, reflect.Uint32, reflect.Uint64: + if !requiredField && fieldValue.Uint() == 0 { + continue + } + t := int64(fieldValue.Uint()) + val = reflect.ValueOf(&t).Interface() + case reflect.Struct: + if fieldType.ConvertibleTo(schemas.TimeType) { + t := fieldValue.Convert(schemas.TimeType).Interface().(time.Time) + if !requiredField && (t.IsZero() || !fieldValue.IsValid()) { + continue + } + val = dialects.FormatColumnTime(statement.dialect, statement.defaultTimeZone, col, t) + } else if nulType, ok := fieldValue.Interface().(driver.Valuer); ok { + val, _ = nulType.Value() + if val == nil && !requiredField { + continue + } + } else { + if !col.SQLType.IsJson() { + table, err := statement.tagParser.ParseWithCache(fieldValue) + if err != nil { + val = fieldValue.Interface() + } else { + if len(table.PrimaryKeys) == 1 { + pkField := reflect.Indirect(fieldValue).FieldByName(table.PKColumns()[0].FieldName) + // fix non-int pk issues + if pkField.IsValid() && (!requiredField && !utils.IsZero(pkField.Interface())) { + val = pkField.Interface() + } else { + continue + } + } else { + return nil, nil, errors.New("Not supported multiple primary keys") + } + } + } else { + // Blank struct could not be as update data + if requiredField || !utils.IsStructZero(fieldValue) { + bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface()) + if err != nil { + return nil, nil, fmt.Errorf("mashal %v failed", fieldValue.Interface()) + } + if col.SQLType.IsText() { 
+ val = string(bytes) + } else if col.SQLType.IsBlob() { + val = bytes + } + } else { + continue + } + } + } + case reflect.Array, reflect.Slice, reflect.Map: + if !requiredField { + if fieldValue == reflect.Zero(fieldType) { + continue + } + if fieldType.Kind() == reflect.Array { + if utils.IsArrayZero(fieldValue) { + continue + } + } else if fieldValue.IsNil() || !fieldValue.IsValid() || fieldValue.Len() == 0 { + continue + } + } + + if col.SQLType.IsText() { + bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface()) + if err != nil { + return nil, nil, err + } + val = string(bytes) + } else if col.SQLType.IsBlob() { + var bytes []byte + var err error + if fieldType.Kind() == reflect.Slice && + fieldType.Elem().Kind() == reflect.Uint8 { + if fieldValue.Len() > 0 { + val = fieldValue.Bytes() + } else { + continue + } + } else if fieldType.Kind() == reflect.Array && + fieldType.Elem().Kind() == reflect.Uint8 { + val = fieldValue.Slice(0, 0).Interface() + } else { + bytes, err = json.DefaultJSONHandler.Marshal(fieldValue.Interface()) + if err != nil { + return nil, nil, err + } + val = bytes + } + } else { + continue + } + default: + val = fieldValue.Interface() + } + + APPEND: + args = append(args, val) + colNames = append(colNames, fmt.Sprintf("%v = ?", statement.quote(col.Name))) + } + + return colNames, args, nil +} diff --git a/vendor/xorm.io/xorm/internal/statements/values.go b/vendor/xorm.io/xorm/internal/statements/values.go new file mode 100644 index 000000000..0ab174d60 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/values.go @@ -0,0 +1,154 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package statements + +import ( + "database/sql" + "database/sql/driver" + "fmt" + "reflect" + "time" + + "xorm.io/xorm/convert" + "xorm.io/xorm/dialects" + "xorm.io/xorm/internal/json" + "xorm.io/xorm/schemas" +) + +var ( + nullFloatType = reflect.TypeOf(sql.NullFloat64{}) +) + +// Value2Interface convert a field value of a struct to interface for puting into database +func (statement *Statement) Value2Interface(col *schemas.Column, fieldValue reflect.Value) (interface{}, error) { + if fieldValue.CanAddr() { + if fieldConvert, ok := fieldValue.Addr().Interface().(convert.Conversion); ok { + data, err := fieldConvert.ToDB() + if err != nil { + return nil, err + } + if col.SQLType.IsBlob() { + return data, nil + } + return string(data), nil + } + } + + if fieldConvert, ok := fieldValue.Interface().(convert.Conversion); ok { + data, err := fieldConvert.ToDB() + if err != nil { + return nil, err + } + if col.SQLType.IsBlob() { + return data, nil + } + if nil == data { + return nil, nil + } + return string(data), nil + } + + fieldType := fieldValue.Type() + k := fieldType.Kind() + if k == reflect.Ptr { + if fieldValue.IsNil() { + return nil, nil + } else if !fieldValue.IsValid() { + return nil, nil + } else { + // !nashtsai! 
dereference pointer type to instance type
+ fieldValue = fieldValue.Elem()
+ fieldType = fieldValue.Type()
+ k = fieldType.Kind()
+ }
+ }
+
+ switch k {
+ case reflect.Bool:
+ return fieldValue.Bool(), nil
+ case reflect.String:
+ return fieldValue.String(), nil
+ case reflect.Struct:
+ if fieldType.ConvertibleTo(schemas.TimeType) {
+ t := fieldValue.Convert(schemas.TimeType).Interface().(time.Time)
+ tf := dialects.FormatColumnTime(statement.dialect, statement.defaultTimeZone, col, t)
+ return tf, nil
+ } else if fieldType.ConvertibleTo(nullFloatType) {
+ t := fieldValue.Convert(nullFloatType).Interface().(sql.NullFloat64)
+ if !t.Valid {
+ return nil, nil
+ }
+ return t.Float64, nil
+ }
+
+ if !col.SQLType.IsJson() {
+ // !! add support for structs implementing driver.Valuer, such as sql.NullString
+ if v, ok := fieldValue.Interface().(driver.Valuer); ok {
+ return v.Value()
+ }
+
+ fieldTable, err := statement.tagParser.ParseWithCache(fieldValue)
+ if err != nil {
+ return nil, err
+ }
+ if len(fieldTable.PrimaryKeys) == 1 {
+ pkField := reflect.Indirect(fieldValue).FieldByName(fieldTable.PKColumns()[0].FieldName)
+ return pkField.Interface(), nil
+ }
+ return nil, fmt.Errorf("no primary key for col %v", col.Name)
+ }
+
+ if col.SQLType.IsText() {
+ bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface())
+ if err != nil {
+ return nil, err
+ }
+ return string(bytes), nil
+ } else if col.SQLType.IsBlob() {
+ bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface())
+ if err != nil {
+ return nil, err
+ }
+ return bytes, nil
+ }
+ return nil, fmt.Errorf("Unsupported type %v", fieldValue.Type())
+ case reflect.Complex64, reflect.Complex128:
+ bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface())
+ if err != nil {
+ return nil, err
+ }
+ return string(bytes), nil
+ case reflect.Array, reflect.Slice, reflect.Map:
+ if !fieldValue.IsValid() {
+ return fieldValue.Interface(), nil
+ }
+
+ if col.SQLType.IsText() {
+ bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface())
+ if err != nil {
+ return nil, err
+ }
+ return string(bytes), nil
+ } else if col.SQLType.IsBlob() {
+ var bytes []byte
+ var err error
+ if (k == reflect.Slice) &&
+ (fieldValue.Type().Elem().Kind() == reflect.Uint8) {
+ bytes = fieldValue.Bytes()
+ } else {
+ bytes, err = json.DefaultJSONHandler.Marshal(fieldValue.Interface())
+ if err != nil {
+ return nil, err
+ }
+ }
+ return bytes, nil
+ }
+ return nil, ErrUnSupportedType
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return int64(fieldValue.Uint()), nil
+ default:
+ return fieldValue.Interface(), nil
+ }
+}
diff --git a/vendor/xorm.io/xorm/internal/utils/name.go b/vendor/xorm.io/xorm/internal/utils/name.go
new file mode 100644
index 000000000..f5fc3ff78
--- /dev/null
+++ b/vendor/xorm.io/xorm/internal/utils/name.go
@@ -0,0 +1,13 @@
+// Copyright 2020 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package utils
+
+import (
+ "fmt"
+)
+
+func IndexName(tableName, idxName string) string {
+ return fmt.Sprintf("IDX_%v_%v", tableName, idxName)
+}
diff --git a/vendor/xorm.io/xorm/internal/utils/reflect.go b/vendor/xorm.io/xorm/internal/utils/reflect.go
new file mode 100644
index 000000000..3dad6bfe0
--- /dev/null
+++ b/vendor/xorm.io/xorm/internal/utils/reflect.go
@@ -0,0 +1,13 @@
+// Copyright 2020 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package utils + +import ( + "reflect" +) + +func ReflectValue(bean interface{}) reflect.Value { + return reflect.Indirect(reflect.ValueOf(bean)) +} diff --git a/vendor/xorm.io/xorm/internal/utils/slice.go b/vendor/xorm.io/xorm/internal/utils/slice.go new file mode 100644 index 000000000..89685706d --- /dev/null +++ b/vendor/xorm.io/xorm/internal/utils/slice.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package utils + +import "sort" + +// SliceEq return true if two slice have the same elements even if different sort. +func SliceEq(left, right []string) bool { + if len(left) != len(right) { + return false + } + sort.Sort(sort.StringSlice(left)) + sort.Sort(sort.StringSlice(right)) + for i := 0; i < len(left); i++ { + if left[i] != right[i] { + return false + } + } + return true +} diff --git a/vendor/xorm.io/xorm/internal/utils/sql.go b/vendor/xorm.io/xorm/internal/utils/sql.go new file mode 100644 index 000000000..5e68c4a46 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/utils/sql.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package utils + +import ( + "strings" +) + +func IsSubQuery(tbName string) bool { + const selStr = "select" + if len(tbName) <= len(selStr)+1 { + return false + } + + return strings.EqualFold(tbName[:len(selStr)], selStr) || + strings.EqualFold(tbName[:len(selStr)+1], "("+selStr) +} diff --git a/vendor/xorm.io/xorm/internal/utils/strings.go b/vendor/xorm.io/xorm/internal/utils/strings.go new file mode 100644 index 000000000..b5dc37b77 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/utils/strings.go @@ -0,0 +1,30 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package utils + +import ( + "strings" +) + +func IndexNoCase(s, sep string) int { + return strings.Index(strings.ToLower(s), strings.ToLower(sep)) +} + +func SplitNoCase(s, sep string) []string { + idx := IndexNoCase(s, sep) + if idx < 0 { + return []string{s} + } + return strings.Split(s, s[idx:idx+len(sep)]) +} + +func SplitNNoCase(s, sep string, n int) []string { + idx := IndexNoCase(s, sep) + if idx < 0 { + return []string{s} + } + return strings.SplitN(s, s[idx:idx+len(sep)], n) +} + diff --git a/vendor/xorm.io/xorm/internal/utils/zero.go b/vendor/xorm.io/xorm/internal/utils/zero.go new file mode 100644 index 000000000..8f033c60b --- /dev/null +++ b/vendor/xorm.io/xorm/internal/utils/zero.go @@ -0,0 +1,145 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package utils + +import ( + "reflect" + "time" +) + +type Zeroable interface { + IsZero() bool +} + +var nilTime *time.Time + +// IsZero returns false if k is nil or has a zero value +func IsZero(k interface{}) bool { + if k == nil { + return true + } + + switch k.(type) { + case int: + return k.(int) == 0 + case int8: + return k.(int8) == 0 + case int16: + return k.(int16) == 0 + case int32: + return k.(int32) == 0 + case int64: + return k.(int64) == 0 + case uint: + return k.(uint) == 0 + case uint8: + return k.(uint8) == 0 + case uint16: + return k.(uint16) == 0 + case uint32: + return k.(uint32) == 0 + case uint64: + return k.(uint64) == 0 + case float32: + return k.(float32) == 0 + case float64: + return k.(float64) == 0 + case bool: + return k.(bool) == false + case string: + return k.(string) == "" + case *time.Time: + return k.(*time.Time) == nilTime || IsTimeZero(*k.(*time.Time)) + case time.Time: + return IsTimeZero(k.(time.Time)) + case Zeroable: + return k.(Zeroable) == nil || k.(Zeroable).IsZero() + case reflect.Value: // for go version less than 1.13 because reflect.Value has no method IsZero + return IsValueZero(k.(reflect.Value)) + } + + return IsValueZero(reflect.ValueOf(k)) +} + +var zeroType = reflect.TypeOf((*Zeroable)(nil)).Elem() + +func IsValueZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Slice: + return v.IsNil() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int, reflect.Int64: + return v.Int() == 0 + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint, reflect.Uint64: + return v.Uint() == 0 + case reflect.String: + return v.Len() == 0 + case reflect.Ptr: + if v.IsNil() { + return true + } + return IsValueZero(v.Elem()) + case reflect.Struct: + return IsStructZero(v) + case reflect.Array: + return IsArrayZero(v) + } + return false +} + +func IsStructZero(v reflect.Value) bool { + if !v.IsValid() || v.NumField() == 0 { + return true + } + + if v.Type().Implements(zeroType) { + f := v.MethodByName("IsZero") + if f.IsValid() { + res := f.Call(nil) + return len(res) == 1 && res[0].Bool() + } + } + + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + switch field.Kind() { + case reflect.Ptr: + field = field.Elem() + fallthrough + case reflect.Struct: + if !IsStructZero(field) { + return false + } + default: + if field.CanInterface() && !IsZero(field.Interface()) { + return false + } + } + } + return true +} + +func IsArrayZero(v reflect.Value) bool { + if !v.IsValid() || v.Len() == 0 { + return true + } + + for i := 0; i < v.Len(); i++ { + if !IsZero(v.Index(i).Interface()) { + return false + } + } + + return true +} + +const ( + ZeroTime0 = "0000-00-00 00:00:00" + ZeroTime1 = "0001-01-01 00:00:00" +) + +func IsTimeZero(t time.Time) bool { + return t.IsZero() || t.Format("2006-01-02 15:04:05") == ZeroTime0 || + t.Format("2006-01-02 15:04:05") == ZeroTime1 +} diff --git a/vendor/xorm.io/xorm/logger.go b/vendor/xorm.io/xorm/log/logger.go similarity index 64% rename from vendor/xorm.io/xorm/logger.go rename to vendor/xorm.io/xorm/log/logger.go index 7b26e77f3..eeb63693b 100644 --- a/vendor/xorm.io/xorm/logger.go +++ b/vendor/xorm.io/xorm/log/logger.go @@ -2,26 +2,56 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package xorm +package log import ( "fmt" "io" "log" +) + +// LogLevel defines a log level +type LogLevel int - "xorm.io/core" +// enumerate all LogLevels +const ( + // !nashtsai! 
following level also match syslog.Priority value + LOG_DEBUG LogLevel = iota + LOG_INFO + LOG_WARNING + LOG_ERR + LOG_OFF + LOG_UNKNOWN ) // default log options const ( DEFAULT_LOG_PREFIX = "[xorm]" DEFAULT_LOG_FLAG = log.Ldate | log.Lmicroseconds - DEFAULT_LOG_LEVEL = core.LOG_DEBUG + DEFAULT_LOG_LEVEL = LOG_DEBUG ) -var _ core.ILogger = DiscardLogger{} +// Logger is a logger interface +type Logger interface { + Debug(v ...interface{}) + Debugf(format string, v ...interface{}) + Error(v ...interface{}) + Errorf(format string, v ...interface{}) + Info(v ...interface{}) + Infof(format string, v ...interface{}) + Warn(v ...interface{}) + Warnf(format string, v ...interface{}) + + Level() LogLevel + SetLevel(l LogLevel) + + ShowSQL(show ...bool) + IsShowSQL() bool +} + +var _ Logger = DiscardLogger{} -// DiscardLogger don't log implementation for core.ILogger +// DiscardLogger don't log implementation for ILogger type DiscardLogger struct{} // Debug empty implementation @@ -49,12 +79,12 @@ func (DiscardLogger) Warn(v ...interface{}) {} func (DiscardLogger) Warnf(format string, v ...interface{}) {} // Level empty implementation -func (DiscardLogger) Level() core.LogLevel { - return core.LOG_UNKNOWN +func (DiscardLogger) Level() LogLevel { + return LOG_UNKNOWN } // SetLevel empty implementation -func (DiscardLogger) SetLevel(l core.LogLevel) {} +func (DiscardLogger) SetLevel(l LogLevel) {} // ShowSQL empty implementation func (DiscardLogger) ShowSQL(show ...bool) {} @@ -64,17 +94,17 @@ func (DiscardLogger) IsShowSQL() bool { return false } -// SimpleLogger is the default implment of core.ILogger +// SimpleLogger is the default implment of ILogger type SimpleLogger struct { DEBUG *log.Logger ERR *log.Logger INFO *log.Logger WARN *log.Logger - level core.LogLevel + level LogLevel showSQL bool } -var _ core.ILogger = &SimpleLogger{} +var _ Logger = &SimpleLogger{} // NewSimpleLogger use a special io.Writer as logger output func NewSimpleLogger(out io.Writer) *SimpleLogger { @@ -87,7 +117,7 @@ func NewSimpleLogger2(out io.Writer, prefix string, flag int) *SimpleLogger { } // NewSimpleLogger3 let you customrize your logger prefix and flag and logLevel -func NewSimpleLogger3(out io.Writer, prefix string, flag int, l core.LogLevel) *SimpleLogger { +func NewSimpleLogger3(out io.Writer, prefix string, flag int, l LogLevel) *SimpleLogger { return &SimpleLogger{ DEBUG: log.New(out, fmt.Sprintf("%s [debug] ", prefix), flag), ERR: log.New(out, fmt.Sprintf("%s [error] ", prefix), flag), @@ -97,82 +127,82 @@ func NewSimpleLogger3(out io.Writer, prefix string, flag int, l core.LogLevel) * } } -// Error implement core.ILogger +// Error implement ILogger func (s *SimpleLogger) Error(v ...interface{}) { - if s.level <= core.LOG_ERR { - s.ERR.Output(2, fmt.Sprint(v...)) + if s.level <= LOG_ERR { + s.ERR.Output(2, fmt.Sprintln(v...)) } return } -// Errorf implement core.ILogger +// Errorf implement ILogger func (s *SimpleLogger) Errorf(format string, v ...interface{}) { - if s.level <= core.LOG_ERR { + if s.level <= LOG_ERR { s.ERR.Output(2, fmt.Sprintf(format, v...)) } return } -// Debug implement core.ILogger +// Debug implement ILogger func (s *SimpleLogger) Debug(v ...interface{}) { - if s.level <= core.LOG_DEBUG { - s.DEBUG.Output(2, fmt.Sprint(v...)) + if s.level <= LOG_DEBUG { + s.DEBUG.Output(2, fmt.Sprintln(v...)) } return } -// Debugf implement core.ILogger +// Debugf implement ILogger func (s *SimpleLogger) Debugf(format string, v ...interface{}) { - if s.level <= core.LOG_DEBUG { + if s.level <= 
LOG_DEBUG { s.DEBUG.Output(2, fmt.Sprintf(format, v...)) } return } -// Info implement core.ILogger +// Info implement ILogger func (s *SimpleLogger) Info(v ...interface{}) { - if s.level <= core.LOG_INFO { - s.INFO.Output(2, fmt.Sprint(v...)) + if s.level <= LOG_INFO { + s.INFO.Output(2, fmt.Sprintln(v...)) } return } -// Infof implement core.ILogger +// Infof implement ILogger func (s *SimpleLogger) Infof(format string, v ...interface{}) { - if s.level <= core.LOG_INFO { + if s.level <= LOG_INFO { s.INFO.Output(2, fmt.Sprintf(format, v...)) } return } -// Warn implement core.ILogger +// Warn implement ILogger func (s *SimpleLogger) Warn(v ...interface{}) { - if s.level <= core.LOG_WARNING { - s.WARN.Output(2, fmt.Sprint(v...)) + if s.level <= LOG_WARNING { + s.WARN.Output(2, fmt.Sprintln(v...)) } return } -// Warnf implement core.ILogger +// Warnf implement ILogger func (s *SimpleLogger) Warnf(format string, v ...interface{}) { - if s.level <= core.LOG_WARNING { + if s.level <= LOG_WARNING { s.WARN.Output(2, fmt.Sprintf(format, v...)) } return } -// Level implement core.ILogger -func (s *SimpleLogger) Level() core.LogLevel { +// Level implement ILogger +func (s *SimpleLogger) Level() LogLevel { return s.level } -// SetLevel implement core.ILogger -func (s *SimpleLogger) SetLevel(l core.LogLevel) { +// SetLevel implement ILogger +func (s *SimpleLogger) SetLevel(l LogLevel) { s.level = l return } -// ShowSQL implement core.ILogger +// ShowSQL implement ILogger func (s *SimpleLogger) ShowSQL(show ...bool) { if len(show) == 0 { s.showSQL = true @@ -181,7 +211,7 @@ func (s *SimpleLogger) ShowSQL(show ...bool) { s.showSQL = show[0] } -// IsShowSQL implement core.ILogger +// IsShowSQL implement ILogger func (s *SimpleLogger) IsShowSQL() bool { return s.showSQL } diff --git a/vendor/xorm.io/xorm/log/logger_context.go b/vendor/xorm.io/xorm/log/logger_context.go new file mode 100644 index 000000000..f80091f33 --- /dev/null +++ b/vendor/xorm.io/xorm/log/logger_context.go @@ -0,0 +1,97 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
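// Illustrative sketch (not part of the vendored code): the logging interface that used to
// live in xorm.io/core (core.ILogger, core.LOG_*) now lives in the xorm.io/xorm/log package
// shown above. A minimal caller-side example using only identifiers defined in that hunk
// (NewSimpleLogger, SetLevel, LOG_INFO, ShowSQL, Level):

package main

import (
	"os"

	"xorm.io/xorm/log"
)

func main() {
	logger := log.NewSimpleLogger(os.Stdout) // satisfies the new log.Logger interface
	logger.SetLevel(log.LOG_INFO)            // previously core.LOG_INFO
	logger.ShowSQL(true)                     // ask this logger to record SQL statements
	logger.Infof("logger ready, level=%v", logger.Level())
}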
+ +package log + +import ( + "context" + "time" +) + +// LogContext represents a log context +type LogContext struct { + Ctx context.Context + SQL string // log content or SQL + Args []interface{} // if it's a SQL, it's the arguments + ExecuteTime time.Duration + Err error // SQL executed error +} + +type SQLLogger interface { + BeforeSQL(context LogContext) // only invoked when IsShowSQL is true + AfterSQL(context LogContext) // only invoked when IsShowSQL is true +} + +// ContextLogger represents a logger interface with context +type ContextLogger interface { + SQLLogger + + Debugf(format string, v ...interface{}) + Errorf(format string, v ...interface{}) + Infof(format string, v ...interface{}) + Warnf(format string, v ...interface{}) + + Level() LogLevel + SetLevel(l LogLevel) + + ShowSQL(show ...bool) + IsShowSQL() bool +} + +var ( + _ ContextLogger = &LoggerAdapter{} +) + +// LoggerAdapter wraps a Logger interafce as LoggerContext interface +type LoggerAdapter struct { + logger Logger +} + +func NewLoggerAdapter(logger Logger) ContextLogger { + return &LoggerAdapter{ + logger: logger, + } +} + +func (l *LoggerAdapter) BeforeSQL(ctx LogContext) {} + +func (l *LoggerAdapter) AfterSQL(ctx LogContext) { + if ctx.ExecuteTime > 0 { + l.logger.Infof("[SQL] %v %v - %v", ctx.SQL, ctx.Args, ctx.ExecuteTime) + } else { + l.logger.Infof("[SQL] %v %v", ctx.SQL, ctx.Args) + } +} + +func (l *LoggerAdapter) Debugf(format string, v ...interface{}) { + l.logger.Debugf(format, v...) +} + +func (l *LoggerAdapter) Errorf(format string, v ...interface{}) { + l.logger.Errorf(format, v...) +} + +func (l *LoggerAdapter) Infof(format string, v ...interface{}) { + l.logger.Infof(format, v...) +} + +func (l *LoggerAdapter) Warnf(format string, v ...interface{}) { + l.logger.Warnf(format, v...) +} + +func (l *LoggerAdapter) Level() LogLevel { + return l.logger.Level() +} + +func (l *LoggerAdapter) SetLevel(lv LogLevel) { + l.logger.SetLevel(lv) +} + +func (l *LoggerAdapter) ShowSQL(show ...bool) { + l.logger.ShowSQL(show...) 
+} + +func (l *LoggerAdapter) IsShowSQL() bool { + return l.logger.IsShowSQL() +} diff --git a/vendor/xorm.io/xorm/syslogger.go b/vendor/xorm.io/xorm/log/syslogger.go similarity index 88% rename from vendor/xorm.io/xorm/syslogger.go rename to vendor/xorm.io/xorm/log/syslogger.go index 11ba01e7b..0b3e381c2 100644 --- a/vendor/xorm.io/xorm/syslogger.go +++ b/vendor/xorm.io/xorm/log/syslogger.go @@ -4,16 +4,14 @@ // +build !windows,!nacl,!plan9 -package xorm +package log import ( "fmt" "log/syslog" - - "xorm.io/core" ) -var _ core.ILogger = &SyslogLogger{} +var _ Logger = &SyslogLogger{} // SyslogLogger will be depricated type SyslogLogger struct { @@ -21,7 +19,7 @@ type SyslogLogger struct { showSQL bool } -// NewSyslogLogger implements core.ILogger +// NewSyslogLogger implements Logger func NewSyslogLogger(w *syslog.Writer) *SyslogLogger { return &SyslogLogger{w: w} } @@ -67,12 +65,12 @@ func (s *SyslogLogger) Warnf(format string, v ...interface{}) { } // Level shows log level -func (s *SyslogLogger) Level() core.LogLevel { - return core.LOG_UNKNOWN +func (s *SyslogLogger) Level() LogLevel { + return LOG_UNKNOWN } // SetLevel always return error, as current log/syslog package doesn't allow to set priority level after syslog.Writer created -func (s *SyslogLogger) SetLevel(l core.LogLevel) {} +func (s *SyslogLogger) SetLevel(l LogLevel) {} // ShowSQL set if logging SQL func (s *SyslogLogger) ShowSQL(show ...bool) { diff --git a/vendor/xorm.io/core/mapper.go b/vendor/xorm.io/xorm/names/mapper.go similarity index 93% rename from vendor/xorm.io/core/mapper.go rename to vendor/xorm.io/xorm/names/mapper.go index 4df05cb8e..4aaf0844f 100644 --- a/vendor/xorm.io/core/mapper.go +++ b/vendor/xorm.io/xorm/names/mapper.go @@ -2,28 +2,28 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-package core +package names import ( "strings" "sync" ) -// IMapper represents a name convertation between struct's fields name and table's column name -type IMapper interface { +// Mapper represents a name convertation between struct's fields name and table's column name +type Mapper interface { Obj2Table(string) string Table2Obj(string) string } type CacheMapper struct { - oriMapper IMapper + oriMapper Mapper obj2tableCache map[string]string obj2tableMutex sync.RWMutex table2objCache map[string]string table2objMutex sync.RWMutex } -func NewCacheMapper(mapper IMapper) *CacheMapper { +func NewCacheMapper(mapper Mapper) *CacheMapper { return &CacheMapper{oriMapper: mapper, obj2tableCache: make(map[string]string), table2objCache: make(map[string]string), } @@ -223,7 +223,7 @@ var LintGonicMapper = GonicMapper{ // PrefixMapper provides prefix table name support type PrefixMapper struct { - Mapper IMapper + Mapper Mapper Prefix string } @@ -235,13 +235,13 @@ func (mapper PrefixMapper) Table2Obj(name string) string { return mapper.Mapper.Table2Obj(name[len(mapper.Prefix):]) } -func NewPrefixMapper(mapper IMapper, prefix string) PrefixMapper { +func NewPrefixMapper(mapper Mapper, prefix string) PrefixMapper { return PrefixMapper{mapper, prefix} } // SuffixMapper provides suffix table name support type SuffixMapper struct { - Mapper IMapper + Mapper Mapper Suffix string } @@ -253,6 +253,6 @@ func (mapper SuffixMapper) Table2Obj(name string) string { return mapper.Mapper.Table2Obj(name[:len(name)-len(mapper.Suffix)]) } -func NewSuffixMapper(mapper IMapper, suffix string) SuffixMapper { +func NewSuffixMapper(mapper Mapper, suffix string) SuffixMapper { return SuffixMapper{mapper, suffix} } diff --git a/vendor/xorm.io/xorm/names/table_name.go b/vendor/xorm.io/xorm/names/table_name.go new file mode 100644 index 000000000..0afb1ae39 --- /dev/null +++ b/vendor/xorm.io/xorm/names/table_name.go @@ -0,0 +1,56 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
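// Illustrative sketch (not part of the vendored code): core.IMapper becomes names.Mapper in
// the hunk above. Any type with the interface's two methods can drive table/column naming;
// identityMapper below is a hypothetical example type, NewPrefixMapper comes from the code
// above, and the output assumes PrefixMapper prepends its Prefix in Obj2Table as in upstream xorm.

package main

import (
	"fmt"

	"xorm.io/xorm/names"
)

// identityMapper maps struct names to table names unchanged (example type, not from xorm).
type identityMapper struct{}

func (identityMapper) Obj2Table(name string) string { return name }
func (identityMapper) Table2Obj(name string) string { return name }

func main() {
	var m names.Mapper = names.NewPrefixMapper(identityMapper{}, "gitea_")
	fmt.Println(m.Obj2Table("Repository")) // gitea_Repository
}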
+ +package names + +import ( + "reflect" + "sync" +) + +// TableName table name interface to define customerize table name +type TableName interface { + TableName() string +} + +var ( + tpTableName = reflect.TypeOf((*TableName)(nil)).Elem() + tvCache sync.Map +) + +func GetTableName(mapper Mapper, v reflect.Value) string { + if v.Type().Implements(tpTableName) { + return v.Interface().(TableName).TableName() + } + + if v.Kind() == reflect.Ptr { + v = v.Elem() + if v.Type().Implements(tpTableName) { + return v.Interface().(TableName).TableName() + } + } else if v.CanAddr() { + v1 := v.Addr() + if v1.Type().Implements(tpTableName) { + return v1.Interface().(TableName).TableName() + } + } else { + name, ok := tvCache.Load(v.Type()) + if ok { + if name.(string) != "" { + return name.(string) + } + } else { + v2 := reflect.New(v.Type()) + if v2.Type().Implements(tpTableName) { + tableName := v2.Interface().(TableName).TableName() + tvCache.Store(v.Type(), tableName) + return tableName + } + + tvCache.Store(v.Type(), "") + } + } + + return mapper.Obj2Table(v.Type().Name()) +} diff --git a/vendor/xorm.io/xorm/rows.go b/vendor/xorm.io/xorm/rows.go index bdd44589f..a56ea1c9e 100644 --- a/vendor/xorm.io/xorm/rows.go +++ b/vendor/xorm.io/xorm/rows.go @@ -6,10 +6,13 @@ package xorm import ( "database/sql" + "errors" "fmt" "reflect" - "xorm.io/core" + "xorm.io/builder" + "xorm.io/xorm/core" + "xorm.io/xorm/internal/utils" ) // Rows rows wrapper a rows to @@ -29,7 +32,14 @@ func newRows(session *Session, bean interface{}) (*Rows, error) { var args []interface{} var err error - if err = rows.session.statement.setRefBean(bean); err != nil { + beanValue := reflect.ValueOf(bean) + if beanValue.Kind() != reflect.Ptr { + return nil, errors.New("needs a pointer to a value") + } else if beanValue.Elem().Kind() == reflect.Ptr { + return nil, errors.New("a pointer to a pointer is not allowed") + } + + if err = rows.session.statement.SetRefBean(bean); err != nil { return nil, err } @@ -38,12 +48,39 @@ func newRows(session *Session, bean interface{}) (*Rows, error) { } if rows.session.statement.RawSQL == "" { - sqlStr, args, err = rows.session.statement.genGetSQL(bean) + var autoCond builder.Cond + var addedTableName = (len(session.statement.JoinStr) > 0) + var table = rows.session.statement.RefTable + + if !session.statement.NoAutoCondition { + var err error + autoCond, err = session.statement.BuildConds(table, bean, true, true, false, true, addedTableName) + if err != nil { + return nil, err + } + } else { + // !oinume! Add " IS NULL" to WHERE whatever condiBean is given. + // See https://gitea.com/xorm/xorm/issues/179 + if col := table.DeletedColumn(); col != nil && !session.statement.GetUnscoped() { // tag "deleted" is enabled + var colName = session.engine.Quote(col.Name) + if addedTableName { + var nm = session.statement.TableName() + if len(session.statement.TableAlias) > 0 { + nm = session.statement.TableAlias + } + colName = session.engine.Quote(nm) + "." 
+ colName + } + + autoCond = session.statement.CondDeleted(col) + } + } + + sqlStr, args, err = rows.session.statement.GenFindSQL(autoCond) if err != nil { return nil, err } } else { - sqlStr = rows.session.statement.RawSQL + sqlStr = rows.session.statement.GenRawSQL() args = rows.session.statement.RawParams } @@ -84,7 +121,7 @@ func (rows *Rows) Scan(bean interface{}) error { return fmt.Errorf("scan arg is incompatible type to [%v]", rows.beanType) } - if err := rows.session.statement.setRefBean(bean); err != nil { + if err := rows.session.statement.SetRefBean(bean); err != nil { return err } @@ -98,7 +135,7 @@ func (rows *Rows) Scan(bean interface{}) error { return err } - dataStruct := rValue(bean) + dataStruct := utils.ReflectValue(bean) _, err = rows.session.slice2Bean(scanResults, fields, bean, &dataStruct, rows.session.statement.RefTable) if err != nil { return err diff --git a/vendor/xorm.io/core/column.go b/vendor/xorm.io/xorm/schemas/column.go similarity index 77% rename from vendor/xorm.io/core/column.go rename to vendor/xorm.io/xorm/schemas/column.go index 8f375db59..418629ac3 100644 --- a/vendor/xorm.io/core/column.go +++ b/vendor/xorm.io/xorm/schemas/column.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package core +package schemas import ( "fmt" @@ -21,7 +21,7 @@ const ( type Column struct { Name string TableName string - FieldName string + FieldName string // Avaiable only when parsed from a struct SQLType SQLType IsJSON bool Length int @@ -71,55 +71,6 @@ func NewColumn(name, fieldName string, sqlType SQLType, len1, len2 int, nullable } } -// String generate column description string according dialect -func (col *Column) String(d Dialect) string { - sql := d.Quote(col.Name) + " " - - sql += d.SqlType(col) + " " - - if col.IsPrimaryKey { - sql += "PRIMARY KEY " - if col.IsAutoIncrement { - sql += d.AutoIncrStr() + " " - } - } - - if col.Default != "" { - sql += "DEFAULT " + col.Default + " " - } - - if d.ShowCreateNull() { - if col.Nullable { - sql += "NULL " - } else { - sql += "NOT NULL " - } - } - - return sql -} - -// StringNoPk generate column description string according dialect without primary keys -func (col *Column) StringNoPk(d Dialect) string { - sql := d.Quote(col.Name) + " " - - sql += d.SqlType(col) + " " - - if col.Default != "" { - sql += "DEFAULT " + col.Default + " " - } - - if d.ShowCreateNull() { - if col.Nullable { - sql += "NULL " - } else { - sql += "NOT NULL " - } - } - - return sql -} - // ValueOf returns column's filed of struct's value func (col *Column) ValueOf(bean interface{}) (*reflect.Value, error) { dataStruct := reflect.Indirect(reflect.ValueOf(bean)) diff --git a/vendor/xorm.io/core/index.go b/vendor/xorm.io/xorm/schemas/index.go similarity index 98% rename from vendor/xorm.io/core/index.go rename to vendor/xorm.io/xorm/schemas/index.go index 129b54392..9541250f5 100644 --- a/vendor/xorm.io/core/index.go +++ b/vendor/xorm.io/xorm/schemas/index.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-package core +package schemas import ( "fmt" @@ -23,6 +23,11 @@ type Index struct { Cols []string } +// NewIndex new an index object +func NewIndex(name string, indexType int) *Index { + return &Index{true, name, indexType, make([]string, 0)} +} + func (index *Index) XName(tableName string) string { if !strings.HasPrefix(index.Name, "UQE_") && !strings.HasPrefix(index.Name, "IDX_") { @@ -65,8 +70,3 @@ func (index *Index) Equal(dst *Index) bool { } return true } - -// NewIndex new an index object -func NewIndex(name string, indexType int) *Index { - return &Index{true, name, indexType, make([]string, 0)} -} diff --git a/vendor/xorm.io/core/pk.go b/vendor/xorm.io/xorm/schemas/pk.go similarity index 77% rename from vendor/xorm.io/core/pk.go rename to vendor/xorm.io/xorm/schemas/pk.go index 05a7672d8..03916b44f 100644 --- a/vendor/xorm.io/core/pk.go +++ b/vendor/xorm.io/xorm/schemas/pk.go @@ -2,11 +2,13 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package core +package schemas import ( "bytes" "encoding/gob" + + "xorm.io/xorm/internal/utils" ) type PK []interface{} @@ -16,6 +18,15 @@ func NewPK(pks ...interface{}) *PK { return &p } +func (p *PK) IsZero() bool { + for _, k := range *p { + if utils.IsZero(k) { + return true + } + } + return false +} + func (p *PK) ToString() (string, error) { buf := new(bytes.Buffer) enc := gob.NewEncoder(buf) diff --git a/vendor/xorm.io/xorm/schemas/quote.go b/vendor/xorm.io/xorm/schemas/quote.go new file mode 100644 index 000000000..2a03152e9 --- /dev/null +++ b/vendor/xorm.io/xorm/schemas/quote.go @@ -0,0 +1,236 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
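// Illustrative sketch (not part of the vendored code) of the schemas.PK helper added above:
// IsZero treats the key as zero as soon as any component is zero, which is what the session
// code later checks (via !pk.IsZero()) before issuing a cascaded fetch.

package main

import (
	"fmt"

	"xorm.io/xorm/schemas"
)

func main() {
	fmt.Println(schemas.NewPK(int64(1), "owner").IsZero()) // false: no component is zero
	fmt.Println(schemas.NewPK(int64(0), "owner").IsZero()) // true: the first component is zero
}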
+ +package schemas + +import ( + "strings" +) + +// Quoter represents a quoter to the SQL table name and column name +type Quoter struct { + Prefix byte + Suffix byte + IsReserved func(string) bool +} + +var ( + // AlwaysFalseReverse always think it's not a reverse word + AlwaysNoReserve = func(string) bool { return false } + + // AlwaysReverse always reverse the word + AlwaysReserve = func(string) bool { return true } + + // CommanQuoteMark represnets the common quote mark + CommanQuoteMark byte = '`' + + // CommonQuoter represetns a common quoter + CommonQuoter = Quoter{CommanQuoteMark, CommanQuoteMark, AlwaysReserve} +) + +func (q Quoter) IsEmpty() bool { + return q.Prefix == 0 && q.Suffix == 0 +} + +func (q Quoter) Quote(s string) string { + var buf strings.Builder + q.QuoteTo(&buf, s) + return buf.String() +} + +// Trim removes quotes from s +func (q Quoter) Trim(s string) string { + if len(s) < 2 { + return s + } + + var buf strings.Builder + for i := 0; i < len(s); i++ { + switch { + case i == 0 && s[i] == q.Prefix: + case i == len(s)-1 && s[i] == q.Suffix: + case s[i] == q.Suffix && s[i+1] == '.': + case s[i] == q.Prefix && s[i-1] == '.': + default: + buf.WriteByte(s[i]) + } + } + return buf.String() +} + +func (q Quoter) Join(a []string, sep string) string { + var b strings.Builder + q.JoinWrite(&b, a, sep) + return b.String() +} + +func (q Quoter) JoinWrite(b *strings.Builder, a []string, sep string) error { + if len(a) == 0 { + return nil + } + + n := len(sep) * (len(a) - 1) + for i := 0; i < len(a); i++ { + n += len(a[i]) + } + + b.Grow(n) + for i, s := range a { + if i > 0 { + if _, err := b.WriteString(sep); err != nil { + return err + } + } + if s != "*" { + q.QuoteTo(b, strings.TrimSpace(s)) + } + } + return nil +} + +func findWord(v string, start int) int { + for j := start; j < len(v); j++ { + switch v[j] { + case '.', ' ': + return j + } + } + return len(v) +} + +func findStart(value string, start int) int { + if value[start] == '.' { + return start + 1 + } + if value[start] != ' ' { + return start + } + + var k int + for j := start; j < len(value); j++ { + if value[j] != ' ' { + k = j + break + } + } + if k-1 == len(value) { + return len(value) + } + if (value[k] == 'A' || value[k] == 'a') && (value[k+1] == 'S' || value[k+1] == 's') { + k = k + 2 + } + + for j := k; j < len(value); j++ { + if value[j] != ' ' { + return j + } + } + return len(value) +} + +func (q Quoter) quoteWordTo(buf *strings.Builder, word string) error { + var realWord = word + if (word[0] == CommanQuoteMark && word[len(word)-1] == CommanQuoteMark) || + (word[0] == q.Prefix && word[len(word)-1] == q.Suffix) { + realWord = word[1 : len(word)-1] + } + + if q.IsEmpty() { + _, err := buf.WriteString(realWord) + return err + } + + isReserved := q.IsReserved(realWord) + if isReserved { + if err := buf.WriteByte(q.Prefix); err != nil { + return err + } + } + if _, err := buf.WriteString(realWord); err != nil { + return err + } + if isReserved { + return buf.WriteByte(q.Suffix) + } + + return nil +} + +// QuoteTo quotes the table or column names. i.e. 
if the quotes are [ and ] +// name -> [name] +// `name` -> [name] +// [name] -> [name] +// schema.name -> [schema].[name] +// `schema`.`name` -> [schema].[name] +// `schema`.name -> [schema].[name] +// schema.`name` -> [schema].[name] +// [schema].name -> [schema].[name] +// schema.[name] -> [schema].[name] +// name AS a -> [name] AS a +// schema.name AS a -> [schema].[name] AS a +func (q Quoter) QuoteTo(buf *strings.Builder, value string) error { + var i int + for i < len(value) { + start := findStart(value, i) + if start > i { + if _, err := buf.WriteString(value[i:start]); err != nil { + return err + } + } + var nextEnd = findWord(value, start) + + if err := q.quoteWordTo(buf, value[start:nextEnd]); err != nil { + return err + } + i = nextEnd + } + return nil +} + +// Strings quotes a slice of string +func (q Quoter) Strings(s []string) []string { + var res = make([]string, 0, len(s)) + for _, a := range s { + res = append(res, q.Quote(a)) + } + return res +} + +// Replace replaces common quote(`) as the quotes on the sql +func (q Quoter) Replace(sql string) string { + if q.IsEmpty() { + return sql + } + + var buf strings.Builder + buf.Grow(len(sql)) + + var beginSingleQuote bool + for i := 0; i < len(sql); i++ { + if !beginSingleQuote && sql[i] == CommanQuoteMark { + var j = i + 1 + for ; j < len(sql); j++ { + if sql[j] == CommanQuoteMark { + break + } + } + word := sql[i+1 : j] + isReserved := q.IsReserved(word) + if isReserved { + buf.WriteByte(q.Prefix) + } + buf.WriteString(word) + if isReserved { + buf.WriteByte(q.Suffix) + } + i = j + } else { + if sql[i] == '\'' { + beginSingleQuote = !beginSingleQuote + } + buf.WriteByte(sql[i]) + } + } + return buf.String() +} diff --git a/vendor/xorm.io/core/table.go b/vendor/xorm.io/xorm/schemas/table.go similarity index 95% rename from vendor/xorm.io/core/table.go rename to vendor/xorm.io/xorm/schemas/table.go index 0a3889e14..385969914 100644 --- a/vendor/xorm.io/core/table.go +++ b/vendor/xorm.io/xorm/schemas/table.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
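// Illustrative sketch (not part of the vendored code) of the new schemas.Quoter, using an
// MSSQL-style bracket pair purely as an example; the outputs follow the QuoteTo documentation
// and the Trim/Replace implementations in the hunk above.

package main

import (
	"fmt"

	"xorm.io/xorm/schemas"
)

func main() {
	q := schemas.Quoter{Prefix: '[', Suffix: ']', IsReserved: schemas.AlwaysReserve}
	fmt.Println(q.Quote("schema.name"))            // [schema].[name]
	fmt.Println(q.Trim("[schema].[name]"))         // schema.name
	fmt.Println(q.Replace("SELECT * FROM `user`")) // SELECT * FROM [user]
}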
-package core +package schemas import ( "reflect" @@ -23,20 +23,11 @@ type Table struct { Updated string Deleted string Version string - Cacher Cacher StoreEngine string Charset string Comment string } -func (table *Table) Columns() []*Column { - return table.columns -} - -func (table *Table) ColumnsSeq() []string { - return table.columnsSeq -} - func NewEmptyTable() *Table { return NewTable("", nil) } @@ -53,24 +44,25 @@ func NewTable(name string, t reflect.Type) *Table { } } -func (table *Table) columnsByName(name string) []*Column { - n := len(name) +func (table *Table) Columns() []*Column { + return table.columns +} - for k := range table.columnsMap { - if len(k) != n { - continue - } +func (table *Table) ColumnsSeq() []string { + return table.columnsSeq +} + +func (table *Table) columnsByName(name string) []*Column { + for k, cols := range table.columnsMap { if strings.EqualFold(k, name) { - return table.columnsMap[k] + return cols } } return nil } func (table *Table) GetColumn(name string) *Column { - cols := table.columnsByName(name) - if cols != nil { return cols[0] } @@ -80,7 +72,6 @@ func (table *Table) GetColumn(name string) *Column { func (table *Table) GetColumnIdx(name string, idx int) *Column { cols := table.columnsByName(name) - if cols != nil && idx < len(cols) { return cols[idx] } diff --git a/vendor/xorm.io/core/type.go b/vendor/xorm.io/xorm/schemas/type.go similarity index 95% rename from vendor/xorm.io/core/type.go rename to vendor/xorm.io/xorm/schemas/type.go index 75b6c3638..89459a4de 100644 --- a/vendor/xorm.io/core/type.go +++ b/vendor/xorm.io/xorm/schemas/type.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package core +package schemas import ( "reflect" @@ -11,15 +11,17 @@ import ( "time" ) +type DBType string + const ( - POSTGRES = "postgres" - SQLITE = "sqlite3" - MYSQL = "mysql" - MSSQL = "mssql" - ORACLE = "oracle" + POSTGRES DBType = "postgres" + SQLITE DBType = "sqlite3" + MYSQL DBType = "mysql" + MSSQL DBType = "mssql" + ORACLE DBType = "oracle" ) -// xorm SQL types +// SQLType represents SQL types type SQLType struct { Name string DefaultLength int @@ -32,6 +34,7 @@ const ( BLOB_TYPE TIME_TYPE NUMERIC_TYPE + ARRAY_TYPE ) func (s *SQLType) IsType(st int) bool { @@ -57,6 +60,10 @@ func (s *SQLType) IsNumeric() bool { return s.IsType(NUMERIC_TYPE) } +func (s *SQLType) IsArray() bool { + return s.IsType(ARRAY_TYPE) +} + func (s *SQLType) IsJson() bool { return s.Name == Json || s.Name == Jsonb } @@ -121,6 +128,8 @@ var ( Json = "JSON" Jsonb = "JSONB" + Array = "ARRAY" + SqlTypes = map[string]int{ Bit: NUMERIC_TYPE, TinyInt: NUMERIC_TYPE, @@ -178,6 +187,8 @@ var ( Serial: NUMERIC_TYPE, BigSerial: NUMERIC_TYPE, + + Array: ARRAY_TYPE, } intTypes = sort.StringSlice{"*int", "*int16", "*int32", "*int8"} diff --git a/vendor/xorm.io/xorm/session.go b/vendor/xorm.io/xorm/session.go index 830719355..4842883b6 100644 --- a/vendor/xorm.io/xorm/session.go +++ b/vendor/xorm.io/xorm/session.go @@ -14,9 +14,34 @@ import ( "strings" "time" - "xorm.io/core" + "xorm.io/xorm/contexts" + "xorm.io/xorm/convert" + "xorm.io/xorm/core" + "xorm.io/xorm/internal/json" + "xorm.io/xorm/internal/statements" + "xorm.io/xorm/schemas" ) +// ErrFieldIsNotExist columns does not exist +type ErrFieldIsNotExist struct { + FieldName string + TableName string +} + +func (e ErrFieldIsNotExist) Error() string { + return fmt.Sprintf("field %s is not valid on table %s", e.FieldName, e.TableName) +} + +// ErrFieldIsNotValid is 
not valid +type ErrFieldIsNotValid struct { + FieldName string + TableName string +} + +func (e ErrFieldIsNotValid) Error() string { + return fmt.Sprintf("field %s is not valid on table %s", e.FieldName, e.TableName) +} + type sessionType int const ( @@ -30,7 +55,7 @@ type Session struct { db *core.DB engine *Engine tx *core.Tx - statement Statement + statement *statements.Statement isAutoCommit bool isCommitedOrRollbacked bool isAutoClose bool @@ -53,8 +78,6 @@ type Session struct { prepareStmt bool stmtCache map[uint32]*core.Stmt //key: hash.Hash32 of (queryStr, len(queryStr)) - // !evalphobia! stored the last executed query on this session - //beforeSQLExec func(string, ...interface{}) lastSQL string lastSQLArgs []interface{} showSQL bool @@ -71,9 +94,12 @@ func (session *Session) Clone() *Session { // Init reset the session as the init status. func (session *Session) Init() { - session.statement.Init() - session.statement.Engine = session.engine - session.showSQL = session.engine.showSQL + session.statement = statements.NewStatement( + session.engine.dialect, + session.engine.tagParser, + session.engine.DatabaseTZ, + ) + session.isAutoCommit = true session.isCommitedOrRollbacked = false session.isAutoClose = false @@ -115,8 +141,8 @@ func (session *Session) Close() { } // ContextCache enable context cache or not -func (session *Session) ContextCache(context ContextCache) *Session { - session.statement.context = context +func (session *Session) ContextCache(context contexts.ContextCache) *Session { + session.statement.SetContextCache(context) return session } @@ -127,7 +153,7 @@ func (session *Session) IsClosed() bool { func (session *Session) resetStatement() { if session.autoResetStatement { - session.statement.Init() + session.statement.Reset() } } @@ -155,7 +181,9 @@ func (session *Session) After(closures func(interface{})) *Session { // Table can input a string or pointer to struct for special a table to operate. func (session *Session) Table(tableNameOrBean interface{}) *Session { - session.statement.Table(tableNameOrBean) + if err := session.statement.SetTable(tableNameOrBean); err != nil { + session.statement.LastError = err + } return session } @@ -179,7 +207,7 @@ func (session *Session) ForUpdate() *Session { // NoAutoCondition disable generate SQL condition from beans func (session *Session) NoAutoCondition(no ...bool) *Session { - session.statement.NoAutoCondition(no...) + session.statement.SetNoAutoCondition(no...) 
return session } @@ -230,11 +258,11 @@ func (session *Session) Cascade(trueOrFalse ...bool) *Session { // MustLogSQL means record SQL or not and don't follow engine's setting func (session *Session) MustLogSQL(log ...bool) *Session { + var showSQL = true if len(log) > 0 { - session.showSQL = log[0] - } else { - session.showSQL = true + showSQL = log[0] } + session.ctx = context.WithValue(session.ctx, "__xorm_show_sql", showSQL) return session } @@ -266,7 +294,7 @@ func (session *Session) Having(conditions string) *Session { // DB db return the wrapper of sql.DB func (session *Session) DB() *core.DB { if session.db == nil { - session.db = session.engine.db + session.db = session.engine.DB() session.stmtCache = make(map[uint32]*core.Stmt, 0) } return session.db @@ -285,7 +313,7 @@ func (session *Session) canCache() bool { !session.statement.UseCache || session.statement.IsForUpdate || session.tx != nil || - len(session.statement.selectStr) > 0 { + len(session.statement.SelectStr) > 0 { return false } return true @@ -306,8 +334,8 @@ func (session *Session) doPrepare(db *core.DB, sqlStr string) (stmt *core.Stmt, return } -func (session *Session) getField(dataStruct *reflect.Value, key string, table *core.Table, idx int) (*reflect.Value, error) { - var col *core.Column +func (session *Session) getField(dataStruct *reflect.Value, key string, table *schemas.Table, idx int) (*reflect.Value, error) { + var col *schemas.Column if col = table.GetColumnIdx(key, idx); col == nil { return nil, ErrFieldIsNotExist{key, table.Name} } @@ -328,8 +356,8 @@ func (session *Session) getField(dataStruct *reflect.Value, key string, table *c type Cell *interface{} func (session *Session) rows2Beans(rows *core.Rows, fields []string, - table *core.Table, newElemFunc func([]string) reflect.Value, - sliceValueSetFunc func(*reflect.Value, core.PK) error) error { + table *schemas.Table, newElemFunc func([]string) reflect.Value, + sliceValueSetFunc func(*reflect.Value, schemas.PK) error) error { for rows.Next() { var newValue = newElemFunc(fields) bean := newValue.Interface() @@ -377,7 +405,7 @@ func (session *Session) row2Slice(rows *core.Rows, fields []string, bean interfa return scanResults, nil } -func (session *Session) slice2Bean(scanResults []interface{}, fields []string, bean interface{}, dataStruct *reflect.Value, table *core.Table) (core.PK, error) { +func (session *Session) slice2Bean(scanResults []interface{}, fields []string, bean interface{}, dataStruct *reflect.Value, table *schemas.Table) (schemas.PK, error) { defer func() { if b, hasAfterSet := bean.(AfterSetProcessor); hasAfterSet { for ii, key := range fields { @@ -421,7 +449,7 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b } var tempMap = make(map[string]int) - var pk core.PK + var pk schemas.PK for ii, key := range fields { var idx int var ok bool @@ -436,7 +464,7 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b fieldValue, err := session.getField(dataStruct, key, table, idx) if err != nil { if !strings.Contains(err.Error(), "is not valid") { - session.engine.logger.Warn(err) + session.engine.logger.Warnf("%v", err) } continue } @@ -451,7 +479,7 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b } if fieldValue.CanAddr() { - if structConvert, ok := fieldValue.Addr().Interface().(core.Conversion); ok { + if structConvert, ok := fieldValue.Addr().Interface().(convert.Conversion); ok { if data, err := value2Bytes(&rawValue); err == nil { if err := 
structConvert.FromDB(data); err != nil { return nil, err @@ -463,12 +491,12 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b } } - if _, ok := fieldValue.Interface().(core.Conversion); ok { + if _, ok := fieldValue.Interface().(convert.Conversion); ok { if data, err := value2Bytes(&rawValue); err == nil { if fieldValue.Kind() == reflect.Ptr && fieldValue.IsNil() { fieldValue.Set(reflect.New(fieldValue.Type().Elem())) } - fieldValue.Interface().(core.Conversion).FromDB(data) + fieldValue.Interface().(convert.Conversion).FromDB(data) } else { return nil, err } @@ -488,7 +516,7 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b var bs []byte if rawValueType.Kind() == reflect.String { bs = []byte(vv.String()) - } else if rawValueType.ConvertibleTo(core.BytesType) { + } else if rawValueType.ConvertibleTo(schemas.BytesType) { bs = vv.Bytes() } else { return nil, fmt.Errorf("unsupported database data type: %s %v", key, rawValueType.Kind()) @@ -502,13 +530,13 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b continue } if fieldValue.CanAddr() { - err := DefaultJSONHandler.Unmarshal(bs, fieldValue.Addr().Interface()) + err := json.DefaultJSONHandler.Unmarshal(bs, fieldValue.Addr().Interface()) if err != nil { return nil, err } } else { x := reflect.New(fieldType) - err := DefaultJSONHandler.Unmarshal(bs, x.Interface()) + err := json.DefaultJSONHandler.Unmarshal(bs, x.Interface()) if err != nil { return nil, err } @@ -525,20 +553,20 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b var bs []byte if rawValueType.Kind() == reflect.String { bs = []byte(vv.String()) - } else if rawValueType.ConvertibleTo(core.BytesType) { + } else if rawValueType.ConvertibleTo(schemas.BytesType) { bs = vv.Bytes() } hasAssigned = true if len(bs) > 0 { if fieldValue.CanAddr() { - err := DefaultJSONHandler.Unmarshal(bs, fieldValue.Addr().Interface()) + err := json.DefaultJSONHandler.Unmarshal(bs, fieldValue.Addr().Interface()) if err != nil { return nil, err } } else { x := reflect.New(fieldType) - err := DefaultJSONHandler.Unmarshal(bs, x.Interface()) + err := json.DefaultJSONHandler.Unmarshal(bs, x.Interface()) if err != nil { return nil, err } @@ -554,7 +582,7 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b hasAssigned = true if col.SQLType.IsText() { x := reflect.New(fieldType) - err := DefaultJSONHandler.Unmarshal(vv.Bytes(), x.Interface()) + err := json.DefaultJSONHandler.Unmarshal(vv.Bytes(), x.Interface()) if err != nil { return nil, err } @@ -607,16 +635,16 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b fieldValue.SetUint(uint64(vv.Int())) } case reflect.Struct: - if fieldType.ConvertibleTo(core.TimeType) { + if fieldType.ConvertibleTo(schemas.TimeType) { dbTZ := session.engine.DatabaseTZ if col.TimeZone != nil { dbTZ = col.TimeZone } - if rawValueType == core.TimeType { + if rawValueType == schemas.TimeType { hasAssigned = true - t := vv.Convert(core.TimeType).Interface().(time.Time) + t := vv.Convert(schemas.TimeType).Interface().(time.Time) z, _ := t.Zone() // set new location if database don't save timezone or give an incorrect timezone @@ -628,8 +656,8 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b t = t.In(session.engine.TZLocation) fieldValue.Set(reflect.ValueOf(t).Convert(fieldType)) - } else if rawValueType == core.IntType || rawValueType == core.Int64Type || - 
rawValueType == core.Int32Type { + } else if rawValueType == schemas.IntType || rawValueType == schemas.Int64Type || + rawValueType == schemas.Int32Type { hasAssigned = true t := time.Unix(vv.Int(), 0).In(session.engine.TZLocation) @@ -639,7 +667,7 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b hasAssigned = true t, err := session.byte2Time(col, d) if err != nil { - session.engine.logger.Error("byte2Time error:", err.Error()) + session.engine.logger.Errorf("byte2Time error: %v", err) hasAssigned = false } else { fieldValue.Set(reflect.ValueOf(t).Convert(fieldType)) @@ -648,7 +676,7 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b hasAssigned = true t, err := session.str2Time(col, d) if err != nil { - session.engine.logger.Error("byte2Time error:", err.Error()) + session.engine.logger.Errorf("byte2Time error: %v", err) hasAssigned = false } else { fieldValue.Set(reflect.ValueOf(t).Convert(fieldType)) @@ -661,7 +689,7 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b // !! 增加支持sql.Scanner接口的结构,如sql.NullString hasAssigned = true if err := nulVal.Scan(vv.Interface()); err != nil { - session.engine.logger.Error("sql.Sanner error:", err.Error()) + session.engine.logger.Errorf("sql.Sanner error: %v", err) hasAssigned = false } } else if col.SQLType.IsJson() { @@ -669,7 +697,7 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b hasAssigned = true x := reflect.New(fieldType) if len([]byte(vv.String())) > 0 { - err := DefaultJSONHandler.Unmarshal([]byte(vv.String()), x.Interface()) + err := json.DefaultJSONHandler.Unmarshal([]byte(vv.String()), x.Interface()) if err != nil { return nil, err } @@ -679,7 +707,7 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b hasAssigned = true x := reflect.New(fieldType) if len(vv.Bytes()) > 0 { - err := DefaultJSONHandler.Unmarshal(vv.Bytes(), x.Interface()) + err := json.DefaultJSONHandler.Unmarshal(vv.Bytes(), x.Interface()) if err != nil { return nil, err } @@ -687,7 +715,7 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b } } } else if session.statement.UseCascade { - table, err := session.engine.autoMapType(*fieldValue) + table, err := session.engine.tagParser.ParseWithCache(*fieldValue) if err != nil { return nil, err } @@ -696,13 +724,13 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b if len(table.PrimaryKeys) != 1 { return nil, errors.New("unsupported non or composited primary key cascade") } - var pk = make(core.PK, len(table.PrimaryKeys)) + var pk = make(schemas.PK, len(table.PrimaryKeys)) pk[0], err = asKind(vv, rawValueType) if err != nil { return nil, err } - if !isPKZero(pk) { + if !pk.IsZero() { // !nashtsai! TODO for hasOne relationship, it's preferred to use join query for eager fetch // however, also need to consider adding a 'lazy' attribute to xorm tag which allow hasOne // property to be fetched lazily @@ -722,110 +750,110 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b // !nashtsai! 
TODO merge duplicated codes above switch fieldType { // following types case matching ptr's native type, therefore assign ptr directly - case core.PtrStringType: + case schemas.PtrStringType: if rawValueType.Kind() == reflect.String { x := vv.String() hasAssigned = true fieldValue.Set(reflect.ValueOf(&x)) } - case core.PtrBoolType: + case schemas.PtrBoolType: if rawValueType.Kind() == reflect.Bool { x := vv.Bool() hasAssigned = true fieldValue.Set(reflect.ValueOf(&x)) } - case core.PtrTimeType: - if rawValueType == core.PtrTimeType { + case schemas.PtrTimeType: + if rawValueType == schemas.PtrTimeType { hasAssigned = true var x = rawValue.Interface().(time.Time) fieldValue.Set(reflect.ValueOf(&x)) } - case core.PtrFloat64Type: + case schemas.PtrFloat64Type: if rawValueType.Kind() == reflect.Float64 { x := vv.Float() hasAssigned = true fieldValue.Set(reflect.ValueOf(&x)) } - case core.PtrUint64Type: + case schemas.PtrUint64Type: if rawValueType.Kind() == reflect.Int64 { var x = uint64(vv.Int()) hasAssigned = true fieldValue.Set(reflect.ValueOf(&x)) } - case core.PtrInt64Type: + case schemas.PtrInt64Type: if rawValueType.Kind() == reflect.Int64 { x := vv.Int() hasAssigned = true fieldValue.Set(reflect.ValueOf(&x)) } - case core.PtrFloat32Type: + case schemas.PtrFloat32Type: if rawValueType.Kind() == reflect.Float64 { var x = float32(vv.Float()) hasAssigned = true fieldValue.Set(reflect.ValueOf(&x)) } - case core.PtrIntType: + case schemas.PtrIntType: if rawValueType.Kind() == reflect.Int64 { var x = int(vv.Int()) hasAssigned = true fieldValue.Set(reflect.ValueOf(&x)) } - case core.PtrInt32Type: + case schemas.PtrInt32Type: if rawValueType.Kind() == reflect.Int64 { var x = int32(vv.Int()) hasAssigned = true fieldValue.Set(reflect.ValueOf(&x)) } - case core.PtrInt8Type: + case schemas.PtrInt8Type: if rawValueType.Kind() == reflect.Int64 { var x = int8(vv.Int()) hasAssigned = true fieldValue.Set(reflect.ValueOf(&x)) } - case core.PtrInt16Type: + case schemas.PtrInt16Type: if rawValueType.Kind() == reflect.Int64 { var x = int16(vv.Int()) hasAssigned = true fieldValue.Set(reflect.ValueOf(&x)) } - case core.PtrUintType: + case schemas.PtrUintType: if rawValueType.Kind() == reflect.Int64 { var x = uint(vv.Int()) hasAssigned = true fieldValue.Set(reflect.ValueOf(&x)) } - case core.PtrUint32Type: + case schemas.PtrUint32Type: if rawValueType.Kind() == reflect.Int64 { var x = uint32(vv.Int()) hasAssigned = true fieldValue.Set(reflect.ValueOf(&x)) } - case core.Uint8Type: + case schemas.Uint8Type: if rawValueType.Kind() == reflect.Int64 { var x = uint8(vv.Int()) hasAssigned = true fieldValue.Set(reflect.ValueOf(&x)) } - case core.Uint16Type: + case schemas.Uint16Type: if rawValueType.Kind() == reflect.Int64 { var x = uint16(vv.Int()) hasAssigned = true fieldValue.Set(reflect.ValueOf(&x)) } - case core.Complex64Type: + case schemas.Complex64Type: var x complex64 if len([]byte(vv.String())) > 0 { - err := DefaultJSONHandler.Unmarshal([]byte(vv.String()), &x) + err := json.DefaultJSONHandler.Unmarshal([]byte(vv.String()), &x) if err != nil { return nil, err } fieldValue.Set(reflect.ValueOf(&x)) } hasAssigned = true - case core.Complex128Type: + case schemas.Complex128Type: var x complex128 if len([]byte(vv.String())) > 0 { - err := DefaultJSONHandler.Unmarshal([]byte(vv.String()), &x) + err := json.DefaultJSONHandler.Unmarshal([]byte(vv.String()), &x) if err != nil { return nil, err } @@ -854,17 +882,6 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b func (session 
*Session) saveLastSQL(sql string, args ...interface{}) { session.lastSQL = sql session.lastSQLArgs = args - session.logSQL(sql, args...) -} - -func (session *Session) logSQL(sqlStr string, sqlArgs ...interface{}) { - if session.showSQL && !session.engine.showExecTime { - if len(sqlArgs) > 0 { - session.engine.logger.Infof("[SQL] %v %#v", sqlStr, sqlArgs) - } else { - session.engine.logger.Infof("[SQL] %v", sqlStr) - } - } } // LastSQL returns last query information @@ -874,7 +891,7 @@ func (session *Session) LastSQL() (string, []interface{}) { // Unscoped always disable struct tag "deleted" func (session *Session) Unscoped() *Session { - session.statement.Unscoped() + session.statement.SetUnscoped() return session } @@ -886,3 +903,19 @@ func (session *Session) incrVersionFieldValue(fieldValue *reflect.Value) { fieldValue.SetUint(fieldValue.Uint() + 1) } } + +// Context sets the context on this session +func (session *Session) Context(ctx context.Context) *Session { + session.ctx = ctx + return session +} + +// PingContext test if database is ok +func (session *Session) PingContext(ctx context.Context) error { + if session.isAutoClose { + defer session.Close() + } + + session.engine.logger.Infof("PING DATABASE %v", session.engine.DriverName()) + return session.DB().PingContext(ctx) +} diff --git a/vendor/xorm.io/xorm/session_cols.go b/vendor/xorm.io/xorm/session_cols.go index 1558074f3..ca3589aba 100644 --- a/vendor/xorm.io/xorm/session_cols.go +++ b/vendor/xorm.io/xorm/session_cols.go @@ -9,10 +9,10 @@ import ( "strings" "time" - "xorm.io/core" + "xorm.io/xorm/schemas" ) -func setColumnInt(bean interface{}, col *core.Column, t int64) { +func setColumnInt(bean interface{}, col *schemas.Column, t int64) { v, err := col.ValueOf(bean) if err != nil { return @@ -27,7 +27,7 @@ func setColumnInt(bean interface{}, col *core.Column, t int64) { } } -func setColumnTime(bean interface{}, col *core.Column, t time.Time) { +func setColumnTime(bean interface{}, col *schemas.Column, t time.Time) { v, err := col.ValueOf(bean) if err != nil { return @@ -44,7 +44,7 @@ func setColumnTime(bean interface{}, col *core.Column, t time.Time) { } } -func getFlagForColumn(m map[string]bool, col *core.Column) (val bool, has bool) { +func getFlagForColumn(m map[string]bool, col *schemas.Column) (val bool, has bool) { if len(m) == 0 { return false, false } @@ -63,19 +63,6 @@ func getFlagForColumn(m map[string]bool, col *core.Column) (val bool, has bool) return false, false } -func col2NewCols(columns ...string) []string { - newColumns := make([]string, 0, len(columns)) - for _, col := range columns { - col = strings.Replace(col, "`", "", -1) - col = strings.Replace(col, `"`, "", -1) - ccols := strings.Split(col, ",") - for _, c := range ccols { - newColumns = append(newColumns, strings.TrimSpace(c)) - } - } - return newColumns -} - // Incr provides a query string like "count = count + 1" func (session *Session) Incr(column string, arg ...interface{}) *Session { session.statement.Incr(column, arg...) diff --git a/vendor/xorm.io/xorm/session_cond.go b/vendor/xorm.io/xorm/session_cond.go index b16bdea8e..25d171481 100644 --- a/vendor/xorm.io/xorm/session_cond.go +++ b/vendor/xorm.io/xorm/session_cond.go @@ -6,14 +6,6 @@ package xorm import "xorm.io/builder" -// Sql provides raw sql input parameter. When you have a complex SQL statement -// and cannot use Where, Id, In and etc. Methods to describe, you can use SQL. -// -// Deprecated: use SQL instead. 
-func (session *Session) Sql(query string, args ...interface{}) *Session { - return session.SQL(query, args...) -} - // SQL provides raw sql input parameter. When you have a complex SQL statement // and cannot use Where, Id, In and etc. Methods to describe, you can use SQL. func (session *Session) SQL(query interface{}, args ...interface{}) *Session { @@ -39,13 +31,6 @@ func (session *Session) Or(query interface{}, args ...interface{}) *Session { return session } -// Id provides converting id as a query condition -// -// Deprecated: use ID instead -func (session *Session) Id(id interface{}) *Session { - return session.ID(id) -} - // ID provides converting id as a query condition func (session *Session) ID(id interface{}) *Session { session.statement.ID(id) @@ -66,5 +51,5 @@ func (session *Session) NotIn(column string, args ...interface{}) *Session { // Conds returns session query conditions except auto bean conditions func (session *Session) Conds() builder.Cond { - return session.statement.cond + return session.statement.Conds() } diff --git a/vendor/xorm.io/xorm/session_context.go b/vendor/xorm.io/xorm/session_context.go deleted file mode 100644 index 915f05685..000000000 --- a/vendor/xorm.io/xorm/session_context.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -import "context" - -// Context sets the context on this session -func (session *Session) Context(ctx context.Context) *Session { - session.ctx = ctx - return session -} - -// PingContext test if database is ok -func (session *Session) PingContext(ctx context.Context) error { - if session.isAutoClose { - defer session.Close() - } - - session.engine.logger.Infof("PING DATABASE %v", session.engine.DriverName()) - return session.DB().PingContext(ctx) -} diff --git a/vendor/xorm.io/xorm/session_convert.go b/vendor/xorm.io/xorm/session_convert.go index 7f11354d5..a68399473 100644 --- a/vendor/xorm.io/xorm/session_convert.go +++ b/vendor/xorm.io/xorm/session_convert.go @@ -6,7 +6,6 @@ package xorm import ( "database/sql" - "database/sql/driver" "errors" "fmt" "reflect" @@ -14,10 +13,13 @@ import ( "strings" "time" - "xorm.io/core" + "xorm.io/xorm/convert" + "xorm.io/xorm/internal/json" + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" ) -func (session *Session) str2Time(col *core.Column, data string) (outTime time.Time, outErr error) { +func (session *Session) str2Time(col *schemas.Column, data string) (outTime time.Time, outErr error) { sdata := strings.TrimSpace(data) var x time.Time var err error @@ -27,7 +29,7 @@ func (session *Session) str2Time(col *core.Column, data string) (outTime time.Ti parseLoc = col.TimeZone } - if sdata == zeroTime0 || sdata == zeroTime1 { + if sdata == utils.ZeroTime0 || sdata == utils.ZeroTime1 { } else if !strings.ContainsAny(sdata, "- :") { // !nashtsai! 
has only found that mymysql driver is using this for time type column // time stamp sd, err := strconv.ParseInt(sdata, 10, 64) @@ -54,14 +56,14 @@ func (session *Session) str2Time(col *core.Column, data string) (outTime time.Ti } else if len(sdata) == 10 && sdata[4] == '-' && sdata[7] == '-' { x, err = time.ParseInLocation("2006-01-02", sdata, parseLoc) //session.engine.logger.Debugf("time(5) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata) - } else if col.SQLType.Name == core.Time { + } else if col.SQLType.Name == schemas.Time { if strings.Contains(sdata, " ") { ssd := strings.Split(sdata, " ") sdata = ssd[1] } sdata = strings.TrimSpace(sdata) - if session.engine.dialect.DBType() == core.MYSQL && len(sdata) > 8 { + if session.engine.dialect.URI().DBType == schemas.MYSQL && len(sdata) > 8 { sdata = sdata[len(sdata)-8:] } @@ -80,21 +82,17 @@ func (session *Session) str2Time(col *core.Column, data string) (outTime time.Ti return } -func (session *Session) byte2Time(col *core.Column, data []byte) (outTime time.Time, outErr error) { +func (session *Session) byte2Time(col *schemas.Column, data []byte) (outTime time.Time, outErr error) { return session.str2Time(col, string(data)) } -var ( - nullFloatType = reflect.TypeOf(sql.NullFloat64{}) -) - // convert a db data([]byte) to a field value -func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, data []byte) error { - if structConvert, ok := fieldValue.Addr().Interface().(core.Conversion); ok { +func (session *Session) bytes2Value(col *schemas.Column, fieldValue *reflect.Value, data []byte) error { + if structConvert, ok := fieldValue.Addr().Interface().(convert.Conversion); ok { return structConvert.FromDB(data) } - if structConvert, ok := fieldValue.Interface().(core.Conversion); ok { + if structConvert, ok := fieldValue.Interface().(convert.Conversion); ok { return structConvert.FromDB(data) } @@ -106,9 +104,8 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, case reflect.Complex64, reflect.Complex128: x := reflect.New(fieldType) if len(data) > 0 { - err := DefaultJSONHandler.Unmarshal(data, x.Interface()) + err := json.DefaultJSONHandler.Unmarshal(data, x.Interface()) if err != nil { - session.engine.logger.Error(err) return err } fieldValue.Set(x.Elem()) @@ -120,9 +117,8 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, if col.SQLType.IsText() { x := reflect.New(fieldType) if len(data) > 0 { - err := DefaultJSONHandler.Unmarshal(data, x.Interface()) + err := json.DefaultJSONHandler.Unmarshal(data, x.Interface()) if err != nil { - session.engine.logger.Error(err) return err } fieldValue.Set(x.Elem()) @@ -133,9 +129,8 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, } else { x := reflect.New(fieldType) if len(data) > 0 { - err := DefaultJSONHandler.Unmarshal(data, x.Interface()) + err := json.DefaultJSONHandler.Unmarshal(data, x.Interface()) if err != nil { - session.engine.logger.Error(err) return err } fieldValue.Set(x.Elem()) @@ -157,8 +152,8 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, var x int64 var err error // for mysql, when use bit, it returned \x01 - if col.SQLType.Name == core.Bit && - session.engine.dialect.DBType() == core.MYSQL { // !nashtsai! TODO dialect needs to provide conversion interface API + if col.SQLType.Name == schemas.Bit && + session.engine.dialect.URI().DBType == schemas.MYSQL { // !nashtsai! 
TODO dialect needs to provide conversion interface API if len(data) == 1 { x = int64(data[0]) } else { @@ -199,7 +194,7 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, return fmt.Errorf("sql.Scan(%v) failed: %s ", data, err.Error()) } } else { - if fieldType.ConvertibleTo(core.TimeType) { + if fieldType.ConvertibleTo(schemas.TimeType) { x, err := session.byte2Time(col, data) if err != nil { return err @@ -207,7 +202,7 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, v = x fieldValue.Set(reflect.ValueOf(v).Convert(fieldType)) } else if session.statement.UseCascade { - table, err := session.engine.autoMapType(*fieldValue) + table, err := session.engine.tagParser.ParseWithCache(*fieldValue) if err != nil { return err } @@ -217,14 +212,14 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, return errors.New("unsupported composited primary key cascade") } - var pk = make(core.PK, len(table.PrimaryKeys)) + var pk = make(schemas.PK, len(table.PrimaryKeys)) rawValueType := table.ColumnType(table.PKColumns()[0].FieldName) pk[0], err = str2PK(string(data), rawValueType) if err != nil { return err } - if !isPKZero(pk) { + if !pk.IsZero() { // !nashtsai! TODO for hasOne relationship, it's preferred to use join query for eager fetch // however, also need to consider adding a 'lazy' attribute to xorm tag which allow hasOne // property to be fetched lazily @@ -247,11 +242,11 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, //typeStr := fieldType.String() switch fieldType.Elem().Kind() { // case "*string": - case core.StringType.Kind(): + case schemas.StringType.Kind(): x := string(data) fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) // case "*bool": - case core.BoolType.Kind(): + case schemas.BoolType.Kind(): d := string(data) v, err := strconv.ParseBool(d) if err != nil { @@ -259,36 +254,34 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, } fieldValue.Set(reflect.ValueOf(&v).Convert(fieldType)) // case "*complex64": - case core.Complex64Type.Kind(): + case schemas.Complex64Type.Kind(): var x complex64 if len(data) > 0 { - err := DefaultJSONHandler.Unmarshal(data, &x) + err := json.DefaultJSONHandler.Unmarshal(data, &x) if err != nil { - session.engine.logger.Error(err) return err } fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) } // case "*complex128": - case core.Complex128Type.Kind(): + case schemas.Complex128Type.Kind(): var x complex128 if len(data) > 0 { - err := DefaultJSONHandler.Unmarshal(data, &x) + err := json.DefaultJSONHandler.Unmarshal(data, &x) if err != nil { - session.engine.logger.Error(err) return err } fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) } // case "*float64": - case core.Float64Type.Kind(): + case schemas.Float64Type.Kind(): x, err := strconv.ParseFloat(string(data), 64) if err != nil { return fmt.Errorf("arg %v as float64: %s", key, err.Error()) } fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) // case "*float32": - case core.Float32Type.Kind(): + case schemas.Float32Type.Kind(): var x float32 x1, err := strconv.ParseFloat(string(data), 32) if err != nil { @@ -297,7 +290,7 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, x = float32(x1) fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) // case "*uint64": - case core.Uint64Type.Kind(): + case schemas.Uint64Type.Kind(): var x uint64 x, err := strconv.ParseUint(string(data), 10, 64) if 
err != nil { @@ -305,7 +298,7 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, } fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) // case "*uint": - case core.UintType.Kind(): + case schemas.UintType.Kind(): var x uint x1, err := strconv.ParseUint(string(data), 10, 64) if err != nil { @@ -314,7 +307,7 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, x = uint(x1) fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) // case "*uint32": - case core.Uint32Type.Kind(): + case schemas.Uint32Type.Kind(): var x uint32 x1, err := strconv.ParseUint(string(data), 10, 64) if err != nil { @@ -323,7 +316,7 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, x = uint32(x1) fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) // case "*uint8": - case core.Uint8Type.Kind(): + case schemas.Uint8Type.Kind(): var x uint8 x1, err := strconv.ParseUint(string(data), 10, 64) if err != nil { @@ -332,7 +325,7 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, x = uint8(x1) fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) // case "*uint16": - case core.Uint16Type.Kind(): + case schemas.Uint16Type.Kind(): var x uint16 x1, err := strconv.ParseUint(string(data), 10, 64) if err != nil { @@ -341,12 +334,12 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, x = uint16(x1) fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) // case "*int64": - case core.Int64Type.Kind(): + case schemas.Int64Type.Kind(): sdata := string(data) var x int64 var err error // for mysql, when use bit, it returned \x01 - if col.SQLType.Name == core.Bit && + if col.SQLType.Name == schemas.Bit && strings.Contains(session.engine.DriverName(), "mysql") { if len(data) == 1 { x = int64(data[0]) @@ -365,13 +358,13 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, } fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) // case "*int": - case core.IntType.Kind(): + case schemas.IntType.Kind(): sdata := string(data) var x int var x1 int64 var err error // for mysql, when use bit, it returned \x01 - if col.SQLType.Name == core.Bit && + if col.SQLType.Name == schemas.Bit && strings.Contains(session.engine.DriverName(), "mysql") { if len(data) == 1 { x = int(data[0]) @@ -393,14 +386,14 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, } fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) // case "*int32": - case core.Int32Type.Kind(): + case schemas.Int32Type.Kind(): sdata := string(data) var x int32 var x1 int64 var err error // for mysql, when use bit, it returned \x01 - if col.SQLType.Name == core.Bit && - session.engine.dialect.DBType() == core.MYSQL { + if col.SQLType.Name == schemas.Bit && + session.engine.dialect.URI().DBType == schemas.MYSQL { if len(data) == 1 { x = int32(data[0]) } else { @@ -421,13 +414,13 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, } fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) // case "*int8": - case core.Int8Type.Kind(): + case schemas.Int8Type.Kind(): sdata := string(data) var x int8 var x1 int64 var err error // for mysql, when use bit, it returned \x01 - if col.SQLType.Name == core.Bit && + if col.SQLType.Name == schemas.Bit && strings.Contains(session.engine.DriverName(), "mysql") { if len(data) == 1 { x = int8(data[0]) @@ -449,13 +442,13 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, } 
fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) // case "*int16": - case core.Int16Type.Kind(): + case schemas.Int16Type.Kind(): sdata := string(data) var x int16 var x1 int64 var err error // for mysql, when use bit, it returned \x01 - if col.SQLType.Name == core.Bit && + if col.SQLType.Name == schemas.Bit && strings.Contains(session.engine.DriverName(), "mysql") { if len(data) == 1 { x = int16(data[0]) @@ -480,7 +473,7 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, case reflect.Struct: switch fieldType { // case "*.time.Time": - case core.PtrTimeType: + case schemas.PtrTimeType: x, err := session.byte2Time(col, data) if err != nil { return err @@ -490,7 +483,7 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, default: if session.statement.UseCascade { structInter := reflect.New(fieldType.Elem()) - table, err := session.engine.autoMapType(structInter.Elem()) + table, err := session.engine.tagParser.ParseWithCache(structInter.Elem()) if err != nil { return err } @@ -499,14 +492,14 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, return errors.New("unsupported composited primary key cascade") } - var pk = make(core.PK, len(table.PrimaryKeys)) + var pk = make(schemas.PK, len(table.PrimaryKeys)) rawValueType := table.ColumnType(table.PKColumns()[0].FieldName) pk[0], err = str2PK(string(data), rawValueType) if err != nil { return err } - if !isPKZero(pk) { + if !pk.IsZero() { // !nashtsai! TODO for hasOne relationship, it's preferred to use join query for eager fetch // however, also need to consider adding a 'lazy' attribute to xorm tag which allow hasOne // property to be fetched lazily @@ -534,138 +527,3 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, return nil } - -// convert a field value of a struct to interface for put into db -func (session *Session) value2Interface(col *core.Column, fieldValue reflect.Value) (interface{}, error) { - if fieldValue.CanAddr() { - if fieldConvert, ok := fieldValue.Addr().Interface().(core.Conversion); ok { - data, err := fieldConvert.ToDB() - if err != nil { - return 0, err - } - if col.SQLType.IsBlob() { - return data, nil - } - return string(data), nil - } - } - - if fieldConvert, ok := fieldValue.Interface().(core.Conversion); ok { - data, err := fieldConvert.ToDB() - if err != nil { - return 0, err - } - if col.SQLType.IsBlob() { - return data, nil - } - return string(data), nil - } - - fieldType := fieldValue.Type() - k := fieldType.Kind() - if k == reflect.Ptr { - if fieldValue.IsNil() { - return nil, nil - } else if !fieldValue.IsValid() { - session.engine.logger.Warn("the field[", col.FieldName, "] is invalid") - return nil, nil - } else { - // !nashtsai! deference pointer type to instance type - fieldValue = fieldValue.Elem() - fieldType = fieldValue.Type() - k = fieldType.Kind() - } - } - - switch k { - case reflect.Bool: - return fieldValue.Bool(), nil - case reflect.String: - return fieldValue.String(), nil - case reflect.Struct: - if fieldType.ConvertibleTo(core.TimeType) { - t := fieldValue.Convert(core.TimeType).Interface().(time.Time) - tf := session.engine.formatColTime(col, t) - return tf, nil - } else if fieldType.ConvertibleTo(nullFloatType) { - t := fieldValue.Convert(nullFloatType).Interface().(sql.NullFloat64) - if !t.Valid { - return nil, nil - } - return t.Float64, nil - } - - if !col.SQLType.IsJson() { - // !! 
增加支持driver.Valuer接口的结构,如sql.NullString - if v, ok := fieldValue.Interface().(driver.Valuer); ok { - return v.Value() - } - - fieldTable, err := session.engine.autoMapType(fieldValue) - if err != nil { - return nil, err - } - if len(fieldTable.PrimaryKeys) == 1 { - pkField := reflect.Indirect(fieldValue).FieldByName(fieldTable.PKColumns()[0].FieldName) - return pkField.Interface(), nil - } - return 0, fmt.Errorf("no primary key for col %v", col.Name) - } - - if col.SQLType.IsText() { - bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - session.engine.logger.Error(err) - return 0, err - } - return string(bytes), nil - } else if col.SQLType.IsBlob() { - bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - session.engine.logger.Error(err) - return 0, err - } - return bytes, nil - } - return nil, fmt.Errorf("Unsupported type %v", fieldValue.Type()) - case reflect.Complex64, reflect.Complex128: - bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - session.engine.logger.Error(err) - return 0, err - } - return string(bytes), nil - case reflect.Array, reflect.Slice, reflect.Map: - if !fieldValue.IsValid() { - return fieldValue.Interface(), nil - } - - if col.SQLType.IsText() { - bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - session.engine.logger.Error(err) - return 0, err - } - return string(bytes), nil - } else if col.SQLType.IsBlob() { - var bytes []byte - var err error - if (k == reflect.Slice) && - (fieldValue.Type().Elem().Kind() == reflect.Uint8) { - bytes = fieldValue.Bytes() - } else { - bytes, err = DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - session.engine.logger.Error(err) - return 0, err - } - } - return bytes, nil - } - return nil, ErrUnSupportedType - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return int64(fieldValue.Uint()), nil - default: - return fieldValue.Interface(), nil - } -} diff --git a/vendor/xorm.io/xorm/session_delete.go b/vendor/xorm.io/xorm/session_delete.go index 675d4d8c7..ff28867a7 100644 --- a/vendor/xorm.io/xorm/session_delete.go +++ b/vendor/xorm.io/xorm/session_delete.go @@ -9,37 +9,46 @@ import ( "fmt" "strconv" - "xorm.io/core" + "xorm.io/xorm/caches" + "xorm.io/xorm/schemas" ) -func (session *Session) cacheDelete(table *core.Table, tableName, sqlStr string, args ...interface{}) error { +var ( + // ErrNeedDeletedCond delete needs less one condition error + ErrNeedDeletedCond = errors.New("Delete action needs at least one condition") + + // ErrNotImplemented not implemented + ErrNotImplemented = errors.New("Not implemented") +) + +func (session *Session) cacheDelete(table *schemas.Table, tableName, sqlStr string, args ...interface{}) error { if table == nil || session.tx != nil { return ErrCacheFailed } for _, filter := range session.engine.dialect.Filters() { - sqlStr = filter.Do(sqlStr, session.engine.dialect, table) + sqlStr = filter.Do(sqlStr) } - newsql := session.statement.convertIDSQL(sqlStr) + newsql := session.statement.ConvertIDSQL(sqlStr) if newsql == "" { return ErrCacheFailed } - cacher := session.engine.getCacher(tableName) + cacher := session.engine.cacherMgr.GetCacher(tableName) pkColumns := table.PKColumns() - ids, err := core.GetCacheSql(cacher, tableName, newsql, args) + ids, err := caches.GetCacheSql(cacher, tableName, newsql, args) if err != nil { resultsSlice, err := session.queryBytes(newsql, args...) 
if err != nil { return err } - ids = make([]core.PK, 0) + ids = make([]schemas.PK, 0) if len(resultsSlice) > 0 { for _, data := range resultsSlice { var id int64 - var pk core.PK = make([]interface{}, 0) + var pk schemas.PK = make([]interface{}, 0) for _, col := range pkColumns { if v, ok := data[col.Name]; !ok { return errors.New("no id") @@ -61,14 +70,14 @@ func (session *Session) cacheDelete(table *core.Table, tableName, sqlStr string, } for _, id := range ids { - session.engine.logger.Debug("[cacheDelete] delete cache obj:", tableName, id) + session.engine.logger.Debugf("[cache] delete cache obj: %v, %v", tableName, id) sid, err := id.ToString() if err != nil { return err } cacher.DelBean(tableName, sid) } - session.engine.logger.Debug("[cacheDelete] clear cache table:", tableName) + session.engine.logger.Debugf("[cache] clear cache table: %v", tableName) cacher.ClearIds(tableName) return nil } @@ -79,11 +88,11 @@ func (session *Session) Delete(bean interface{}) (int64, error) { defer session.Close() } - if session.statement.lastError != nil { - return 0, session.statement.lastError + if session.statement.LastError != nil { + return 0, session.statement.LastError } - if err := session.statement.setRefBean(bean); err != nil { + if err := session.statement.SetRefBean(bean); err != nil { return 0, err } @@ -97,11 +106,12 @@ func (session *Session) Delete(bean interface{}) (int64, error) { processor.BeforeDelete() } - condSQL, condArgs, err := session.statement.genConds(bean) + condSQL, condArgs, err := session.statement.GenConds(bean) if err != nil { return 0, err } - if len(condSQL) == 0 && session.statement.LimitN == 0 { + pLimitN := session.statement.LimitN + if len(condSQL) == 0 && (pLimitN == nil || *pLimitN == 0) { return 0, ErrNeedDeletedCond } @@ -119,28 +129,29 @@ func (session *Session) Delete(bean interface{}) (int64, error) { if len(session.statement.OrderStr) > 0 { orderSQL += fmt.Sprintf(" ORDER BY %s", session.statement.OrderStr) } - if session.statement.LimitN > 0 { - orderSQL += fmt.Sprintf(" LIMIT %d", session.statement.LimitN) + if pLimitN != nil && *pLimitN > 0 { + limitNValue := *pLimitN + orderSQL += fmt.Sprintf(" LIMIT %d", limitNValue) } if len(orderSQL) > 0 { - switch session.engine.dialect.DBType() { - case core.POSTGRES: + switch session.engine.dialect.URI().DBType { + case schemas.POSTGRES: inSQL := fmt.Sprintf("ctid IN (SELECT ctid FROM %s%s)", tableName, orderSQL) if len(condSQL) > 0 { deleteSQL += " AND " + inSQL } else { deleteSQL += " WHERE " + inSQL } - case core.SQLITE: + case schemas.SQLITE: inSQL := fmt.Sprintf("rowid IN (SELECT rowid FROM %s%s)", tableName, orderSQL) if len(condSQL) > 0 { deleteSQL += " AND " + inSQL } else { deleteSQL += " WHERE " + inSQL } - // TODO: how to handle delete limit on mssql? - case core.MSSQL: + // TODO: how to handle delete limit on mssql? + case schemas.MSSQL: return 0, ErrNotImplemented default: deleteSQL += orderSQL @@ -149,12 +160,12 @@ func (session *Session) Delete(bean interface{}) (int64, error) { var realSQL string argsForCache := make([]interface{}, 0, len(condArgs)*2) - if session.statement.unscoped || table.DeletedColumn() == nil { // tag "deleted" is disabled + if session.statement.GetUnscoped() || table.DeletedColumn() == nil { // tag "deleted" is disabled realSQL = deleteSQL copy(argsForCache, condArgs) argsForCache = append(condArgs, argsForCache...) } else { - // !oinume! sqlStrForCache and argsForCache is needed to behave as executing "DELETE FROM ..." for cache. + // !oinume! 
sqlStrForCache and argsForCache is needed to behave as executing "DELETE FROM ..." for caches. copy(argsForCache, condArgs) argsForCache = append(condArgs, argsForCache...) @@ -165,23 +176,23 @@ func (session *Session) Delete(bean interface{}) (int64, error) { condSQL) if len(orderSQL) > 0 { - switch session.engine.dialect.DBType() { - case core.POSTGRES: + switch session.engine.dialect.URI().DBType { + case schemas.POSTGRES: inSQL := fmt.Sprintf("ctid IN (SELECT ctid FROM %s%s)", tableName, orderSQL) if len(condSQL) > 0 { realSQL += " AND " + inSQL } else { realSQL += " WHERE " + inSQL } - case core.SQLITE: + case schemas.SQLITE: inSQL := fmt.Sprintf("rowid IN (SELECT rowid FROM %s%s)", tableName, orderSQL) if len(condSQL) > 0 { realSQL += " AND " + inSQL } else { realSQL += " WHERE " + inSQL } - // TODO: how to handle delete limit on mssql? - case core.MSSQL: + // TODO: how to handle delete limit on mssql? + case schemas.MSSQL: return 0, ErrNotImplemented default: realSQL += orderSQL @@ -203,7 +214,7 @@ func (session *Session) Delete(bean interface{}) (int64, error) { }) } - if cacher := session.engine.getCacher(tableNameNoQuote); cacher != nil && session.statement.UseCache { + if cacher := session.engine.GetCacher(tableNameNoQuote); cacher != nil && session.statement.UseCache { session.cacheDelete(table, tableNameNoQuote, deleteSQL, argsForCache...) } diff --git a/vendor/xorm.io/xorm/session_exist.go b/vendor/xorm.io/xorm/session_exist.go index 660cc47e4..e52c618e7 100644 --- a/vendor/xorm.io/xorm/session_exist.go +++ b/vendor/xorm.io/xorm/session_exist.go @@ -4,86 +4,19 @@ package xorm -import ( - "errors" - "fmt" - "reflect" - - "xorm.io/builder" - "xorm.io/core" -) - // Exist returns true if the record exist otherwise return false func (session *Session) Exist(bean ...interface{}) (bool, error) { if session.isAutoClose { defer session.Close() } - if session.statement.lastError != nil { - return false, session.statement.lastError + if session.statement.LastError != nil { + return false, session.statement.LastError } - var sqlStr string - var args []interface{} - var err error - - if session.statement.RawSQL == "" { - if len(bean) == 0 { - tableName := session.statement.TableName() - if len(tableName) <= 0 { - return false, ErrTableNotFound - } - - tableName = session.statement.Engine.Quote(tableName) - - if session.statement.cond.IsValid() { - condSQL, condArgs, err := builder.ToSQL(session.statement.cond) - if err != nil { - return false, err - } - - if session.engine.dialect.DBType() == core.MSSQL { - sqlStr = fmt.Sprintf("SELECT TOP 1 * FROM %s WHERE %s", tableName, condSQL) - } else if session.engine.dialect.DBType() == core.ORACLE { - sqlStr = fmt.Sprintf("SELECT * FROM %s WHERE (%s) AND ROWNUM=1", tableName, condSQL) - } else { - sqlStr = fmt.Sprintf("SELECT * FROM %s WHERE %s LIMIT 1", tableName, condSQL) - } - args = condArgs - } else { - if session.engine.dialect.DBType() == core.MSSQL { - sqlStr = fmt.Sprintf("SELECT TOP 1 * FROM %s", tableName) - } else if session.engine.dialect.DBType() == core.ORACLE { - sqlStr = fmt.Sprintf("SELECT * FROM %s WHERE ROWNUM=1", tableName) - } else { - sqlStr = fmt.Sprintf("SELECT * FROM %s LIMIT 1", tableName) - } - args = []interface{}{} - } - } else { - beanValue := reflect.ValueOf(bean[0]) - if beanValue.Kind() != reflect.Ptr { - return false, errors.New("needs a pointer") - } - - if beanValue.Elem().Kind() == reflect.Struct { - if err := session.statement.setRefBean(bean[0]); err != nil { - return false, err - } - } - - if 
len(session.statement.TableName()) <= 0 { - return false, ErrTableNotFound - } - session.statement.Limit(1) - sqlStr, args, err = session.statement.genGetSQL(bean[0]) - if err != nil { - return false, err - } - } - } else { - sqlStr = session.statement.RawSQL - args = session.statement.RawParams + sqlStr, args, err := session.statement.GenExistSQL(bean...) + if err != nil { + return false, err } rows, err := session.queryRows(sqlStr, args...) diff --git a/vendor/xorm.io/xorm/session_find.go b/vendor/xorm.io/xorm/session_find.go index e16ae54c9..960c10851 100644 --- a/vendor/xorm.io/xorm/session_find.go +++ b/vendor/xorm.io/xorm/session_find.go @@ -8,10 +8,11 @@ import ( "errors" "fmt" "reflect" - "strings" "xorm.io/builder" - "xorm.io/core" + "xorm.io/xorm/caches" + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" ) const ( @@ -52,8 +53,8 @@ func (session *Session) FindAndCount(rowsSlicePtr interface{}, condiBean ...inte } session.autoResetStatement = true - if session.statement.selectStr != "" { - session.statement.selectStr = "" + if session.statement.SelectStr != "" { + session.statement.SelectStr = "" } if session.statement.OrderStr != "" { session.statement.OrderStr = "" @@ -64,13 +65,14 @@ func (session *Session) FindAndCount(rowsSlicePtr interface{}, condiBean ...inte func (session *Session) find(rowsSlicePtr interface{}, condiBean ...interface{}) error { defer session.resetStatement() - - if session.statement.lastError != nil { - return session.statement.lastError + if session.statement.LastError != nil { + return session.statement.LastError } sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr)) - if sliceValue.Kind() != reflect.Slice && sliceValue.Kind() != reflect.Map { + var isSlice = sliceValue.Kind() == reflect.Slice + var isMap = sliceValue.Kind() == reflect.Map + if !isSlice && !isMap { return errors.New("needs a pointer to a slice or a map") } @@ -81,7 +83,7 @@ func (session *Session) find(rowsSlicePtr interface{}, condiBean ...interface{}) if sliceElementType.Kind() == reflect.Ptr { if sliceElementType.Elem().Kind() == reflect.Struct { pv := reflect.New(sliceElementType.Elem()) - if err := session.statement.setRefValue(pv); err != nil { + if err := session.statement.SetRefValue(pv); err != nil { return err } } else { @@ -89,7 +91,7 @@ func (session *Session) find(rowsSlicePtr interface{}, condiBean ...interface{}) } } else if sliceElementType.Kind() == reflect.Struct { pv := reflect.New(sliceElementType) - if err := session.statement.setRefValue(pv); err != nil { + if err := session.statement.SetRefValue(pv); err != nil { return err } } else { @@ -97,107 +99,54 @@ func (session *Session) find(rowsSlicePtr interface{}, condiBean ...interface{}) } } - var table = session.statement.RefTable - - var addedTableName = (len(session.statement.JoinStr) > 0) - var autoCond builder.Cond + var ( + table = session.statement.RefTable + addedTableName = (len(session.statement.JoinStr) > 0) + autoCond builder.Cond + ) if tp == tpStruct { - if !session.statement.noAutoCondition && len(condiBean) > 0 { + if !session.statement.NoAutoCondition && len(condiBean) > 0 { var err error - autoCond, err = session.statement.buildConds(table, condiBean[0], true, true, false, true, addedTableName) + autoCond, err = session.statement.BuildConds(table, condiBean[0], true, true, false, true, addedTableName) if err != nil { return err } } else { - // !oinume! Add " IS NULL" to WHERE whatever condiBean is given. 
- // See https://gitea.com/xorm/xorm/issues/179 - if col := table.DeletedColumn(); col != nil && !session.statement.unscoped { // tag "deleted" is enabled - var colName = session.engine.Quote(col.Name) - if addedTableName { - var nm = session.statement.TableName() - if len(session.statement.TableAlias) > 0 { - nm = session.statement.TableAlias - } - colName = session.engine.Quote(nm) + "." + colName - } - - autoCond = session.engine.CondDeleted(colName) + if col := table.DeletedColumn(); col != nil && !session.statement.GetUnscoped() { // tag "deleted" is enabled + autoCond = session.statement.CondDeleted(col) } } } - var sqlStr string - var args []interface{} - var err error - if session.statement.RawSQL == "" { - if len(session.statement.TableName()) <= 0 { - return ErrTableNotFound - } - - var columnStr = session.statement.ColumnStr - if len(session.statement.selectStr) > 0 { - columnStr = session.statement.selectStr - } else { - if session.statement.JoinStr == "" { - if columnStr == "" { - if session.statement.GroupByStr != "" { - columnStr = session.engine.quoteColumns(session.statement.GroupByStr) - } else { - columnStr = session.statement.genColumnStr() - } - } - } else { - if columnStr == "" { - if session.statement.GroupByStr != "" { - columnStr = session.engine.quoteColumns(session.statement.GroupByStr) - } else { - columnStr = "*" - } - } - } - if columnStr == "" { - columnStr = "*" - } - } - - session.statement.cond = session.statement.cond.And(autoCond) - condSQL, condArgs, err := builder.ToSQL(session.statement.cond) - if err != nil { - return err + // if it's a map with Cols but primary key not in column list, we still need the primary key + if isMap && !session.statement.ColumnMap.IsEmpty() { + for _, k := range session.statement.RefTable.PrimaryKeys { + session.statement.ColumnMap.Add(k) } + } - args = append(session.statement.joinArgs, condArgs...) - sqlStr, err = session.statement.genSelectSQL(columnStr, condSQL, true, true) - if err != nil { - return err - } - // for mssql and use limit - qs := strings.Count(sqlStr, "?") - if len(args)*2 == qs { - args = append(args, args...) - } - } else { - sqlStr = session.statement.RawSQL - args = session.statement.RawParams + sqlStr, args, err := session.statement.GenFindSQL(autoCond) + if err != nil { + return err } - if session.canCache() { - if cacher := session.engine.getCacher(session.statement.TableName()); cacher != nil && + if session.statement.ColumnMap.IsEmpty() && session.canCache() { + if cacher := session.engine.GetCacher(session.statement.TableName()); cacher != nil && !session.statement.IsDistinct && - !session.statement.unscoped { + !session.statement.GetUnscoped() { err = session.cacheFind(sliceElementType, sqlStr, rowsSlicePtr, args...) if err != ErrCacheFailed { return err } err = nil // !nashtsai! reset err to nil for ErrCacheFailed - session.engine.logger.Warn("Cache Find Failed") + session.engine.logger.Warnf("Cache Find Failed") } } return session.noCacheFind(table, sliceValue, sqlStr, args...) } -func (session *Session) noCacheFind(table *core.Table, containerValue reflect.Value, sqlStr string, args ...interface{}) error { +func (session *Session) noCacheFind(table *schemas.Table, containerValue reflect.Value, sqlStr string, args ...interface{}) error { rows, err := session.queryRows(sqlStr, args...) 
if err != nil { return err @@ -236,10 +185,10 @@ func (session *Session) noCacheFind(table *core.Table, containerValue reflect.Va return reflect.New(elemType) } - var containerValueSetFunc func(*reflect.Value, core.PK) error + var containerValueSetFunc func(*reflect.Value, schemas.PK) error if containerValue.Kind() == reflect.Slice { - containerValueSetFunc = func(newValue *reflect.Value, pk core.PK) error { + containerValueSetFunc = func(newValue *reflect.Value, pk schemas.PK) error { if isPointer { containerValue.Set(reflect.Append(containerValue, newValue.Elem().Addr())) } else { @@ -256,7 +205,7 @@ func (session *Session) noCacheFind(table *core.Table, containerValue reflect.Va return errors.New("don't support multiple primary key's map has non-slice key type") } - containerValueSetFunc = func(newValue *reflect.Value, pk core.PK) error { + containerValueSetFunc = func(newValue *reflect.Value, pk schemas.PK) error { keyValue := reflect.New(keyType) err := convertPKToValue(table, keyValue.Interface(), pk) if err != nil { @@ -273,8 +222,8 @@ func (session *Session) noCacheFind(table *core.Table, containerValue reflect.Va if elemType.Kind() == reflect.Struct { var newValue = newElemFunc(fields) - dataStruct := rValue(newValue.Interface()) - tb, err := session.engine.autoMapType(dataStruct) + dataStruct := utils.ReflectValue(newValue.Interface()) + tb, err := session.engine.tagParser.ParseWithCache(dataStruct) if err != nil { return err } @@ -310,7 +259,7 @@ func (session *Session) noCacheFind(table *core.Table, containerValue reflect.Va return nil } -func convertPKToValue(table *core.Table, dst interface{}, pk core.PK) error { +func convertPKToValue(table *schemas.Table, dst interface{}, pk schemas.PK) error { cols := table.PKColumns() if len(cols) == 1 { return convertAssign(dst, pk[0]) @@ -322,28 +271,28 @@ func convertPKToValue(table *core.Table, dst interface{}, pk core.PK) error { func (session *Session) cacheFind(t reflect.Type, sqlStr string, rowsSlicePtr interface{}, args ...interface{}) (err error) { if !session.canCache() || - indexNoCase(sqlStr, "having") != -1 || - indexNoCase(sqlStr, "group by") != -1 { + utils.IndexNoCase(sqlStr, "having") != -1 || + utils.IndexNoCase(sqlStr, "group by") != -1 { return ErrCacheFailed } tableName := session.statement.TableName() - cacher := session.engine.getCacher(tableName) + cacher := session.engine.cacherMgr.GetCacher(tableName) if cacher == nil { return nil } for _, filter := range session.engine.dialect.Filters() { - sqlStr = filter.Do(sqlStr, session.engine.dialect, session.statement.RefTable) + sqlStr = filter.Do(sqlStr) } - newsql := session.statement.convertIDSQL(sqlStr) + newsql := session.statement.ConvertIDSQL(sqlStr) if newsql == "" { return ErrCacheFailed } table := session.statement.RefTable - ids, err := core.GetCacheSql(cacher, tableName, newsql, args) + ids, err := caches.GetCacheSql(cacher, tableName, newsql, args) if err != nil { rows, err := session.queryRows(newsql, args...) 
if err != nil { @@ -352,11 +301,11 @@ func (session *Session) cacheFind(t reflect.Type, sqlStr string, rowsSlicePtr in defer rows.Close() var i int - ids = make([]core.PK, 0) + ids = make([]schemas.PK, 0) for rows.Next() { i++ if i > 500 { - session.engine.logger.Debug("[cacheFind] ids length > 500, no cache") + session.engine.logger.Debugf("[cacheFind] ids length > 500, no cache") return ErrCacheFailed } var res = make([]string, len(table.PrimaryKeys)) @@ -364,7 +313,7 @@ func (session *Session) cacheFind(t reflect.Type, sqlStr string, rowsSlicePtr in if err != nil { return err } - var pk core.PK = make([]interface{}, len(table.PrimaryKeys)) + var pk schemas.PK = make([]interface{}, len(table.PrimaryKeys)) for i, col := range table.PKColumns() { pk[i], err = session.engine.idTypeAssertion(col, res[i]) if err != nil { @@ -375,19 +324,19 @@ func (session *Session) cacheFind(t reflect.Type, sqlStr string, rowsSlicePtr in ids = append(ids, pk) } - session.engine.logger.Debug("[cacheFind] cache sql:", ids, tableName, sqlStr, newsql, args) - err = core.PutCacheSql(cacher, ids, tableName, newsql, args) + session.engine.logger.Debugf("[cache] cache sql: %v, %v, %v, %v, %v", ids, tableName, sqlStr, newsql, args) + err = caches.PutCacheSql(cacher, ids, tableName, newsql, args) if err != nil { return err } } else { - session.engine.logger.Debug("[cacheFind] cache hit sql:", tableName, sqlStr, newsql, args) + session.engine.logger.Debugf("[cache] cache hit sql: %v, %v, %v, %v", tableName, sqlStr, newsql, args) } sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr)) ididxes := make(map[string]int) - var ides []core.PK + var ides []schemas.PK var temps = make([]interface{}, len(ids)) for idx, id := range ids { @@ -396,20 +345,38 @@ func (session *Session) cacheFind(t reflect.Type, sqlStr string, rowsSlicePtr in return err } bean := cacher.GetBean(tableName, sid) - if bean == nil || reflect.ValueOf(bean).Elem().Type() != t { + + // fix issue #894 + isHit := func() (ht bool) { + if bean == nil { + ht = false + return + } + ckb := reflect.ValueOf(bean).Elem().Type() + ht = ckb == t + if !ht && t.Kind() == reflect.Ptr { + ht = t.Elem() == ckb + } + return + } + if !isHit() { ides = append(ides, id) ididxes[sid] = idx } else { - session.engine.logger.Debug("[cacheFind] cache hit bean:", tableName, id, bean) + session.engine.logger.Debugf("[cache] cache hit bean: %v, %v, %v", tableName, id, bean) + + pk, err := session.engine.IDOf(bean) + if err != nil { + return err + } - pk := session.engine.IdOf(bean) xid, err := pk.ToString() if err != nil { return err } if sid != xid { - session.engine.logger.Error("[cacheFind] error cache", xid, sid, bean) + session.engine.logger.Errorf("[cache] error cache: %v, %v, %v", xid, sid, bean) return ErrCacheFailed } temps[idx] = bean @@ -459,7 +426,7 @@ func (session *Session) cacheFind(t reflect.Type, sqlStr string, rowsSlicePtr in bean := rv.Interface() temps[ididxes[sid]] = bean - session.engine.logger.Debug("[cacheFind] cache bean:", tableName, id, bean, temps) + session.engine.logger.Debugf("[cache] cache bean: %v, %v, %v, %v", tableName, id, bean, temps) cacher.PutBean(tableName, sid, bean) } } @@ -467,7 +434,7 @@ func (session *Session) cacheFind(t reflect.Type, sqlStr string, rowsSlicePtr in for j := 0; j < len(temps); j++ { bean := temps[j] if bean == nil { - session.engine.logger.Warn("[cacheFind] cache no hit:", tableName, ids[j], temps) + session.engine.logger.Warnf("[cache] cache no hit: %v, %v, %v", tableName, ids[j], temps) // return errors.New("cache 
error") // !nashtsai! no need to return error, but continue instead continue } @@ -488,7 +455,7 @@ func (session *Session) cacheFind(t reflect.Type, sqlStr string, rowsSlicePtr in } } else { if keyType.Kind() != reflect.Slice { - return errors.New("table have multiple primary keys, key is not core.PK or slice") + return errors.New("table have multiple primary keys, key is not schemas.PK or slice") } ikey = key } diff --git a/vendor/xorm.io/xorm/session_get.go b/vendor/xorm.io/xorm/session_get.go index cc0a2019e..e56ef2d7d 100644 --- a/vendor/xorm.io/xorm/session_get.go +++ b/vendor/xorm.io/xorm/session_get.go @@ -11,7 +11,9 @@ import ( "reflect" "strconv" - "xorm.io/core" + "xorm.io/xorm/caches" + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" ) // Get retrieve one record from database, bean's non-empty fields @@ -26,8 +28,8 @@ func (session *Session) Get(bean interface{}) (bool, error) { func (session *Session) get(bean interface{}) (bool, error) { defer session.resetStatement() - if session.statement.lastError != nil { - return false, session.statement.lastError + if session.statement.LastError != nil { + return false, session.statement.LastError } beanValue := reflect.ValueOf(bean) @@ -38,7 +40,7 @@ func (session *Session) get(bean interface{}) (bool, error) { } if beanValue.Elem().Kind() == reflect.Struct { - if err := session.statement.setRefBean(bean); err != nil { + if err := session.statement.SetRefBean(bean); err != nil { return false, err } } @@ -52,20 +54,20 @@ func (session *Session) get(bean interface{}) (bool, error) { return false, ErrTableNotFound } session.statement.Limit(1) - sqlStr, args, err = session.statement.genGetSQL(bean) + sqlStr, args, err = session.statement.GenGetSQL(bean) if err != nil { return false, err } } else { - sqlStr = session.statement.RawSQL + sqlStr = session.statement.GenRawSQL() args = session.statement.RawParams } table := session.statement.RefTable - if session.canCache() && beanValue.Elem().Kind() == reflect.Struct { - if cacher := session.engine.getCacher(session.statement.TableName()); cacher != nil && - !session.statement.unscoped { + if session.statement.ColumnMap.IsEmpty() && session.canCache() && beanValue.Elem().Kind() == reflect.Struct { + if cacher := session.engine.GetCacher(session.statement.TableName()); cacher != nil && + !session.statement.GetUnscoped() { has, err := session.cacheGet(bean, sqlStr, args...) if err != ErrCacheFailed { return has, err @@ -73,11 +75,11 @@ func (session *Session) get(bean interface{}) (bool, error) { } } - context := session.statement.context + context := session.statement.Context if context != nil { res := context.Get(fmt.Sprintf("%v-%v", sqlStr, args)) if res != nil { - session.engine.logger.Debug("hit context cache", sqlStr) + session.engine.logger.Debugf("hit context cache: %s", sqlStr) structValue := reflect.Indirect(reflect.ValueOf(bean)) structValue.Set(reflect.Indirect(reflect.ValueOf(res))) @@ -99,7 +101,7 @@ func (session *Session) get(bean interface{}) (bool, error) { return true, nil } -func (session *Session) nocacheGet(beanKind reflect.Kind, table *core.Table, bean interface{}, sqlStr string, args ...interface{}) (bool, error) { +func (session *Session) nocacheGet(beanKind reflect.Kind, table *schemas.Table, bean interface{}, sqlStr string, args ...interface{}) (bool, error) { rows, err := session.queryRows(sqlStr, args...) 
if err != nil { return false, err @@ -243,7 +245,7 @@ func (session *Session) nocacheGet(beanKind reflect.Kind, table *core.Table, bea // close it before covert data rows.Close() - dataStruct := rValue(bean) + dataStruct := utils.ReflectValue(bean) _, err = session.slice2Bean(scanResults, fields, bean, &dataStruct, table) if err != nil { return true, err @@ -271,19 +273,19 @@ func (session *Session) cacheGet(bean interface{}, sqlStr string, args ...interf } for _, filter := range session.engine.dialect.Filters() { - sqlStr = filter.Do(sqlStr, session.engine.dialect, session.statement.RefTable) + sqlStr = filter.Do(sqlStr) } - newsql := session.statement.convertIDSQL(sqlStr) + newsql := session.statement.ConvertIDSQL(sqlStr) if newsql == "" { return false, ErrCacheFailed } tableName := session.statement.TableName() - cacher := session.engine.getCacher(tableName) + cacher := session.engine.cacherMgr.GetCacher(tableName) - session.engine.logger.Debug("[cacheGet] find sql:", newsql, args) + session.engine.logger.Debugf("[cache] Get SQL: %s, %v", newsql, args) table := session.statement.RefTable - ids, err := core.GetCacheSql(cacher, tableName, newsql, args) + ids, err := caches.GetCacheSql(cacher, tableName, newsql, args) if err != nil { var res = make([]string, len(table.PrimaryKeys)) rows, err := session.NoCache().queryRows(newsql, args...) @@ -301,7 +303,7 @@ func (session *Session) cacheGet(bean interface{}, sqlStr string, args ...interf return false, ErrCacheFailed } - var pk core.PK = make([]interface{}, len(table.PrimaryKeys)) + var pk schemas.PK = make([]interface{}, len(table.PrimaryKeys)) for i, col := range table.PKColumns() { if col.SQLType.IsText() { pk[i] = res[i] @@ -316,20 +318,20 @@ func (session *Session) cacheGet(bean interface{}, sqlStr string, args ...interf } } - ids = []core.PK{pk} - session.engine.logger.Debug("[cacheGet] cache ids:", newsql, ids) - err = core.PutCacheSql(cacher, ids, tableName, newsql, args) + ids = []schemas.PK{pk} + session.engine.logger.Debugf("[cache] cache ids: %s, %v", newsql, ids) + err = caches.PutCacheSql(cacher, ids, tableName, newsql, args) if err != nil { return false, err } } else { - session.engine.logger.Debug("[cacheGet] cache hit sql:", newsql, ids) + session.engine.logger.Debugf("[cache] cache hit: %s, %v", newsql, ids) } if len(ids) > 0 { structValue := reflect.Indirect(reflect.ValueOf(bean)) id := ids[0] - session.engine.logger.Debug("[cacheGet] get bean:", tableName, id) + session.engine.logger.Debugf("[cache] get bean: %s, %v", tableName, id) sid, err := id.ToString() if err != nil { return false, err @@ -342,10 +344,10 @@ func (session *Session) cacheGet(bean interface{}, sqlStr string, args ...interf return has, err } - session.engine.logger.Debug("[cacheGet] cache bean:", tableName, id, cacheBean) + session.engine.logger.Debugf("[cache] cache bean: %s, %v, %v", tableName, id, cacheBean) cacher.PutBean(tableName, sid, cacheBean) } else { - session.engine.logger.Debug("[cacheGet] cache hit bean:", tableName, id, cacheBean) + session.engine.logger.Debugf("[cache] cache hit: %s, %v, %v", tableName, id, cacheBean) has = true } structValue.Set(reflect.Indirect(reflect.ValueOf(cacheBean))) diff --git a/vendor/xorm.io/xorm/session_insert.go b/vendor/xorm.io/xorm/session_insert.go index 5f8f7e1ee..3a0a70665 100644 --- a/vendor/xorm.io/xorm/session_insert.go +++ b/vendor/xorm.io/xorm/session_insert.go @@ -13,9 +13,13 @@ import ( "strings" "xorm.io/builder" - "xorm.io/core" + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" ) +// 
ErrNoElementsOnSlice represents an error there is no element when insert +var ErrNoElementsOnSlice = errors.New("No element on slice when insert") + // Insert insert one or more beans func (session *Session) Insert(beans ...interface{}) (int64, error) { var affected int64 @@ -67,23 +71,15 @@ func (session *Session) Insert(beans ...interface{}) (int64, error) { sliceValue := reflect.Indirect(reflect.ValueOf(bean)) if sliceValue.Kind() == reflect.Slice { size := sliceValue.Len() - if size > 0 { - if session.engine.SupportInsertMany() { - cnt, err := session.innerInsertMulti(bean) - if err != nil { - return affected, err - } - affected += cnt - } else { - for i := 0; i < size; i++ { - cnt, err := session.innerInsert(sliceValue.Index(i).Interface()) - if err != nil { - return affected, err - } - affected += cnt - } - } + if size <= 0 { + return 0, ErrNoElementsOnSlice + } + + cnt, err := session.innerInsertMulti(bean) + if err != nil { + return affected, err } + affected += cnt } else { cnt, err := session.innerInsert(bean) if err != nil { @@ -107,7 +103,7 @@ func (session *Session) innerInsertMulti(rowsSlicePtr interface{}) (int64, error return 0, errors.New("could not insert a empty slice") } - if err := session.statement.setRefBean(sliceValue.Index(0).Interface()); err != nil { + if err := session.statement.SetRefBean(sliceValue.Index(0).Interface()); err != nil { return 0, err } @@ -122,11 +118,17 @@ func (session *Session) innerInsertMulti(rowsSlicePtr interface{}) (int64, error var colNames []string var colMultiPlaces []string var args []interface{} - var cols []*core.Column + var cols []*schemas.Column for i := 0; i < size; i++ { v := sliceValue.Index(i) - vv := reflect.Indirect(v) + var vv reflect.Value + switch v.Kind() { + case reflect.Interface: + vv = reflect.Indirect(v.Elem()) + default: + vv = reflect.Indirect(v) + } elemValue := v.Interface() var colPlaces []string @@ -141,123 +143,77 @@ func (session *Session) innerInsertMulti(rowsSlicePtr interface{}) (int64, error } // -- - if i == 0 { - for _, col := range table.Columns() { - ptrFieldValue, err := col.ValueOfV(&vv) + for _, col := range table.Columns() { + ptrFieldValue, err := col.ValueOfV(&vv) + if err != nil { + return 0, err + } + fieldValue := *ptrFieldValue + if col.IsAutoIncrement && utils.IsZero(fieldValue.Interface()) { + continue + } + if col.MapType == schemas.ONLYFROMDB { + continue + } + if col.IsDeleted { + continue + } + if session.statement.OmitColumnMap.Contain(col.Name) { + continue + } + if len(session.statement.ColumnMap) > 0 && !session.statement.ColumnMap.Contain(col.Name) { + continue + } + if (col.IsCreated || col.IsUpdated) && session.statement.UseAutoTime { + val, t := session.engine.nowTime(col) + args = append(args, val) + + var colName = col.Name + session.afterClosures = append(session.afterClosures, func(bean interface{}) { + col := table.GetColumn(colName) + setColumnTime(bean, col, t) + }) + } else if col.IsVersion && session.statement.CheckVersion { + args = append(args, 1) + var colName = col.Name + session.afterClosures = append(session.afterClosures, func(bean interface{}) { + col := table.GetColumn(colName) + setColumnInt(bean, col, 1) + }) + } else { + arg, err := session.statement.Value2Interface(col, fieldValue) if err != nil { return 0, err } - fieldValue := *ptrFieldValue - if col.IsAutoIncrement && isZero(fieldValue.Interface()) { - continue - } - if col.MapType == core.ONLYFROMDB { - continue - } - if col.IsDeleted { - continue - } - if 
session.statement.omitColumnMap.contain(col.Name) { - continue - } - if len(session.statement.columnMap) > 0 && !session.statement.columnMap.contain(col.Name) { - continue - } - if (col.IsCreated || col.IsUpdated) && session.statement.UseAutoTime { - val, t := session.engine.nowTime(col) - args = append(args, val) - - var colName = col.Name - session.afterClosures = append(session.afterClosures, func(bean interface{}) { - col := table.GetColumn(colName) - setColumnTime(bean, col, t) - }) - } else if col.IsVersion && session.statement.checkVersion { - args = append(args, 1) - var colName = col.Name - session.afterClosures = append(session.afterClosures, func(bean interface{}) { - col := table.GetColumn(colName) - setColumnInt(bean, col, 1) - }) - } else { - arg, err := session.value2Interface(col, fieldValue) - if err != nil { - return 0, err - } - args = append(args, arg) - } + args = append(args, arg) + } + if i == 0 { colNames = append(colNames, col.Name) cols = append(cols, col) - colPlaces = append(colPlaces, "?") - } - } else { - for _, col := range cols { - ptrFieldValue, err := col.ValueOfV(&vv) - if err != nil { - return 0, err - } - fieldValue := *ptrFieldValue - - if col.IsAutoIncrement && isZero(fieldValue.Interface()) { - continue - } - if col.MapType == core.ONLYFROMDB { - continue - } - if col.IsDeleted { - continue - } - if session.statement.omitColumnMap.contain(col.Name) { - continue - } - if len(session.statement.columnMap) > 0 && !session.statement.columnMap.contain(col.Name) { - continue - } - if (col.IsCreated || col.IsUpdated) && session.statement.UseAutoTime { - val, t := session.engine.nowTime(col) - args = append(args, val) - - var colName = col.Name - session.afterClosures = append(session.afterClosures, func(bean interface{}) { - col := table.GetColumn(colName) - setColumnTime(bean, col, t) - }) - } else if col.IsVersion && session.statement.checkVersion { - args = append(args, 1) - var colName = col.Name - session.afterClosures = append(session.afterClosures, func(bean interface{}) { - col := table.GetColumn(colName) - setColumnInt(bean, col, 1) - }) - } else { - arg, err := session.value2Interface(col, fieldValue) - if err != nil { - return 0, err - } - args = append(args, arg) - } - - colPlaces = append(colPlaces, "?") } + colPlaces = append(colPlaces, "?") } + colMultiPlaces = append(colMultiPlaces, strings.Join(colPlaces, ", ")) } cleanupProcessorsClosures(&session.beforeClosures) + quoter := session.engine.dialect.Quoter() var sql string - if session.engine.dialect.DBType() == core.ORACLE { + colStr := quoter.Join(colNames, ",") + if session.engine.dialect.URI().DBType == schemas.ORACLE { temp := fmt.Sprintf(") INTO %s (%v) VALUES (", - session.engine.Quote(tableName), - quoteColumns(colNames, session.engine.Quote, ",")) + quoter.Quote(tableName), + colStr) sql = fmt.Sprintf("INSERT ALL INTO %s (%v) VALUES (%v) SELECT 1 FROM DUAL", - session.engine.Quote(tableName), - quoteColumns(colNames, session.engine.Quote, ","), + quoter.Quote(tableName), + colStr, strings.Join(colMultiPlaces, temp)) } else { sql = fmt.Sprintf("INSERT INTO %s (%v) VALUES (%v)", - session.engine.Quote(tableName), - quoteColumns(colNames, session.engine.Quote, ","), + quoter.Quote(tableName), + colStr, strings.Join(colMultiPlaces, "),(")) } res, err := session.exec(sql, args...) 
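// Editor's note: minimal usage sketch, not part of the patch above. It only
// illustrates the batch-insert path refactored in session_insert.go: passing a
// slice now always builds one multi-row INSERT through innerInsertMulti, and an
// empty slice returns ErrNoElementsOnSlice. The User struct, driver import and
// DSN below are hypothetical placeholders, not taken from this repository.
package main

import (
	"fmt"

	_ "github.com/go-sql-driver/mysql" // assumed driver; any supported dialect works
	"xorm.io/xorm"
)

// User is a hypothetical mapped struct used only for this sketch.
type User struct {
	Id   int64
	Name string
}

func main() {
	// Placeholder DSN; replace with a real data source.
	engine, err := xorm.NewEngine("mysql", "user:pass@/dbname")
	if err != nil {
		panic(err)
	}

	users := []User{{Name: "alice"}, {Name: "bob"}}
	// With xorm v1.0.0 this issues a single multi-row statement,
	// roughly: INSERT INTO `user` (`name`) VALUES (?),(?)
	affected, err := engine.Insert(&users)
	fmt.Println(affected, err)

	// Inserting an empty slice no longer silently does nothing;
	// it fails with xorm.ErrNoElementsOnSlice.
	_, err = engine.Insert(&[]User{})
	fmt.Println(err)
}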
@@ -321,15 +277,13 @@ func (session *Session) InsertMulti(rowsSlicePtr interface{}) (int64, error) { } func (session *Session) innerInsert(bean interface{}) (int64, error) { - if err := session.statement.setRefBean(bean); err != nil { + if err := session.statement.SetRefBean(bean); err != nil { return 0, err } if len(session.statement.TableName()) <= 0 { return 0, ErrTableNotFound } - table := session.statement.RefTable - // handle BeforeInsertProcessor for _, closure := range session.beforeClosures { closure(bean) @@ -340,100 +294,19 @@ func (session *Session) innerInsert(bean interface{}) (int64, error) { processor.BeforeInsert() } + var tableName = session.statement.TableName() + table := session.statement.RefTable + colNames, args, err := session.genInsertColumns(bean) if err != nil { return 0, err } - exprs := session.statement.exprColumns - colPlaces := strings.Repeat("?, ", len(colNames)) - if exprs.Len() <= 0 && len(colPlaces) > 0 { - colPlaces = colPlaces[0 : len(colPlaces)-2] - } - - var tableName = session.statement.TableName() - var output string - if session.engine.dialect.DBType() == core.MSSQL && len(table.AutoIncrement) > 0 { - output = fmt.Sprintf(" OUTPUT Inserted.%s", table.AutoIncrement) - } - - var buf = builder.NewWriter() - if _, err := buf.WriteString(fmt.Sprintf("INSERT INTO %s", session.engine.Quote(tableName))); err != nil { + sqlStr, args, err := session.statement.GenInsertSQL(colNames, args) + if err != nil { return 0, err } - if len(colPlaces) <= 0 { - if session.engine.dialect.DBType() == core.MYSQL { - if _, err := buf.WriteString(" VALUES ()"); err != nil { - return 0, err - } - } else { - if _, err := buf.WriteString(fmt.Sprintf("%s DEFAULT VALUES", output)); err != nil { - return 0, err - } - } - } else { - if _, err := buf.WriteString(" ("); err != nil { - return 0, err - } - - if err := writeStrings(buf, append(colNames, exprs.colNames...), "`", "`"); err != nil { - return 0, err - } - - if session.statement.cond.IsValid() { - if _, err := buf.WriteString(fmt.Sprintf(")%s SELECT ", output)); err != nil { - return 0, err - } - - if err := session.statement.writeArgs(buf, args); err != nil { - return 0, err - } - - if len(exprs.args) > 0 { - if _, err := buf.WriteString(","); err != nil { - return 0, err - } - } - if err := exprs.writeArgs(buf); err != nil { - return 0, err - } - - if _, err := buf.WriteString(fmt.Sprintf(" FROM %v WHERE ", session.engine.Quote(tableName))); err != nil { - return 0, err - } - - if err := session.statement.cond.WriteTo(buf); err != nil { - return 0, err - } - } else { - buf.Append(args...) - - if _, err := buf.WriteString(fmt.Sprintf(")%s VALUES (%v", - output, - colPlaces)); err != nil { - return 0, err - } - - if err := exprs.writeArgs(buf); err != nil { - return 0, err - } - - if _, err := buf.WriteString(")"); err != nil { - return 0, err - } - } - } - - if len(table.AutoIncrement) > 0 && session.engine.dialect.DBType() == core.POSTGRES { - if _, err := buf.WriteString(" RETURNING " + session.engine.Quote(table.AutoIncrement)); err != nil { - return 0, err - } - } - - sqlStr := buf.String() - args = buf.Args() - handleAfterInsertProcessorFunc := func(bean interface{}) { if session.isAutoCommit { for _, closure := range session.afterClosures { @@ -464,7 +337,7 @@ func (session *Session) innerInsert(bean interface{}) (int64, error) { // for postgres, many of them didn't implement lastInsertId, so we should // implemented it ourself. 
- if session.engine.dialect.DBType() == core.ORACLE && len(table.AutoIncrement) > 0 { + if session.engine.dialect.URI().DBType == schemas.ORACLE && len(table.AutoIncrement) > 0 { res, err := session.queryBytes("select seq_atable.currval from dual", args...) if err != nil { return 0, err @@ -474,10 +347,10 @@ func (session *Session) innerInsert(bean interface{}) (int64, error) { session.cacheInsert(tableName) - if table.Version != "" && session.statement.checkVersion { + if table.Version != "" && session.statement.CheckVersion { verValue, err := table.VersionColumn().ValueOf(bean) if err != nil { - session.engine.logger.Error(err) + session.engine.logger.Errorf("%v", err) } else if verValue.IsValid() && verValue.CanSet() { session.incrVersionFieldValue(verValue) } @@ -495,7 +368,7 @@ func (session *Session) innerInsert(bean interface{}) (int64, error) { aiValue, err := table.AutoIncrColumn().ValueOf(bean) if err != nil { - session.engine.logger.Error(err) + session.engine.logger.Errorf("%v", err) } if aiValue == nil || !aiValue.IsValid() || !aiValue.CanSet() { @@ -505,7 +378,8 @@ func (session *Session) innerInsert(bean interface{}) (int64, error) { aiValue.Set(int64ToIntValue(id, aiValue.Type())) return 1, nil - } else if len(table.AutoIncrement) > 0 && (session.engine.dialect.DBType() == core.POSTGRES || session.engine.dialect.DBType() == core.MSSQL) { + } else if len(table.AutoIncrement) > 0 && (session.engine.dialect.URI().DBType == schemas.POSTGRES || + session.engine.dialect.URI().DBType == schemas.MSSQL) { res, err := session.queryBytes(sqlStr, args...) if err != nil { @@ -515,10 +389,10 @@ func (session *Session) innerInsert(bean interface{}) (int64, error) { session.cacheInsert(tableName) - if table.Version != "" && session.statement.checkVersion { + if table.Version != "" && session.statement.CheckVersion { verValue, err := table.VersionColumn().ValueOf(bean) if err != nil { - session.engine.logger.Error(err) + session.engine.logger.Errorf("%v", err) } else if verValue.IsValid() && verValue.CanSet() { session.incrVersionFieldValue(verValue) } @@ -536,7 +410,7 @@ func (session *Session) innerInsert(bean interface{}) (int64, error) { aiValue, err := table.AutoIncrColumn().ValueOf(bean) if err != nil { - session.engine.logger.Error(err) + session.engine.logger.Errorf("%v", err) } if aiValue == nil || !aiValue.IsValid() || !aiValue.CanSet() { @@ -546,48 +420,48 @@ func (session *Session) innerInsert(bean interface{}) (int64, error) { aiValue.Set(int64ToIntValue(id, aiValue.Type())) return 1, nil - } else { - res, err := session.exec(sqlStr, args...) - if err != nil { - return 0, err - } - - defer handleAfterInsertProcessorFunc(bean) - - session.cacheInsert(tableName) + } - if table.Version != "" && session.statement.checkVersion { - verValue, err := table.VersionColumn().ValueOf(bean) - if err != nil { - session.engine.logger.Error(err) - } else if verValue.IsValid() && verValue.CanSet() { - session.incrVersionFieldValue(verValue) - } - } + res, err := session.exec(sqlStr, args...) 
+ if err != nil { + return 0, err + } - if table.AutoIncrement == "" { - return res.RowsAffected() - } + defer handleAfterInsertProcessorFunc(bean) - var id int64 - id, err = res.LastInsertId() - if err != nil || id <= 0 { - return res.RowsAffected() - } + session.cacheInsert(tableName) - aiValue, err := table.AutoIncrColumn().ValueOf(bean) + if table.Version != "" && session.statement.CheckVersion { + verValue, err := table.VersionColumn().ValueOf(bean) if err != nil { - session.engine.logger.Error(err) + session.engine.logger.Errorf("%v", err) + } else if verValue.IsValid() && verValue.CanSet() { + session.incrVersionFieldValue(verValue) } + } - if aiValue == nil || !aiValue.IsValid() || !aiValue.CanSet() { - return res.RowsAffected() - } + if table.AutoIncrement == "" { + return res.RowsAffected() + } - aiValue.Set(int64ToIntValue(id, aiValue.Type())) + var id int64 + id, err = res.LastInsertId() + if err != nil || id <= 0 { + return res.RowsAffected() + } + aiValue, err := table.AutoIncrColumn().ValueOf(bean) + if err != nil { + session.engine.logger.Errorf("%v", err) + } + + if aiValue == nil || !aiValue.IsValid() || !aiValue.CanSet() { return res.RowsAffected() } + + aiValue.Set(int64ToIntValue(id, aiValue.Type())) + + return res.RowsAffected() } // InsertOne insert only one struct into database as a record. @@ -605,11 +479,11 @@ func (session *Session) cacheInsert(table string) error { if !session.statement.UseCache { return nil } - cacher := session.engine.getCacher(table) + cacher := session.engine.cacherMgr.GetCacher(table) if cacher == nil { return nil } - session.engine.logger.Debug("[cache] clear sql:", table) + session.engine.logger.Debugf("[cache] clear sql: %v", table) cacher.ClearIds(table) return nil } @@ -621,7 +495,7 @@ func (session *Session) genInsertColumns(bean interface{}) ([]string, []interfac args := make([]interface{}, 0, len(table.ColumnsSeq())) for _, col := range table.Columns() { - if col.MapType == core.ONLYFROMDB { + if col.MapType == schemas.ONLYFROMDB { continue } @@ -629,19 +503,19 @@ func (session *Session) genInsertColumns(bean interface{}) ([]string, []interfac continue } - if session.statement.omitColumnMap.contain(col.Name) { + if session.statement.OmitColumnMap.Contain(col.Name) { continue } - if len(session.statement.columnMap) > 0 && !session.statement.columnMap.contain(col.Name) { + if len(session.statement.ColumnMap) > 0 && !session.statement.ColumnMap.Contain(col.Name) { continue } - if session.statement.incrColumns.isColExist(col.Name) { + if session.statement.IncrColumns.IsColExist(col.Name) { continue - } else if session.statement.decrColumns.isColExist(col.Name) { + } else if session.statement.DecrColumns.IsColExist(col.Name) { continue - } else if session.statement.exprColumns.isColExist(col.Name) { + } else if session.statement.ExprColumns.IsColExist(col.Name) { continue } @@ -651,30 +525,13 @@ func (session *Session) genInsertColumns(bean interface{}) ([]string, []interfac } fieldValue := *fieldValuePtr - if col.IsAutoIncrement { - switch fieldValue.Type().Kind() { - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int, reflect.Int64: - if fieldValue.Int() == 0 { - continue - } - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint, reflect.Uint64: - if fieldValue.Uint() == 0 { - continue - } - case reflect.String: - if len(fieldValue.String()) == 0 { - continue - } - case reflect.Ptr: - if fieldValue.Pointer() == 0 { - continue - } - } + if col.IsAutoIncrement && utils.IsValueZero(fieldValue) { + continue } // 
!evalphobia! set fieldValue as nil when column is nullable and zero-value - if _, ok := getFlagForColumn(session.statement.nullableMap, col); ok { - if col.Nullable && isZero(fieldValue.Interface()) { + if _, ok := getFlagForColumn(session.statement.NullableMap, col); ok { + if col.Nullable && utils.IsValueZero(fieldValue) { var nilValue *int fieldValue = reflect.ValueOf(nilValue) } @@ -690,10 +547,10 @@ func (session *Session) genInsertColumns(bean interface{}) ([]string, []interfac col := table.GetColumn(colName) setColumnTime(bean, col, t) }) - } else if col.IsVersion && session.statement.checkVersion { + } else if col.IsVersion && session.statement.CheckVersion { args = append(args, 1) } else { - arg, err := session.value2Interface(col, fieldValue) + arg, err := session.statement.Value2Interface(col, fieldValue) if err != nil { return colNames, args, err } @@ -716,9 +573,9 @@ func (session *Session) insertMapInterface(m map[string]interface{}) (int64, err } var columns = make([]string, 0, len(m)) - exprs := session.statement.exprColumns + exprs := session.statement.ExprColumns for k := range m { - if !exprs.isColExist(k) { + if !exprs.IsColExist(k) { columns = append(columns, k) } } @@ -743,9 +600,9 @@ func (session *Session) insertMapString(m map[string]string) (int64, error) { } var columns = make([]string, 0, len(m)) - exprs := session.statement.exprColumns + exprs := session.statement.ExprColumns for k := range m { - if !exprs.isColExist(k) { + if !exprs.IsColExist(k) { columns = append(columns, k) } } @@ -766,15 +623,15 @@ func (session *Session) insertMap(columns []string, args []interface{}) (int64, return 0, ErrTableNotFound } - exprs := session.statement.exprColumns + exprs := session.statement.ExprColumns w := builder.NewWriter() // if insert where - if session.statement.cond.IsValid() { + if session.statement.Conds().IsValid() { if _, err := w.WriteString(fmt.Sprintf("INSERT INTO %s (", session.engine.Quote(tableName))); err != nil { return 0, err } - if err := writeStrings(w, append(columns, exprs.colNames...), "`", "`"); err != nil { + if err := session.engine.dialect.Quoter().JoinWrite(w.Builder, append(columns, exprs.ColNames...), ","); err != nil { return 0, err } @@ -782,15 +639,15 @@ func (session *Session) insertMap(columns []string, args []interface{}) (int64, return 0, err } - if err := session.statement.writeArgs(w, args); err != nil { + if err := session.statement.WriteArgs(w, args); err != nil { return 0, err } - if len(exprs.args) > 0 { + if len(exprs.Args) > 0 { if _, err := w.WriteString(","); err != nil { return 0, err } - if err := exprs.writeArgs(w); err != nil { + if err := exprs.WriteArgs(w); err != nil { return 0, err } } @@ -799,7 +656,7 @@ func (session *Session) insertMap(columns []string, args []interface{}) (int64, return 0, err } - if err := session.statement.cond.WriteTo(w); err != nil { + if err := session.statement.Conds().WriteTo(w); err != nil { return 0, err } } else { @@ -810,7 +667,7 @@ func (session *Session) insertMap(columns []string, args []interface{}) (int64, return 0, err } - if err := writeStrings(w, append(columns, exprs.colNames...), "`", "`"); err != nil { + if err := session.engine.dialect.Quoter().JoinWrite(w.Builder, append(columns, exprs.ColNames...), ","); err != nil { return 0, err } if _, err := w.WriteString(fmt.Sprintf(") VALUES (%s", qm)); err != nil { @@ -818,11 +675,11 @@ func (session *Session) insertMap(columns []string, args []interface{}) (int64, } w.Append(args...) 
- if len(exprs.args) > 0 { + if len(exprs.Args) > 0 { if _, err := w.WriteString(","); err != nil { return 0, err } - if err := exprs.writeArgs(w); err != nil { + if err := exprs.WriteArgs(w); err != nil { return 0, err } } diff --git a/vendor/xorm.io/xorm/session_iterate.go b/vendor/xorm.io/xorm/session_iterate.go index ca996c288..8cab8f48f 100644 --- a/vendor/xorm.io/xorm/session_iterate.go +++ b/vendor/xorm.io/xorm/session_iterate.go @@ -4,7 +4,11 @@ package xorm -import "reflect" +import ( + "reflect" + + "xorm.io/xorm/internal/utils" +) // IterFunc only use by Iterate type IterFunc func(idx int, bean interface{}) error @@ -23,11 +27,11 @@ func (session *Session) Iterate(bean interface{}, fun IterFunc) error { defer session.Close() } - if session.statement.lastError != nil { - return session.statement.lastError + if session.statement.LastError != nil { + return session.statement.LastError } - if session.statement.bufferSize > 0 { + if session.statement.BufferSize > 0 { return session.bufferIterate(bean, fun) } @@ -55,27 +59,28 @@ func (session *Session) Iterate(bean interface{}, fun IterFunc) error { // BufferSize sets the buffersize for iterate func (session *Session) BufferSize(size int) *Session { - session.statement.bufferSize = size + session.statement.BufferSize = size return session } func (session *Session) bufferIterate(bean interface{}, fun IterFunc) error { - if session.isAutoClose { - defer session.Close() - } - - var bufferSize = session.statement.bufferSize - var limit = session.statement.LimitN - if limit > 0 && bufferSize > limit { - bufferSize = limit + var bufferSize = session.statement.BufferSize + var pLimitN = session.statement.LimitN + if pLimitN != nil && bufferSize > *pLimitN { + bufferSize = *pLimitN } var start = session.statement.Start - v := rValue(bean) + v := utils.ReflectValue(bean) sliceType := reflect.SliceOf(v.Type()) var idx = 0 - for { + session.autoResetStatement = false + defer func() { + session.autoResetStatement = true + }() + + for bufferSize > 0 { slice := reflect.New(sliceType) - if err := session.Limit(bufferSize, start).find(slice.Interface(), bean); err != nil { + if err := session.NoCache().Limit(bufferSize, start).find(slice.Interface(), bean); err != nil { return err } @@ -86,13 +91,13 @@ func (session *Session) bufferIterate(bean interface{}, fun IterFunc) error { idx++ } - start = start + slice.Elem().Len() - if limit > 0 && idx+bufferSize > limit { - bufferSize = limit - idx + if bufferSize > slice.Elem().Len() { + break } - if bufferSize <= 0 || slice.Elem().Len() < bufferSize || idx == limit { - break + start = start + slice.Elem().Len() + if pLimitN != nil && start+bufferSize > *pLimitN { + bufferSize = *pLimitN - start } } diff --git a/vendor/xorm.io/xorm/session_query.go b/vendor/xorm.io/xorm/session_query.go index 21c00b8d7..121364661 100644 --- a/vendor/xorm.io/xorm/session_query.go +++ b/vendor/xorm.io/xorm/session_query.go @@ -8,82 +8,19 @@ import ( "fmt" "reflect" "strconv" - "strings" "time" - "xorm.io/builder" - "xorm.io/core" + "xorm.io/xorm/core" + "xorm.io/xorm/schemas" ) -func (session *Session) genQuerySQL(sqlOrArgs ...interface{}) (string, []interface{}, error) { - if len(sqlOrArgs) > 0 { - return convertSQLOrArgs(sqlOrArgs...) 
- } - - if session.statement.RawSQL != "" { - return session.statement.RawSQL, session.statement.RawParams, nil - } - - if len(session.statement.TableName()) <= 0 { - return "", nil, ErrTableNotFound - } - - var columnStr = session.statement.ColumnStr - if len(session.statement.selectStr) > 0 { - columnStr = session.statement.selectStr - } else { - if session.statement.JoinStr == "" { - if columnStr == "" { - if session.statement.GroupByStr != "" { - columnStr = session.engine.quoteColumns(session.statement.GroupByStr) - } else { - columnStr = session.statement.genColumnStr() - } - } - } else { - if columnStr == "" { - if session.statement.GroupByStr != "" { - columnStr = session.engine.quoteColumns(session.statement.GroupByStr) - } else { - columnStr = "*" - } - } - } - if columnStr == "" { - columnStr = "*" - } - } - - if err := session.statement.processIDParam(); err != nil { - return "", nil, err - } - - condSQL, condArgs, err := builder.ToSQL(session.statement.cond) - if err != nil { - return "", nil, err - } - - args := append(session.statement.joinArgs, condArgs...) - sqlStr, err := session.statement.genSelectSQL(columnStr, condSQL, true, true) - if err != nil { - return "", nil, err - } - // for mssql and use limit - qs := strings.Count(sqlStr, "?") - if len(args)*2 == qs { - args = append(args, args...) - } - - return sqlStr, args, nil -} - // Query runs a raw sql and return records as []map[string][]byte func (session *Session) Query(sqlOrArgs ...interface{}) ([]map[string][]byte, error) { if session.isAutoClose { defer session.Close() } - sqlStr, args, err := session.genQuerySQL(sqlOrArgs...) + sqlStr, args, err := session.statement.GenQuerySQL(sqlOrArgs...) if err != nil { return nil, err } @@ -116,8 +53,8 @@ func value2String(rawValue *reflect.Value) (str string, err error) { } // time type case reflect.Struct: - if aa.ConvertibleTo(core.TimeType) { - str = vv.Convert(core.TimeType).Interface().(time.Time).Format(time.RFC3339Nano) + if aa.ConvertibleTo(schemas.TimeType) { + str = vv.Convert(schemas.TimeType).Interface().(time.Time).Format(time.RFC3339Nano) } else { err = fmt.Errorf("Unsupported struct type %v", vv.Type().Name()) } @@ -232,7 +169,7 @@ func (session *Session) QueryString(sqlOrArgs ...interface{}) ([]map[string]stri defer session.Close() } - sqlStr, args, err := session.genQuerySQL(sqlOrArgs...) + sqlStr, args, err := session.statement.GenQuerySQL(sqlOrArgs...) if err != nil { return nil, err } @@ -252,7 +189,7 @@ func (session *Session) QuerySliceString(sqlOrArgs ...interface{}) ([][]string, defer session.Close() } - sqlStr, args, err := session.genQuerySQL(sqlOrArgs...) + sqlStr, args, err := session.statement.GenQuerySQL(sqlOrArgs...) if err != nil { return nil, err } @@ -305,7 +242,7 @@ func (session *Session) QueryInterface(sqlOrArgs ...interface{}) ([]map[string]i defer session.Close() } - sqlStr, args, err := session.genQuerySQL(sqlOrArgs...) + sqlStr, args, err := session.statement.GenQuerySQL(sqlOrArgs...) 
if err != nil { return nil, err } diff --git a/vendor/xorm.io/xorm/session_raw.go b/vendor/xorm.io/xorm/session_raw.go index a92982967..4cfe297ab 100644 --- a/vendor/xorm.io/xorm/session_raw.go +++ b/vendor/xorm.io/xorm/session_raw.go @@ -7,15 +7,13 @@ package xorm import ( "database/sql" "reflect" - "time" - "xorm.io/builder" - "xorm.io/core" + "xorm.io/xorm/core" ) func (session *Session) queryPreprocess(sqlStr *string, paramStr ...interface{}) { for _, filter := range session.engine.dialect.Filters() { - *sqlStr = filter.Do(*sqlStr, session.engine.dialect, session.statement.RefTable) + *sqlStr = filter.Do(*sqlStr) } session.lastSQL = *sqlStr @@ -24,30 +22,14 @@ func (session *Session) queryPreprocess(sqlStr *string, paramStr ...interface{}) func (session *Session) queryRows(sqlStr string, args ...interface{}) (*core.Rows, error) { defer session.resetStatement() + if session.statement.LastError != nil { + return nil, session.statement.LastError + } session.queryPreprocess(&sqlStr, args...) - if session.showSQL { - session.lastSQL = sqlStr - session.lastSQLArgs = args - if session.engine.showExecTime { - b4ExecTime := time.Now() - defer func() { - execDuration := time.Since(b4ExecTime) - if len(args) > 0 { - session.engine.logger.Infof("[SQL] %s %#v - took: %v", sqlStr, args, execDuration) - } else { - session.engine.logger.Infof("[SQL] %s - took: %v", sqlStr, execDuration) - } - }() - } else { - if len(args) > 0 { - session.engine.logger.Infof("[SQL] %v %#v", sqlStr, args) - } else { - session.engine.logger.Infof("[SQL] %v", sqlStr) - } - } - } + session.lastSQL = sqlStr + session.lastSQLArgs = args if session.isAutoCommit { var db *core.DB @@ -156,25 +138,8 @@ func (session *Session) exec(sqlStr string, args ...interface{}) (sql.Result, er session.queryPreprocess(&sqlStr, args...) - if session.engine.showSQL { - if session.engine.showExecTime { - b4ExecTime := time.Now() - defer func() { - execDuration := time.Since(b4ExecTime) - if len(args) > 0 { - session.engine.logger.Infof("[SQL] %s %#v - took: %v", sqlStr, args, execDuration) - } else { - session.engine.logger.Infof("[SQL] %s - took: %v", sqlStr, execDuration) - } - }() - } else { - if len(args) > 0 { - session.engine.logger.Infof("[SQL] %v %#v", sqlStr, args) - } else { - session.engine.logger.Infof("[SQL] %v", sqlStr) - } - } - } + session.lastSQL = sqlStr + session.lastSQLArgs = args if !session.isAutoCommit { return session.tx.ExecContext(session.ctx, sqlStr, args...) @@ -196,20 +161,6 @@ func (session *Session) exec(sqlStr string, args ...interface{}) (sql.Result, er return session.DB().ExecContext(session.ctx, sqlStr, args...) } -func convertSQLOrArgs(sqlOrArgs ...interface{}) (string, []interface{}, error) { - switch sqlOrArgs[0].(type) { - case string: - return sqlOrArgs[0].(string), sqlOrArgs[1:], nil - case *builder.Builder: - return sqlOrArgs[0].(*builder.Builder).ToSQL() - case builder.Builder: - bd := sqlOrArgs[0].(builder.Builder) - return bd.ToSQL() - } - - return "", nil, ErrUnSupportedType -} - // Exec raw sql func (session *Session) Exec(sqlOrArgs ...interface{}) (sql.Result, error) { if session.isAutoClose { @@ -220,7 +171,7 @@ func (session *Session) Exec(sqlOrArgs ...interface{}) (sql.Result, error) { return nil, ErrUnSupportedType } - sqlStr, args, err := convertSQLOrArgs(sqlOrArgs...) + sqlStr, args, err := session.statement.ConvertSQLOrArgs(sqlOrArgs...) 
if err != nil { return nil, err } diff --git a/vendor/xorm.io/xorm/session_schema.go b/vendor/xorm.io/xorm/session_schema.go index 5e576c29a..84eb586ed 100644 --- a/vendor/xorm.io/xorm/session_schema.go +++ b/vendor/xorm.io/xorm/session_schema.go @@ -5,11 +5,15 @@ package xorm import ( + "bufio" "database/sql" "fmt" + "io" + "os" "strings" - "xorm.io/core" + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" ) // Ping test if database is ok @@ -32,13 +36,18 @@ func (session *Session) CreateTable(bean interface{}) error { } func (session *Session) createTable(bean interface{}) error { - if err := session.statement.setRefBean(bean); err != nil { + if err := session.statement.SetRefBean(bean); err != nil { return err } - sqlStr := session.statement.genCreateTableSQL() - _, err := session.exec(sqlStr) - return err + sqlStrs := session.statement.GenCreateTableSQL() + for _, s := range sqlStrs { + _, err := session.exec(s) + if err != nil { + return err + } + } + return nil } // CreateIndexes create indexes @@ -51,11 +60,11 @@ func (session *Session) CreateIndexes(bean interface{}) error { } func (session *Session) createIndexes(bean interface{}) error { - if err := session.statement.setRefBean(bean); err != nil { + if err := session.statement.SetRefBean(bean); err != nil { return err } - sqls := session.statement.genIndexSQL() + sqls := session.statement.GenIndexSQL() for _, sqlStr := range sqls { _, err := session.exec(sqlStr) if err != nil { @@ -74,11 +83,11 @@ func (session *Session) CreateUniques(bean interface{}) error { } func (session *Session) createUniques(bean interface{}) error { - if err := session.statement.setRefBean(bean); err != nil { + if err := session.statement.SetRefBean(bean); err != nil { return err } - sqls := session.statement.genUniqueSQL() + sqls := session.statement.GenUniqueSQL() for _, sqlStr := range sqls { _, err := session.exec(sqlStr) if err != nil { @@ -98,11 +107,11 @@ func (session *Session) DropIndexes(bean interface{}) error { } func (session *Session) dropIndexes(bean interface{}) error { - if err := session.statement.setRefBean(bean); err != nil { + if err := session.statement.SetRefBean(bean); err != nil { return err } - sqls := session.statement.genDelIndexSQL() + sqls := session.statement.GenDelIndexSQL() for _, sqlStr := range sqls { _, err := session.exec(sqlStr) if err != nil { @@ -123,18 +132,16 @@ func (session *Session) DropTable(beanOrTableName interface{}) error { func (session *Session) dropTable(beanOrTableName interface{}) error { tableName := session.engine.TableName(beanOrTableName) - var needDrop = true - if !session.engine.dialect.SupportDropIfExists() { - sqlStr, args := session.engine.dialect.TableCheckSql(tableName) - results, err := session.queryBytes(sqlStr, args...) + sqlStr, checkIfExist := session.engine.dialect.DropTableSQL(session.engine.TableName(tableName, true)) + if !checkIfExist { + exist, err := session.engine.dialect.IsTableExist(session.ctx, tableName) if err != nil { return err } - needDrop = len(results) > 0 + checkIfExist = exist } - if needDrop { - sqlStr := session.engine.Dialect().DropTableSql(session.engine.TableName(tableName, true)) + if checkIfExist { _, err := session.exec(sqlStr) return err } @@ -153,9 +160,7 @@ func (session *Session) IsTableExist(beanOrTableName interface{}) (bool, error) } func (session *Session) isTableExist(tableName string) (bool, error) { - sqlStr, args := session.engine.dialect.TableCheckSql(tableName) - results, err := session.queryBytes(sqlStr, args...) 
- return len(results) > 0, err + return session.engine.dialect.IsTableExist(session.ctx, tableName) } // IsTableEmpty if table have any records @@ -182,17 +187,17 @@ func (session *Session) isTableEmpty(tableName string) (bool, error) { // find if index is exist according cols func (session *Session) isIndexExist2(tableName string, cols []string, unique bool) (bool, error) { - indexes, err := session.engine.dialect.GetIndexes(tableName) + indexes, err := session.engine.dialect.GetIndexes(session.ctx, tableName) if err != nil { return false, err } for _, index := range indexes { - if sliceEq(index.Cols, cols) { + if utils.SliceEq(index.Cols, cols) { if unique { - return index.Type == core.UniqueType, nil + return index.Type == schemas.UniqueType, nil } - return index.Type == core.IndexType, nil + return index.Type == schemas.IndexType, nil } } return false, nil @@ -200,21 +205,21 @@ func (session *Session) isIndexExist2(tableName string, cols []string, unique bo func (session *Session) addColumn(colName string) error { col := session.statement.RefTable.GetColumn(colName) - sql, args := session.statement.genAddColumnStr(col) - _, err := session.exec(sql, args...) + sql := session.engine.dialect.AddColumnSQL(session.statement.TableName(), col) + _, err := session.exec(sql) return err } func (session *Session) addIndex(tableName, idxName string) error { index := session.statement.RefTable.Indexes[idxName] - sqlStr := session.engine.dialect.CreateIndexSql(tableName, index) + sqlStr := session.engine.dialect.CreateIndexSQL(tableName, index) _, err := session.exec(sqlStr) return err } func (session *Session) addUnique(tableName, uqeName string) error { index := session.statement.RefTable.Indexes[uqeName] - sqlStr := session.engine.dialect.CreateIndexSql(tableName, index) + sqlStr := session.engine.dialect.CreateIndexSQL(tableName, index) _, err := session.exec(sqlStr) return err } @@ -228,7 +233,7 @@ func (session *Session) Sync2(beans ...interface{}) error { defer session.Close() } - tables, err := engine.dialect.GetTables() + tables, err := engine.dialect.GetTables(session.ctx) if err != nil { return err } @@ -240,8 +245,8 @@ func (session *Session) Sync2(beans ...interface{}) error { }() for _, bean := range beans { - v := rValue(bean) - table, err := engine.mapType(v) + v := utils.ReflectValue(bean) + table, err := engine.tagParser.ParseWithCache(v) if err != nil { return err } @@ -253,7 +258,7 @@ func (session *Session) Sync2(beans ...interface{}) error { } tbNameWithSchema := engine.tbNameWithSchema(tbName) - var oriTable *core.Table + var oriTable *schemas.Table for _, tb := range tables { if strings.EqualFold(engine.tbNameWithSchema(tb.Name), engine.tbNameWithSchema(tbName)) { oriTable = tb @@ -287,7 +292,7 @@ func (session *Session) Sync2(beans ...interface{}) error { // check columns for _, col := range table.Columns() { - var oriCol *core.Column + var oriCol *schemas.Column for _, col2 := range oriTable.Columns() { if strings.EqualFold(col.Name, col2.Name) { oriCol = col2 @@ -298,7 +303,7 @@ func (session *Session) Sync2(beans ...interface{}) error { // column is not exist on table if oriCol == nil { session.statement.RefTable = table - session.statement.tableName = tbNameWithSchema + session.statement.SetTableName(tbNameWithSchema) if err = session.addColumn(col.Name); err != nil { return err } @@ -306,27 +311,27 @@ func (session *Session) Sync2(beans ...interface{}) error { } err = nil - expectedType := engine.dialect.SqlType(col) - curType := engine.dialect.SqlType(oriCol) + 
expectedType := engine.dialect.SQLType(col) + curType := engine.dialect.SQLType(oriCol) if expectedType != curType { - if expectedType == core.Text && - strings.HasPrefix(curType, core.Varchar) { + if expectedType == schemas.Text && + strings.HasPrefix(curType, schemas.Varchar) { // currently only support mysql & postgres - if engine.dialect.DBType() == core.MYSQL || - engine.dialect.DBType() == core.POSTGRES { + if engine.dialect.URI().DBType == schemas.MYSQL || + engine.dialect.URI().DBType == schemas.POSTGRES { engine.logger.Infof("Table %s column %s change type from %s to %s\n", tbNameWithSchema, col.Name, curType, expectedType) - _, err = session.exec(engine.dialect.ModifyColumnSql(tbNameWithSchema, col)) + _, err = session.exec(engine.dialect.ModifyColumnSQL(tbNameWithSchema, col)) } else { engine.logger.Warnf("Table %s column %s db type is %s, struct type is %s\n", tbNameWithSchema, col.Name, curType, expectedType) } - } else if strings.HasPrefix(curType, core.Varchar) && strings.HasPrefix(expectedType, core.Varchar) { - if engine.dialect.DBType() == core.MYSQL { + } else if strings.HasPrefix(curType, schemas.Varchar) && strings.HasPrefix(expectedType, schemas.Varchar) { + if engine.dialect.URI().DBType == schemas.MYSQL { if oriCol.Length < col.Length { engine.logger.Infof("Table %s column %s change type from varchar(%d) to varchar(%d)\n", tbNameWithSchema, col.Name, oriCol.Length, col.Length) - _, err = session.exec(engine.dialect.ModifyColumnSql(tbNameWithSchema, col)) + _, err = session.exec(engine.dialect.ModifyColumnSQL(tbNameWithSchema, col)) } } } else { @@ -335,21 +340,23 @@ func (session *Session) Sync2(beans ...interface{}) error { tbNameWithSchema, col.Name, curType, expectedType) } } - } else if expectedType == core.Varchar { - if engine.dialect.DBType() == core.MYSQL { + } else if expectedType == schemas.Varchar { + if engine.dialect.URI().DBType == schemas.MYSQL { if oriCol.Length < col.Length { engine.logger.Infof("Table %s column %s change type from varchar(%d) to varchar(%d)\n", tbNameWithSchema, col.Name, oriCol.Length, col.Length) - _, err = session.exec(engine.dialect.ModifyColumnSql(tbNameWithSchema, col)) + _, err = session.exec(engine.dialect.ModifyColumnSQL(tbNameWithSchema, col)) } } } if col.Default != oriCol.Default { - if (col.SQLType.Name == core.Bool || col.SQLType.Name == core.Boolean) && + switch { + case col.IsAutoIncrement: // For autoincrement column, don't check default + case (col.SQLType.Name == schemas.Bool || col.SQLType.Name == schemas.Boolean) && ((strings.EqualFold(col.Default, "true") && oriCol.Default == "1") || - (strings.EqualFold(col.Default, "false") && oriCol.Default == "0")) { - } else { + (strings.EqualFold(col.Default, "false") && oriCol.Default == "0")): + default: engine.logger.Warnf("Table %s Column %s db default is %s, struct default is %s", tbName, col.Name, oriCol.Default, col.Default) } @@ -365,10 +372,10 @@ func (session *Session) Sync2(beans ...interface{}) error { } var foundIndexNames = make(map[string]bool) - var addedNames = make(map[string]*core.Index) + var addedNames = make(map[string]*schemas.Index) for name, index := range table.Indexes { - var oriIndex *core.Index + var oriIndex *schemas.Index for name2, index2 := range oriTable.Indexes { if index.Equal(index2) { oriIndex = index2 @@ -379,7 +386,7 @@ func (session *Session) Sync2(beans ...interface{}) error { if oriIndex != nil { if oriIndex.Type != index.Type { - sql := engine.dialect.DropIndexSql(tbNameWithSchema, oriIndex) + sql := 
engine.dialect.DropIndexSQL(tbNameWithSchema, oriIndex) _, err = session.exec(sql) if err != nil { return err @@ -395,7 +402,7 @@ func (session *Session) Sync2(beans ...interface{}) error { for name2, index2 := range oriTable.Indexes { if _, ok := foundIndexNames[name2]; !ok { - sql := engine.dialect.DropIndexSql(tbNameWithSchema, index2) + sql := engine.dialect.DropIndexSQL(tbNameWithSchema, index2) _, err = session.exec(sql) if err != nil { return err @@ -404,13 +411,13 @@ func (session *Session) Sync2(beans ...interface{}) error { } for name, index := range addedNames { - if index.Type == core.UniqueType { + if index.Type == schemas.UniqueType { session.statement.RefTable = table - session.statement.tableName = tbNameWithSchema + session.statement.SetTableName(tbNameWithSchema) err = session.addUnique(tbNameWithSchema, name) - } else if index.Type == core.IndexType { + } else if index.Type == schemas.IndexType { session.statement.RefTable = table - session.statement.tableName = tbNameWithSchema + session.statement.SetTableName(tbNameWithSchema) err = session.addIndex(tbNameWithSchema, name) } if err != nil { @@ -428,3 +435,56 @@ func (session *Session) Sync2(beans ...interface{}) error { return nil } + +// ImportFile SQL DDL file +func (session *Session) ImportFile(ddlPath string) ([]sql.Result, error) { + file, err := os.Open(ddlPath) + if err != nil { + return nil, err + } + defer file.Close() + return session.Import(file) +} + +// Import SQL DDL from io.Reader +func (session *Session) Import(r io.Reader) ([]sql.Result, error) { + var results []sql.Result + var lastError error + scanner := bufio.NewScanner(r) + + var inSingleQuote bool + semiColSpliter := func(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + for i, b := range data { + if b == '\'' { + inSingleQuote = !inSingleQuote + } + if !inSingleQuote && b == ';' { + return i + 1, data[0:i], nil + } + } + // If we're at EOF, we have a final, non-terminated line. Return it. + if atEOF { + return len(data), data, nil + } + // Request more data. + return 0, nil, nil + } + + scanner.Split(semiColSpliter) + + for scanner.Scan() { + query := strings.Trim(scanner.Text(), " \t\n\r") + if len(query) > 0 { + result, err := session.Exec(query) + results = append(results, result) + if err != nil { + return nil, err + } + } + } + + return results, lastError +} diff --git a/vendor/xorm.io/xorm/session_stats.go b/vendor/xorm.io/xorm/session_stats.go index c2cac8306..17d0a675a 100644 --- a/vendor/xorm.io/xorm/session_stats.go +++ b/vendor/xorm.io/xorm/session_stats.go @@ -17,17 +17,9 @@ func (session *Session) Count(bean ...interface{}) (int64, error) { defer session.Close() } - var sqlStr string - var args []interface{} - var err error - if session.statement.RawSQL == "" { - sqlStr, args, err = session.statement.genCountSQL(bean...) - if err != nil { - return 0, err - } - } else { - sqlStr = session.statement.RawSQL - args = session.statement.RawParams + sqlStr, args, err := session.statement.GenCountSQL(bean...) + if err != nil { + return 0, err } var total int64 @@ -50,21 +42,12 @@ func (session *Session) sum(res interface{}, bean interface{}, columnNames ...st return errors.New("need a pointer to a variable") } - var isSlice = v.Elem().Kind() == reflect.Slice - var sqlStr string - var args []interface{} - var err error - if len(session.statement.RawSQL) == 0 { - sqlStr, args, err = session.statement.genSumSQL(bean, columnNames...) 
- if err != nil { - return err - } - } else { - sqlStr = session.statement.RawSQL - args = session.statement.RawParams + sqlStr, args, err := session.statement.GenSumSQL(bean, columnNames...) + if err != nil { + return err } - if isSlice { + if v.Elem().Kind() == reflect.Slice { err = session.queryRow(sqlStr, args...).ScanSlice(res) } else { err = session.queryRow(sqlStr, args...).Scan(res) diff --git a/vendor/xorm.io/xorm/session_tx.go b/vendor/xorm.io/xorm/session_tx.go index ee3d473f9..cd23cf89c 100644 --- a/vendor/xorm.io/xorm/session_tx.go +++ b/vendor/xorm.io/xorm/session_tx.go @@ -4,6 +4,12 @@ package xorm +import ( + "time" + + "xorm.io/xorm/log" +) + // Begin a transaction func (session *Session) Begin() error { if session.isAutoCommit { @@ -14,6 +20,7 @@ func (session *Session) Begin() error { session.isAutoCommit = false session.isCommitedOrRollbacked = false session.tx = tx + session.saveLastSQL("BEGIN TRANSACTION") } return nil @@ -22,10 +29,28 @@ func (session *Session) Begin() error { // Rollback When using transaction, you can rollback if any error func (session *Session) Rollback() error { if !session.isAutoCommit && !session.isCommitedOrRollbacked { - session.saveLastSQL(session.engine.dialect.RollBackStr()) + session.saveLastSQL("ROLL BACK") session.isCommitedOrRollbacked = true session.isAutoCommit = true - return session.tx.Rollback() + + start := time.Now() + needSQL := session.DB().NeedLogSQL(session.ctx) + if needSQL { + session.engine.logger.BeforeSQL(log.LogContext{ + Ctx: session.ctx, + SQL: "ROLL BACK", + }) + } + err := session.tx.Rollback() + if needSQL { + session.engine.logger.AfterSQL(log.LogContext{ + Ctx: session.ctx, + SQL: "ROLL BACK", + ExecuteTime: time.Now().Sub(start), + Err: err, + }) + } + return err } return nil } @@ -36,48 +61,67 @@ func (session *Session) Commit() error { session.saveLastSQL("COMMIT") session.isCommitedOrRollbacked = true session.isAutoCommit = true - var err error - if err = session.tx.Commit(); err == nil { - // handle processors after tx committed - closureCallFunc := func(closuresPtr *[]func(interface{}), bean interface{}) { - if closuresPtr != nil { - for _, closure := range *closuresPtr { - closure(bean) - } + + start := time.Now() + needSQL := session.DB().NeedLogSQL(session.ctx) + if needSQL { + session.engine.logger.BeforeSQL(log.LogContext{ + Ctx: session.ctx, + SQL: "COMMIT", + }) + } + err := session.tx.Commit() + if needSQL { + session.engine.logger.AfterSQL(log.LogContext{ + Ctx: session.ctx, + SQL: "COMMIT", + ExecuteTime: time.Now().Sub(start), + Err: err, + }) + } + + if err != nil { + return err + } + + // handle processors after tx committed + closureCallFunc := func(closuresPtr *[]func(interface{}), bean interface{}) { + if closuresPtr != nil { + for _, closure := range *closuresPtr { + closure(bean) } } + } - for bean, closuresPtr := range session.afterInsertBeans { - closureCallFunc(closuresPtr, bean) + for bean, closuresPtr := range session.afterInsertBeans { + closureCallFunc(closuresPtr, bean) - if processor, ok := interface{}(bean).(AfterInsertProcessor); ok { - processor.AfterInsert() - } + if processor, ok := interface{}(bean).(AfterInsertProcessor); ok { + processor.AfterInsert() } - for bean, closuresPtr := range session.afterUpdateBeans { - closureCallFunc(closuresPtr, bean) + } + for bean, closuresPtr := range session.afterUpdateBeans { + closureCallFunc(closuresPtr, bean) - if processor, ok := interface{}(bean).(AfterUpdateProcessor); ok { - processor.AfterUpdate() - } + if processor, ok := 
interface{}(bean).(AfterUpdateProcessor); ok { + processor.AfterUpdate() } - for bean, closuresPtr := range session.afterDeleteBeans { - closureCallFunc(closuresPtr, bean) + } + for bean, closuresPtr := range session.afterDeleteBeans { + closureCallFunc(closuresPtr, bean) - if processor, ok := interface{}(bean).(AfterDeleteProcessor); ok { - processor.AfterDelete() - } + if processor, ok := interface{}(bean).(AfterDeleteProcessor); ok { + processor.AfterDelete() } - cleanUpFunc := func(slices *map[interface{}]*[]func(interface{})) { - if len(*slices) > 0 { - *slices = make(map[interface{}]*[]func(interface{}), 0) - } + } + cleanUpFunc := func(slices *map[interface{}]*[]func(interface{})) { + if len(*slices) > 0 { + *slices = make(map[interface{}]*[]func(interface{}), 0) } - cleanUpFunc(&session.afterInsertBeans) - cleanUpFunc(&session.afterUpdateBeans) - cleanUpFunc(&session.afterDeleteBeans) } - return err + cleanUpFunc(&session.afterInsertBeans) + cleanUpFunc(&session.afterUpdateBeans) + cleanUpFunc(&session.afterDeleteBeans) } return nil } diff --git a/vendor/xorm.io/xorm/session_update.go b/vendor/xorm.io/xorm/session_update.go index 47ced66d1..62116c473 100644 --- a/vendor/xorm.io/xorm/session_update.go +++ b/vendor/xorm.io/xorm/session_update.go @@ -12,23 +12,25 @@ import ( "strings" "xorm.io/builder" - "xorm.io/core" + "xorm.io/xorm/caches" + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" ) -func (session *Session) cacheUpdate(table *core.Table, tableName, sqlStr string, args ...interface{}) error { +func (session *Session) cacheUpdate(table *schemas.Table, tableName, sqlStr string, args ...interface{}) error { if table == nil || session.tx != nil { return ErrCacheFailed } - oldhead, newsql := session.statement.convertUpdateSQL(sqlStr) + oldhead, newsql := session.statement.ConvertUpdateSQL(sqlStr) if newsql == "" { return ErrCacheFailed } for _, filter := range session.engine.dialect.Filters() { - newsql = filter.Do(newsql, session.engine.dialect, table) + newsql = filter.Do(newsql) } - session.engine.logger.Debug("[cacheUpdate] new sql", oldhead, newsql) + session.engine.logger.Debugf("[cache] new sql: %v, %v", oldhead, newsql) var nStart int if len(args) > 0 { @@ -40,9 +42,9 @@ func (session *Session) cacheUpdate(table *core.Table, tableName, sqlStr string, } } - cacher := session.engine.getCacher(tableName) - session.engine.logger.Debug("[cacheUpdate] get cache sql", newsql, args[nStart:]) - ids, err := core.GetCacheSql(cacher, tableName, newsql, args[nStart:]) + cacher := session.engine.GetCacher(tableName) + session.engine.logger.Debugf("[cache] get cache sql: %v, %v", newsql, args[nStart:]) + ids, err := caches.GetCacheSql(cacher, tableName, newsql, args[nStart:]) if err != nil { rows, err := session.NoCache().queryRows(newsql, args[nStart:]...) 
if err != nil { @@ -50,14 +52,14 @@ func (session *Session) cacheUpdate(table *core.Table, tableName, sqlStr string, } defer rows.Close() - ids = make([]core.PK, 0) + ids = make([]schemas.PK, 0) for rows.Next() { var res = make([]string, len(table.PrimaryKeys)) err = rows.ScanSlice(&res) if err != nil { return err } - var pk core.PK = make([]interface{}, len(table.PrimaryKeys)) + var pk schemas.PK = make([]interface{}, len(table.PrimaryKeys)) for i, col := range table.PKColumns() { if col.SQLType.IsNumeric() { n, err := strconv.ParseInt(res[i], 10, 64) @@ -74,7 +76,7 @@ func (session *Session) cacheUpdate(table *core.Table, tableName, sqlStr string, ids = append(ids, pk) } - session.engine.logger.Debug("[cacheUpdate] find updated id", ids) + session.engine.logger.Debugf("[cache] find updated id: %v", ids) } /*else { session.engine.LogDebug("[xorm:cacheUpdate] del cached sql:", tableName, newsql, args) cacher.DelIds(tableName, genSqlKey(newsql, args)) @@ -86,12 +88,12 @@ func (session *Session) cacheUpdate(table *core.Table, tableName, sqlStr string, return err } if bean := cacher.GetBean(tableName, sid); bean != nil { - sqls := splitNNoCase(sqlStr, "where", 2) + sqls := utils.SplitNNoCase(sqlStr, "where", 2) if len(sqls) == 0 || len(sqls) > 2 { return ErrCacheFailed } - sqls = splitNNoCase(sqls[0], "set", 2) + sqls = utils.SplitNNoCase(sqls[0], "set", 2) if len(sqls) != 2 { return ErrCacheFailed } @@ -101,38 +103,32 @@ func (session *Session) cacheUpdate(table *core.Table, tableName, sqlStr string, sps := strings.SplitN(kv, "=", 2) sps2 := strings.Split(sps[0], ".") colName := sps2[len(sps2)-1] - // treat quote prefix, suffix and '`' as quotes - quotes := append(strings.Split(session.engine.Quote(""), ""), "`") - if strings.ContainsAny(colName, strings.Join(quotes, "")) { - colName = strings.TrimSpace(eraseAny(colName, quotes...)) - } else { - session.engine.logger.Debug("[cacheUpdate] cannot find column", tableName, colName) - return ErrCacheFailed - } + colName = session.engine.dialect.Quoter().Trim(colName) + colName = schemas.CommonQuoter.Trim(colName) if col := table.GetColumn(colName); col != nil { fieldValue, err := col.ValueOf(bean) if err != nil { - session.engine.logger.Error(err) + session.engine.logger.Errorf("%v", err) } else { - session.engine.logger.Debug("[cacheUpdate] set bean field", bean, colName, fieldValue.Interface()) - if col.IsVersion && session.statement.checkVersion { + session.engine.logger.Debugf("[cache] set bean field: %v, %v, %v", bean, colName, fieldValue.Interface()) + if col.IsVersion && session.statement.CheckVersion { session.incrVersionFieldValue(fieldValue) } else { fieldValue.Set(reflect.ValueOf(args[idx])) } } } else { - session.engine.logger.Errorf("[cacheUpdate] ERROR: column %v is not table %v's", + session.engine.logger.Errorf("[cache] ERROR: column %v is not table %v's", colName, table.Name) } } - session.engine.logger.Debug("[cacheUpdate] update cache", tableName, id, bean) + session.engine.logger.Debugf("[cache] update cache: %v, %v, %v", tableName, id, bean) cacher.PutBean(tableName, sid, bean) } } - session.engine.logger.Debug("[cacheUpdate] clear cached table sql:", tableName) + session.engine.logger.Debugf("[cache] clear cached table sql: %v", tableName) cacher.ClearIds(tableName) return nil } @@ -148,11 +144,11 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6 defer session.Close() } - if session.statement.lastError != nil { - return 0, session.statement.lastError + if session.statement.LastError != nil { 
+ return 0, session.statement.LastError } - v := rValue(bean) + v := utils.ReflectValue(bean) t := v.Type() var colNames []string @@ -172,7 +168,7 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6 var isMap = t.Kind() == reflect.Map var isStruct = t.Kind() == reflect.Struct if isStruct { - if err := session.statement.setRefBean(bean); err != nil { + if err := session.statement.SetRefBean(bean); err != nil { return 0, err } @@ -180,14 +176,14 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6 return 0, ErrTableNotFound } - if session.statement.ColumnStr == "" { - colNames, args = session.statement.buildUpdates(bean, false, false, + if session.statement.ColumnStr() == "" { + colNames, args, err = session.statement.BuildUpdates(v, false, false, false, false, true) } else { colNames, args, err = session.genUpdateColumns(bean) - if err != nil { - return 0, err - } + } + if err != nil { + return 0, err } } else if isMap { colNames = make([]string, 0) @@ -205,8 +201,8 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6 table := session.statement.RefTable if session.statement.UseAutoTime && table != nil && table.Updated != "" { - if !session.statement.columnMap.contain(table.Updated) && - !session.statement.omitColumnMap.contain(table.Updated) { + if !session.statement.ColumnMap.Contain(table.Updated) && + !session.statement.OmitColumnMap.Contain(table.Updated) { colNames = append(colNames, session.engine.Quote(table.Updated)+" = ?") col := table.UpdatedColumn() val, t := session.engine.nowTime(col) @@ -223,28 +219,28 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6 } // for update action to like "column = column + ?" - incColumns := session.statement.incrColumns - for i, colName := range incColumns.colNames { + incColumns := session.statement.IncrColumns + for i, colName := range incColumns.ColNames { colNames = append(colNames, session.engine.Quote(colName)+" = "+session.engine.Quote(colName)+" + ?") - args = append(args, incColumns.args[i]) + args = append(args, incColumns.Args[i]) } // for update action to like "column = column - ?" - decColumns := session.statement.decrColumns - for i, colName := range decColumns.colNames { + decColumns := session.statement.DecrColumns + for i, colName := range decColumns.ColNames { colNames = append(colNames, session.engine.Quote(colName)+" = "+session.engine.Quote(colName)+" - ?") - args = append(args, decColumns.args[i]) + args = append(args, decColumns.Args[i]) } // for update action to like "column = expression" - exprColumns := session.statement.exprColumns - for i, colName := range exprColumns.colNames { - switch tp := exprColumns.args[i].(type) { + exprColumns := session.statement.ExprColumns + for i, colName := range exprColumns.ColNames { + switch tp := exprColumns.Args[i].(type) { case string: if len(tp) == 0 { tp = "''" } colNames = append(colNames, session.engine.Quote(colName)+"="+tp) case *builder.Builder: - subQuery, subArgs, err := builder.ToSQL(tp) + subQuery, subArgs, err := session.statement.GenCondSQL(tp) if err != nil { return 0, err } @@ -252,16 +248,16 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6 args = append(args, subArgs...) 
default: colNames = append(colNames, session.engine.Quote(colName)+"=?") - args = append(args, exprColumns.args[i]) + args = append(args, exprColumns.Args[i]) } } - if err = session.statement.processIDParam(); err != nil { + if err = session.statement.ProcessIDParam(); err != nil { return 0, err } var autoCond builder.Cond - if !session.statement.noAutoCondition { + if !session.statement.NoAutoCondition { condBeanIsStruct := false if len(condiBean) > 0 { if c, ok := condiBean[0].(map[string]interface{}); ok { @@ -274,7 +270,7 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6 } if k == reflect.Struct { var err error - autoCond, err = session.statement.buildConds(session.statement.RefTable, condiBean[0], true, true, false, true, false) + autoCond, err = session.statement.BuildConds(session.statement.RefTable, condiBean[0], true, true, false, true, false) if err != nil { return 0, err } @@ -286,8 +282,8 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6 } if !condBeanIsStruct && table != nil { - if col := table.DeletedColumn(); col != nil && !session.statement.unscoped { // tag "deleted" is enabled - autoCond1 := session.engine.CondDeleted(session.engine.Quote(col.Name)) + if col := table.DeletedColumn(); col != nil && !session.statement.GetUnscoped() { // tag "deleted" is enabled + autoCond1 := session.statement.CondDeleted(col) if autoCond == nil { autoCond = autoCond1 @@ -298,26 +294,34 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6 } } - st := &session.statement + st := session.statement - var sqlStr string - var condArgs []interface{} - var condSQL string - cond := session.statement.cond.And(autoCond) + var ( + sqlStr string + condArgs []interface{} + condSQL string + cond = session.statement.Conds().And(autoCond) - var doIncVer = (table != nil && table.Version != "" && session.statement.checkVersion) - var verValue *reflect.Value + doIncVer = isStruct && (table != nil && table.Version != "" && session.statement.CheckVersion) + verValue *reflect.Value + ) if doIncVer { verValue, err = table.VersionColumn().ValueOf(bean) if err != nil { return 0, err } - cond = cond.And(builder.Eq{session.engine.Quote(table.Version): verValue.Interface()}) - colNames = append(colNames, session.engine.Quote(table.Version)+" = "+session.engine.Quote(table.Version)+" + 1") + if verValue != nil { + cond = cond.And(builder.Eq{session.engine.Quote(table.Version): verValue.Interface()}) + colNames = append(colNames, session.engine.Quote(table.Version)+" = "+session.engine.Quote(table.Version)+" + 1") + } } - condSQL, condArgs, err = builder.ToSQL(cond) + if len(colNames) <= 0 { + return 0, errors.New("No content found to be updated") + } + + condSQL, condArgs, err = session.statement.GenCondSQL(cond) if err != nil { return 0, err } @@ -333,25 +337,27 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6 var tableName = session.statement.TableName() // TODO: Oracle support needed var top string - if st.LimitN > 0 { - if st.Engine.dialect.DBType() == core.MYSQL { - condSQL = condSQL + fmt.Sprintf(" LIMIT %d", st.LimitN) - } else if st.Engine.dialect.DBType() == core.SQLITE { - tempCondSQL := condSQL + fmt.Sprintf(" LIMIT %d", st.LimitN) + if st.LimitN != nil { + limitValue := *st.LimitN + switch session.engine.dialect.URI().DBType { + case schemas.MYSQL: + condSQL = condSQL + fmt.Sprintf(" LIMIT %d", limitValue) + case schemas.SQLITE: + tempCondSQL := condSQL + fmt.Sprintf(" LIMIT 
%d", limitValue) cond = cond.And(builder.Expr(fmt.Sprintf("rowid IN (SELECT rowid FROM %v %v)", session.engine.Quote(tableName), tempCondSQL), condArgs...)) - condSQL, condArgs, err = builder.ToSQL(cond) + condSQL, condArgs, err = session.statement.GenCondSQL(cond) if err != nil { return 0, err } if len(condSQL) > 0 { condSQL = "WHERE " + condSQL } - } else if st.Engine.dialect.DBType() == core.POSTGRES { - tempCondSQL := condSQL + fmt.Sprintf(" LIMIT %d", st.LimitN) + case schemas.POSTGRES: + tempCondSQL := condSQL + fmt.Sprintf(" LIMIT %d", limitValue) cond = cond.And(builder.Expr(fmt.Sprintf("CTID IN (SELECT CTID FROM %v %v)", session.engine.Quote(tableName), tempCondSQL), condArgs...)) - condSQL, condArgs, err = builder.ToSQL(cond) + condSQL, condArgs, err = session.statement.GenCondSQL(cond) if err != nil { return 0, err } @@ -359,14 +365,13 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6 if len(condSQL) > 0 { condSQL = "WHERE " + condSQL } - } else if st.Engine.dialect.DBType() == core.MSSQL { - if st.OrderStr != "" && st.Engine.dialect.DBType() == core.MSSQL && - table != nil && len(table.PrimaryKeys) == 1 { + case schemas.MSSQL: + if st.OrderStr != "" && table != nil && len(table.PrimaryKeys) == 1 { cond = builder.Expr(fmt.Sprintf("%s IN (SELECT TOP (%d) %s FROM %v%v)", - table.PrimaryKeys[0], st.LimitN, table.PrimaryKeys[0], + table.PrimaryKeys[0], limitValue, table.PrimaryKeys[0], session.engine.Quote(tableName), condSQL), condArgs...) - condSQL, condArgs, err = builder.ToSQL(cond) + condSQL, condArgs, err = session.statement.GenCondSQL(cond) if err != nil { return 0, err } @@ -374,20 +379,16 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6 condSQL = "WHERE " + condSQL } } else { - top = fmt.Sprintf("TOP (%d) ", st.LimitN) + top = fmt.Sprintf("TOP (%d) ", limitValue) } } } - if len(colNames) <= 0 { - return 0, errors.New("No content found to be updated") - } - var tableAlias = session.engine.Quote(tableName) var fromSQL string if session.statement.TableAlias != "" { - switch session.engine.dialect.DBType() { - case core.MSSQL: + switch session.engine.dialect.URI().DBType { + case schemas.MSSQL: fromSQL = fmt.Sprintf("FROM %s %s ", tableAlias, session.statement.TableAlias) tableAlias = session.statement.TableAlias default: @@ -411,9 +412,9 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6 } } - if cacher := session.engine.getCacher(tableName); cacher != nil && session.statement.UseCache { + if cacher := session.engine.GetCacher(tableName); cacher != nil && session.statement.UseCache { // session.cacheUpdate(table, tableName, sqlStr, args...) 
- session.engine.logger.Debug("[cacheUpdate] clear table ", tableName) + session.engine.logger.Debugf("[cache] clear table: %v", tableName) cacher.ClearIds(tableName) cacher.ClearBeans(tableName) } @@ -424,7 +425,7 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6 closure(bean) } if processor, ok := interface{}(bean).(AfterUpdateProcessor); ok { - session.engine.logger.Debug("[event]", tableName, " has after update processor") + session.engine.logger.Debugf("[event] %v has after update processor", tableName) processor.AfterUpdate() } } else { @@ -458,11 +459,11 @@ func (session *Session) genUpdateColumns(bean interface{}) ([]string, []interfac for _, col := range table.Columns() { if !col.IsVersion && !col.IsCreated && !col.IsUpdated { - if session.statement.omitColumnMap.contain(col.Name) { + if session.statement.OmitColumnMap.Contain(col.Name) { continue } } - if col.MapType == core.ONLYFROMDB { + if col.MapType == schemas.ONLYFROMDB { continue } @@ -472,47 +473,30 @@ func (session *Session) genUpdateColumns(bean interface{}) ([]string, []interfac } fieldValue := *fieldValuePtr - if col.IsAutoIncrement { - switch fieldValue.Type().Kind() { - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int, reflect.Int64: - if fieldValue.Int() == 0 { - continue - } - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint, reflect.Uint64: - if fieldValue.Uint() == 0 { - continue - } - case reflect.String: - if len(fieldValue.String()) == 0 { - continue - } - case reflect.Ptr: - if fieldValue.Pointer() == 0 { - continue - } - } + if col.IsAutoIncrement && utils.IsValueZero(fieldValue) { + continue } - if (col.IsDeleted && !session.statement.unscoped) || col.IsCreated { + if (col.IsDeleted && !session.statement.GetUnscoped()) || col.IsCreated { continue } // if only update specify columns - if len(session.statement.columnMap) > 0 && !session.statement.columnMap.contain(col.Name) { + if len(session.statement.ColumnMap) > 0 && !session.statement.ColumnMap.Contain(col.Name) { continue } - if session.statement.incrColumns.isColExist(col.Name) { + if session.statement.IncrColumns.IsColExist(col.Name) { continue - } else if session.statement.decrColumns.isColExist(col.Name) { + } else if session.statement.DecrColumns.IsColExist(col.Name) { continue - } else if session.statement.exprColumns.isColExist(col.Name) { + } else if session.statement.ExprColumns.IsColExist(col.Name) { continue } // !evalphobia! 
set fieldValue as nil when column is nullable and zero-value - if _, ok := getFlagForColumn(session.statement.nullableMap, col); ok { - if col.Nullable && isZero(fieldValue.Interface()) { + if _, ok := getFlagForColumn(session.statement.NullableMap, col); ok { + if col.Nullable && utils.IsValueZero(fieldValue) { var nilValue *int fieldValue = reflect.ValueOf(nilValue) } @@ -528,10 +512,10 @@ func (session *Session) genUpdateColumns(bean interface{}) ([]string, []interfac col := table.GetColumn(colName) setColumnTime(bean, col, t) }) - } else if col.IsVersion && session.statement.checkVersion { + } else if col.IsVersion && session.statement.CheckVersion { args = append(args, 1) } else { - arg, err := session.value2Interface(col, fieldValue) + arg, err := session.statement.Value2Interface(col, fieldValue) if err != nil { return colNames, args, err } diff --git a/vendor/xorm.io/xorm/statement.go b/vendor/xorm.io/xorm/statement.go deleted file mode 100644 index 67e352136..000000000 --- a/vendor/xorm.io/xorm/statement.go +++ /dev/null @@ -1,1256 +0,0 @@ -// Copyright 2015 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -import ( - "database/sql/driver" - "fmt" - "reflect" - "strings" - "time" - - "xorm.io/builder" - "xorm.io/core" -) - -// Statement save all the sql info for executing SQL -type Statement struct { - RefTable *core.Table - Engine *Engine - Start int - LimitN int - idParam *core.PK - OrderStr string - JoinStr string - joinArgs []interface{} - GroupByStr string - HavingStr string - ColumnStr string - selectStr string - useAllCols bool - OmitStr string - AltTableName string - tableName string - RawSQL string - RawParams []interface{} - UseCascade bool - UseAutoJoin bool - StoreEngine string - Charset string - UseCache bool - UseAutoTime bool - noAutoCondition bool - IsDistinct bool - IsForUpdate bool - TableAlias string - allUseBool bool - checkVersion bool - unscoped bool - columnMap columnMap - omitColumnMap columnMap - mustColumnMap map[string]bool - nullableMap map[string]bool - incrColumns exprParams - decrColumns exprParams - exprColumns exprParams - cond builder.Cond - bufferSize int - context ContextCache - lastError error -} - -// Init reset all the statement's fields -func (statement *Statement) Init() { - statement.RefTable = nil - statement.Start = 0 - statement.LimitN = 0 - statement.OrderStr = "" - statement.UseCascade = true - statement.JoinStr = "" - statement.joinArgs = make([]interface{}, 0) - statement.GroupByStr = "" - statement.HavingStr = "" - statement.ColumnStr = "" - statement.OmitStr = "" - statement.columnMap = columnMap{} - statement.omitColumnMap = columnMap{} - statement.AltTableName = "" - statement.tableName = "" - statement.idParam = nil - statement.RawSQL = "" - statement.RawParams = make([]interface{}, 0) - statement.UseCache = true - statement.UseAutoTime = true - statement.noAutoCondition = false - statement.IsDistinct = false - statement.IsForUpdate = false - statement.TableAlias = "" - statement.selectStr = "" - statement.allUseBool = false - statement.useAllCols = false - statement.mustColumnMap = make(map[string]bool) - statement.nullableMap = make(map[string]bool) - statement.checkVersion = true - statement.unscoped = false - statement.incrColumns = exprParams{} - statement.decrColumns = exprParams{} - statement.exprColumns = exprParams{} - statement.cond = builder.NewCond() - statement.bufferSize = 0 - statement.context = nil - 
statement.lastError = nil -} - -// NoAutoCondition if you do not want convert bean's field as query condition, then use this function -func (statement *Statement) NoAutoCondition(no ...bool) *Statement { - statement.noAutoCondition = true - if len(no) > 0 { - statement.noAutoCondition = no[0] - } - return statement -} - -// Alias set the table alias -func (statement *Statement) Alias(alias string) *Statement { - statement.TableAlias = alias - return statement -} - -// SQL adds raw sql statement -func (statement *Statement) SQL(query interface{}, args ...interface{}) *Statement { - switch query.(type) { - case (*builder.Builder): - var err error - statement.RawSQL, statement.RawParams, err = query.(*builder.Builder).ToSQL() - if err != nil { - statement.lastError = err - } - case string: - statement.RawSQL = query.(string) - statement.RawParams = args - default: - statement.lastError = ErrUnSupportedSQLType - } - - return statement -} - -// Where add Where statement -func (statement *Statement) Where(query interface{}, args ...interface{}) *Statement { - return statement.And(query, args...) -} - -// And add Where & and statement -func (statement *Statement) And(query interface{}, args ...interface{}) *Statement { - switch query.(type) { - case string: - cond := builder.Expr(query.(string), args...) - statement.cond = statement.cond.And(cond) - case map[string]interface{}: - queryMap := query.(map[string]interface{}) - newMap := make(map[string]interface{}) - for k, v := range queryMap { - newMap[statement.Engine.Quote(k)] = v - } - statement.cond = statement.cond.And(builder.Eq(newMap)) - case builder.Cond: - cond := query.(builder.Cond) - statement.cond = statement.cond.And(cond) - for _, v := range args { - if vv, ok := v.(builder.Cond); ok { - statement.cond = statement.cond.And(vv) - } - } - default: - statement.lastError = ErrConditionType - } - - return statement -} - -// Or add Where & Or statement -func (statement *Statement) Or(query interface{}, args ...interface{}) *Statement { - switch query.(type) { - case string: - cond := builder.Expr(query.(string), args...) - statement.cond = statement.cond.Or(cond) - case map[string]interface{}: - cond := builder.Eq(query.(map[string]interface{})) - statement.cond = statement.cond.Or(cond) - case builder.Cond: - cond := query.(builder.Cond) - statement.cond = statement.cond.Or(cond) - for _, v := range args { - if vv, ok := v.(builder.Cond); ok { - statement.cond = statement.cond.Or(vv) - } - } - default: - // TODO: not support condition type - } - return statement -} - -// In generate "Where column IN (?) " statement -func (statement *Statement) In(column string, args ...interface{}) *Statement { - in := builder.In(statement.Engine.Quote(column), args...) - statement.cond = statement.cond.And(in) - return statement -} - -// NotIn generate "Where column NOT IN (?) " statement -func (statement *Statement) NotIn(column string, args ...interface{}) *Statement { - notIn := builder.NotIn(statement.Engine.Quote(column), args...) 
- statement.cond = statement.cond.And(notIn) - return statement -} - -func (statement *Statement) setRefValue(v reflect.Value) error { - var err error - statement.RefTable, err = statement.Engine.autoMapType(reflect.Indirect(v)) - if err != nil { - return err - } - statement.tableName = statement.Engine.TableName(v, true) - return nil -} - -func (statement *Statement) setRefBean(bean interface{}) error { - var err error - statement.RefTable, err = statement.Engine.autoMapType(rValue(bean)) - if err != nil { - return err - } - statement.tableName = statement.Engine.TableName(bean, true) - return nil -} - -// Auto generating update columnes and values according a struct -func (statement *Statement) buildUpdates(bean interface{}, - includeVersion, includeUpdated, includeNil, - includeAutoIncr, update bool) ([]string, []interface{}) { - engine := statement.Engine - table := statement.RefTable - allUseBool := statement.allUseBool - useAllCols := statement.useAllCols - mustColumnMap := statement.mustColumnMap - nullableMap := statement.nullableMap - columnMap := statement.columnMap - omitColumnMap := statement.omitColumnMap - unscoped := statement.unscoped - - var colNames = make([]string, 0) - var args = make([]interface{}, 0) - for _, col := range table.Columns() { - if !includeVersion && col.IsVersion { - continue - } - if col.IsCreated { - continue - } - if !includeUpdated && col.IsUpdated { - continue - } - if !includeAutoIncr && col.IsAutoIncrement { - continue - } - if col.IsDeleted && !unscoped { - continue - } - if omitColumnMap.contain(col.Name) { - continue - } - if len(columnMap) > 0 && !columnMap.contain(col.Name) { - continue - } - - if col.MapType == core.ONLYFROMDB { - continue - } - - if statement.incrColumns.isColExist(col.Name) { - continue - } else if statement.decrColumns.isColExist(col.Name) { - continue - } else if statement.exprColumns.isColExist(col.Name) { - continue - } - - fieldValuePtr, err := col.ValueOf(bean) - if err != nil { - engine.logger.Error(err) - continue - } - - fieldValue := *fieldValuePtr - fieldType := reflect.TypeOf(fieldValue.Interface()) - if fieldType == nil { - continue - } - - requiredField := useAllCols - includeNil := useAllCols - - if b, ok := getFlagForColumn(mustColumnMap, col); ok { - if b { - requiredField = true - } else { - continue - } - } - - // !evalphobia! 
set fieldValue as nil when column is nullable and zero-value - if b, ok := getFlagForColumn(nullableMap, col); ok { - if b && col.Nullable && isZero(fieldValue.Interface()) { - var nilValue *int - fieldValue = reflect.ValueOf(nilValue) - fieldType = reflect.TypeOf(fieldValue.Interface()) - includeNil = true - } - } - - var val interface{} - - if fieldValue.CanAddr() { - if structConvert, ok := fieldValue.Addr().Interface().(core.Conversion); ok { - data, err := structConvert.ToDB() - if err != nil { - engine.logger.Error(err) - } else { - val = data - } - goto APPEND - } - } - - if structConvert, ok := fieldValue.Interface().(core.Conversion); ok { - data, err := structConvert.ToDB() - if err != nil { - engine.logger.Error(err) - } else { - val = data - } - goto APPEND - } - - if fieldType.Kind() == reflect.Ptr { - if fieldValue.IsNil() { - if includeNil { - args = append(args, nil) - colNames = append(colNames, fmt.Sprintf("%v=?", engine.Quote(col.Name))) - } - continue - } else if !fieldValue.IsValid() { - continue - } else { - // dereference ptr type to instance type - fieldValue = fieldValue.Elem() - fieldType = reflect.TypeOf(fieldValue.Interface()) - requiredField = true - } - } - - switch fieldType.Kind() { - case reflect.Bool: - if allUseBool || requiredField { - val = fieldValue.Interface() - } else { - // if a bool in a struct, it will not be as a condition because it default is false, - // please use Where() instead - continue - } - case reflect.String: - if !requiredField && fieldValue.String() == "" { - continue - } - // for MyString, should convert to string or panic - if fieldType.String() != reflect.String.String() { - val = fieldValue.String() - } else { - val = fieldValue.Interface() - } - case reflect.Int8, reflect.Int16, reflect.Int, reflect.Int32, reflect.Int64: - if !requiredField && fieldValue.Int() == 0 { - continue - } - val = fieldValue.Interface() - case reflect.Float32, reflect.Float64: - if !requiredField && fieldValue.Float() == 0.0 { - continue - } - val = fieldValue.Interface() - case reflect.Uint8, reflect.Uint16, reflect.Uint, reflect.Uint32, reflect.Uint64: - if !requiredField && fieldValue.Uint() == 0 { - continue - } - t := int64(fieldValue.Uint()) - val = reflect.ValueOf(&t).Interface() - case reflect.Struct: - if fieldType.ConvertibleTo(core.TimeType) { - t := fieldValue.Convert(core.TimeType).Interface().(time.Time) - if !requiredField && (t.IsZero() || !fieldValue.IsValid()) { - continue - } - val = engine.formatColTime(col, t) - } else if nulType, ok := fieldValue.Interface().(driver.Valuer); ok { - val, _ = nulType.Value() - } else { - if !col.SQLType.IsJson() { - engine.autoMapType(fieldValue) - if table, ok := engine.Tables[fieldValue.Type()]; ok { - if len(table.PrimaryKeys) == 1 { - pkField := reflect.Indirect(fieldValue).FieldByName(table.PKColumns()[0].FieldName) - // fix non-int pk issues - if pkField.IsValid() && (!requiredField && !isZero(pkField.Interface())) { - val = pkField.Interface() - } else { - continue - } - } else { - // TODO: how to handler? 
- panic("not supported") - } - } else { - val = fieldValue.Interface() - } - } else { - // Blank struct could not be as update data - if requiredField || !isStructZero(fieldValue) { - bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - panic(fmt.Sprintf("mashal %v failed", fieldValue.Interface())) - } - if col.SQLType.IsText() { - val = string(bytes) - } else if col.SQLType.IsBlob() { - val = bytes - } - } else { - continue - } - } - } - case reflect.Array, reflect.Slice, reflect.Map: - if !requiredField { - if fieldValue == reflect.Zero(fieldType) { - continue - } - if fieldType.Kind() == reflect.Array { - if isArrayValueZero(fieldValue) { - continue - } - } else if fieldValue.IsNil() || !fieldValue.IsValid() || fieldValue.Len() == 0 { - continue - } - } - - if col.SQLType.IsText() { - bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - engine.logger.Error(err) - continue - } - val = string(bytes) - } else if col.SQLType.IsBlob() { - var bytes []byte - var err error - if fieldType.Kind() == reflect.Slice && - fieldType.Elem().Kind() == reflect.Uint8 { - if fieldValue.Len() > 0 { - val = fieldValue.Bytes() - } else { - continue - } - } else if fieldType.Kind() == reflect.Array && - fieldType.Elem().Kind() == reflect.Uint8 { - val = fieldValue.Slice(0, 0).Interface() - } else { - bytes, err = DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - engine.logger.Error(err) - continue - } - val = bytes - } - } else { - continue - } - default: - val = fieldValue.Interface() - } - - APPEND: - args = append(args, val) - if col.IsPrimaryKey && engine.dialect.DBType() == "ql" { - continue - } - colNames = append(colNames, fmt.Sprintf("%v = ?", engine.Quote(col.Name))) - } - - return colNames, args -} - -func (statement *Statement) needTableName() bool { - return len(statement.JoinStr) > 0 -} - -func (statement *Statement) colName(col *core.Column, tableName string) string { - if statement.needTableName() { - var nm = tableName - if len(statement.TableAlias) > 0 { - nm = statement.TableAlias - } - return statement.Engine.Quote(nm) + "." + statement.Engine.Quote(col.Name) - } - return statement.Engine.Quote(col.Name) -} - -// TableName return current tableName -func (statement *Statement) TableName() string { - if statement.AltTableName != "" { - return statement.AltTableName - } - - return statement.tableName -} - -// ID generate "where id = ? " statement or for composite key "where key1 = ? and key2 = ?" -func (statement *Statement) ID(id interface{}) *Statement { - idValue := reflect.ValueOf(id) - idType := reflect.TypeOf(idValue.Interface()) - - switch idType { - case ptrPkType: - if pkPtr, ok := (id).(*core.PK); ok { - statement.idParam = pkPtr - return statement - } - case pkType: - if pk, ok := (id).(core.PK); ok { - statement.idParam = &pk - return statement - } - } - - switch idType.Kind() { - case reflect.String: - statement.idParam = &core.PK{idValue.Convert(reflect.TypeOf("")).Interface()} - return statement - } - - statement.idParam = &core.PK{id} - return statement -} - -// Incr Generate "Update ... Set column = column + arg" statement -func (statement *Statement) Incr(column string, arg ...interface{}) *Statement { - if len(arg) > 0 { - statement.incrColumns.addParam(column, arg[0]) - } else { - statement.incrColumns.addParam(column, 1) - } - return statement -} - -// Decr Generate "Update ... 
Set column = column - arg" statement -func (statement *Statement) Decr(column string, arg ...interface{}) *Statement { - if len(arg) > 0 { - statement.decrColumns.addParam(column, arg[0]) - } else { - statement.decrColumns.addParam(column, 1) - } - return statement -} - -// SetExpr Generate "Update ... Set column = {expression}" statement -func (statement *Statement) SetExpr(column string, expression interface{}) *Statement { - statement.exprColumns.addParam(column, expression) - return statement -} - -func (statement *Statement) col2NewColsWithQuote(columns ...string) []string { - newColumns := make([]string, 0) - quotes := append(strings.Split(statement.Engine.Quote(""), ""), "`") - for _, col := range columns { - newColumns = append(newColumns, statement.Engine.Quote(eraseAny(col, quotes...))) - } - return newColumns -} - -func (statement *Statement) colmap2NewColsWithQuote() []string { - newColumns := make([]string, len(statement.columnMap), len(statement.columnMap)) - copy(newColumns, statement.columnMap) - for i := 0; i < len(statement.columnMap); i++ { - newColumns[i] = statement.Engine.Quote(newColumns[i]) - } - return newColumns -} - -// Distinct generates "DISTINCT col1, col2 " statement -func (statement *Statement) Distinct(columns ...string) *Statement { - statement.IsDistinct = true - statement.Cols(columns...) - return statement -} - -// ForUpdate generates "SELECT ... FOR UPDATE" statement -func (statement *Statement) ForUpdate() *Statement { - statement.IsForUpdate = true - return statement -} - -// Select replace select -func (statement *Statement) Select(str string) *Statement { - statement.selectStr = str - return statement -} - -// Cols generate "col1, col2" statement -func (statement *Statement) Cols(columns ...string) *Statement { - cols := col2NewCols(columns...) - for _, nc := range cols { - statement.columnMap.add(nc) - } - - newColumns := statement.colmap2NewColsWithQuote() - - statement.ColumnStr = strings.Join(newColumns, ", ") - statement.ColumnStr = strings.Replace(statement.ColumnStr, statement.Engine.quote("*"), "*", -1) - return statement -} - -// AllCols update use only: update all columns -func (statement *Statement) AllCols() *Statement { - statement.useAllCols = true - return statement -} - -// MustCols update use only: must update columns -func (statement *Statement) MustCols(columns ...string) *Statement { - newColumns := col2NewCols(columns...) - for _, nc := range newColumns { - statement.mustColumnMap[strings.ToLower(nc)] = true - } - return statement -} - -// UseBool indicates that use bool fields as update contents and query contiditions -func (statement *Statement) UseBool(columns ...string) *Statement { - if len(columns) > 0 { - statement.MustCols(columns...) - } else { - statement.allUseBool = true - } - return statement -} - -// Omit do not use the columns -func (statement *Statement) Omit(columns ...string) { - newColumns := col2NewCols(columns...) - for _, nc := range newColumns { - statement.omitColumnMap = append(statement.omitColumnMap, nc) - } - statement.OmitStr = statement.Engine.Quote(strings.Join(newColumns, statement.Engine.Quote(", "))) -} - -// Nullable Update use only: update columns to null when value is nullable and zero-value -func (statement *Statement) Nullable(columns ...string) { - newColumns := col2NewCols(columns...) 
- for _, nc := range newColumns { - statement.nullableMap[strings.ToLower(nc)] = true - } -} - -// Top generate LIMIT limit statement -func (statement *Statement) Top(limit int) *Statement { - statement.Limit(limit) - return statement -} - -// Limit generate LIMIT start, limit statement -func (statement *Statement) Limit(limit int, start ...int) *Statement { - statement.LimitN = limit - if len(start) > 0 { - statement.Start = start[0] - } - return statement -} - -// OrderBy generate "Order By order" statement -func (statement *Statement) OrderBy(order string) *Statement { - if len(statement.OrderStr) > 0 { - statement.OrderStr += ", " - } - statement.OrderStr += order - return statement -} - -// Desc generate `ORDER BY xx DESC` -func (statement *Statement) Desc(colNames ...string) *Statement { - var buf strings.Builder - if len(statement.OrderStr) > 0 { - fmt.Fprint(&buf, statement.OrderStr, ", ") - } - newColNames := statement.col2NewColsWithQuote(colNames...) - fmt.Fprintf(&buf, "%v DESC", strings.Join(newColNames, " DESC, ")) - statement.OrderStr = buf.String() - return statement -} - -// Asc provide asc order by query condition, the input parameters are columns. -func (statement *Statement) Asc(colNames ...string) *Statement { - var buf strings.Builder - if len(statement.OrderStr) > 0 { - fmt.Fprint(&buf, statement.OrderStr, ", ") - } - newColNames := statement.col2NewColsWithQuote(colNames...) - fmt.Fprintf(&buf, "%v ASC", strings.Join(newColNames, " ASC, ")) - statement.OrderStr = buf.String() - return statement -} - -// Table tempororily set table name, the parameter could be a string or a pointer of struct -func (statement *Statement) Table(tableNameOrBean interface{}) *Statement { - v := rValue(tableNameOrBean) - t := v.Type() - if t.Kind() == reflect.Struct { - var err error - statement.RefTable, err = statement.Engine.autoMapType(v) - if err != nil { - statement.Engine.logger.Error(err) - return statement - } - } - - statement.AltTableName = statement.Engine.TableName(tableNameOrBean, true) - return statement -} - -// Join The joinOP should be one of INNER, LEFT OUTER, CROSS etc - this will be prepended to JOIN -func (statement *Statement) Join(joinOP string, tablename interface{}, condition string, args ...interface{}) *Statement { - var buf strings.Builder - if len(statement.JoinStr) > 0 { - fmt.Fprintf(&buf, "%v %v JOIN ", statement.JoinStr, joinOP) - } else { - fmt.Fprintf(&buf, "%v JOIN ", joinOP) - } - - switch tp := tablename.(type) { - case builder.Builder: - subSQL, subQueryArgs, err := tp.ToSQL() - if err != nil { - statement.lastError = err - return statement - } - tbs := strings.Split(tp.TableName(), ".") - quotes := append(strings.Split(statement.Engine.Quote(""), ""), "`") - - var aliasName = strings.Trim(tbs[len(tbs)-1], strings.Join(quotes, "")) - fmt.Fprintf(&buf, "(%s) %s ON %v", subSQL, aliasName, condition) - statement.joinArgs = append(statement.joinArgs, subQueryArgs...) - case *builder.Builder: - subSQL, subQueryArgs, err := tp.ToSQL() - if err != nil { - statement.lastError = err - return statement - } - tbs := strings.Split(tp.TableName(), ".") - quotes := append(strings.Split(statement.Engine.Quote(""), ""), "`") - - var aliasName = strings.Trim(tbs[len(tbs)-1], strings.Join(quotes, "")) - fmt.Fprintf(&buf, "(%s) %s ON %v", subSQL, aliasName, condition) - statement.joinArgs = append(statement.joinArgs, subQueryArgs...) 
- default: - tbName := statement.Engine.TableName(tablename, true) - fmt.Fprintf(&buf, "%s ON %v", tbName, condition) - } - - statement.JoinStr = buf.String() - statement.joinArgs = append(statement.joinArgs, args...) - return statement -} - -// GroupBy generate "Group By keys" statement -func (statement *Statement) GroupBy(keys string) *Statement { - statement.GroupByStr = keys - return statement -} - -// Having generate "Having conditions" statement -func (statement *Statement) Having(conditions string) *Statement { - statement.HavingStr = fmt.Sprintf("HAVING %v", conditions) - return statement -} - -// Unscoped always disable struct tag "deleted" -func (statement *Statement) Unscoped() *Statement { - statement.unscoped = true - return statement -} - -func (statement *Statement) genColumnStr() string { - if statement.RefTable == nil { - return "" - } - - var buf strings.Builder - columns := statement.RefTable.Columns() - - for _, col := range columns { - if statement.omitColumnMap.contain(col.Name) { - continue - } - - if len(statement.columnMap) > 0 && !statement.columnMap.contain(col.Name) { - continue - } - - if col.MapType == core.ONLYTODB { - continue - } - - if buf.Len() != 0 { - buf.WriteString(", ") - } - - if statement.JoinStr != "" { - if statement.TableAlias != "" { - buf.WriteString(statement.TableAlias) - } else { - buf.WriteString(statement.TableName()) - } - - buf.WriteString(".") - } - - statement.Engine.QuoteTo(&buf, col.Name) - } - - return buf.String() -} - -func (statement *Statement) genCreateTableSQL() string { - return statement.Engine.dialect.CreateTableSql(statement.RefTable, statement.TableName(), - statement.StoreEngine, statement.Charset) -} - -func (statement *Statement) genIndexSQL() []string { - var sqls []string - tbName := statement.TableName() - for _, index := range statement.RefTable.Indexes { - if index.Type == core.IndexType { - sql := statement.Engine.dialect.CreateIndexSql(tbName, index) - /*idxTBName := strings.Replace(tbName, ".", "_", -1) - idxTBName = strings.Replace(idxTBName, `"`, "", -1) - sql := fmt.Sprintf("CREATE INDEX %v ON %v (%v);", quote(indexName(idxTBName, idxName)), - quote(tbName), quote(strings.Join(index.Cols, quote(","))))*/ - sqls = append(sqls, sql) - } - } - return sqls -} - -func uniqueName(tableName, uqeName string) string { - return fmt.Sprintf("UQE_%v_%v", tableName, uqeName) -} - -func (statement *Statement) genUniqueSQL() []string { - var sqls []string - tbName := statement.TableName() - for _, index := range statement.RefTable.Indexes { - if index.Type == core.UniqueType { - sql := statement.Engine.dialect.CreateIndexSql(tbName, index) - sqls = append(sqls, sql) - } - } - return sqls -} - -func (statement *Statement) genDelIndexSQL() []string { - var sqls []string - tbName := statement.TableName() - idxPrefixName := strings.Replace(tbName, `"`, "", -1) - idxPrefixName = strings.Replace(idxPrefixName, `.`, "_", -1) - for idxName, index := range statement.RefTable.Indexes { - var rIdxName string - if index.Type == core.UniqueType { - rIdxName = uniqueName(idxPrefixName, idxName) - } else if index.Type == core.IndexType { - rIdxName = indexName(idxPrefixName, idxName) - } - sql := fmt.Sprintf("DROP INDEX %v", statement.Engine.Quote(statement.Engine.TableName(rIdxName, true))) - if statement.Engine.dialect.IndexOnTable() { - sql += fmt.Sprintf(" ON %v", statement.Engine.Quote(tbName)) - } - sqls = append(sqls, sql) - } - return sqls -} - -func (statement *Statement) genAddColumnStr(col *core.Column) (string, 
[]interface{}) { - quote := statement.Engine.Quote - sql := fmt.Sprintf("ALTER TABLE %v ADD %v", quote(statement.TableName()), - col.String(statement.Engine.dialect)) - if statement.Engine.dialect.DBType() == core.MYSQL && len(col.Comment) > 0 { - sql += " COMMENT '" + col.Comment + "'" - } - sql += ";" - return sql, []interface{}{} -} - -func (statement *Statement) buildConds(table *core.Table, bean interface{}, includeVersion bool, includeUpdated bool, includeNil bool, includeAutoIncr bool, addedTableName bool) (builder.Cond, error) { - return statement.Engine.buildConds(table, bean, includeVersion, includeUpdated, includeNil, includeAutoIncr, statement.allUseBool, statement.useAllCols, - statement.unscoped, statement.mustColumnMap, statement.TableName(), statement.TableAlias, addedTableName) -} - -func (statement *Statement) mergeConds(bean interface{}) error { - if !statement.noAutoCondition { - var addedTableName = (len(statement.JoinStr) > 0) - autoCond, err := statement.buildConds(statement.RefTable, bean, true, true, false, true, addedTableName) - if err != nil { - return err - } - statement.cond = statement.cond.And(autoCond) - } - - if err := statement.processIDParam(); err != nil { - return err - } - return nil -} - -func (statement *Statement) genConds(bean interface{}) (string, []interface{}, error) { - if err := statement.mergeConds(bean); err != nil { - return "", nil, err - } - - return builder.ToSQL(statement.cond) -} - -func (statement *Statement) genGetSQL(bean interface{}) (string, []interface{}, error) { - v := rValue(bean) - isStruct := v.Kind() == reflect.Struct - if isStruct { - statement.setRefBean(bean) - } - - var columnStr = statement.ColumnStr - if len(statement.selectStr) > 0 { - columnStr = statement.selectStr - } else { - // TODO: always generate column names, not use * even if join - if len(statement.JoinStr) == 0 { - if len(columnStr) == 0 { - if len(statement.GroupByStr) > 0 { - columnStr = statement.Engine.quoteColumns(statement.GroupByStr) - } else { - columnStr = statement.genColumnStr() - } - } - } else { - if len(columnStr) == 0 { - if len(statement.GroupByStr) > 0 { - columnStr = statement.Engine.quoteColumns(statement.GroupByStr) - } - } - } - } - - if len(columnStr) == 0 { - columnStr = "*" - } - - if isStruct { - if err := statement.mergeConds(bean); err != nil { - return "", nil, err - } - } else { - if err := statement.processIDParam(); err != nil { - return "", nil, err - } - } - condSQL, condArgs, err := builder.ToSQL(statement.cond) - if err != nil { - return "", nil, err - } - - sqlStr, err := statement.genSelectSQL(columnStr, condSQL, true, true) - if err != nil { - return "", nil, err - } - - return sqlStr, append(statement.joinArgs, condArgs...), nil -} - -func (statement *Statement) genCountSQL(beans ...interface{}) (string, []interface{}, error) { - var condSQL string - var condArgs []interface{} - var err error - if len(beans) > 0 { - statement.setRefBean(beans[0]) - condSQL, condArgs, err = statement.genConds(beans[0]) - } else { - condSQL, condArgs, err = builder.ToSQL(statement.cond) - } - if err != nil { - return "", nil, err - } - - var selectSQL = statement.selectStr - if len(selectSQL) <= 0 { - if statement.IsDistinct { - selectSQL = fmt.Sprintf("count(DISTINCT %s)", statement.ColumnStr) - } else { - selectSQL = "count(*)" - } - } - sqlStr, err := statement.genSelectSQL(selectSQL, condSQL, false, false) - if err != nil { - return "", nil, err - } - - return sqlStr, append(statement.joinArgs, condArgs...), nil -} - -func 
(statement *Statement) genSumSQL(bean interface{}, columns ...string) (string, []interface{}, error) { - statement.setRefBean(bean) - - var sumStrs = make([]string, 0, len(columns)) - for _, colName := range columns { - if !strings.Contains(colName, " ") && !strings.Contains(colName, "(") { - colName = statement.Engine.Quote(colName) - } - sumStrs = append(sumStrs, fmt.Sprintf("COALESCE(sum(%s),0)", colName)) - } - sumSelect := strings.Join(sumStrs, ", ") - - condSQL, condArgs, err := statement.genConds(bean) - if err != nil { - return "", nil, err - } - - sqlStr, err := statement.genSelectSQL(sumSelect, condSQL, true, true) - if err != nil { - return "", nil, err - } - - return sqlStr, append(statement.joinArgs, condArgs...), nil -} - -func (statement *Statement) genSelectSQL(columnStr, condSQL string, needLimit, needOrderBy bool) (string, error) { - var ( - distinct string - dialect = statement.Engine.Dialect() - quote = statement.Engine.Quote - fromStr = " FROM " - top, mssqlCondi, whereStr string - ) - if statement.IsDistinct && !strings.HasPrefix(columnStr, "count") { - distinct = "DISTINCT " - } - if len(condSQL) > 0 { - whereStr = " WHERE " + condSQL - } - - if dialect.DBType() == core.MSSQL && strings.Contains(statement.TableName(), "..") { - fromStr += statement.TableName() - } else { - fromStr += quote(statement.TableName()) - } - - if statement.TableAlias != "" { - if dialect.DBType() == core.ORACLE { - fromStr += " " + quote(statement.TableAlias) - } else { - fromStr += " AS " + quote(statement.TableAlias) - } - } - if statement.JoinStr != "" { - fromStr = fmt.Sprintf("%v %v", fromStr, statement.JoinStr) - } - - if dialect.DBType() == core.MSSQL { - if statement.LimitN > 0 { - top = fmt.Sprintf("TOP %d ", statement.LimitN) - } - if statement.Start > 0 { - var column string - if len(statement.RefTable.PKColumns()) == 0 { - for _, index := range statement.RefTable.Indexes { - if len(index.Cols) == 1 { - column = index.Cols[0] - break - } - } - if len(column) == 0 { - column = statement.RefTable.ColumnsSeq()[0] - } - } else { - column = statement.RefTable.PKColumns()[0].Name - } - if statement.needTableName() { - if len(statement.TableAlias) > 0 { - column = statement.TableAlias + "." + column - } else { - column = statement.TableName() + "." 
+ column - } - } - - var orderStr string - if needOrderBy && len(statement.OrderStr) > 0 { - orderStr = " ORDER BY " + statement.OrderStr - } - - var groupStr string - if len(statement.GroupByStr) > 0 { - groupStr = " GROUP BY " + statement.GroupByStr - } - mssqlCondi = fmt.Sprintf("(%s NOT IN (SELECT TOP %d %s%s%s%s%s))", - column, statement.Start, column, fromStr, whereStr, orderStr, groupStr) - } - } - - var buf strings.Builder - fmt.Fprintf(&buf, "SELECT %v%v%v%v%v", distinct, top, columnStr, fromStr, whereStr) - if len(mssqlCondi) > 0 { - if len(whereStr) > 0 { - fmt.Fprint(&buf, " AND ", mssqlCondi) - } else { - fmt.Fprint(&buf, " WHERE ", mssqlCondi) - } - } - - if statement.GroupByStr != "" { - fmt.Fprint(&buf, " GROUP BY ", statement.GroupByStr) - } - if statement.HavingStr != "" { - fmt.Fprint(&buf, " ", statement.HavingStr) - } - if needOrderBy && statement.OrderStr != "" { - fmt.Fprint(&buf, " ORDER BY ", statement.OrderStr) - } - if needLimit { - if dialect.DBType() != core.MSSQL && dialect.DBType() != core.ORACLE { - if statement.Start > 0 { - fmt.Fprintf(&buf, " LIMIT %v OFFSET %v", statement.LimitN, statement.Start) - } else if statement.LimitN > 0 { - fmt.Fprint(&buf, " LIMIT ", statement.LimitN) - } - } else if dialect.DBType() == core.ORACLE { - if statement.Start != 0 || statement.LimitN != 0 { - oldString := buf.String() - buf.Reset() - rawColStr := columnStr - if rawColStr == "*" { - rawColStr = "at.*" - } - fmt.Fprintf(&buf, "SELECT %v FROM (SELECT %v,ROWNUM RN FROM (%v) at WHERE ROWNUM <= %d) aat WHERE RN > %d", - columnStr, rawColStr, oldString, statement.Start+statement.LimitN, statement.Start) - } - } - } - if statement.IsForUpdate { - return dialect.ForUpdateSql(buf.String()), nil - } - - return buf.String(), nil -} - -func (statement *Statement) processIDParam() error { - if statement.idParam == nil || statement.RefTable == nil { - return nil - } - - if len(statement.RefTable.PrimaryKeys) != len(*statement.idParam) { - return fmt.Errorf("ID condition is error, expect %d primarykeys, there are %d", - len(statement.RefTable.PrimaryKeys), - len(*statement.idParam), - ) - } - - for i, col := range statement.RefTable.PKColumns() { - var colName = statement.colName(col, statement.TableName()) - statement.cond = statement.cond.And(builder.Eq{colName: (*(statement.idParam))[i]}) - } - return nil -} - -func (statement *Statement) joinColumns(cols []*core.Column, includeTableName bool) string { - var colnames = make([]string, len(cols)) - for i, col := range cols { - if includeTableName { - colnames[i] = statement.Engine.Quote(statement.TableName()) + - "." 
+ statement.Engine.Quote(col.Name) - } else { - colnames[i] = statement.Engine.Quote(col.Name) - } - } - return strings.Join(colnames, ", ") -} - -func (statement *Statement) convertIDSQL(sqlStr string) string { - if statement.RefTable != nil { - cols := statement.RefTable.PKColumns() - if len(cols) == 0 { - return "" - } - - colstrs := statement.joinColumns(cols, false) - sqls := splitNNoCase(sqlStr, " from ", 2) - if len(sqls) != 2 { - return "" - } - - var top string - if statement.LimitN > 0 && statement.Engine.dialect.DBType() == core.MSSQL { - top = fmt.Sprintf("TOP %d ", statement.LimitN) - } - - newsql := fmt.Sprintf("SELECT %s%s FROM %v", top, colstrs, sqls[1]) - return newsql - } - return "" -} - -func (statement *Statement) convertUpdateSQL(sqlStr string) (string, string) { - if statement.RefTable == nil || len(statement.RefTable.PrimaryKeys) != 1 { - return "", "" - } - - colstrs := statement.joinColumns(statement.RefTable.PKColumns(), true) - sqls := splitNNoCase(sqlStr, "where", 2) - if len(sqls) != 2 { - if len(sqls) == 1 { - return sqls[0], fmt.Sprintf("SELECT %v FROM %v", - colstrs, statement.Engine.Quote(statement.TableName())) - } - return "", "" - } - - var whereStr = sqls[1] - - // TODO: for postgres only, if any other database? - var paraStr string - if statement.Engine.dialect.DBType() == core.POSTGRES { - paraStr = "$" - } else if statement.Engine.dialect.DBType() == core.MSSQL { - paraStr = ":" - } - - if paraStr != "" { - if strings.Contains(sqls[1], paraStr) { - dollers := strings.Split(sqls[1], paraStr) - whereStr = dollers[0] - for i, c := range dollers[1:] { - ccs := strings.SplitN(c, " ", 2) - whereStr += fmt.Sprintf(paraStr+"%v %v", i+1, ccs[1]) - } - } - } - - return sqls[0], fmt.Sprintf("SELECT %v FROM %v WHERE %v", - colstrs, statement.Engine.Quote(statement.TableName()), - whereStr) -} diff --git a/vendor/xorm.io/xorm/statement_columnmap.go b/vendor/xorm.io/xorm/statement_columnmap.go deleted file mode 100644 index b6523b1e7..000000000 --- a/vendor/xorm.io/xorm/statement_columnmap.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -import "strings" - -type columnMap []string - -func (m columnMap) contain(colName string) bool { - if len(m) == 0 { - return false - } - - n := len(colName) - for _, mk := range m { - if len(mk) != n { - continue - } - if strings.EqualFold(mk, colName) { - return true - } - } - - return false -} - -func (m *columnMap) add(colName string) bool { - if m.contain(colName) { - return false - } - *m = append(*m, colName) - return true -} diff --git a/vendor/xorm.io/xorm/statement_quote.go b/vendor/xorm.io/xorm/statement_quote.go deleted file mode 100644 index e22e0d147..000000000 --- a/vendor/xorm.io/xorm/statement_quote.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -func trimQuote(s string) string { - if len(s) == 0 { - return s - } - - if s[0] == '`' { - s = s[1:] - } - if len(s) > 0 && s[len(s)-1] == '`' { - return s[:len(s)-1] - } - return s -} diff --git a/vendor/xorm.io/xorm/tags/parser.go b/vendor/xorm.io/xorm/tags/parser.go new file mode 100644 index 000000000..236d2d466 --- /dev/null +++ b/vendor/xorm.io/xorm/tags/parser.go @@ -0,0 +1,307 @@ +// Copyright 2020 The Xorm Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tags + +import ( + "encoding/gob" + "errors" + "fmt" + "reflect" + "strings" + "sync" + "time" + + "xorm.io/xorm/caches" + "xorm.io/xorm/convert" + "xorm.io/xorm/dialects" + "xorm.io/xorm/names" + "xorm.io/xorm/schemas" +) + +var ( + ErrUnsupportedType = errors.New("Unsupported type") +) + +type Parser struct { + identifier string + dialect dialects.Dialect + columnMapper names.Mapper + tableMapper names.Mapper + handlers map[string]Handler + cacherMgr *caches.Manager + tableCache sync.Map // map[reflect.Type]*schemas.Table +} + +func NewParser(identifier string, dialect dialects.Dialect, tableMapper, columnMapper names.Mapper, cacherMgr *caches.Manager) *Parser { + return &Parser{ + identifier: identifier, + dialect: dialect, + tableMapper: tableMapper, + columnMapper: columnMapper, + handlers: defaultTagHandlers, + cacherMgr: cacherMgr, + } +} + +func (parser *Parser) GetTableMapper() names.Mapper { + return parser.tableMapper +} + +func (parser *Parser) SetTableMapper(mapper names.Mapper) { + parser.ClearCaches() + parser.tableMapper = mapper +} + +func (parser *Parser) GetColumnMapper() names.Mapper { + return parser.columnMapper +} + +func (parser *Parser) SetColumnMapper(mapper names.Mapper) { + parser.ClearCaches() + parser.columnMapper = mapper +} + +func (parser *Parser) ParseWithCache(v reflect.Value) (*schemas.Table, error) { + t := v.Type() + tableI, ok := parser.tableCache.Load(t) + if ok { + return tableI.(*schemas.Table), nil + } + + table, err := parser.Parse(v) + if err != nil { + return nil, err + } + + parser.tableCache.Store(t, table) + + if parser.cacherMgr.GetDefaultCacher() != nil { + if v.CanAddr() { + gob.Register(v.Addr().Interface()) + } else { + gob.Register(v.Interface()) + } + } + + return table, nil +} + +// ClearCacheTable removes the database mapper of a type from the cache +func (parser *Parser) ClearCacheTable(t reflect.Type) { + parser.tableCache.Delete(t) +} + +// ClearCaches removes all the cached table information parsed by structs +func (parser *Parser) ClearCaches() { + parser.tableCache = sync.Map{} +} + +func addIndex(indexName string, table *schemas.Table, col *schemas.Column, indexType int) { + if index, ok := table.Indexes[indexName]; ok { + index.AddColumn(col.Name) + col.Indexes[index.Name] = indexType + } else { + index := schemas.NewIndex(indexName, indexType) + index.AddColumn(col.Name) + table.AddIndex(index) + col.Indexes[index.Name] = indexType + } +} + +// Parse parses a struct as a table information +func (parser *Parser) Parse(v reflect.Value) (*schemas.Table, error) { + t := v.Type() + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return nil, ErrUnsupportedType + } + + table := schemas.NewEmptyTable() + table.Type = t + table.Name = names.GetTableName(parser.tableMapper, v) + + var idFieldColName string + var hasCacheTag, hasNoCacheTag bool + + for i := 0; i < t.NumField(); i++ { + tag := t.Field(i).Tag + + ormTagStr := tag.Get(parser.identifier) + var col *schemas.Column + fieldValue := v.Field(i) + fieldType := fieldValue.Type() + + if ormTagStr != "" { + col = &schemas.Column{ + FieldName: t.Field(i).Name, + Nullable: true, + IsPrimaryKey: false, + IsAutoIncrement: false, + MapType: schemas.TWOSIDES, + Indexes: make(map[string]int), + DefaultIsEmpty: true, + } + tags := splitTag(ormTagStr) + + if len(tags) > 0 { + if tags[0] == "-" { + continue + } + + var 
ctx = Context{ + table: table, + col: col, + fieldValue: fieldValue, + indexNames: make(map[string]int), + parser: parser, + } + + if strings.HasPrefix(strings.ToUpper(tags[0]), "EXTENDS") { + pStart := strings.Index(tags[0], "(") + if pStart > -1 && strings.HasSuffix(tags[0], ")") { + var tagPrefix = strings.TrimFunc(tags[0][pStart+1:len(tags[0])-1], func(r rune) bool { + return r == '\'' || r == '"' + }) + + ctx.params = []string{tagPrefix} + } + + if err := ExtendsTagHandler(&ctx); err != nil { + return nil, err + } + continue + } + + for j, key := range tags { + if ctx.ignoreNext { + ctx.ignoreNext = false + continue + } + + k := strings.ToUpper(key) + ctx.tagName = k + ctx.params = []string{} + + pStart := strings.Index(k, "(") + if pStart == 0 { + return nil, errors.New("( could not be the first character") + } + if pStart > -1 { + if !strings.HasSuffix(k, ")") { + return nil, fmt.Errorf("field %s tag %s cannot match ) character", col.FieldName, key) + } + + ctx.tagName = k[:pStart] + ctx.params = strings.Split(key[pStart+1:len(k)-1], ",") + } + + if j > 0 { + ctx.preTag = strings.ToUpper(tags[j-1]) + } + if j < len(tags)-1 { + ctx.nextTag = tags[j+1] + } else { + ctx.nextTag = "" + } + + if h, ok := parser.handlers[ctx.tagName]; ok { + if err := h(&ctx); err != nil { + return nil, err + } + } else { + if strings.HasPrefix(key, "'") && strings.HasSuffix(key, "'") { + col.Name = key[1 : len(key)-1] + } else { + col.Name = key + } + } + + if ctx.hasCacheTag { + hasCacheTag = true + } + if ctx.hasNoCacheTag { + hasNoCacheTag = true + } + } + + if col.SQLType.Name == "" { + col.SQLType = schemas.Type2SQLType(fieldType) + } + parser.dialect.SQLType(col) + if col.Length == 0 { + col.Length = col.SQLType.DefaultLength + } + if col.Length2 == 0 { + col.Length2 = col.SQLType.DefaultLength2 + } + if col.Name == "" { + col.Name = parser.columnMapper.Obj2Table(t.Field(i).Name) + } + + if ctx.isUnique { + ctx.indexNames[col.Name] = schemas.UniqueType + } else if ctx.isIndex { + ctx.indexNames[col.Name] = schemas.IndexType + } + + for indexName, indexType := range ctx.indexNames { + addIndex(indexName, table, col, indexType) + } + } + } else { + var sqlType schemas.SQLType + if fieldValue.CanAddr() { + if _, ok := fieldValue.Addr().Interface().(convert.Conversion); ok { + sqlType = schemas.SQLType{Name: schemas.Text} + } + } + if _, ok := fieldValue.Interface().(convert.Conversion); ok { + sqlType = schemas.SQLType{Name: schemas.Text} + } else { + sqlType = schemas.Type2SQLType(fieldType) + } + col = schemas.NewColumn(parser.columnMapper.Obj2Table(t.Field(i).Name), + t.Field(i).Name, sqlType, sqlType.DefaultLength, + sqlType.DefaultLength2, true) + + if fieldType.Kind() == reflect.Int64 && (strings.ToUpper(col.FieldName) == "ID" || strings.HasSuffix(strings.ToUpper(col.FieldName), ".ID")) { + idFieldColName = col.Name + } + } + if col.IsAutoIncrement { + col.Nullable = false + } + + table.AddColumn(col) + + } // end for + + if idFieldColName != "" && len(table.PrimaryKeys) == 0 { + col := table.GetColumn(idFieldColName) + col.IsPrimaryKey = true + col.IsAutoIncrement = true + col.Nullable = false + table.PrimaryKeys = append(table.PrimaryKeys, col.Name) + table.AutoIncrement = col.Name + } + + if hasCacheTag { + if parser.cacherMgr.GetDefaultCacher() != nil { // !nash! 
use engine's cacher if provided + //engine.logger.Info("enable cache on table:", table.Name) + parser.cacherMgr.SetCacher(table.Name, parser.cacherMgr.GetDefaultCacher()) + } else { + //engine.logger.Info("enable LRU cache on table:", table.Name) + parser.cacherMgr.SetCacher(table.Name, caches.NewLRUCacher2(caches.NewMemoryStore(), time.Hour, 10000)) + } + } + if hasNoCacheTag { + //engine.logger.Info("disable cache on table:", table.Name) + parser.cacherMgr.SetCacher(table.Name, nil) + } + + return table, nil +} diff --git a/vendor/xorm.io/xorm/tag.go b/vendor/xorm.io/xorm/tags/tag.go similarity index 72% rename from vendor/xorm.io/xorm/tag.go rename to vendor/xorm.io/xorm/tags/tag.go index ec8d5cf05..ee3f1e824 100644 --- a/vendor/xorm.io/xorm/tag.go +++ b/vendor/xorm.io/xorm/tags/tag.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package xorm +package tags import ( "fmt" @@ -11,31 +11,52 @@ import ( "strings" "time" - "xorm.io/core" + "xorm.io/xorm/schemas" ) -type tagContext struct { +func splitTag(tag string) (tags []string) { + tag = strings.TrimSpace(tag) + var hasQuote = false + var lastIdx = 0 + for i, t := range tag { + if t == '\'' { + hasQuote = !hasQuote + } else if t == ' ' { + if lastIdx < i && !hasQuote { + tags = append(tags, strings.TrimSpace(tag[lastIdx:i])) + lastIdx = i + 1 + } + } + } + if lastIdx < len(tag) { + tags = append(tags, strings.TrimSpace(tag[lastIdx:])) + } + return +} + +// Context represents a context for xorm tag parse. +type Context struct { tagName string params []string preTag, nextTag string - table *core.Table - col *core.Column + table *schemas.Table + col *schemas.Column fieldValue reflect.Value isIndex bool isUnique bool indexNames map[string]int - engine *Engine + parser *Parser hasCacheTag bool hasNoCacheTag bool ignoreNext bool } -// tagHandler describes tag handler for XORM -type tagHandler func(ctx *tagContext) error +// Handler describes tag handler for XORM +type Handler func(ctx *Context) error var ( // defaultTagHandlers enumerates all the default tag handler - defaultTagHandlers = map[string]tagHandler{ + defaultTagHandlers = map[string]Handler{ "<-": OnlyFromDBTagHandler, "->": OnlyToDBTagHandler, "PK": PKTagHandler, @@ -59,49 +80,49 @@ var ( ) func init() { - for k := range core.SqlTypes { + for k := range schemas.SqlTypes { defaultTagHandlers[k] = SQLTypeTagHandler } } // IgnoreTagHandler describes ignored tag handler -func IgnoreTagHandler(ctx *tagContext) error { +func IgnoreTagHandler(ctx *Context) error { return nil } // OnlyFromDBTagHandler describes mapping direction tag handler -func OnlyFromDBTagHandler(ctx *tagContext) error { - ctx.col.MapType = core.ONLYFROMDB +func OnlyFromDBTagHandler(ctx *Context) error { + ctx.col.MapType = schemas.ONLYFROMDB return nil } // OnlyToDBTagHandler describes mapping direction tag handler -func OnlyToDBTagHandler(ctx *tagContext) error { - ctx.col.MapType = core.ONLYTODB +func OnlyToDBTagHandler(ctx *Context) error { + ctx.col.MapType = schemas.ONLYTODB return nil } -// PKTagHandler decribes primary key tag handler -func PKTagHandler(ctx *tagContext) error { +// PKTagHandler describes primary key tag handler +func PKTagHandler(ctx *Context) error { ctx.col.IsPrimaryKey = true ctx.col.Nullable = false return nil } // NULLTagHandler describes null tag handler -func NULLTagHandler(ctx *tagContext) error { +func NULLTagHandler(ctx *Context) error { ctx.col.Nullable = (strings.ToUpper(ctx.preTag) != "NOT") return nil } 
// NotNullTagHandler describes notnull tag handler -func NotNullTagHandler(ctx *tagContext) error { +func NotNullTagHandler(ctx *Context) error { ctx.col.Nullable = false return nil } // AutoIncrTagHandler describes autoincr tag handler -func AutoIncrTagHandler(ctx *tagContext) error { +func AutoIncrTagHandler(ctx *Context) error { ctx.col.IsAutoIncrement = true /* if len(ctx.params) > 0 { @@ -118,7 +139,7 @@ func AutoIncrTagHandler(ctx *tagContext) error { } // DefaultTagHandler describes default tag handler -func DefaultTagHandler(ctx *tagContext) error { +func DefaultTagHandler(ctx *Context) error { if len(ctx.params) > 0 { ctx.col.Default = ctx.params[0] } else { @@ -130,26 +151,26 @@ func DefaultTagHandler(ctx *tagContext) error { } // CreatedTagHandler describes created tag handler -func CreatedTagHandler(ctx *tagContext) error { +func CreatedTagHandler(ctx *Context) error { ctx.col.IsCreated = true return nil } // VersionTagHandler describes version tag handler -func VersionTagHandler(ctx *tagContext) error { +func VersionTagHandler(ctx *Context) error { ctx.col.IsVersion = true ctx.col.Default = "1" return nil } // UTCTagHandler describes utc tag handler -func UTCTagHandler(ctx *tagContext) error { +func UTCTagHandler(ctx *Context) error { ctx.col.TimeZone = time.UTC return nil } // LocalTagHandler describes local tag handler -func LocalTagHandler(ctx *tagContext) error { +func LocalTagHandler(ctx *Context) error { if len(ctx.params) == 0 { ctx.col.TimeZone = time.Local } else { @@ -163,21 +184,21 @@ func LocalTagHandler(ctx *tagContext) error { } // UpdatedTagHandler describes updated tag handler -func UpdatedTagHandler(ctx *tagContext) error { +func UpdatedTagHandler(ctx *Context) error { ctx.col.IsUpdated = true return nil } // DeletedTagHandler describes deleted tag handler -func DeletedTagHandler(ctx *tagContext) error { +func DeletedTagHandler(ctx *Context) error { ctx.col.IsDeleted = true return nil } // IndexTagHandler describes index tag handler -func IndexTagHandler(ctx *tagContext) error { +func IndexTagHandler(ctx *Context) error { if len(ctx.params) > 0 { - ctx.indexNames[ctx.params[0]] = core.IndexType + ctx.indexNames[ctx.params[0]] = schemas.IndexType } else { ctx.isIndex = true } @@ -185,9 +206,9 @@ func IndexTagHandler(ctx *tagContext) error { } // UniqueTagHandler describes unique tag handler -func UniqueTagHandler(ctx *tagContext) error { +func UniqueTagHandler(ctx *Context) error { if len(ctx.params) > 0 { - ctx.indexNames[ctx.params[0]] = core.UniqueType + ctx.indexNames[ctx.params[0]] = schemas.UniqueType } else { ctx.isUnique = true } @@ -195,7 +216,7 @@ func UniqueTagHandler(ctx *tagContext) error { } // CommentTagHandler add comment to column -func CommentTagHandler(ctx *tagContext) error { +func CommentTagHandler(ctx *Context) error { if len(ctx.params) > 0 { ctx.col.Comment = strings.Trim(ctx.params[0], "' ") } @@ -203,17 +224,17 @@ func CommentTagHandler(ctx *tagContext) error { } // SQLTypeTagHandler describes SQL Type tag handler -func SQLTypeTagHandler(ctx *tagContext) error { - ctx.col.SQLType = core.SQLType{Name: ctx.tagName} +func SQLTypeTagHandler(ctx *Context) error { + ctx.col.SQLType = schemas.SQLType{Name: ctx.tagName} if len(ctx.params) > 0 { - if ctx.tagName == core.Enum { + if ctx.tagName == schemas.Enum { ctx.col.EnumOptions = make(map[string]int) for k, v := range ctx.params { v = strings.TrimSpace(v) v = strings.Trim(v, "'") ctx.col.EnumOptions[v] = k } - } else if ctx.tagName == core.Set { + } else if ctx.tagName == schemas.Set { 
ctx.col.SetOptions = make(map[string]int) for k, v := range ctx.params { v = strings.TrimSpace(v) @@ -243,7 +264,7 @@ func SQLTypeTagHandler(ctx *tagContext) error { } // ExtendsTagHandler describes extends tag handler -func ExtendsTagHandler(ctx *tagContext) error { +func ExtendsTagHandler(ctx *Context) error { var fieldValue = ctx.fieldValue var isPtr = false switch fieldValue.Kind() { @@ -259,7 +280,7 @@ func ExtendsTagHandler(ctx *tagContext) error { isPtr = true fallthrough case reflect.Struct: - parentTable, err := ctx.engine.mapType(fieldValue) + parentTable, err := ctx.parser.Parse(fieldValue) if err != nil { return err } @@ -295,7 +316,7 @@ func ExtendsTagHandler(ctx *tagContext) error { } // CacheTagHandler describes cache tag handler -func CacheTagHandler(ctx *tagContext) error { +func CacheTagHandler(ctx *Context) error { if !ctx.hasCacheTag { ctx.hasCacheTag = true } @@ -303,7 +324,7 @@ func CacheTagHandler(ctx *tagContext) error { } // NoCacheTagHandler describes nocache tag handler -func NoCacheTagHandler(ctx *tagContext) error { +func NoCacheTagHandler(ctx *Context) error { if !ctx.hasNoCacheTag { ctx.hasNoCacheTag = true } diff --git a/vendor/xorm.io/xorm/test_mssql.sh b/vendor/xorm.io/xorm/test_mssql.sh deleted file mode 100644 index 7f060cff3..000000000 --- a/vendor/xorm.io/xorm/test_mssql.sh +++ /dev/null @@ -1 +0,0 @@ -go test -db=mssql -conn_str="server=localhost;user id=sa;password=yourStrong(!)Password;database=xorm_test" \ No newline at end of file diff --git a/vendor/xorm.io/xorm/test_mssql_cache.sh b/vendor/xorm.io/xorm/test_mssql_cache.sh deleted file mode 100644 index 76efd6ca0..000000000 --- a/vendor/xorm.io/xorm/test_mssql_cache.sh +++ /dev/null @@ -1 +0,0 @@ -go test -db=mssql -conn_str="server=192.168.1.58;user id=sa;password=123456;database=xorm_test" -cache=true \ No newline at end of file diff --git a/vendor/xorm.io/xorm/test_mymysql.sh b/vendor/xorm.io/xorm/test_mymysql.sh deleted file mode 100644 index f7780d14f..000000000 --- a/vendor/xorm.io/xorm/test_mymysql.sh +++ /dev/null @@ -1 +0,0 @@ -go test -db=mymysql -conn_str="xorm_test/root/" \ No newline at end of file diff --git a/vendor/xorm.io/xorm/test_mymysql_cache.sh b/vendor/xorm.io/xorm/test_mymysql_cache.sh deleted file mode 100644 index 0100286d6..000000000 --- a/vendor/xorm.io/xorm/test_mymysql_cache.sh +++ /dev/null @@ -1 +0,0 @@ -go test -db=mymysql -conn_str="xorm_test/root/" -cache=true \ No newline at end of file diff --git a/vendor/xorm.io/xorm/test_mysql.sh b/vendor/xorm.io/xorm/test_mysql.sh deleted file mode 100644 index 650e4ee17..000000000 --- a/vendor/xorm.io/xorm/test_mysql.sh +++ /dev/null @@ -1 +0,0 @@ -go test -db=mysql -conn_str="root:@/xorm_test" \ No newline at end of file diff --git a/vendor/xorm.io/xorm/test_mysql_cache.sh b/vendor/xorm.io/xorm/test_mysql_cache.sh deleted file mode 100644 index c542e7359..000000000 --- a/vendor/xorm.io/xorm/test_mysql_cache.sh +++ /dev/null @@ -1 +0,0 @@ -go test -db=mysql -conn_str="root:@/xorm_test" -cache=true \ No newline at end of file diff --git a/vendor/xorm.io/xorm/test_postgres.sh b/vendor/xorm.io/xorm/test_postgres.sh deleted file mode 100644 index dc1152e0a..000000000 --- a/vendor/xorm.io/xorm/test_postgres.sh +++ /dev/null @@ -1 +0,0 @@ -go test -db=postgres -conn_str="dbname=xorm_test sslmode=disable" \ No newline at end of file diff --git a/vendor/xorm.io/xorm/test_postgres_cache.sh b/vendor/xorm.io/xorm/test_postgres_cache.sh deleted file mode 100644 index 462fc948c..000000000 --- a/vendor/xorm.io/xorm/test_postgres_cache.sh 
+++ /dev/null @@ -1 +0,0 @@ -go test -db=postgres -conn_str="dbname=xorm_test sslmode=disable" -cache=true \ No newline at end of file diff --git a/vendor/xorm.io/xorm/test_sqlite.sh b/vendor/xorm.io/xorm/test_sqlite.sh deleted file mode 100644 index 6352b5cb5..000000000 --- a/vendor/xorm.io/xorm/test_sqlite.sh +++ /dev/null @@ -1 +0,0 @@ -go test -db=sqlite3 -conn_str="./test.db?cache=shared&mode=rwc" \ No newline at end of file diff --git a/vendor/xorm.io/xorm/test_sqlite_cache.sh b/vendor/xorm.io/xorm/test_sqlite_cache.sh deleted file mode 100644 index 75a054c3f..000000000 --- a/vendor/xorm.io/xorm/test_sqlite_cache.sh +++ /dev/null @@ -1 +0,0 @@ -go test -db=sqlite3 -conn_str="./test.db?cache=shared&mode=rwc" -cache=true \ No newline at end of file diff --git a/vendor/xorm.io/xorm/test_tidb.sh b/vendor/xorm.io/xorm/test_tidb.sh deleted file mode 100644 index 03d2d6cd8..000000000 --- a/vendor/xorm.io/xorm/test_tidb.sh +++ /dev/null @@ -1 +0,0 @@ -go test -db=mysql -conn_str="root:@tcp(localhost:4000)/xorm_test" -ignore_select_update=true \ No newline at end of file diff --git a/vendor/xorm.io/xorm/transaction.go b/vendor/xorm.io/xorm/transaction.go deleted file mode 100644 index 4104103fd..000000000 --- a/vendor/xorm.io/xorm/transaction.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2018 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -// Transaction Execute sql wrapped in a transaction(abbr as tx), tx will automatic commit if no errors occurred -func (engine *Engine) Transaction(f func(*Session) (interface{}, error)) (interface{}, error) { - session := engine.NewSession() - defer session.Close() - - if err := session.Begin(); err != nil { - return nil, err - } - - result, err := f(session) - if err != nil { - return nil, err - } - - if err := session.Commit(); err != nil { - return nil, err - } - - return result, nil -} diff --git a/vendor/xorm.io/xorm/types.go b/vendor/xorm.io/xorm/types.go deleted file mode 100644 index c76a54606..000000000 --- a/vendor/xorm.io/xorm/types.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2017 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -import ( - "reflect" - - "xorm.io/core" -) - -var ( - ptrPkType = reflect.TypeOf(&core.PK{}) - pkType = reflect.TypeOf(core.PK{}) -) diff --git a/vendor/xorm.io/xorm/xorm.go b/vendor/xorm.io/xorm/xorm.go index e1c83b56f..2025522f2 100644 --- a/vendor/xorm.io/xorm/xorm.go +++ b/vendor/xorm.io/xorm/xorm.go @@ -2,118 +2,66 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.8 +// +build go1.11 package xorm import ( "context" - "fmt" "os" - "reflect" "runtime" - "sync" "time" - "xorm.io/core" + "xorm.io/xorm/caches" + "xorm.io/xorm/dialects" + "xorm.io/xorm/log" + "xorm.io/xorm/names" + "xorm.io/xorm/schemas" + "xorm.io/xorm/tags" ) -const ( - // Version show the xorm's version - Version string = "0.8.0.1015" -) - -func regDrvsNDialects() bool { - providedDrvsNDialects := map[string]struct { - dbType core.DbType - getDriver func() core.Driver - getDialect func() core.Dialect - }{ - "mssql": {"mssql", func() core.Driver { return &odbcDriver{} }, func() core.Dialect { return &mssql{} }}, - "odbc": {"mssql", func() core.Driver { return &odbcDriver{} }, func() core.Dialect { return &mssql{} }}, // !nashtsai! 
TODO change this when supporting MS Access - "mysql": {"mysql", func() core.Driver { return &mysqlDriver{} }, func() core.Dialect { return &mysql{} }}, - "mymysql": {"mysql", func() core.Driver { return &mymysqlDriver{} }, func() core.Dialect { return &mysql{} }}, - "postgres": {"postgres", func() core.Driver { return &pqDriver{} }, func() core.Dialect { return &postgres{} }}, - "pgx": {"postgres", func() core.Driver { return &pqDriverPgx{} }, func() core.Dialect { return &postgres{} }}, - "sqlite3": {"sqlite3", func() core.Driver { return &sqlite3Driver{} }, func() core.Dialect { return &sqlite3{} }}, - "oci8": {"oracle", func() core.Driver { return &oci8Driver{} }, func() core.Dialect { return &oracle{} }}, - "goracle": {"oracle", func() core.Driver { return &goracleDriver{} }, func() core.Dialect { return &oracle{} }}, - } - - for driverName, v := range providedDrvsNDialects { - if driver := core.QueryDriver(driverName); driver == nil { - core.RegisterDriver(driverName, v.getDriver()) - core.RegisterDialect(v.dbType, v.getDialect) - } - } - return true -} - func close(engine *Engine) { engine.Close() } -func init() { - regDrvsNDialects() -} - // NewEngine new a db manager according to the parameter. Currently support four // drivers func NewEngine(driverName string, dataSourceName string) (*Engine, error) { - driver := core.QueryDriver(driverName) - if driver == nil { - return nil, fmt.Errorf("Unsupported driver name: %v", driverName) - } - - uri, err := driver.Parse(driverName, dataSourceName) - if err != nil { - return nil, err - } - - dialect := core.QueryDialect(uri.DbType) - if dialect == nil { - return nil, fmt.Errorf("Unsupported dialect type: %v", uri.DbType) - } - - db, err := core.Open(driverName, dataSourceName) + dialect, err := dialects.OpenDialect(driverName, dataSourceName) if err != nil { return nil, err } - err = dialect.Init(db, uri, driverName, dataSourceName) - if err != nil { - return nil, err - } + cacherMgr := caches.NewManager() + mapper := names.NewCacheMapper(new(names.SnakeMapper)) + tagParser := tags.NewParser("xorm", dialect, mapper, mapper, cacherMgr) engine := &Engine{ - db: db, dialect: dialect, - Tables: make(map[reflect.Type]*core.Table), - mutex: &sync.RWMutex{}, - TagIdentifier: "xorm", TZLocation: time.Local, - tagHandlers: defaultTagHandlers, - cachers: make(map[string]core.Cacher), defaultContext: context.Background(), + cacherMgr: cacherMgr, + tagParser: tagParser, + driverName: driverName, + dataSourceName: dataSourceName, } - if uri.DbType == core.SQLITE { + if dialect.URI().DBType == schemas.SQLITE { engine.DatabaseTZ = time.UTC } else { engine.DatabaseTZ = time.Local } - logger := NewSimpleLogger(os.Stdout) - logger.SetLevel(core.LOG_INFO) - engine.SetLogger(logger) - engine.SetMapper(core.NewCacheMapper(new(core.SnakeMapper))) + logger := log.NewSimpleLogger(os.Stdout) + logger.SetLevel(log.LOG_INFO) + engine.SetLogger(log.NewLoggerAdapter(logger)) runtime.SetFinalizer(engine, close) return engine, nil } -// NewEngineWithParams new a db manager with params. The params will be passed to dialect. +// NewEngineWithParams new a db manager with params. The params will be passed to dialects. func NewEngineWithParams(driverName string, dataSourceName string, params map[string]string) (*Engine, error) { engine, err := NewEngine(driverName, dataSourceName) engine.dialect.SetParams(params)