chore(project): 添加项目配置文件和忽略规则

- 添加 Babel 配置文件支持 ES6+ 语法转换
- 添加 ESLint 忽略规则和配置文件
- 添加 Git 忽略规则文件
- 添加 Travis CI 配置文件
- 添加 1.4.2 版本变更日志文件
- 添加 Helm 图表辅助模板文件
- 添加 Helm 忽略规则文件
This commit is contained in:
2026-03-27 17:36:48 +08:00
commit c2453d6434
1703 changed files with 277582 additions and 0 deletions

129
rm-datasource/pom.xml Normal file
View File

@@ -0,0 +1,129 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
  ~ Copyright 1999-2019 Seata.io Group.
  ~
  ~ Licensed under the Apache License, Version 2.0 (the "License");
  ~ you may not use this file except in compliance with the License.
  ~ You may obtain a copy of the License at
  ~
  ~      http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
-->
<!-- Build descriptor for the seata-rm-datasource module (AT/XA data source proxies). -->
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <groupId>io.seata</groupId>
        <artifactId>seata-parent</artifactId>
        <!-- ${revision} is the CI-friendly version placeholder resolved by the parent build. -->
        <version>${revision}</version>
    </parent>
    <modelVersion>4.0.0</modelVersion>
    <artifactId>seata-rm-datasource</artifactId>
    <packaging>jar</packaging>
    <name>seata-rm-datasource ${project.version}</name>
    <dependencies>
        <!-- Sibling Seata modules this one builds on. -->
        <dependency>
            <groupId>${project.groupId}</groupId>
            <artifactId>seata-core</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>${project.groupId}</groupId>
            <artifactId>seata-rm</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>${project.groupId}</groupId>
            <artifactId>seata-sqlparser-core</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>${project.groupId}</groupId>
            <artifactId>seata-compressor-all</artifactId>
            <version>${project.version}</version>
        </dependency>
        <!-- Caching (versions managed by the parent POM). -->
        <dependency>
            <groupId>com.github.ben-manes.caffeine</groupId>
            <artifactId>caffeine</artifactId>
        </dependency>
        <!-- Optional serializers: only needed when the corresponding codec is chosen at runtime. -->
        <dependency>
            <groupId>io.protostuff</groupId>
            <artifactId>protostuff-core</artifactId>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>io.protostuff</groupId>
            <artifactId>protostuff-runtime</artifactId>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-databind</artifactId>
        </dependency>
        <!-- Test-only: pooled data source + in-memory DB for unit tests. -->
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-dbcp2</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>com.h2database</groupId>
            <artifactId>h2</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>${project.groupId}</groupId>
            <artifactId>seata-sqlparser-druid</artifactId>
            <version>${project.version}</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>com.google.guava</groupId>
            <artifactId>guava</artifactId>
        </dependency>
        <!-- Provided+optional: callers supply these at runtime if they use the matching feature. -->
        <dependency>
            <groupId>com.esotericsoftware</groupId>
            <artifactId>kryo</artifactId>
            <scope>provided</scope>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>de.javakaffee</groupId>
            <artifactId>kryo-serializers</artifactId>
            <scope>provided</scope>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>de.ruedigermoeller</groupId>
            <artifactId>fst</artifactId>
            <scope>provided</scope>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <scope>provided</scope>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>druid</artifactId>
            <scope>provided</scope>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <scope>test</scope>
        </dependency>
    </dependencies>
</project>

View File

@@ -0,0 +1,193 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm;
import io.seata.common.exception.ShouldNeverHappenException;
import io.seata.core.model.BranchType;
import io.seata.core.model.Resource;
import io.seata.rm.datasource.SeataDataSourceProxy;
import io.seata.rm.datasource.xa.Holdable;
import io.seata.rm.datasource.xa.Holder;
import javax.sql.DataSource;
import java.io.PrintWriter;
import java.sql.Driver;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.logging.Logger;
/**
 * Base class of those DataSources working as Seata Resource.
 *
 * <p>Combines three roles: a JDBC {@link DataSource} facade delegating to
 * {@link #dataSource}, a Seata {@link Resource} (id / group / branch type),
 * and a {@link Holder} keeping {@link Holdable} values by string key.
 *
 * @author sharajava
 */
public abstract class BaseDataSourceResource<T extends Holdable> implements SeataDataSourceProxy, Resource, Holder<T> {

    // The real DataSource being proxied; may be null until set by a subclass.
    protected DataSource dataSource;

    // Identity of this resource as registered with the Seata server.
    protected String resourceId;

    protected String resourceGroupId;

    protected BranchType branchType;

    // Database vendor type (e.g. mysql, oracle) — string form, set externally.
    protected String dbType;

    protected Driver driver;

    // Keyed store of held values; concurrent because branches may run on different threads.
    private ConcurrentHashMap<String, T> keeper = new ConcurrentHashMap<>();

    /**
     * Gets target data source.
     *
     * @return the target data source
     */
    @Override
    public DataSource getTargetDataSource() {
        return dataSource;
    }

    @Override
    public String getResourceId() {
        return resourceId;
    }

    public void setResourceId(String resourceId) {
        this.resourceId = resourceId;
    }

    @Override
    public String getResourceGroupId() {
        return resourceGroupId;
    }

    public void setResourceGroupId(String resourceGroupId) {
        this.resourceGroupId = resourceGroupId;
    }

    @Override
    public BranchType getBranchType() {
        return branchType;
    }

    public void setBranchType(BranchType branchType) {
        this.branchType = branchType;
    }

    public String getDbType() {
        return dbType;
    }

    public void setDbType(String dbType) {
        this.dbType = dbType;
    }

    public Driver getDriver() {
        return driver;
    }

    public void setDriver(Driver driver) {
        this.driver = driver;
    }

    // NOTE(review): the method-level <T> here shadows the class type parameter T;
    // this unwraps the proxy itself (not the target data source), returning null
    // rather than throwing when the interface does not match.
    @Override
    public <T> T unwrap(Class<T> iface) throws SQLException {
        if (iface == null) {
            return null;
        }
        if (iface.isInstance(this)) {
            return (T) this;
        }
        return null;
    }

    @Override
    public boolean isWrapperFor(Class<?> iface) throws SQLException {
        return iface != null && iface.isInstance(this);
    }

    // Guard used by the DataSource delegation methods below: fail fast when no
    // target data source has been configured.
    protected void dataSourceCheck() {
        if (dataSource == null) {
            throw new UnsupportedOperationException("dataSource CAN NOT be null");
        }
    }

    @Override
    public PrintWriter getLogWriter() throws SQLException {
        dataSourceCheck();
        return dataSource.getLogWriter();
    }

    @Override
    public void setLogWriter(PrintWriter out) throws SQLException {
        dataSourceCheck();
        dataSource.setLogWriter(out);
    }

    @Override
    public void setLoginTimeout(int seconds) throws SQLException {
        dataSourceCheck();
        dataSource.setLoginTimeout(seconds);
    }

    @Override
    public int getLoginTimeout() throws SQLException {
        dataSourceCheck();
        return dataSource.getLoginTimeout();
    }

    @Override
    public Logger getParentLogger() throws SQLFeatureNotSupportedException {
        dataSourceCheck();
        return dataSource.getParentLogger();
    }

    /**
     * Holds {@code value} under {@code key}.
     *
     * <p>If the value is already marked held, it must be the one registered for
     * this key — anything else indicates keeper corruption. Otherwise the value
     * is stored, marked held, and the previous mapping (possibly null) returned.
     */
    @Override
    public T hold(String key, T value) {
        if (value.isHeld()) {
            T x = keeper.get(key);
            if (x != value) {
                throw new ShouldNeverHappenException("something wrong with keeper, keeping[" + x +
                    "] but[" + value + "] is also kept with the same key[" + key + "]");
            }
            return value;
        }
        T x = keeper.put(key, value);
        value.setHeld(true);
        return x;
    }

    /**
     * Releases {@code value} from {@code key}; the removed mapping must be the
     * very same instance (identity check), otherwise keeper state is corrupt.
     */
    @Override
    public T release(String key, T value) {
        T x = keeper.remove(key);
        if (x != value) {
            throw new ShouldNeverHappenException("something wrong with keeper, released[" + x +
                "] but[" + value + "] is wanted with key[" + key + "]");
        }
        value.setHeld(false);
        return x;
    }

    @Override
    public T lookup(String key) {
        return keeper.get(key);
    }
}

View File

@@ -0,0 +1,38 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm;
import io.seata.core.model.GlobalLockConfig;
/**
 * Executor to execute business logic that requires the global lock.
 *
 * @author selfishlover
 */
public interface GlobalLockExecutor {

    /**
     * Execute the business logic.
     *
     * @return the business return value
     * @throws Throwable whatever is thrown during execution
     */
    Object execute() throws Throwable;

    /**
     * Global lock config info.
     *
     * @return the global lock configuration to apply while executing
     */
    GlobalLockConfig getGlobalLockConfig();
}

View File

@@ -0,0 +1,57 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm;
import io.seata.core.context.GlobalLockConfigHolder;
import io.seata.core.context.RootContext;
import io.seata.core.model.GlobalLockConfig;
/**
 * Executor template for local transactions that must run under the
 * global-lock flag.
 *
 * @author selfishlover
 */
public class GlobalLockTemplate {

    /**
     * Runs the executor with the global-lock flag bound to the current context,
     * restoring any pre-existing flag/config state afterwards.
     *
     * @param executor business logic plus its global lock configuration
     * @return whatever the executor returns
     * @throws Throwable anything thrown during execution
     */
    public Object execute(GlobalLockExecutor executor) throws Throwable {
        final boolean isOutermostCaller = !RootContext.requireGlobalLock();
        if (isOutermostCaller) {
            RootContext.bindGlobalLockFlag();
        }

        // Publish this executor's config so it can be read further down the
        // call chain (e.g. by LockRetryController) via the config holder.
        GlobalLockConfig savedConfig =
            GlobalLockConfigHolder.setAndReturnPrevious(executor.getGlobalLockConfig());

        try {
            return executor.execute();
        } finally {
            // Only the outermost caller unbinds; nested calls must not strip
            // the flag from their enclosing scope.
            if (isOutermostCaller) {
                RootContext.unbindGlobalLockFlag();
            }
            // Restore the previous config (or clear the holder) so outer
            // logic keeps seeing its own configuration.
            if (savedConfig == null) {
                GlobalLockConfigHolder.remove();
            } else {
                GlobalLockConfigHolder.setAndReturnPrevious(savedConfig);
            }
        }
    }
}

View File

@@ -0,0 +1,107 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Calendar;
import java.util.Date;
import io.seata.core.model.BranchType;
import io.seata.core.model.ResourceManager;
import io.seata.core.protocol.transaction.UndoLogDeleteRequest;
import io.seata.rm.datasource.DataSourceManager;
import io.seata.rm.datasource.DataSourceProxy;
import io.seata.rm.datasource.undo.UndoLogManagerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * The type Rm handler at.
 *
 * <p>AT-mode RM handler; its visible responsibility here is handling the
 * server-triggered expired undo-log cleanup request.
 *
 * @author sharajava
 */
public class RMHandlerAT extends AbstractRMHandler {

    private static final Logger LOGGER = LoggerFactory.getLogger(RMHandlerAT.class);

    // Max rows deleted per batch, keeping each delete/commit cycle bounded.
    private static final int LIMIT_ROWS = 3000;

    /**
     * Deletes expired undo_log records for the resource named in the request,
     * in batches of {@link #LIMIT_ROWS}, until a batch comes back short.
     *
     * @param request carries the target resource id and the retention days
     */
    @Override
    public void handle(UndoLogDeleteRequest request) {
        DataSourceManager dataSourceManager = (DataSourceManager)getResourceManager();
        DataSourceProxy dataSourceProxy = dataSourceManager.get(request.getResourceId());
        if (dataSourceProxy == null) {
            LOGGER.warn("Failed to get dataSourceProxy for delete undolog on {}", request.getResourceId());
            return;
        }
        // Records created before this instant are eligible for deletion.
        Date logCreatedSave = getLogCreated(request.getSaveDays());
        Connection conn = null;
        try {
            // Use the plain (un-proxied) connection: cleanup must not itself be intercepted.
            conn = dataSourceProxy.getPlainConnection();
            int deleteRows = 0;
            do {
                try {
                    deleteRows = UndoLogManagerFactory.getUndoLogManager(dataSourceProxy.getDbType())
                            .deleteUndoLogByLogCreated(logCreatedSave, LIMIT_ROWS, conn);
                    if (deleteRows > 0 && !conn.getAutoCommit()) {
                        conn.commit();
                    }
                } catch (SQLException exx) {
                    // NOTE(review): if the delete itself throws, deleteRows still holds the
                    // previous batch's count, so this rollback is best-effort — confirm intended.
                    if (deleteRows > 0 && !conn.getAutoCommit()) {
                        conn.rollback();
                    }
                    throw exx;
                }
                // A full batch suggests more expired rows remain; loop again.
            } while (deleteRows == LIMIT_ROWS);
        } catch (Exception e) {
            LOGGER.error("Failed to delete expired undo_log, error:{}", e.getMessage(), e);
        } finally {
            if (conn != null) {
                try {
                    conn.close();
                } catch (SQLException closeEx) {
                    LOGGER.warn("Failed to close JDBC resource while deleting undo_log ", closeEx);
                }
            }
        }
    }

    // Computes the cutoff timestamp: now minus saveDays (defaulted when non-positive).
    private Date getLogCreated(int saveDays) {
        if (saveDays <= 0) {
            saveDays = UndoLogDeleteRequest.DEFAULT_SAVE_DAYS;
        }
        Calendar calendar = Calendar.getInstance();
        calendar.add(Calendar.DATE, -saveDays);
        return calendar.getTime();
    }

    /**
     * Gets the AT resource manager.
     *
     * @return the resource manager registered for {@link BranchType#AT}
     */
    @Override
    protected ResourceManager getResourceManager() {
        return DefaultResourceManager.get().getResourceManager(BranchType.AT);
    }

    @Override
    public BranchType getBranchType() {
        return BranchType.AT;
    }
}

View File

@@ -0,0 +1,38 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm;
import io.seata.core.model.BranchType;
import io.seata.core.model.ResourceManager;
/**
 * RM handler for the XA branch type.
 *
 * @author sharajava
 */
public class RMHandlerXA extends AbstractRMHandler {

    /** Looks up the resource manager registered for XA branches. */
    @Override
    protected ResourceManager getResourceManager() {
        DefaultResourceManager manager = DefaultResourceManager.get();
        return manager.getResourceManager(BranchType.XA);
    }

    /** This handler always serves {@link BranchType#XA}. */
    @Override
    public BranchType getBranchType() {
        return BranchType.XA;
    }
}

View File

@@ -0,0 +1,380 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource;
import io.seata.core.context.RootContext;
import io.seata.core.model.BranchType;
import io.seata.rm.datasource.sql.SQLVisitorFactory;
import io.seata.rm.datasource.sql.struct.TableMeta;
import io.seata.rm.datasource.sql.struct.TableMetaCacheFactory;
import io.seata.sqlparser.SQLRecognizer;
import io.seata.sqlparser.SQLType;
import java.sql.Array;
import java.sql.Blob;
import java.sql.CallableStatement;
import java.sql.Clob;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.NClob;
import java.sql.PreparedStatement;
import java.sql.SQLClientInfoException;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.sql.SQLXML;
import java.sql.Statement;
import java.sql.Struct;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Executor;
/**
 * The type Abstract connection proxy.
 *
 * <p>Wraps a target JDBC {@link Connection}. Statement-creating methods return
 * Seata proxies ({@link StatementProxy} / {@link PreparedStatementProxy});
 * everything else delegates directly to the target connection.
 *
 * @author sharajava
 */
public abstract class AbstractConnectionProxy implements Connection {

    /**
     * The Data source proxy.
     */
    protected DataSourceProxy dataSourceProxy;

    /**
     * The Target connection.
     */
    protected Connection targetConnection;

    /**
     * Instantiates a new Abstract connection proxy.
     *
     * @param dataSourceProxy  the data source proxy
     * @param targetConnection the target connection
     */
    public AbstractConnectionProxy(DataSourceProxy dataSourceProxy, Connection targetConnection) {
        this.dataSourceProxy = dataSourceProxy;
        this.targetConnection = targetConnection;
    }

    /**
     * Gets data source proxy.
     *
     * @return the data source proxy
     */
    public DataSourceProxy getDataSourceProxy() {
        return dataSourceProxy;
    }

    /**
     * Gets target connection.
     *
     * @return the target connection
     */
    public Connection getTargetConnection() {
        return targetConnection;
    }

    /**
     * Gets db type.
     *
     * @return the db type
     */
    public String getDbType() {
        return dataSourceProxy.getDbType();
    }

    @Override
    public Statement createStatement() throws SQLException {
        Statement targetStatement = getTargetConnection().createStatement();
        return new StatementProxy(this, targetStatement);
    }

    /**
     * Prepares a statement, wrapped in a {@link PreparedStatementProxy}.
     *
     * <p>In AT mode, a single INSERT statement is prepared with the table's
     * primary-key column names as generated-key columns, so the inserted key
     * values can be read back afterwards.
     */
    @Override
    public PreparedStatement prepareStatement(String sql) throws SQLException {
        String dbType = getDbType();
        // support oracle 10.2+
        PreparedStatement targetPreparedStatement = null;
        if (BranchType.AT == RootContext.getBranchType()) {
            List<SQLRecognizer> sqlRecognizers = SQLVisitorFactory.get(sql, dbType);
            // Only the single-INSERT case gets the generated-key treatment.
            if (sqlRecognizers != null && sqlRecognizers.size() == 1) {
                SQLRecognizer sqlRecognizer = sqlRecognizers.get(0);
                if (sqlRecognizer != null && sqlRecognizer.getSQLType() == SQLType.INSERT) {
                    // Resolve the table's primary-key column names from the cached metadata.
                    TableMeta tableMeta = TableMetaCacheFactory.getTableMetaCache(dbType).getTableMeta(getTargetConnection(),
                            sqlRecognizer.getTableName(), getDataSourceProxy().getResourceId());
                    String[] pkNameArray = new String[tableMeta.getPrimaryKeyOnlyName().size()];
                    tableMeta.getPrimaryKeyOnlyName().toArray(pkNameArray);
                    targetPreparedStatement = getTargetConnection().prepareStatement(sql, pkNameArray);
                }
            }
        }
        // Fallback: plain prepare when not AT mode or not a single INSERT.
        if (targetPreparedStatement == null) {
            targetPreparedStatement = getTargetConnection().prepareStatement(sql);
        }
        return new PreparedStatementProxy(this, targetPreparedStatement, sql);
    }

    // prepareCall variants assert we are NOT inside a global transaction before
    // delegating — callable statements are not intercepted by Seata here.
    @Override
    public CallableStatement prepareCall(String sql) throws SQLException {
        RootContext.assertNotInGlobalTransaction();
        return targetConnection.prepareCall(sql);
    }

    @Override
    public String nativeSQL(String sql) throws SQLException {
        return targetConnection.nativeSQL(sql);
    }

    @Override
    public boolean getAutoCommit() throws SQLException {
        return targetConnection.getAutoCommit();
    }

    @Override
    public void close() throws SQLException {
        targetConnection.close();
    }

    @Override
    public boolean isClosed() throws SQLException {
        return targetConnection.isClosed();
    }

    @Override
    public DatabaseMetaData getMetaData() throws SQLException {
        return targetConnection.getMetaData();
    }

    @Override
    public void setReadOnly(boolean readOnly) throws SQLException {
        targetConnection.setReadOnly(readOnly);
    }

    @Override
    public boolean isReadOnly() throws SQLException {
        return targetConnection.isReadOnly();
    }

    @Override
    public void setCatalog(String catalog) throws SQLException {
        targetConnection.setCatalog(catalog);
    }

    @Override
    public String getCatalog() throws SQLException {
        return targetConnection.getCatalog();
    }

    @Override
    public void setTransactionIsolation(int level) throws SQLException {
        targetConnection.setTransactionIsolation(level);
    }

    @Override
    public int getTransactionIsolation() throws SQLException {
        return targetConnection.getTransactionIsolation();
    }

    @Override
    public SQLWarning getWarnings() throws SQLException {
        return targetConnection.getWarnings();
    }

    @Override
    public void clearWarnings() throws SQLException {
        targetConnection.clearWarnings();
    }

    @Override
    public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
        Statement statement = targetConnection.createStatement(resultSetType, resultSetConcurrency);
        return new StatementProxy<Statement>(this, statement);
    }

    @Override
    public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency)
        throws SQLException {
        PreparedStatement preparedStatement = targetConnection.prepareStatement(sql, resultSetType,
            resultSetConcurrency);
        return new PreparedStatementProxy(this, preparedStatement, sql);
    }

    @Override
    public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
        RootContext.assertNotInGlobalTransaction();
        return targetConnection.prepareCall(sql, resultSetType, resultSetConcurrency);
    }

    @Override
    public Map<String, Class<?>> getTypeMap() throws SQLException {
        return targetConnection.getTypeMap();
    }

    @Override
    public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
        targetConnection.setTypeMap(map);
    }

    @Override
    public void setHoldability(int holdability) throws SQLException {
        targetConnection.setHoldability(holdability);
    }

    @Override
    public int getHoldability() throws SQLException {
        return targetConnection.getHoldability();
    }

    @Override
    public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability)
        throws SQLException {
        Statement statement = targetConnection.createStatement(resultSetType, resultSetConcurrency,
            resultSetHoldability);
        return new StatementProxy<Statement>(this, statement);
    }

    @Override
    public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency,
                                              int resultSetHoldability) throws SQLException {
        PreparedStatement preparedStatement = targetConnection.prepareStatement(sql, resultSetType,
            resultSetConcurrency, resultSetHoldability);
        return new PreparedStatementProxy(this, preparedStatement, sql);
    }

    @Override
    public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency,
                                         int resultSetHoldability) throws SQLException {
        RootContext.assertNotInGlobalTransaction();
        return targetConnection.prepareCall(sql, resultSetType, resultSetConcurrency, resultSetHoldability);
    }

    // NOTE(review): the following prepareStatement overloads delegate the
    // generated-keys flags/columns directly without the AT-mode pk handling
    // done in prepareStatement(String) — confirm that is intended.
    @Override
    public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
        PreparedStatement preparedStatement = targetConnection.prepareStatement(sql, autoGeneratedKeys);
        return new PreparedStatementProxy(this, preparedStatement, sql);
    }

    @Override
    public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
        PreparedStatement preparedStatement = targetConnection.prepareStatement(sql, columnIndexes);
        return new PreparedStatementProxy(this, preparedStatement, sql);
    }

    @Override
    public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
        PreparedStatement preparedStatement = targetConnection.prepareStatement(sql, columnNames);
        return new PreparedStatementProxy(this, preparedStatement, sql);
    }

    // --- everything below is plain delegation to the target connection ---

    @Override
    public Clob createClob() throws SQLException {
        return targetConnection.createClob();
    }

    @Override
    public Blob createBlob() throws SQLException {
        return targetConnection.createBlob();
    }

    @Override
    public NClob createNClob() throws SQLException {
        return targetConnection.createNClob();
    }

    @Override
    public SQLXML createSQLXML() throws SQLException {
        return targetConnection.createSQLXML();
    }

    @Override
    public boolean isValid(int timeout) throws SQLException {
        return targetConnection.isValid(timeout);
    }

    @Override
    public void setClientInfo(String name, String value) throws SQLClientInfoException {
        targetConnection.setClientInfo(name, value);
    }

    @Override
    public void setClientInfo(Properties properties) throws SQLClientInfoException {
        targetConnection.setClientInfo(properties);
    }

    @Override
    public String getClientInfo(String name) throws SQLException {
        return targetConnection.getClientInfo(name);
    }

    @Override
    public Properties getClientInfo() throws SQLException {
        return targetConnection.getClientInfo();
    }

    @Override
    public Array createArrayOf(String typeName, Object[] elements) throws SQLException {
        return targetConnection.createArrayOf(typeName, elements);
    }

    @Override
    public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
        return targetConnection.createStruct(typeName, attributes);
    }

    @Override
    public void setSchema(String schema) throws SQLException {
        targetConnection.setSchema(schema);
    }

    @Override
    public String getSchema() throws SQLException {
        return targetConnection.getSchema();
    }

    @Override
    public void abort(Executor executor) throws SQLException {
        targetConnection.abort(executor);
    }

    @Override
    public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException {
        targetConnection.setNetworkTimeout(executor, milliseconds);
    }

    @Override
    public int getNetworkTimeout() throws SQLException {
        return targetConnection.getNetworkTimeout();
    }

    @Override
    public <T> T unwrap(Class<T> iface) throws SQLException {
        return targetConnection.unwrap(iface);
    }

    @Override
    public boolean isWrapperFor(Class<?> iface) throws SQLException {
        return targetConnection.isWrapperFor(iface);
    }
}

View File

@@ -0,0 +1,54 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource;
import io.seata.common.executor.Initialize;
import io.seata.core.model.Resource;
import io.seata.rm.AbstractResourceManager;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
 * Abstract RM that keeps a cache of registered DataSource resources.
 *
 * @author sharajava
 */
public abstract class AbstractDataSourceCacheResourceManager extends AbstractResourceManager implements Initialize {

    /** Registered resources, keyed by resource id; concurrent for multi-threaded registration. */
    protected Map<String, Resource> dataSourceCache = new ConcurrentHashMap<>();

    /**
     * Instantiates a new Data source manager.
     */
    public AbstractDataSourceCacheResourceManager() {
    }

    /** Subclasses must supply their own initialization. */
    @Override
    public abstract void init();

    @Override
    public Map<String, Resource> getManagedResources() {
        return dataSourceCache;
    }

    /** Caches the resource locally, then lets the superclass register it remotely. */
    @Override
    public void registerResource(Resource resource) {
        String resourceId = resource.getResourceId();
        dataSourceCache.put(resourceId, resource);
        super.registerResource(resource);
    }
}

View File

@@ -0,0 +1,94 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource;
import javax.sql.DataSource;
import java.io.PrintWriter;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.util.logging.Logger;
/**
 * The type Abstract data source proxy.
 *
 * <p>Holds the target {@link DataSource} and delegates every standard
 * DataSource method to it; connection-creating behavior is left to subclasses.
 *
 * @author sharajava
 */
public abstract class AbstractDataSourceProxy implements SeataDataSourceProxy {

    /**
     * The Target data source.
     */
    protected DataSource targetDataSource;

    /**
     * Instantiates a new Abstract data source proxy.
     */
    public AbstractDataSourceProxy(){}

    /**
     * Instantiates a new Abstract data source proxy.
     *
     * @param targetDataSource the target data source
     */
    public AbstractDataSourceProxy(DataSource targetDataSource) {
        this.targetDataSource = targetDataSource;
    }

    /**
     * Gets target data source.
     *
     * @return the target data source
     */
    @Override
    public DataSource getTargetDataSource() {
        return targetDataSource;
    }

    // --- plain delegation to the target data source below ---

    @Override
    public <T> T unwrap(Class<T> iface) throws SQLException {
        return targetDataSource.unwrap(iface);
    }

    @Override
    public boolean isWrapperFor(Class<?> iface) throws SQLException {
        return targetDataSource.isWrapperFor(iface);
    }

    @Override
    public PrintWriter getLogWriter() throws SQLException {
        return targetDataSource.getLogWriter();
    }

    @Override
    public void setLogWriter(PrintWriter out) throws SQLException {
        targetDataSource.setLogWriter(out);
    }

    @Override
    public void setLoginTimeout(int seconds) throws SQLException {
        targetDataSource.setLoginTimeout(seconds);
    }

    @Override
    public int getLoginTimeout() throws SQLException {
        return targetDataSource.getLoginTimeout();
    }

    @Override
    public Logger getParentLogger() throws SQLFeatureNotSupportedException {
        return targetDataSource.getParentLogger();
    }
}

View File

@@ -0,0 +1,418 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource;
import io.seata.common.util.CollectionUtils;
import io.seata.sqlparser.struct.Null;
import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.net.URL;
import java.sql.Array;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.Date;
import java.sql.NClob;
import java.sql.ParameterMetaData;
import java.sql.PreparedStatement;
import java.sql.Ref;
import java.sql.ResultSetMetaData;
import java.sql.RowId;
import java.sql.SQLException;
import java.sql.SQLXML;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* The type Abstract prepared statement proxy.
*
* @author sharajava
*/
public abstract class AbstractPreparedStatementProxy extends StatementProxy<PreparedStatement>
implements PreparedStatement {
    /**
     * The Parameters.
     * Maps a 1-based JDBC parameter index to the values bound at that index,
     * appended in binding order (multiple values can accumulate per index —
     * presumably across batch binding rounds; TODO confirm with callers).
     */
    protected Map<Integer, ArrayList<Object>> parameters;

    // (Re)creates an empty parameter holder; invoked from every constructor.
    private void initParameterHolder() {
        this.parameters = new HashMap<>();
    }
    /**
     * Instantiates a new Abstract prepared statement proxy.
     *
     * @param connectionProxy the connection proxy owning this statement
     * @param targetStatement the real prepared statement being wrapped
     * @param targetSQL       the SQL text the target statement was prepared with
     * @throws SQLException the sql exception
     */
    public AbstractPreparedStatementProxy(AbstractConnectionProxy connectionProxy, PreparedStatement targetStatement,
                                          String targetSQL) throws SQLException {
        super(connectionProxy, targetStatement, targetSQL);
        initParameterHolder();
    }

    /**
     * Instantiates a new Abstract prepared statement proxy (no SQL text kept).
     *
     * @param connectionProxy the connection proxy owning this statement
     * @param targetStatement the real prepared statement being wrapped
     * @throws SQLException the sql exception
     */
    public AbstractPreparedStatementProxy(AbstractConnectionProxy connectionProxy, PreparedStatement targetStatement)
        throws SQLException {
        super(connectionProxy, targetStatement);
        initParameterHolder();
    }
    /**
     * Gets params by index.
     *
     * @param index the 1-based JDBC parameter index
     * @return the values bound at that index, or null when nothing was bound
     */
    public List<Object> getParamsByIndex(int index) {
        return parameters.get(index);
    }

    /**
     * Sets param by index.
     *
     * <p>Appends (does not replace) the value to the list kept for the index,
     * creating the list on first use.
     *
     * @param index the 1-based JDBC parameter index
     * @param x     the bound value
     */
    protected void setParamByIndex(int index, Object x) {
        CollectionUtils.computeIfAbsent(parameters, index, e -> new ArrayList<>())
            .add(x);
    }
@Override
public void setNull(int parameterIndex, int sqlType) throws SQLException {
setParamByIndex(parameterIndex, Null.get());
targetStatement.setNull(parameterIndex, sqlType);
}
@Override
public void setBoolean(int parameterIndex, boolean x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setBoolean(parameterIndex, x);
}
@Override
public void setByte(int parameterIndex, byte x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setByte(parameterIndex, x);
}
@Override
public void setShort(int parameterIndex, short x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setShort(parameterIndex, x);
}
@Override
public void setInt(int parameterIndex, int x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setInt(parameterIndex, x);
}
@Override
public void setLong(int parameterIndex, long x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setLong(parameterIndex, x);
}
@Override
public void setFloat(int parameterIndex, float x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setFloat(parameterIndex, x);
}
@Override
public void setDouble(int parameterIndex, double x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setDouble(parameterIndex, x);
}
@Override
public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setBigDecimal(parameterIndex, x);
}
@Override
public void setString(int parameterIndex, String x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setString(parameterIndex, x);
}
@Override
public void setBytes(int parameterIndex, byte[] x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setBytes(parameterIndex, x);
}
@Override
public void setDate(int parameterIndex, Date x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setDate(parameterIndex, x);
}
@Override
public void setTime(int parameterIndex, Time x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setTime(parameterIndex, x);
}
@Override
public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setTimestamp(parameterIndex, x);
}
@Override
public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setAsciiStream(parameterIndex, x, length);
}
@Deprecated
@Override
public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setUnicodeStream(parameterIndex, x, length);
}
@Override
public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setBinaryStream(parameterIndex, x, length);
}
@Override
public void clearParameters() throws SQLException {
initParameterHolder();
targetStatement.clearParameters();
}
@Override
public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setObject(parameterIndex, x, targetSqlType);
}
@Override
public void setObject(int parameterIndex, Object x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setObject(parameterIndex, x);
}
@Override
public void addBatch() throws SQLException {
targetStatement.addBatch();
}
@Override
public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException {
setParamByIndex(parameterIndex, reader);
targetStatement.setCharacterStream(parameterIndex, reader, length);
}
@Override
public void setRef(int parameterIndex, Ref x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setRef(parameterIndex, x);
}
@Override
public void setBlob(int parameterIndex, Blob x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setBlob(parameterIndex, x);
}
@Override
public void setClob(int parameterIndex, Clob x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setClob(parameterIndex, x);
}
@Override
public void setArray(int parameterIndex, Array x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setArray(parameterIndex, x);
}
@Override
public ResultSetMetaData getMetaData() throws SQLException {
return targetStatement.getMetaData();
}
@Override
public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setDate(parameterIndex, x, cal);
}
@Override
public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setTime(parameterIndex, x, cal);
}
@Override
public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setTimestamp(parameterIndex, x, cal);
}
@Override
public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException {
setParamByIndex(parameterIndex, Null.get());
targetStatement.setNull(parameterIndex, sqlType, typeName);
}
@Override
public void setURL(int parameterIndex, URL x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setURL(parameterIndex, x);
}
@Override
public ParameterMetaData getParameterMetaData() throws SQLException {
return targetStatement.getParameterMetaData();
}
@Override
public void setRowId(int parameterIndex, RowId x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setRowId(parameterIndex, x);
}
@Override
public void setNString(int parameterIndex, String value) throws SQLException {
setParamByIndex(parameterIndex, value);
targetStatement.setNString(parameterIndex, value);
}
@Override
public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException {
setParamByIndex(parameterIndex, value);
targetStatement.setNCharacterStream(parameterIndex, value, length);
}
@Override
public void setNClob(int parameterIndex, NClob value) throws SQLException {
setParamByIndex(parameterIndex, value);
targetStatement.setNClob(parameterIndex, value);
}
@Override
public void setClob(int parameterIndex, Reader reader, long length) throws SQLException {
setParamByIndex(parameterIndex, reader);
targetStatement.setClob(parameterIndex, reader, length);
}
@Override
public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException {
setParamByIndex(parameterIndex, inputStream);
targetStatement.setBlob(parameterIndex, inputStream, length);
}
@Override
public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException {
setParamByIndex(parameterIndex, reader);
targetStatement.setNClob(parameterIndex, reader, length);
}
@Override
public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException {
setParamByIndex(parameterIndex, xmlObject);
targetStatement.setSQLXML(parameterIndex, xmlObject);
}
@Override
public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setObject(parameterIndex, x, targetSqlType, scaleOrLength);
}
@Override
public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setAsciiStream(parameterIndex, x, length);
}
@Override
public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setBinaryStream(parameterIndex, x, length);
}
@Override
public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException {
setParamByIndex(parameterIndex, reader);
targetStatement.setCharacterStream(parameterIndex, reader, length);
}
@Override
public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setAsciiStream(parameterIndex, x);
}
@Override
public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException {
setParamByIndex(parameterIndex, x);
targetStatement.setBinaryStream(parameterIndex, x);
}
@Override
public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException {
setParamByIndex(parameterIndex, reader);
targetStatement.setCharacterStream(parameterIndex, reader);
}
@Override
public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException {
setParamByIndex(parameterIndex, value);
targetStatement.setNCharacterStream(parameterIndex, value);
}
@Override
public void setClob(int parameterIndex, Reader reader) throws SQLException {
setParamByIndex(parameterIndex, reader);
targetStatement.setClob(parameterIndex, reader);
}
@Override
public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException {
setParamByIndex(parameterIndex, inputStream);
targetStatement.setBlob(parameterIndex, inputStream);
}
@Override
public void setNClob(int parameterIndex, Reader reader) throws SQLException {
setParamByIndex(parameterIndex, reader);
targetStatement.setNClob(parameterIndex, reader);
}
}

View File

@@ -0,0 +1,294 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource;
import javax.sql.rowset.CachedRowSet;
import javax.sql.rowset.RowSetProvider;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.sql.Statement;
/**
 * The type Abstract statement proxy.
 *
 * <p>Base proxy for a JDBC {@link Statement}: holds the connection proxy, the wrapped target
 * statement and the SQL text being proxied, and forwards every plain {@code Statement}
 * operation to the target. Subclasses add interception around execution.
 *
 * @author sharajava
 *
 * @param <T> the type parameter
 */
public abstract class AbstractStatementProxy<T extends Statement> implements Statement {

    /**
     * The Connection proxy that created this statement proxy.
     */
    protected AbstractConnectionProxy connectionProxy;

    /**
     * The Target statement that all calls are forwarded to.
     */
    protected T targetStatement;

    /**
     * The Target sql being proxied; may be null for plain statements created without SQL.
     */
    protected String targetSQL;

    /**
     * Instantiates a new Abstract statement proxy.
     *
     * @param connectionProxy the connection proxy
     * @param targetStatement the target statement
     * @param targetSQL       the target sql
     * @throws SQLException the sql exception
     */
    public AbstractStatementProxy(AbstractConnectionProxy connectionProxy, T targetStatement, String targetSQL)
        throws SQLException {
        this.connectionProxy = connectionProxy;
        this.targetStatement = targetStatement;
        this.targetSQL = targetSQL;
    }

    /**
     * Instantiates a new Abstract statement proxy.
     *
     * <p>Parameter type widened from {@code ConnectionProxy} to {@code AbstractConnectionProxy}
     * for consistency with the three-arg constructor and the {@link #connectionProxy} field;
     * existing callers passing a {@code ConnectionProxy} remain source-compatible.
     *
     * @param connectionProxy the connection proxy
     * @param targetStatement the target statement
     * @throws SQLException the sql exception
     */
    public AbstractStatementProxy(AbstractConnectionProxy connectionProxy, T targetStatement) throws SQLException {
        this(connectionProxy, targetStatement, null);
    }

    /**
     * Gets connection proxy.
     *
     * @return the connection proxy
     */
    public AbstractConnectionProxy getConnectionProxy() {
        return connectionProxy;
    }

    /**
     * Gets target statement.
     *
     * @return the target statement
     */
    public T getTargetStatement() {
        return targetStatement;
    }

    /**
     * Gets target sql.
     *
     * @return the target sql
     */
    public String getTargetSQL() {
        return targetSQL;
    }

    @Override
    public void close() throws SQLException {
        targetStatement.close();
    }

    @Override
    public int getMaxFieldSize() throws SQLException {
        return targetStatement.getMaxFieldSize();
    }

    @Override
    public void setMaxFieldSize(int max) throws SQLException {
        targetStatement.setMaxFieldSize(max);
    }

    @Override
    public int getMaxRows() throws SQLException {
        return targetStatement.getMaxRows();
    }

    @Override
    public void setMaxRows(int max) throws SQLException {
        targetStatement.setMaxRows(max);
    }

    @Override
    public void setEscapeProcessing(boolean enable) throws SQLException {
        targetStatement.setEscapeProcessing(enable);
    }

    @Override
    public int getQueryTimeout() throws SQLException {
        return targetStatement.getQueryTimeout();
    }

    @Override
    public void setQueryTimeout(int seconds) throws SQLException {
        targetStatement.setQueryTimeout(seconds);
    }

    @Override
    public void cancel() throws SQLException {
        targetStatement.cancel();
    }

    @Override
    public SQLWarning getWarnings() throws SQLException {
        return targetStatement.getWarnings();
    }

    @Override
    public void clearWarnings() throws SQLException {
        targetStatement.clearWarnings();
    }

    @Override
    public void setCursorName(String name) throws SQLException {
        targetStatement.setCursorName(name);
    }

    @Override
    public ResultSet getResultSet() throws SQLException {
        return targetStatement.getResultSet();
    }

    @Override
    public int getUpdateCount() throws SQLException {
        return targetStatement.getUpdateCount();
    }

    @Override
    public boolean getMoreResults() throws SQLException {
        return targetStatement.getMoreResults();
    }

    @Override
    public void setFetchDirection(int direction) throws SQLException {
        targetStatement.setFetchDirection(direction);
    }

    @Override
    public int getFetchDirection() throws SQLException {
        return targetStatement.getFetchDirection();
    }

    @Override
    public void setFetchSize(int rows) throws SQLException {
        targetStatement.setFetchSize(rows);
    }

    @Override
    public int getFetchSize() throws SQLException {
        return targetStatement.getFetchSize();
    }

    @Override
    public int getResultSetConcurrency() throws SQLException {
        return targetStatement.getResultSetConcurrency();
    }

    @Override
    public int getResultSetType() throws SQLException {
        return targetStatement.getResultSetType();
    }

    @Override
    public void addBatch(String sql) throws SQLException {
        targetStatement.addBatch(sql);
    }

    @Override
    public void clearBatch() throws SQLException {
        targetStatement.clearBatch();
        // NOTE(review): clearing the batch also discards the proxied SQL text; this is
        // intentional bookkeeping for batch-mode subclasses — confirm before relying on
        // getTargetSQL() after a clearBatch().
        targetSQL = null;
    }

    @Override
    public int[] executeBatch() throws SQLException {
        return targetStatement.executeBatch();
    }

    @Override
    public Connection getConnection() throws SQLException {
        return targetStatement.getConnection();
    }

    @Override
    public boolean getMoreResults(int current) throws SQLException {
        return targetStatement.getMoreResults(current);
    }

    @Override
    public ResultSet getGeneratedKeys() throws SQLException {
        // Copy the generated keys into a disconnected CachedRowSet so they remain
        // readable independently of the driver's live (one-shot) ResultSet.
        ResultSet rs = targetStatement.getGeneratedKeys();
        CachedRowSet generatedKeysRowSet = RowSetProvider.newFactory().createCachedRowSet();
        generatedKeysRowSet.populate(rs);
        return generatedKeysRowSet;
    }

    @Override
    public int getResultSetHoldability() throws SQLException {
        return targetStatement.getResultSetHoldability();
    }

    @Override
    public boolean isClosed() throws SQLException {
        return targetStatement.isClosed();
    }

    @Override
    public void setPoolable(boolean poolable) throws SQLException {
        targetStatement.setPoolable(poolable);
    }

    @Override
    public boolean isPoolable() throws SQLException {
        return targetStatement.isPoolable();
    }

    @Override
    public void closeOnCompletion() throws SQLException {
        targetStatement.closeOnCompletion();
    }

    @Override
    public boolean isCloseOnCompletion() throws SQLException {
        return targetStatement.isCloseOnCompletion();
    }

    @Override
    public <T> T unwrap(Class<T> iface) throws SQLException {
        return targetStatement.unwrap(iface);
    }

    @Override
    public boolean isWrapperFor(Class<?> iface) throws SQLException {
        return targetStatement.isWrapperFor(iface);
    }
}

View File

@@ -0,0 +1,207 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import com.google.common.collect.Lists;
import io.seata.common.thread.NamedThreadFactory;
import io.seata.config.ConfigurationFactory;
import io.seata.core.model.BranchStatus;
import io.seata.rm.datasource.undo.UndoLogManager;
import io.seata.rm.datasource.undo.UndoLogManagerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static io.seata.core.constants.ConfigurationKeys.CLIENT_ASYNC_COMMIT_BUFFER_LIMIT;
import static io.seata.common.DefaultValues.DEFAULT_CLIENT_ASYNC_COMMIT_BUFFER_LIMIT;
/**
 * The type Async worker.
 *
 * <p>Buffers AT-mode phase-two commit notifications and periodically deletes the
 * corresponding undo-log records in batches, grouped by resource.
 *
 * @author sharajava
 */
public class AsyncWorker {

    private static final Logger LOGGER = LoggerFactory.getLogger(AsyncWorker.class);

    /** Expected number of distinct resources; initial capacity for the grouping map. */
    private static final int DEFAULT_RESOURCE_SIZE = 16;

    /** Max undo-log rows deleted per SQL batch, to keep statements bounded. */
    private static final int UNDOLOG_DELETE_LIMIT_SIZE = 1000;

    private static final int ASYNC_COMMIT_BUFFER_LIMIT = ConfigurationFactory.getInstance().getInt(
        CLIENT_ASYNC_COMMIT_BUFFER_LIMIT, DEFAULT_CLIENT_ASYNC_COMMIT_BUFFER_LIMIT);

    private final DataSourceManager dataSourceManager;

    /** Bounded queue of pending phase-two contexts; back-pressured in addToCommitQueue. */
    private final BlockingQueue<Phase2Context> commitQueue;

    private final ScheduledExecutorService scheduledExecutor;

    public AsyncWorker(DataSourceManager dataSourceManager) {
        this.dataSourceManager = dataSourceManager;
        LOGGER.info("Async Commit Buffer Limit: {}", ASYNC_COMMIT_BUFFER_LIMIT);
        commitQueue = new LinkedBlockingQueue<>(ASYNC_COMMIT_BUFFER_LIMIT);
        ThreadFactory threadFactory = new NamedThreadFactory("AsyncWorker", 2, true);
        scheduledExecutor = new ScheduledThreadPoolExecutor(2, threadFactory);
        scheduledExecutor.scheduleAtFixedRate(this::doBranchCommitSafely, 10, 1000, TimeUnit.MILLISECONDS);
    }

    /**
     * Enqueue a phase-two commit for asynchronous undo-log deletion.
     *
     * @param xid        the global transaction id
     * @param branchId   the branch id
     * @param resourceId the resource id
     * @return always PhaseTwo_Committed — the actual cleanup happens later
     */
    public BranchStatus branchCommit(String xid, long branchId, String resourceId) {
        Phase2Context context = new Phase2Context(xid, branchId, resourceId);
        addToCommitQueue(context);
        return BranchStatus.PhaseTwo_Committed;
    }

    /**
     * try add context to commitQueue directly, if fail(which means the queue is full),
     * then doBranchCommit urgently(so that the queue could be empty again) and retry this process.
     */
    private void addToCommitQueue(Phase2Context context) {
        if (commitQueue.offer(context)) {
            return;
        }
        CompletableFuture.runAsync(this::doBranchCommitSafely, scheduledExecutor)
            .thenRun(() -> addToCommitQueue(context));
    }

    // Scheduled entry point: swallow and log everything so the periodic task is never cancelled.
    void doBranchCommitSafely() {
        try {
            doBranchCommit();
        } catch (Throwable e) {
            LOGGER.error("Exception occur when doing branch commit", e);
        }
    }

    private void doBranchCommit() {
        if (commitQueue.isEmpty()) {
            return;
        }
        // transfer all context currently received to this list
        List<Phase2Context> allContexts = new LinkedList<>();
        commitQueue.drainTo(allContexts);
        // group context by their resourceId
        Map<String, List<Phase2Context>> groupedContexts = groupedByResourceId(allContexts);
        groupedContexts.forEach(this::dealWithGroupedContexts);
    }

    Map<String, List<Phase2Context>> groupedByResourceId(List<Phase2Context> contexts) {
        Map<String, List<Phase2Context>> groupedContexts = new HashMap<>(DEFAULT_RESOURCE_SIZE);
        contexts.forEach(context -> {
            List<Phase2Context> group = groupedContexts.computeIfAbsent(context.resourceId, key -> new LinkedList<>());
            group.add(context);
        });
        return groupedContexts;
    }

    private void dealWithGroupedContexts(String resourceId, List<Phase2Context> contexts) {
        DataSourceProxy dataSourceProxy = dataSourceManager.get(resourceId);
        if (dataSourceProxy == null) {
            LOGGER.warn("Failed to find resource for {}", resourceId);
            return;
        }
        UndoLogManager undoLogManager = UndoLogManagerFactory.getUndoLogManager(dataSourceProxy.getDbType());
        // split contexts into several lists, with each list contain no more element than limit size
        List<List<Phase2Context>> splitByLimit = Lists.partition(contexts, UNDOLOG_DELETE_LIMIT_SIZE);
        // BUGFIX: each partition must use its own connection. Previously a single connection
        // was obtained here and closed inside deleteUndoLog's finally block, so with more
        // than UNDOLOG_DELETE_LIMIT_SIZE contexts every partition after the first ran on an
        // already-closed connection.
        splitByLimit.forEach(partition -> deleteUndoLog(dataSourceProxy, undoLogManager, partition));
    }

    /**
     * Delete the undo-log rows for one partition of contexts, acquiring (and always
     * releasing) a dedicated plain connection for the batch.
     */
    private void deleteUndoLog(DataSourceProxy dataSourceProxy, UndoLogManager undoLogManager,
                               List<Phase2Context> contexts) {
        Set<String> xids = new LinkedHashSet<>(contexts.size());
        Set<Long> branchIds = new LinkedHashSet<>(contexts.size());
        contexts.forEach(context -> {
            xids.add(context.xid);
            branchIds.add(context.branchId);
        });
        Connection conn;
        try {
            conn = dataSourceProxy.getPlainConnection();
        } catch (SQLException sqle) {
            LOGGER.error("Failed to get connection for async committing on {}", dataSourceProxy.getResourceId(), sqle);
            return;
        }
        try {
            undoLogManager.batchDeleteUndoLog(xids, branchIds, conn);
            if (!conn.getAutoCommit()) {
                conn.commit();
            }
        } catch (SQLException e) {
            LOGGER.error("Failed to batch delete undo log", e);
            try {
                conn.rollback();
            } catch (SQLException rollbackEx) {
                LOGGER.error("Failed to rollback JDBC resource after deleting undo log failed", rollbackEx);
            }
        } finally {
            try {
                conn.close();
            } catch (SQLException closeEx) {
                LOGGER.error("Failed to close JDBC resource after deleting undo log", closeEx);
            }
        }
    }

    static class Phase2Context {

        /**
         * AT Phase 2 context
         *
         * @param xid        the xid
         * @param branchId   the branch id
         * @param resourceId the resource id
         */
        public Phase2Context(String xid, long branchId, String resourceId) {
            this.xid = xid;
            this.branchId = branchId;
            this.resourceId = resourceId;
        }

        /**
         * The Xid.
         */
        String xid;

        /**
         * The Branch id.
         */
        long branchId;

        /**
         * The Resource id.
         */
        String resourceId;
    }
}

View File

@@ -0,0 +1,266 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource;
import io.seata.common.util.CollectionUtils;
import io.seata.common.util.StringUtils;
import io.seata.rm.datasource.undo.KeywordChecker;
import io.seata.rm.datasource.undo.KeywordCheckerFactory;
import io.seata.sqlparser.util.JdbcConstants;
import java.util.ArrayList;
import java.util.List;
/**
 * column utils
 *
 * <p>Static helpers for adding and removing identifier-quoting ("escape") characters
 * around column names, per database dialect.
 *
 * @author jsbxyyx
 */
public final class ColumnUtils {

    private static final String DOT = ".";

    // Utility class: prevent instantiation.
    private ColumnUtils() {
    }

    /**
     * The escape
     */
    public enum Escape {
        /**
         * standard escape
         */
        STANDARD('"'),
        /**
         * mysql series escape
         */
        MYSQL('`');
        /**
         * The Value.
         */
        public final char value;

        Escape(char value) {
            this.value = value;
        }
    }

    /**
     * del escape by db type
     *
     * @param cols   the cols
     * @param dbType the db type
     * @return list
     */
    public static List<String> delEscape(List<String> cols, String dbType) {
        // sql standard
        // https://db.apache.org/derby/docs/10.1/ref/crefsqlj1003454.html
        // https://docs.oracle.com/javadb/10.8.3.0/ref/crefsqlj1003454.html
        // https://www.informit.com/articles/article.aspx?p=2036581&seqNum=2
        List<String> newCols = delEscape(cols, Escape.STANDARD);
        if (isMysqlSeries(dbType)) {
            newCols = delEscape(newCols, Escape.MYSQL);
        }
        return newCols;
    }

    /**
     * del escape
     *
     * @param cols   the cols
     * @param escape the escape
     * @return delete the column list element left and right escape.
     */
    public static List<String> delEscape(List<String> cols, Escape escape) {
        if (CollectionUtils.isEmpty(cols)) {
            return cols;
        }
        List<String> newCols = new ArrayList<>(cols.size());
        for (int i = 0, len = cols.size(); i < len; i++) {
            String col = cols.get(i);
            col = delEscape(col, escape);
            newCols.add(col);
        }
        return newCols;
    }

    /**
     * del escape by db type
     *
     * @param colName the column name
     * @param dbType  the db type
     * @return string string
     */
    public static String delEscape(String colName, String dbType) {
        String newColName = delEscape(colName, Escape.STANDARD);
        if (isMysqlSeries(dbType)) {
            newColName = delEscape(newColName, Escape.MYSQL);
        }
        return newColName;
    }

    /**
     * del escape by escape
     *
     * @param colName the column name
     * @param escape  the escape
     * @return string string
     */
    public static String delEscape(String colName, Escape escape) {
        if (colName == null || colName.isEmpty()) {
            return colName;
        }
        if (colName.charAt(0) == escape.value && colName.charAt(colName.length() - 1) == escape.value) {
            // like "scheme"."id" `scheme`.`id`
            String str = escape.value + DOT + escape.value;
            int index = colName.indexOf(str);
            if (index > -1) {
                return colName.substring(1, index) + DOT + colName.substring(index + str.length(), colName.length() - 1);
            }
            return colName.substring(1, colName.length() - 1);
        } else {
            // like "scheme".id `scheme`.id
            String str = escape.value + DOT;
            int index = colName.indexOf(str);
            if (index > -1 && colName.charAt(0) == escape.value) {
                return colName.substring(1, index) + DOT + colName.substring(index + str.length());
            }
            // like scheme."id" scheme.`id`
            str = DOT + escape.value;
            index = colName.indexOf(str);
            if (index > -1 && colName.charAt(colName.length() - 1) == escape.value) {
                return colName.substring(0, index) + DOT + colName.substring(index + str.length(), colName.length() - 1);
            }
        }
        return colName;
    }

    /**
     * if necessary, add escape by db type
     * <pre>
     *   mysql:
     *     only deal with keyword.
     *   postgresql:
     *     only deal with keyword, contains uppercase character.
     *   oracle:
     *     only deal with keyword, not full uppercase character.
     * </pre>
     *
     * @param cols   the column name list
     * @param dbType the db type
     * @return list list
     */
    public static List<String> addEscape(List<String> cols, String dbType) {
        if (CollectionUtils.isEmpty(cols)) {
            return cols;
        }
        List<String> newCols = new ArrayList<>(cols.size());
        for (int i = 0, len = cols.size(); i < len; i++) {
            String col = cols.get(i);
            col = addEscape(col, dbType);
            newCols.add(col);
        }
        return newCols;
    }

    /**
     * if necessary, add escape by db type
     *
     * @param colName the column name
     * @param dbType  the db type
     * @return the colName left and right add escape
     */
    public static String addEscape(String colName, String dbType) {
        if (isMysqlSeries(dbType)) {
            return addEscape(colName, dbType, ColumnUtils.Escape.MYSQL);
        }
        return addEscape(colName, dbType, ColumnUtils.Escape.STANDARD);
    }

    /**
     * if necessary, add escape
     *
     * @param colName the column name
     * @param dbType  the db type, used to look up the dialect's keyword checker
     * @param escape  the escape
     * @return the column name, quoted when the dialect's keyword checker requires it
     */
    private static String addEscape(String colName, String dbType, Escape escape) {
        if (colName == null || colName.isEmpty()) {
            return colName;
        }
        // already fully quoted: return unchanged
        if (colName.charAt(0) == escape.value && colName.charAt(colName.length() - 1) == escape.value) {
            return colName;
        }
        KeywordChecker keywordChecker = KeywordCheckerFactory.getKeywordChecker(dbType);
        if (keywordChecker != null) {
            boolean check = keywordChecker.checkEscape(colName);
            if (!check) {
                return colName;
            }
        }
        if (colName.contains(DOT)) {
            // like "scheme".id `scheme`.id -> quote the id part
            String str = escape.value + DOT;
            int dotIndex = colName.indexOf(str);
            if (dotIndex > -1) {
                return new StringBuilder()
                    .append(colName.substring(0, dotIndex + str.length()))
                    .append(escape.value)
                    .append(colName.substring(dotIndex + str.length()))
                    .append(escape.value).toString();
            }
            // like scheme."id" scheme.`id` -> quote the scheme part
            str = DOT + escape.value;
            dotIndex = colName.indexOf(str);
            if (dotIndex > -1) {
                return new StringBuilder()
                    .append(escape.value)
                    .append(colName.substring(0, dotIndex))
                    .append(escape.value)
                    .append(colName.substring(dotIndex))
                    .toString();
            }
            // like scheme.id -> quote both parts
            str = DOT;
            dotIndex = colName.indexOf(str);
            if (dotIndex > -1) {
                return new StringBuilder()
                    .append(escape.value)
                    .append(colName.substring(0, dotIndex))
                    .append(escape.value)
                    .append(DOT)
                    .append(escape.value)
                    .append(colName.substring(dotIndex + str.length()))
                    .append(escape.value).toString();
            }
        }
        // simple name: wrap in a single pair of escape characters
        char[] buf = new char[colName.length() + 2];
        buf[0] = escape.value;
        buf[buf.length - 1] = escape.value;
        colName.getChars(0, colName.length(), buf, 1);
        // intern() kept from original behavior: canonicalizes repeated escaped names
        return new String(buf).intern();
    }

    // H2 and MariaDB share MySQL's backtick quoting convention.
    private static boolean isMysqlSeries(String dbType) {
        return StringUtils.equalsIgnoreCase(dbType, JdbcConstants.MYSQL) ||
            StringUtils.equalsIgnoreCase(dbType, JdbcConstants.H2) ||
            StringUtils.equalsIgnoreCase(dbType, JdbcConstants.MARIADB);
    }
}

View File

@@ -0,0 +1,345 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource;
import java.sql.SQLException;
import java.sql.Savepoint;
import java.util.Set;
import java.util.Map;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import io.seata.common.exception.ShouldNeverHappenException;
import io.seata.common.util.CollectionUtils;
import io.seata.rm.datasource.undo.SQLUndoLog;
/**
 * The type Connection context.
 *
 * <p>Per-connection bookkeeping for a branch transaction: the bound XID/branch id, and
 * the lock keys and undo items collected so far, bucketed by JDBC savepoint so that a
 * savepoint rollback/release can discard or merge exactly the work done after it.
 *
 * @author sharajava
 */
public class ConnectionContext {
    // Sentinel savepoint that keys the buffers for work done before any user savepoint is
    // set; becomes "current" again once all real savepoints are removed/released.
    private static final Savepoint DEFAULT_SAVEPOINT = new Savepoint() {
        @Override
        public int getSavepointId() throws SQLException {
            return 0;
        }
        @Override
        public String getSavepointName() throws SQLException {
            return "DEFAULT_SEATA_SAVEPOINT";
        }
    };
    // XID of the global transaction this connection is bound to; null when not in one.
    private String xid;
    // Branch id once registered; null before registration (see isBranchRegistered()).
    private Long branchId;
    private boolean isGlobalLockRequire;
    // The savepoint bucket that currently receives new lock keys / undo items.
    private Savepoint currentSavepoint = DEFAULT_SAVEPOINT;
    private boolean autoCommitChanged;
    /**
     * the lock keys buffer, bucketed by savepoint (insertion-ordered)
     */
    private final Map<Savepoint, Set<String>> lockKeysBuffer = new LinkedHashMap<>();
    /**
     * the undo items buffer, bucketed by savepoint (insertion-ordered)
     */
    private final Map<Savepoint, List<SQLUndoLog>> sqlUndoItemsBuffer = new LinkedHashMap<>();
    // Stack-like list of active user savepoints, oldest first.
    private final List<Savepoint> savepoints = new ArrayList<>(8);
    /**
     * whether requires global lock in this connection
     *
     * @return true if the current operation on this connection must check the global lock
     */
    boolean isGlobalLockRequire() {
        return isGlobalLockRequire;
    }
    /**
     * set whether requires global lock in this connection
     *
     * @param isGlobalLockRequire true to require a global lock check for the current operation
     */
    void setGlobalLockRequire(boolean isGlobalLockRequire) {
        this.isGlobalLockRequire = isGlobalLockRequire;
    }
    /**
     * Append lock key.
     *
     * <p>Buckets the key under the current savepoint; the Set deduplicates repeats.
     *
     * @param lockKey the lock key
     */
    void appendLockKey(String lockKey) {
        lockKeysBuffer.computeIfAbsent(currentSavepoint, k -> new HashSet<>()).add(lockKey);
    }
    /**
     * Append undo item.
     *
     * <p>Buckets the undo log entry under the current savepoint, preserving append order.
     *
     * @param sqlUndoLog the sql undo log
     */
    void appendUndoItem(SQLUndoLog sqlUndoLog) {
        sqlUndoItemsBuffer.computeIfAbsent(currentSavepoint, k -> new ArrayList<>()).add(sqlUndoLog);
    }
    /**
     * Append savepoint
     *
     * <p>Pushes the savepoint and makes it the bucket for subsequently appended
     * lock keys and undo items.
     *
     * @param savepoint the savepoint
     */
    void appendSavepoint(Savepoint savepoint) {
        savepoints.add(savepoint);
        this.currentSavepoint = savepoint;
    }
public void removeSavepoint(Savepoint savepoint) {
List<Savepoint> afterSavepoints = getAfterSavepoints(savepoint);
if (null == savepoint) {
sqlUndoItemsBuffer.clear();
lockKeysBuffer.clear();
} else {
for (Savepoint sp : afterSavepoints) {
sqlUndoItemsBuffer.remove(sp);
lockKeysBuffer.remove(sp);
}
}
savepoints.removeAll(afterSavepoints);
currentSavepoint = savepoints.size() == 0 ? DEFAULT_SAVEPOINT : savepoints.get(savepoints.size() - 1);
}
public void releaseSavepoint(Savepoint savepoint) {
List<Savepoint> afterSavepoints = getAfterSavepoints(savepoint);
savepoints.removeAll(afterSavepoints);
currentSavepoint = savepoints.size() == 0 ? DEFAULT_SAVEPOINT : savepoints.get(savepoints.size() - 1);
// move the undo items & lock keys to current savepoint
for (Savepoint sp : afterSavepoints) {
List<SQLUndoLog> savepointSQLUndoLogs = sqlUndoItemsBuffer.remove(sp);
if (CollectionUtils.isNotEmpty(savepointSQLUndoLogs)) {
sqlUndoItemsBuffer.computeIfAbsent(currentSavepoint, k -> new ArrayList<>(savepointSQLUndoLogs.size()))
.addAll(savepointSQLUndoLogs);
}
Set<String> savepointLockKeys = lockKeysBuffer.remove(sp);
if (CollectionUtils.isNotEmpty(savepointLockKeys)) {
lockKeysBuffer.computeIfAbsent(currentSavepoint, k -> new HashSet<>())
.addAll(savepointLockKeys);
}
}
}
/**
* In global transaction boolean.
*
* @return the boolean
*/
public boolean inGlobalTransaction() {
return xid != null;
}
/**
* Is branch registered boolean.
*
* @return the boolean
*/
public boolean isBranchRegistered() {
return branchId != null;
}
/**
* Bind.
*
* @param xid the xid
*/
void bind(String xid) {
if (xid == null) {
throw new IllegalArgumentException("xid should not be null");
}
if (!inGlobalTransaction()) {
setXid(xid);
} else {
if (!this.xid.equals(xid)) {
throw new ShouldNeverHappenException();
}
}
}
/**
* Has undo log boolean.
*
* @return the boolean
*/
public boolean hasUndoLog() {
return !sqlUndoItemsBuffer.isEmpty();
}
/**
* Gets lock keys buffer.
*
* @return the lock keys buffer
*/
public boolean hasLockKey() {
return !lockKeysBuffer.isEmpty();
}
/**
* Gets xid.
*
* @return the xid
*/
public String getXid() {
return xid;
}
/**
* Sets xid.
*
* @param xid the xid
*/
void setXid(String xid) {
this.xid = xid;
}
/**
* Gets branch id.
*
* @return the branch id
*/
public Long getBranchId() {
return branchId;
}
/**
* Sets branch id.
*
* @param branchId the branch id
*/
void setBranchId(Long branchId) {
this.branchId = branchId;
}
/**
* is seata change targetConnection autoCommit
*
* @return the boolean
*/
public boolean isAutoCommitChanged() {
return this.autoCommitChanged;
}
/**
* set seata change targetConnection autoCommit record
*
* @param autoCommitChanged the boolean
*/
public void setAutoCommitChanged(boolean autoCommitChanged) {
this.autoCommitChanged = autoCommitChanged;
}
/**
* Reset.
*/
public void reset() {
this.reset(null);
}
/**
* Reset.
*
* @param xid the xid
*/
void reset(String xid) {
this.xid = xid;
branchId = null;
this.isGlobalLockRequire = false;
savepoints.clear();
lockKeysBuffer.clear();
sqlUndoItemsBuffer.clear();
this.autoCommitChanged = false;
}
/**
* Build lock keys string.
*
* @return the string
*/
public String buildLockKeys() {
if (lockKeysBuffer.isEmpty()) {
return null;
}
Set<String> lockKeysBufferSet = new HashSet<>();
for (Set<String> lockKeys : lockKeysBuffer.values()) {
lockKeysBufferSet.addAll(lockKeys);
}
if (lockKeysBufferSet.isEmpty()) {
return null;
}
StringBuilder appender = new StringBuilder();
Iterator<String> iterable = lockKeysBufferSet.iterator();
while (iterable.hasNext()) {
appender.append(iterable.next());
if (iterable.hasNext()) {
appender.append(";");
}
}
return appender.toString();
}
/**
* Gets undo items.
*
* @return the undo items
*/
public List<SQLUndoLog> getUndoItems() {
List<SQLUndoLog> undoItems = new ArrayList<>();
for (List<SQLUndoLog> items : sqlUndoItemsBuffer.values()) {
undoItems.addAll(items);
}
return undoItems;
}
/**
* Get the savepoints after target savepoint(include the param savepoint)
* @param savepoint the target savepoint
* @return after savepoints
*/
private List<Savepoint> getAfterSavepoints(Savepoint savepoint) {
if (null == savepoint) {
return new ArrayList<>(savepoints);
}
return new ArrayList<>(savepoints.subList(savepoints.indexOf(savepoint), savepoints.size()));
}
@Override
public String toString() {
return "ConnectionContext [xid=" + xid + ", branchId=" + branchId + ", lockKeysBuffer=" + lockKeysBuffer
+ ", sqlUndoItemsBuffer=" + sqlUndoItemsBuffer + "]";
}
}

View File

@@ -0,0 +1,363 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Savepoint;
import java.util.concurrent.Callable;
import io.seata.common.util.StringUtils;
import io.seata.config.ConfigurationFactory;
import io.seata.core.constants.ConfigurationKeys;
import io.seata.core.exception.TransactionException;
import io.seata.core.exception.TransactionExceptionCode;
import io.seata.core.model.BranchStatus;
import io.seata.core.model.BranchType;
import io.seata.rm.DefaultResourceManager;
import io.seata.rm.datasource.exec.LockConflictException;
import io.seata.rm.datasource.exec.LockRetryController;
import io.seata.rm.datasource.undo.SQLUndoLog;
import io.seata.rm.datasource.undo.UndoLogManagerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static io.seata.common.DefaultValues.DEFAULT_CLIENT_LOCK_RETRY_POLICY_BRANCH_ROLLBACK_ON_CONFLICT;
import static io.seata.common.DefaultValues.DEFAULT_CLIENT_REPORT_RETRY_COUNT;
import static io.seata.common.DefaultValues.DEFAULT_CLIENT_REPORT_SUCCESS_ENABLE;
/**
 * The type Connection proxy. Wraps the target JDBC connection and hooks the
 * commit/rollback/savepoint lifecycle into the Seata AT protocol: branch
 * registration, undo-log flushing, lock checking and phase-one status reporting.
 *
 * @author sharajava
 */
public class ConnectionProxy extends AbstractConnectionProxy {

    private static final Logger LOGGER = LoggerFactory.getLogger(ConnectionProxy.class);

    // Per-connection transactional state: xid/branch binding, undo-log and lock-key buffers.
    private ConnectionContext context = new ConnectionContext();

    // How many times branchReport is retried before failing with an SQLException.
    private static final int REPORT_RETRY_COUNT = ConfigurationFactory.getInstance().getInt(
        ConfigurationKeys.CLIENT_REPORT_RETRY_COUNT, DEFAULT_CLIENT_REPORT_RETRY_COUNT);

    // Whether phase-one SUCCESS is reported to the TC (failure is always reported).
    public static final boolean IS_REPORT_SUCCESS_ENABLE = ConfigurationFactory.getInstance().getBoolean(
        ConfigurationKeys.CLIENT_REPORT_SUCCESS_ENABLE, DEFAULT_CLIENT_REPORT_SUCCESS_ENABLE);

    private final static LockRetryPolicy LOCK_RETRY_POLICY = new LockRetryPolicy();

    /**
     * Instantiates a new Connection proxy.
     *
     * @param dataSourceProxy  the data source proxy
     * @param targetConnection the target connection
     */
    public ConnectionProxy(DataSourceProxy dataSourceProxy, Connection targetConnection) {
        super(dataSourceProxy, targetConnection);
    }

    /**
     * Gets context.
     *
     * @return the context
     */
    public ConnectionContext getContext() {
        return context;
    }

    /**
     * Bind this connection to a global transaction.
     *
     * @param xid the xid
     */
    public void bind(String xid) {
        context.bind(xid);
    }

    /**
     * set global lock requires flag
     *
     * @param isLock whether to lock
     */
    public void setGlobalLockRequire(boolean isLock) {
        context.setGlobalLockRequire(isLock);
    }

    /**
     * get global lock requires flag
     *
     * @return the boolean
     */
    public boolean isGlobalLockRequire() {
        return context.isGlobalLockRequire();
    }

    /**
     * Check lock. Throws LockConflictException when any of the given keys is
     * currently held by another global transaction.
     *
     * @param lockKeys the lockKeys
     * @throws SQLException the sql exception
     */
    public void checkLock(String lockKeys) throws SQLException {
        if (StringUtils.isBlank(lockKeys)) {
            return;
        }
        // Just check lock without requiring lock by now.
        try {
            boolean lockable = DefaultResourceManager.get().lockQuery(BranchType.AT,
                getDataSourceProxy().getResourceId(), context.getXid(), lockKeys);
            if (!lockable) {
                throw new LockConflictException();
            }
        } catch (TransactionException e) {
            recognizeLockKeyConflictException(e, lockKeys);
        }
    }

    /**
     * Lock query.
     *
     * @param lockKeys the lock keys
     * @return the boolean
     * @throws SQLException the sql exception
     */
    public boolean lockQuery(String lockKeys) throws SQLException {
        // Just check lock without requiring lock by now.
        boolean result = false;
        try {
            result = DefaultResourceManager.get().lockQuery(BranchType.AT, getDataSourceProxy().getResourceId(),
                context.getXid(), lockKeys);
        } catch (TransactionException e) {
            recognizeLockKeyConflictException(e, lockKeys);
        }
        return result;
    }

    private void recognizeLockKeyConflictException(TransactionException te) throws SQLException {
        recognizeLockKeyConflictException(te, null);
    }

    // Translate a TC-side TransactionException: a LockKeyConflict becomes a
    // retryable LockConflictException; anything else is wrapped as SQLException.
    private void recognizeLockKeyConflictException(TransactionException te, String lockKeys) throws SQLException {
        if (te.getCode() == TransactionExceptionCode.LockKeyConflict) {
            StringBuilder reasonBuilder = new StringBuilder("get global lock fail, xid:");
            reasonBuilder.append(context.getXid());
            if (StringUtils.isNotBlank(lockKeys)) {
                reasonBuilder.append(", lockKeys:").append(lockKeys);
            }
            throw new LockConflictException(reasonBuilder.toString());
        } else {
            throw new SQLException(te);
        }
    }

    /**
     * append sqlUndoLog
     *
     * @param sqlUndoLog the sql undo log
     */
    public void appendUndoLog(SQLUndoLog sqlUndoLog) {
        context.appendUndoItem(sqlUndoLog);
    }

    /**
     * append lockKey
     *
     * @param lockKey the lock key
     */
    public void appendLockKey(String lockKey) {
        context.appendLockKey(lockKey);
    }

    @Override
    public void commit() throws SQLException {
        try {
            // Run the commit under the configured lock-retry policy; on lock
            // conflict it may be retried transparently.
            LOCK_RETRY_POLICY.execute(() -> {
                doCommit();
                return null;
            });
        } catch (SQLException e) {
            // Only roll back here when running in manual-commit mode that the
            // application itself established (not a Seata-flipped autoCommit).
            if (targetConnection != null && !getAutoCommit() && !getContext().isAutoCommitChanged()) {
                rollback();
            }
            throw e;
        } catch (Exception e) {
            throw new SQLException(e);
        }
    }

    @Override
    public Savepoint setSavepoint() throws SQLException {
        Savepoint savepoint = targetConnection.setSavepoint();
        context.appendSavepoint(savepoint);
        return savepoint;
    }

    @Override
    public Savepoint setSavepoint(String name) throws SQLException {
        Savepoint savepoint = targetConnection.setSavepoint(name);
        context.appendSavepoint(savepoint);
        return savepoint;
    }

    @Override
    public void rollback(Savepoint savepoint) throws SQLException {
        targetConnection.rollback(savepoint);
        // Discard the undo logs / lock keys buffered after this savepoint.
        context.removeSavepoint(savepoint);
    }

    @Override
    public void releaseSavepoint(Savepoint savepoint) throws SQLException {
        targetConnection.releaseSavepoint(savepoint);
        // Fold the savepoint's buffered data back into the current savepoint.
        context.releaseSavepoint(savepoint);
    }

    // Dispatch the commit depending on the transactional mode of this connection.
    private void doCommit() throws SQLException {
        if (context.inGlobalTransaction()) {
            processGlobalTransactionCommit();
        } else if (context.isGlobalLockRequire()) {
            processLocalCommitWithGlobalLocks();
        } else {
            targetConnection.commit();
        }
    }

    // Local transaction with @GlobalLock: verify no global lock conflict, then commit locally.
    private void processLocalCommitWithGlobalLocks() throws SQLException {
        checkLock(context.buildLockKeys());
        try {
            targetConnection.commit();
        } catch (Throwable ex) {
            throw new SQLException(ex);
        }
        context.reset();
    }

    // AT branch commit (phase one): register the branch (acquiring global locks),
    // flush undo logs in the same local transaction, commit, then report status.
    private void processGlobalTransactionCommit() throws SQLException {
        try {
            register();
        } catch (TransactionException e) {
            recognizeLockKeyConflictException(e, context.buildLockKeys());
        }
        try {
            UndoLogManagerFactory.getUndoLogManager(this.getDbType()).flushUndoLogs(this);
            targetConnection.commit();
        } catch (Throwable ex) {
            LOGGER.error("process connectionProxy commit error: {}", ex.getMessage(), ex);
            // Phase-one failure must always be reported to the TC.
            report(false);
            throw new SQLException(ex);
        }
        if (IS_REPORT_SUCCESS_ENABLE) {
            report(true);
        }
        context.reset();
    }

    // Register the branch with the TC; a no-op when nothing was written
    // (no undo log or no lock keys collected).
    private void register() throws TransactionException {
        if (!context.hasUndoLog() || !context.hasLockKey()) {
            return;
        }
        Long branchId = DefaultResourceManager.get().branchRegister(BranchType.AT, getDataSourceProxy().getResourceId(),
            null, context.getXid(), null, context.buildLockKeys());
        context.setBranchId(branchId);
    }

    @Override
    public void rollback() throws SQLException {
        targetConnection.rollback();
        // Only report failure if the branch was actually registered with the TC.
        if (context.inGlobalTransaction() && context.isBranchRegistered()) {
            report(false);
        }
        context.reset();
    }

    /**
     * change connection autoCommit to false by seata
     *
     * @throws SQLException the sql exception
     */
    public void changeAutoCommit() throws SQLException {
        getContext().setAutoCommitChanged(true);
        setAutoCommit(false);
    }

    @Override
    public void setAutoCommit(boolean autoCommit) throws SQLException {
        if ((context.inGlobalTransaction() || context.isGlobalLockRequire()) && autoCommit && !getAutoCommit()) {
            // change autocommit from false to true, we should commit() first according to JDBC spec.
            doCommit();
        }
        targetConnection.setAutoCommit(autoCommit);
    }

    // Report phase-one status to the TC, retrying up to REPORT_RETRY_COUNT times.
    private void report(boolean commitDone) throws SQLException {
        if (context.getBranchId() == null) {
            return;
        }
        int retry = REPORT_RETRY_COUNT;
        while (retry > 0) {
            try {
                DefaultResourceManager.get().branchReport(BranchType.AT, context.getXid(), context.getBranchId(),
                    commitDone ? BranchStatus.PhaseOne_Done : BranchStatus.PhaseOne_Failed, null);
                return;
            } catch (Throwable ex) {
                LOGGER.error("Failed to report [" + context.getBranchId() + "/" + context.getXid() + "] commit done ["
                    + commitDone + "] Retry Countdown: " + retry);
                retry--;
                if (retry == 0) {
                    throw new SQLException("Failed to report branch status " + commitDone, ex);
                }
            }
        }
    }

    /**
     * Strategy for handling global lock conflicts during commit: either fail fast
     * (letting the caller roll the branch back) or block and retry in place.
     */
    public static class LockRetryPolicy {
        protected static final boolean LOCK_RETRY_POLICY_BRANCH_ROLLBACK_ON_CONFLICT = ConfigurationFactory
            .getInstance().getBoolean(ConfigurationKeys.CLIENT_LOCK_RETRY_POLICY_BRANCH_ROLLBACK_ON_CONFLICT, DEFAULT_CLIENT_LOCK_RETRY_POLICY_BRANCH_ROLLBACK_ON_CONFLICT);

        public <T> T execute(Callable<T> callable) throws Exception {
            // rollback-on-conflict: propagate the conflict immediately;
            // otherwise retry the callable until the lock is obtained.
            if (LOCK_RETRY_POLICY_BRANCH_ROLLBACK_ON_CONFLICT) {
                return callable.call();
            } else {
                return doRetryOnLockConflict(callable);
            }
        }

        protected <T> T doRetryOnLockConflict(Callable<T> callable) throws Exception {
            LockRetryController lockRetryController = new LockRetryController();
            while (true) {
                try {
                    return callable.call();
                } catch (LockConflictException lockConflict) {
                    onException(lockConflict);
                    // sleep() throws when the retry budget is exhausted, ending the loop.
                    lockRetryController.sleep(lockConflict);
                } catch (Exception e) {
                    onException(e);
                    throw e;
                }
            }
        }

        /**
         * Callback on exception in doLockRetryOnConflict.
         *
         * @param e invocation exception
         * @throws Exception error
         */
        protected void onException(Exception e) throws Exception {
        }
    }
}

View File

@@ -0,0 +1,220 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource;
import io.seata.common.util.CollectionUtils;
import io.seata.common.util.StringUtils;
import io.seata.core.model.Result;
import io.seata.rm.datasource.sql.struct.Field;
import io.seata.rm.datasource.sql.struct.Row;
import io.seata.rm.datasource.sql.struct.TableMeta;
import io.seata.rm.datasource.sql.struct.TableRecords;
import io.seata.rm.datasource.undo.AbstractUndoLogManager;
import io.seata.rm.datasource.undo.parser.FastjsonUndoLogParser;
import java.math.BigDecimal;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.HashMap;
import java.util.Comparator;
import java.util.stream.Collectors;
/**
 * The type Data compare utils. Compares before/after table images (used for
 * dirty-write detection before undo), field by field, keyed on primary-key values.
 *
 * @author Geng Zhang
 */
public class DataCompareUtils {

    private DataCompareUtils() {
    }

    /**
     * Is field equals result.
     *
     * @param f0 the f 0
     * @param f1 the f 1
     * @return the result
     */
    public static Result<Boolean> isFieldEquals(Field f0, Field f1) {
        // Guard clauses: null handling first, then metadata, then values.
        if (f0 == null) {
            return Result.build(f1 == null);
        }
        if (f1 == null) {
            return Result.build(false);
        }
        if (!StringUtils.equalsIgnoreCase(f0.getName(), f1.getName()) || f0.getType() != f1.getType()) {
            return Result.buildWithParams(false, "Field not equals, old name {} type {}, new name {} type {}",
                f0.getName(), f0.getType(), f1.getName(), f1.getType());
        }
        if (f0.getValue() == null) {
            return Result.build(f1.getValue() == null);
        }
        if (f1.getValue() == null) {
            return Result.buildWithParams(false, "Field not equals, name {}, new value is null", f0.getName());
        }
        String currentSerializer = AbstractUndoLogManager.getCurrentSerializer();
        if (StringUtils.equals(currentSerializer, FastjsonUndoLogParser.NAME)) {
            // fastjson may deserialize values into different java types; normalize before comparing
            convertType(f0, f1);
        }
        if (Objects.deepEquals(f0.getValue(), f1.getValue())) {
            return Result.ok();
        }
        return Result.buildWithParams(false, "Field not equals, name {}, old value {}, new value {}",
            f0.getName(), f0.getValue(), f1.getValue());
    }

    // Normalize both fields' values to the java type implied by their JDBC type.
    private static void convertType(Field f0, Field f1) {
        normalizeValueType(f0);
        normalizeValueType(f1);
    }

    // Coerce a deserialized value in place: String -> Timestamp, Integer -> BigDecimal / Long.
    private static void normalizeValueType(Field field) {
        Class<?> valueClass = field.getValue().getClass();
        switch (field.getType()) {
            case Types.TIMESTAMP:
                if (valueClass.equals(String.class)) {
                    field.setValue(Timestamp.valueOf(field.getValue().toString()));
                }
                break;
            case Types.DECIMAL:
                if (valueClass.equals(Integer.class)) {
                    field.setValue(new BigDecimal(field.getValue().toString()));
                }
                break;
            case Types.BIGINT:
                if (valueClass.equals(Integer.class)) {
                    field.setValue(Long.parseLong(field.getValue().toString()));
                }
                break;
            default:
                break;
        }
    }

    /**
     * Is records equals result.
     *
     * @param beforeImage the before image
     * @param afterImage  the after image
     * @return the result
     */
    public static Result<Boolean> isRecordsEquals(TableRecords beforeImage, TableRecords afterImage) {
        if (beforeImage == null) {
            return Result.build(afterImage == null, null);
        }
        if (afterImage == null) {
            return Result.build(false, null);
        }
        if (!beforeImage.getTableName().equalsIgnoreCase(afterImage.getTableName())
            || !CollectionUtils.isSizeEquals(beforeImage.getRows(), afterImage.getRows())) {
            return Result.build(false, null);
        }
        // when image is EmptyTableRecords, getTableMeta will throw an exception
        if (CollectionUtils.isEmpty(beforeImage.getRows())) {
            return Result.ok();
        }
        return compareRows(beforeImage.getTableMeta(), beforeImage.getRows(), afterImage.getRows());
    }

    /**
     * Is rows equals result.
     *
     * @param tableMetaData the table meta data
     * @param oldRows       the old rows
     * @param newRows       the new rows
     * @return the result
     */
    public static Result<Boolean> isRowsEquals(TableMeta tableMetaData, List<Row> oldRows, List<Row> newRows) {
        if (!CollectionUtils.isSizeEquals(oldRows, newRows)) {
            return Result.build(false, null);
        }
        return compareRows(tableMetaData, oldRows, newRows);
    }

    // Compare two row sets field by field, after indexing both by primary-key value.
    private static Result<Boolean> compareRows(TableMeta tableMetaData, List<Row> oldRows, List<Row> newRows) {
        List<String> primaryKeys = tableMetaData.getPrimaryKeyOnlyName();
        Map<String, Map<String, Field>> oldRowsMap = rowListToMap(oldRows, primaryKeys);
        Map<String, Map<String, Field>> newRowsMap = rowListToMap(newRows, primaryKeys);
        for (Map.Entry<String, Map<String, Field>> oldEntry : oldRowsMap.entrySet()) {
            String rowKey = oldEntry.getKey();
            Map<String, Field> newRow = newRowsMap.get(rowKey);
            if (newRow == null) {
                return Result.buildWithParams(false, "compare row failed, rowKey {}, reason [newRow is null]", rowKey);
            }
            for (Map.Entry<String, Field> oldFieldEntry : oldEntry.getValue().entrySet()) {
                String fieldName = oldFieldEntry.getKey();
                Field newField = newRow.get(fieldName);
                if (newField == null) {
                    return Result.buildWithParams(false,
                        "compare row failed, rowKey {}, fieldName {}, reason [newField is null]", rowKey, fieldName);
                }
                Result<Boolean> fieldResult = isFieldEquals(oldFieldEntry.getValue(), newField);
                if (!fieldResult.getResult()) {
                    return fieldResult;
                }
            }
        }
        return Result.ok();
    }

    /**
     * Row list to map map.
     *
     * @param rowList        the row list
     * @param primaryKeyList the primary key list
     * @return the map
     */
    public static Map<String, Map<String, Field>> rowListToMap(List<Row> rowList, List<String> primaryKeyList) {
        // {value of primaryKey -> {uppercase fieldName -> field}}
        Map<String, Map<String, Field>> rowMap = new HashMap<>();
        for (Row row : rowList) {
            // sort by column name so the composed row key is deterministic
            List<Field> sortedFields = row.getFields().stream()
                .sorted(Comparator.comparing(Field::getName))
                .collect(Collectors.toList());
            Map<String, Field> colsMap = new HashMap<>();
            StringBuilder rowKeyBuilder = new StringBuilder();
            boolean keyStarted = false;
            for (int idx = 0; idx < sortedFields.size(); idx++) {
                Field field = sortedFields.get(idx);
                boolean isPrimaryKey = primaryKeyList.stream().anyMatch(pk -> field.getName().equals(pk));
                if (isPrimaryKey) {
                    // join multi-column primary key values with '_'
                    if (keyStarted && idx > 0) {
                        rowKeyBuilder.append("_");
                    }
                    rowKeyBuilder.append(field.getValue());
                    keyStarted = true;
                }
                colsMap.put(field.getName().trim().toUpperCase(), field);
            }
            rowMap.put(rowKeyBuilder.toString(), colsMap);
        }
        return rowMap;
    }
}

View File

@@ -0,0 +1,147 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeoutException;
import io.seata.common.exception.NotSupportYetException;
import io.seata.common.exception.ShouldNeverHappenException;
import io.seata.core.context.RootContext;
import io.seata.core.exception.RmTransactionException;
import io.seata.core.exception.TransactionException;
import io.seata.core.exception.TransactionExceptionCode;
import io.seata.core.logger.StackTraceLogger;
import io.seata.core.model.BranchStatus;
import io.seata.core.model.BranchType;
import io.seata.core.model.Resource;
import io.seata.core.protocol.ResultCode;
import io.seata.core.protocol.transaction.GlobalLockQueryRequest;
import io.seata.core.protocol.transaction.GlobalLockQueryResponse;
import io.seata.core.rpc.netty.RmNettyRemotingClient;
import io.seata.rm.AbstractResourceManager;
import io.seata.rm.datasource.undo.UndoLogManagerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * The type Data source manager. The AT-mode resource manager: keeps the
 * registered {@link DataSourceProxy} instances, answers global lock queries,
 * delegates phase-two commit to an async worker and phase-two rollback to the
 * undo-log manager.
 *
 * @author sharajava
 */
public class DataSourceManager extends AbstractResourceManager {

    private static final Logger LOGGER = LoggerFactory.getLogger(DataSourceManager.class);

    // Executes phase-two commits asynchronously (batched undo-log deletion).
    private final AsyncWorker asyncWorker = new AsyncWorker(this);

    // resourceId -> DataSourceProxy for every data source registered on this RM.
    private final Map<String, Resource> dataSourceCache = new ConcurrentHashMap<>();

    /**
     * Instantiates a new Data source manager.
     */
    public DataSourceManager() {
    }

    /**
     * Query the TC whether the given lock keys are currently free.
     *
     * @param branchType the branch type
     * @param resourceId the resource id
     * @param xid        the xid
     * @param lockKeys   the lock keys
     * @return true if the keys are lockable
     * @throws TransactionException on RPC timeout, TC-side failure, or when
     *                              called outside any global transaction / global-lock scope
     */
    @Override
    public boolean lockQuery(BranchType branchType, String resourceId, String xid, String lockKeys)
        throws TransactionException {
        GlobalLockQueryRequest request = new GlobalLockQueryRequest();
        request.setXid(xid);
        request.setLockKey(lockKeys);
        request.setResourceId(resourceId);
        try {
            GlobalLockQueryResponse response;
            if (RootContext.inGlobalTransaction() || RootContext.requireGlobalLock()) {
                response = (GlobalLockQueryResponse) RmNettyRemotingClient.getInstance().sendSyncRequest(request);
            } else {
                // a lock query with neither a global transaction nor @GlobalLock should never happen
                throw new RuntimeException("unknown situation!");
            }
            if (response.getResultCode() == ResultCode.Failed) {
                throw new TransactionException(response.getTransactionExceptionCode(),
                    "Response[" + response.getMsg() + "]");
            }
            return response.isLockable();
        } catch (TimeoutException toe) {
            throw new RmTransactionException(TransactionExceptionCode.IO, "RPC Timeout", toe);
        } catch (RuntimeException rex) {
            throw new RmTransactionException(TransactionExceptionCode.LockableCheckFailed, "Runtime", rex);
        }
    }

    @Override
    public void registerResource(Resource resource) {
        DataSourceProxy dataSourceProxy = (DataSourceProxy) resource;
        dataSourceCache.put(dataSourceProxy.getResourceId(), dataSourceProxy);
        super.registerResource(dataSourceProxy);
    }

    @Override
    public void unregisterResource(Resource resource) {
        throw new NotSupportYetException("unregister a resource");
    }

    /**
     * Get data source proxy.
     *
     * @param resourceId the resource id
     * @return the data source proxy, or null if not registered
     */
    public DataSourceProxy get(String resourceId) {
        return (DataSourceProxy) dataSourceCache.get(resourceId);
    }

    @Override
    public BranchStatus branchCommit(BranchType branchType, String xid, long branchId, String resourceId,
                                     String applicationData) throws TransactionException {
        // Phase two commit is only undo-log cleanup; hand it off to the async worker.
        return asyncWorker.branchCommit(xid, branchId, resourceId);
    }

    @Override
    public BranchStatus branchRollback(BranchType branchType, String xid, long branchId, String resourceId,
                                       String applicationData) throws TransactionException {
        DataSourceProxy dataSourceProxy = get(resourceId);
        if (dataSourceProxy == null) {
            throw new ShouldNeverHappenException();
        }
        try {
            UndoLogManagerFactory.getUndoLogManager(dataSourceProxy.getDbType()).undo(dataSourceProxy, xid, branchId);
        } catch (TransactionException te) {
            StackTraceLogger.info(LOGGER, te,
                "branchRollback failed. branchType:[{}], xid:[{}], branchId:[{}], resourceId:[{}], applicationData:[{}]. reason:[{}]",
                new Object[]{branchType, xid, branchId, resourceId, applicationData, te.getMessage()});
            // Unretriable failures require manual intervention; everything else is retried by the TC.
            if (te.getCode() == TransactionExceptionCode.BranchRollbackFailed_Unretriable) {
                return BranchStatus.PhaseTwo_RollbackFailed_Unretryable;
            } else {
                return BranchStatus.PhaseTwo_RollbackFailed_Retryable;
            }
        }
        return BranchStatus.PhaseTwo_Rollbacked;
    }

    @Override
    public Map<String, Resource> getManagedResources() {
        return dataSourceCache;
    }

    @Override
    public BranchType getBranchType() {
        return BranchType.AT;
    }
}

View File

@@ -0,0 +1,222 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import javax.sql.DataSource;
import io.seata.common.thread.NamedThreadFactory;
import io.seata.config.ConfigurationFactory;
import io.seata.core.constants.ConfigurationKeys;
import io.seata.core.context.RootContext;
import io.seata.core.model.BranchType;
import io.seata.core.model.Resource;
import io.seata.rm.DefaultResourceManager;
import io.seata.rm.datasource.sql.struct.TableMetaCacheFactory;
import io.seata.rm.datasource.util.JdbcUtils;
import io.seata.sqlparser.util.JdbcConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static io.seata.common.DefaultValues.DEFAULT_CLIENT_TABLE_META_CHECK_ENABLE;
import static io.seata.common.DefaultValues.DEFAULT_TABLE_META_CHECKER_INTERVAL;
/**
* The type Data source proxy.
*
* @author sharajava
*/
public class DataSourceProxy extends AbstractDataSourceProxy implements Resource {
private static final Logger LOGGER = LoggerFactory.getLogger(DataSourceProxy.class);
private static final String DEFAULT_RESOURCE_GROUP_ID = "DEFAULT";
private String resourceGroupId;
private String jdbcUrl;
private String dbType;
private String userName;
/**
* Enable the table meta checker
*/
private static boolean ENABLE_TABLE_META_CHECKER_ENABLE = ConfigurationFactory.getInstance().getBoolean(
ConfigurationKeys.CLIENT_TABLE_META_CHECK_ENABLE, DEFAULT_CLIENT_TABLE_META_CHECK_ENABLE);
/**
* Table meta checker interval
*/
private static final long TABLE_META_CHECKER_INTERVAL = ConfigurationFactory.getInstance().getLong(
ConfigurationKeys.CLIENT_TABLE_META_CHECKER_INTERVAL, DEFAULT_TABLE_META_CHECKER_INTERVAL);
private final ScheduledExecutorService tableMetaExcutor = new ScheduledThreadPoolExecutor(1,
new NamedThreadFactory("tableMetaChecker", 1, true));
/**
* Instantiates a new Data source proxy.
*
* @param targetDataSource the target data source
*/
public DataSourceProxy(DataSource targetDataSource) {
this(targetDataSource, DEFAULT_RESOURCE_GROUP_ID);
}
/**
* Instantiates a new Data source proxy.
*
* @param targetDataSource the target data source
* @param resourceGroupId the resource group id
*/
public DataSourceProxy(DataSource targetDataSource, String resourceGroupId) {
if (targetDataSource instanceof SeataDataSourceProxy) {
LOGGER.info("Unwrap the target data source, because the type is: {}", targetDataSource.getClass().getName());
targetDataSource = ((SeataDataSourceProxy) targetDataSource).getTargetDataSource();
}
this.targetDataSource = targetDataSource;
init(targetDataSource, resourceGroupId);
}
private void init(DataSource dataSource, String resourceGroupId) {
this.resourceGroupId = resourceGroupId;
try (Connection connection = dataSource.getConnection()) {
jdbcUrl = connection.getMetaData().getURL();
dbType = JdbcUtils.getDbType(jdbcUrl);
if (JdbcConstants.ORACLE.equals(dbType)) {
userName = connection.getMetaData().getUserName();
}
} catch (SQLException e) {
throw new IllegalStateException("can not init dataSource", e);
}
DefaultResourceManager.get().registerResource(this);
if (ENABLE_TABLE_META_CHECKER_ENABLE) {
tableMetaExcutor.scheduleAtFixedRate(() -> {
try (Connection connection = dataSource.getConnection()) {
TableMetaCacheFactory.getTableMetaCache(DataSourceProxy.this.getDbType())
.refresh(connection, DataSourceProxy.this.getResourceId());
} catch (Exception ignore) {
}
}, 0, TABLE_META_CHECKER_INTERVAL, TimeUnit.MILLISECONDS);
}
//Set the default branch type to 'AT' in the RootContext.
RootContext.setDefaultBranchType(this.getBranchType());
}
/**
* Gets plain connection.
*
* @return the plain connection
* @throws SQLException the sql exception
*/
public Connection getPlainConnection() throws SQLException {
return targetDataSource.getConnection();
}
/**
* Gets db type.
*
* @return the db type
*/
public String getDbType() {
return dbType;
}
@Override
public ConnectionProxy getConnection() throws SQLException {
Connection targetConnection = targetDataSource.getConnection();
return new ConnectionProxy(this, targetConnection);
}
@Override
public ConnectionProxy getConnection(String username, String password) throws SQLException {
Connection targetConnection = targetDataSource.getConnection(username, password);
return new ConnectionProxy(this, targetConnection);
}
@Override
public String getResourceGroupId() {
return resourceGroupId;
}
@Override
public String getResourceId() {
if (JdbcConstants.POSTGRESQL.equals(dbType)) {
return getPGResourceId();
} else if (JdbcConstants.ORACLE.equals(dbType) && userName != null) {
return getDefaultResourceId() + "/" + userName;
} else {
return getDefaultResourceId();
}
}
/**
 * Builds the default resource id: the JDBC url with any query string removed.
 *
 * @return the jdbc url up to (but excluding) the first {@code ?}, or the whole url
 */
private String getDefaultResourceId() {
    int queryStart = jdbcUrl.indexOf('?');
    return queryStart < 0 ? jdbcUrl : jdbcUrl.substring(0, queryStart);
}
/**
 * Builds a PostgreSQL resource id that keeps the {@code currentSchema} parameter.
 *
 * <p>Urls such as
 * {@code jdbc:postgresql://127.0.0.1:5432/seata?currentSchema=public} and
 * {@code jdbc:postgresql://127.0.0.1:5432/seata?currentSchema=seata} must map to
 * different resource ids; dropping the whole query string would make them equal
 * and cause problems such as:</p>
 * <ol>
 *   <li>failure to acquire the file lock</li>
 *   <li>a wrong table meta cache</li>
 * </ol>
 *
 * @return the resource id, base url plus the first {@code currentSchema} parameter if present
 */
private String getPGResourceId() {
    int queryStart = jdbcUrl.indexOf('?');
    if (queryStart < 0) {
        return jdbcUrl;
    }
    String baseUrl = jdbcUrl.substring(0, queryStart);
    String query = jdbcUrl.substring(queryStart + 1);
    // Keep only the first parameter that mentions currentSchema; discard the rest.
    String schemaParam = null;
    for (String candidate : query.split("&")) {
        if (candidate.contains("currentSchema")) {
            schemaParam = candidate;
            break;
        }
    }
    return schemaParam == null ? baseUrl : baseUrl + "?" + schemaParam;
}
@Override
public BranchType getBranchType() {
    // This data source proxy always works in AT (automatic transaction) mode.
    return BranchType.AT;
}
}

View File

@@ -0,0 +1,67 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Map;
import io.seata.rm.datasource.exec.ExecuteTemplate;
import io.seata.sqlparser.ParametersHolder;
/**
 * Prepared-statement proxy that routes every execution through Seata's
 * {@link ExecuteTemplate} so undo-log and global-lock handling wrap the real call.
 *
 * @author sharajava
 */
public class PreparedStatementProxy extends AbstractPreparedStatementProxy
    implements PreparedStatement, ParametersHolder {

    /**
     * Instantiates a new Prepared statement proxy.
     *
     * @param connectionProxy the connection proxy owning this statement
     * @param targetStatement the physical prepared statement being wrapped
     * @param targetSQL       the sql text the statement was prepared with
     * @throws SQLException the sql exception
     */
    public PreparedStatementProxy(AbstractConnectionProxy connectionProxy, PreparedStatement targetStatement,
                                  String targetSQL) throws SQLException {
        super(connectionProxy, targetStatement, targetSQL);
    }

    @Override
    public Map<Integer, ArrayList<Object>> getParameters() {
        // Bound parameter values, keyed by 1-based parameter index.
        return parameters;
    }

    @Override
    public boolean execute() throws SQLException {
        return ExecuteTemplate.execute(this, (stmt, unused) -> stmt.execute());
    }

    @Override
    public ResultSet executeQuery() throws SQLException {
        return ExecuteTemplate.execute(this, (stmt, unused) -> stmt.executeQuery());
    }

    @Override
    public int executeUpdate() throws SQLException {
        return ExecuteTemplate.execute(this, (stmt, unused) -> stmt.executeUpdate());
    }
}

View File

@@ -0,0 +1,42 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource;
import javax.sql.DataSource;
import io.seata.core.model.BranchType;
/**
 * The interface Seata data source.
 *
 * <p>Marks a {@link DataSource} as a Seata proxy wrapping a real (target)
 * data source for a particular branch transaction type.</p>
 *
 * @author wang.liang
 */
public interface SeataDataSourceProxy extends DataSource {

    /**
     * Gets the real data source that this proxy wraps.
     *
     * @return the target data source
     */
    DataSource getTargetDataSource();

    /**
     * Gets the branch transaction type this proxy operates in (e.g. AT or XA).
     *
     * @return the branch type
     */
    BranchType getBranchType();
}

View File

@@ -0,0 +1,137 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;
import java.util.Map;
import io.seata.rm.datasource.sql.struct.Field;
/**
 * Utility for generating primary-key WHERE clauses and binding their values.
 *
 * @author JerryYin
 */
public class SqlGenerateUtils {

    /** Upper bound on the number of row tuples placed inside a single IN (...) clause. */
    private static final int MAX_IN_SIZE = 1000;

    private SqlGenerateUtils() {
    }

    public static String buildWhereConditionByPKs(List<String> pkNameList, int rowSize, String dbType)
        throws SQLException {
        return buildWhereConditionByPKs(pkNameList, rowSize, dbType, MAX_IN_SIZE);
    }

    /**
     * each pk is a condition.the result will like :" (id,userCode) in ((?,?),(?,?)) or (id,userCode) in ((?,?),(?,?)
     * ) or (id,userCode) in ((?,?))"
     * Build where condition by pks string.
     *
     * @param pkNameList pk column name list
     * @param rowSize    the row size of records
     * @param dbType     the type of database
     * @param maxInSize  the max in size
     * @return return where condition sql string.the sql can search all related records not just one.
     * @throws SQLException the sql exception
     */
    public static String buildWhereConditionByPKs(List<String> pkNameList, int rowSize, String dbType, int maxInSize)
        throws SQLException {
        StringBuilder sql = new StringBuilder();
        // Split the rows into batches so no single IN clause exceeds maxInSize tuples
        // (composite primary keys produce one tuple per row).
        int remainder = rowSize % maxInSize;
        int batchCount = rowSize / maxInSize + (remainder == 0 ? 0 : 1);
        for (int batch = 0; batch < batchCount; batch++) {
            if (batch > 0) {
                sql.append(" or ");
            }
            appendPkTuple(sql, pkNameList, dbType);
            sql.append(" in ( ");
            // Only the last batch may hold fewer than maxInSize rows.
            int rowsInBatch = (batch == batchCount - 1 && remainder != 0) ? remainder : maxInSize;
            for (int row = 0; row < rowsInBatch; row++) {
                if (row > 0) {
                    sql.append(",");
                }
                // Each row becomes one bracketed tuple of placeholders.
                sql.append("(");
                for (int col = 0; col < pkNameList.size(); col++) {
                    if (col > 0) {
                        sql.append(",");
                    }
                    sql.append("?");
                }
                sql.append(")");
            }
            sql.append(" )");
        }
        return sql.toString();
    }

    // Appends "(pk1,pk2,...)" with each name escaped for the given database type.
    private static void appendPkTuple(StringBuilder sql, List<String> pkNameList, String dbType) {
        sql.append("(");
        for (int i = 0; i < pkNameList.size(); i++) {
            if (i > 0) {
                sql.append(",");
            }
            sql.append(ColumnUtils.addEscape(pkNameList.get(i), dbType));
        }
        sql.append(")");
    }

    /**
     * set parameter for PreparedStatement, this is only used in pk sql.
     *
     * @param pkRowsList       one map per record, keyed by pk column name
     * @param pkColumnNameList pk column names, in placeholder order
     * @param pst              preparedStatement
     * @throws SQLException SQLException
     */
    public static void setParamForPk(List<Map<String, Field>> pkRowsList, List<String> pkColumnNameList,
                                     PreparedStatement pst) throws SQLException {
        int paramIndex = 1;
        for (Map<String, Field> rowData : pkRowsList) {
            for (String columnName : pkColumnNameList) {
                Field pkField = rowData.get(columnName);
                pst.setObject(paramIndex++, pkField.getValue(), pkField.getType());
            }
        }
    }

    /**
     * each pk is a condition.the result will like :" id =? and userCode =?"
     *
     * @param pkNameList pkNameList
     * @param dbType     dbType
     * @return return where condition sql string.the sql can just search one related record.
     */
    public static String buildWhereConditionByPKs(List<String> pkNameList, String dbType) {
        StringBuilder condition = new StringBuilder();
        boolean first = true;
        for (String pkName : pkNameList) {
            if (!first) {
                condition.append(" and ");
            }
            first = false;
            condition.append(ColumnUtils.addEscape(pkName, dbType)).append(" = ? ");
        }
        return condition.toString();
    }
}

View File

@@ -0,0 +1,131 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import io.seata.common.util.StringUtils;
import io.seata.rm.datasource.exec.ExecuteTemplate;
/**
 * Statement proxy that records the sql being run and routes every execution
 * through Seata's {@link ExecuteTemplate} for undo-log / lock handling.
 *
 * @param <T> the concrete {@link Statement} type being wrapped
 * @author sharajava
 */
public class StatementProxy<T extends Statement> extends AbstractStatementProxy<T> {

    /**
     * Instantiates a new Statement proxy.
     *
     * @param connectionWrapper the connection wrapper
     * @param targetStatement   the target statement
     * @param targetSQL         the target sql
     * @throws SQLException the sql exception
     */
    public StatementProxy(AbstractConnectionProxy connectionWrapper, T targetStatement, String targetSQL)
        throws SQLException {
        super(connectionWrapper, targetStatement, targetSQL);
    }

    /**
     * Instantiates a new Statement proxy without an initial sql string.
     *
     * @param connectionWrapper the connection wrapper
     * @param targetStatement   the target statement
     * @throws SQLException the sql exception
     */
    public StatementProxy(AbstractConnectionProxy connectionWrapper, T targetStatement) throws SQLException {
        this(connectionWrapper, targetStatement, null);
    }

    @Override
    public ConnectionProxy getConnectionProxy() {
        return (ConnectionProxy) super.getConnectionProxy();
    }

    @Override
    public ResultSet executeQuery(String sql) throws SQLException {
        this.targetSQL = sql;
        return ExecuteTemplate.execute(this, (stmt, args) -> stmt.executeQuery((String) args[0]), sql);
    }

    @Override
    public int executeUpdate(String sql) throws SQLException {
        this.targetSQL = sql;
        return ExecuteTemplate.execute(this, (stmt, args) -> stmt.executeUpdate((String) args[0]), sql);
    }

    @Override
    public boolean execute(String sql) throws SQLException {
        this.targetSQL = sql;
        return ExecuteTemplate.execute(this, (stmt, args) -> stmt.execute((String) args[0]), sql);
    }

    @Override
    public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
        this.targetSQL = sql;
        return ExecuteTemplate.execute(this,
            (stmt, args) -> stmt.executeUpdate((String) args[0], (int) args[1]), sql, autoGeneratedKeys);
    }

    @Override
    public int executeUpdate(String sql, int[] columnIndexes) throws SQLException {
        this.targetSQL = sql;
        return ExecuteTemplate.execute(this,
            (stmt, args) -> stmt.executeUpdate((String) args[0], (int[]) args[1]), sql, columnIndexes);
    }

    @Override
    public int executeUpdate(String sql, String[] columnNames) throws SQLException {
        this.targetSQL = sql;
        return ExecuteTemplate.execute(this,
            (stmt, args) -> stmt.executeUpdate((String) args[0], (String[]) args[1]), sql, columnNames);
    }

    @Override
    public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
        this.targetSQL = sql;
        return ExecuteTemplate.execute(this,
            (stmt, args) -> stmt.execute((String) args[0], (int) args[1]), sql, autoGeneratedKeys);
    }

    @Override
    public boolean execute(String sql, int[] columnIndexes) throws SQLException {
        this.targetSQL = sql;
        return ExecuteTemplate.execute(this,
            (stmt, args) -> stmt.execute((String) args[0], (int[]) args[1]), sql, columnIndexes);
    }

    @Override
    public boolean execute(String sql, String[] columnNames) throws SQLException {
        this.targetSQL = sql;
        return ExecuteTemplate.execute(this,
            (stmt, args) -> stmt.execute((String) args[0], (String[]) args[1]), sql, columnNames);
    }

    @Override
    public void addBatch(String sql) throws SQLException {
        // Accumulate all batched statements so targetSQL reflects the whole batch.
        if (StringUtils.isNotBlank(targetSQL)) {
            targetSQL += "; " + sql;
        } else {
            targetSQL = sql;
        }
        targetStatement.addBatch(sql);
    }

    @Override
    public int[] executeBatch() throws SQLException {
        return ExecuteTemplate.execute(this, (stmt, args) -> stmt.executeBatch());
    }
}

View File

@@ -0,0 +1,202 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.exec;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import io.seata.common.exception.NotSupportYetException;
import io.seata.common.util.CollectionUtils;
import io.seata.rm.datasource.AbstractConnectionProxy;
import io.seata.rm.datasource.ConnectionContext;
import io.seata.rm.datasource.ConnectionProxy;
import io.seata.rm.datasource.StatementProxy;
import io.seata.rm.datasource.sql.struct.TableRecords;
import io.seata.sqlparser.SQLRecognizer;
import io.seata.sqlparser.util.JdbcConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * The type Abstract dml base executor.
 *
 * <p>Template for AT-mode DML execution: capture a before image, run the real
 * statement, capture an after image, then queue an undo-log entry built from
 * the two images.</p>
 *
 * @param <T> the type parameter
 * @param <S> the type parameter
 * @author sharajava
 */
public abstract class AbstractDMLBaseExecutor<T, S extends Statement> extends BaseTransactionalExecutor<T, S> {

    private static final Logger LOGGER = LoggerFactory.getLogger(AbstractDMLBaseExecutor.class);

    // Shared " WHERE " fragment used by subclasses when building image queries.
    protected static final String WHERE = " WHERE ";

    /**
     * Instantiates a new Abstract dml base executor.
     *
     * @param statementProxy    the statement proxy
     * @param statementCallback the statement callback
     * @param sqlRecognizer     the sql recognizer
     */
    public AbstractDMLBaseExecutor(StatementProxy<S> statementProxy, StatementCallback<T, S> statementCallback,
                                   SQLRecognizer sqlRecognizer) {
        super(statementProxy, statementCallback, sqlRecognizer);
    }

    /**
     * Instantiates a new Base transactional executor.
     *
     * @param statementProxy    the statement proxy
     * @param statementCallback the statement callback
     * @param sqlRecognizers    the multi sql recognizer
     */
    public AbstractDMLBaseExecutor(StatementProxy<S> statementProxy, StatementCallback<T, S> statementCallback,
                                   List<SQLRecognizer> sqlRecognizers) {
        super(statementProxy, statementCallback, sqlRecognizers);
    }

    @Override
    public T doExecute(Object... args) throws Throwable {
        AbstractConnectionProxy connectionProxy = statementProxy.getConnectionProxy();
        // An auto-commit connection needs an explicit local transaction (plus lock
        // retry); a manual-commit connection is already inside one.
        if (connectionProxy.getAutoCommit()) {
            return executeAutoCommitTrue(args);
        } else {
            return executeAutoCommitFalse(args);
        }
    }

    /**
     * Execute auto commit false t.
     *
     * @param args the args
     * @return the t
     * @throws Exception the exception
     */
    protected T executeAutoCommitFalse(Object[] args) throws Exception {
        // Composite (multi-column) primary keys are only supported on MySQL.
        if (!JdbcConstants.MYSQL.equalsIgnoreCase(getDbType()) && isMultiPk()) {
            throw new NotSupportYetException("multi pk only support mysql!");
        }
        // Order matters: before image -> real execution -> after image -> undo log.
        TableRecords beforeImage = beforeImage();
        T result = statementCallback.execute(statementProxy.getTargetStatement(), args);
        TableRecords afterImage = afterImage(beforeImage);
        prepareUndoLog(beforeImage, afterImage);
        return result;
    }

    // True when any table touched by the recognized SQL has a composite primary key.
    private boolean isMultiPk() {
        if (null != sqlRecognizer) {
            return getTableMeta().getPrimaryKeyOnlyName().size() > 1;
        }
        if (CollectionUtils.isNotEmpty(sqlRecognizers)) {
            // De-duplicate by table name so each table's metadata is checked once.
            List<SQLRecognizer> distinctSQLRecognizer = sqlRecognizers.stream().filter(
                distinctByKey(t -> t.getTableName())).collect(Collectors.toList());
            for (SQLRecognizer sqlRecognizer : distinctSQLRecognizer) {
                if (getTableMeta(sqlRecognizer.getTableName()).getPrimaryKeyOnlyName().size() > 1) {
                    return true;
                }
            }
        }
        return false;
    }

    // Stateful predicate for Stream.filter: keeps only the first element per extracted key.
    private static <T> Predicate<T> distinctByKey(Function<? super T, ?> keyExtractor) {
        Map<Object, Boolean> map = new HashMap<>();
        return t -> map.putIfAbsent(keyExtractor.apply(t), Boolean.TRUE) == null;
    }

    /**
     * Execute auto commit true t.
     *
     * @param args the args
     * @return the t
     * @throws Throwable the throwable
     */
    protected T executeAutoCommitTrue(Object[] args) throws Throwable {
        ConnectionProxy connectionProxy = statementProxy.getConnectionProxy();
        try {
            // Temporarily switch to manual commit so image capture and the commit
            // happen inside a single local transaction.
            connectionProxy.changeAutoCommit();
            return new LockRetryPolicy(connectionProxy).execute(() -> {
                T result = executeAutoCommitFalse(args);
                connectionProxy.commit();
                return result;
            });
        } catch (Exception e) {
            // when exception occur in finally,this exception will lost, so just print it here
            LOGGER.error("execute executeAutoCommitTrue error:{}", e.getMessage(), e);
            // If the retry policy did not already roll back inside execute(), undo here.
            if (!LockRetryPolicy.isLockRetryPolicyBranchRollbackOnConflict()) {
                connectionProxy.getTargetConnection().rollback();
            }
            throw e;
        } finally {
            // Always clear the branch context and restore auto-commit mode.
            connectionProxy.getContext().reset();
            connectionProxy.setAutoCommit(true);
        }
    }

    /**
     * Before image table records.
     *
     * @return the table records
     * @throws SQLException the sql exception
     */
    protected abstract TableRecords beforeImage() throws SQLException;

    /**
     * After image table records.
     *
     * @param beforeImage the before image
     * @return the table records
     * @throws SQLException the sql exception
     */
    protected abstract TableRecords afterImage(TableRecords beforeImage) throws SQLException;

    // Lock-retry strategy for the auto-commit path; rolls back the local
    // transaction on conflict before each retry attempt.
    private static class LockRetryPolicy extends ConnectionProxy.LockRetryPolicy {

        private final ConnectionProxy connection;

        LockRetryPolicy(final ConnectionProxy connection) {
            this.connection = connection;
        }

        @Override
        public <T> T execute(Callable<T> callable) throws Exception {
            // Retry on lock conflict only when branch rollback-on-conflict is enabled;
            // otherwise run once and let the caller's rollback path handle failures.
            if (LOCK_RETRY_POLICY_BRANCH_ROLLBACK_ON_CONFLICT) {
                return doRetryOnLockConflict(callable);
            } else {
                return callable.call();
            }
        }

        @Override
        protected void onException(Exception e) throws Exception {
            ConnectionContext context = connection.getContext();
            //UndoItems can't use the Set collection class to prevent ABA
            context.removeSavepoint(null);
            // Undo the failed attempt before the next retry.
            connection.getTargetConnection().rollback();
        }

        public static boolean isLockRetryPolicyBranchRollbackOnConflict() {
            return LOCK_RETRY_POLICY_BRANCH_ROLLBACK_ON_CONFLICT;
        }
    }
}

View File

@@ -0,0 +1,413 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.exec;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Objects;
import com.google.common.collect.Lists;
import io.seata.common.exception.NotSupportYetException;
import io.seata.common.exception.ShouldNeverHappenException;
import io.seata.common.util.CollectionUtils;
import io.seata.rm.datasource.ColumnUtils;
import io.seata.rm.datasource.PreparedStatementProxy;
import io.seata.rm.datasource.StatementProxy;
import io.seata.rm.datasource.sql.struct.ColumnMeta;
import io.seata.rm.datasource.sql.struct.TableRecords;
import io.seata.sqlparser.SQLInsertRecognizer;
import io.seata.sqlparser.SQLRecognizer;
import io.seata.sqlparser.struct.Null;
import io.seata.sqlparser.struct.Sequenceable;
import io.seata.sqlparser.struct.SqlDefaultExpr;
import io.seata.sqlparser.struct.SqlMethodExpr;
import io.seata.sqlparser.struct.SqlSequenceExpr;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * The Base Insert Executor.
 *
 * <p>Builds the after image of an INSERT by recovering the primary-key values of
 * the new rows (from the SQL itself, from generated keys, or from a sequence)
 * and re-querying the inserted records.</p>
 *
 * @author jsbxyyx
 */
public abstract class BaseInsertExecutor<T, S extends Statement> extends AbstractDMLBaseExecutor<T, S> implements InsertExecutor<T> {

    private static final Logger LOGGER = LoggerFactory.getLogger(BaseInsertExecutor.class);

    // JDBC positional parameter marker as it appears in the recognized insert rows.
    protected static final String PLACEHOLDER = "?";

    /**
     * Instantiates a new Abstract dml base executor.
     *
     * @param statementProxy    the statement proxy
     * @param statementCallback the statement callback
     * @param sqlRecognizer     the sql recognizer
     */
    public BaseInsertExecutor(StatementProxy<S> statementProxy, StatementCallback<T, S> statementCallback,
                              SQLRecognizer sqlRecognizer) {
        super(statementProxy, statementCallback, sqlRecognizer);
    }

    @Override
    protected TableRecords beforeImage() throws SQLException {
        // An insert has no pre-existing rows, so the before image is always empty.
        return TableRecords.empty(getTableMeta());
    }

    @Override
    protected TableRecords afterImage(TableRecords beforeImage) throws SQLException {
        // Re-select the inserted rows by primary key to form the after image.
        Map<String, List<Object>> pkValues = getPkValues();
        TableRecords afterImage = buildTableRecords(pkValues);
        if (afterImage == null) {
            throw new SQLException("Failed to build after-image for insert");
        }
        return afterImage;
    }

    // True when the statement's explicit column list names at least one pk column.
    protected boolean containsPK() {
        SQLInsertRecognizer recognizer = (SQLInsertRecognizer) sqlRecognizer;
        List<String> insertColumns = recognizer.getInsertColumns();
        if (CollectionUtils.isEmpty(insertColumns)) {
            return false;
        }
        return containsPK(insertColumns);
    }

    /**
     * judge sql specify column
     * @return true: contains column. false: not contains column.
     */
    protected boolean containsColumns() {
        return !((SQLInsertRecognizer) sqlRecognizer).insertColumnsIsEmpty();
    }

    /**
     * get pk index
     * @return the key is pk column name and the value is index of the pk column
     */
    protected Map<String, Integer> getPkIndex() {
        Map<String, Integer> pkIndexMap = new HashMap<>();
        SQLInsertRecognizer recognizer = (SQLInsertRecognizer) sqlRecognizer;
        List<String> insertColumns = recognizer.getInsertColumns();
        if (CollectionUtils.isNotEmpty(insertColumns)) {
            // Columns are listed in the SQL: index pk columns by their position there.
            final int insertColumnsSize = insertColumns.size();
            for (int paramIdx = 0; paramIdx < insertColumnsSize; paramIdx++) {
                String sqlColumnName = insertColumns.get(paramIdx);
                if (containPK(sqlColumnName)) {
                    pkIndexMap.put(getStandardPkColumnName(sqlColumnName), paramIdx);
                }
            }
            return pkIndexMap;
        }
        // No column list in the SQL: fall back to the table metadata's column order.
        int pkIndex = -1;
        Map<String, ColumnMeta> allColumns = getTableMeta().getAllColumns();
        for (Map.Entry<String, ColumnMeta> entry : allColumns.entrySet()) {
            pkIndex++;
            if (containPK(entry.getValue().getColumnName())) {
                pkIndexMap.put(ColumnUtils.delEscape(entry.getValue().getColumnName(), getDbType()), pkIndex);
            }
        }
        return pkIndexMap;
    }

    /**
     * parse primary key value from statement.
     * @return map of pk column name to its values, one list entry per inserted row
     */
    protected Map<String, List<Object>> parsePkValuesFromStatement() {
        // insert values including PK
        SQLInsertRecognizer recognizer = (SQLInsertRecognizer) sqlRecognizer;
        final Map<String, Integer> pkIndexMap = getPkIndex();
        if (pkIndexMap.isEmpty()) {
            throw new ShouldNeverHappenException("pkIndex is not found");
        }
        Map<String, List<Object>> pkValuesMap = new HashMap<>();
        boolean ps = true;
        if (statementProxy instanceof PreparedStatementProxy) {
            PreparedStatementProxy preparedStatementProxy = (PreparedStatementProxy) statementProxy;

            List<List<Object>> insertRows = recognizer.getInsertRows(pkIndexMap.values());
            if (insertRows != null && !insertRows.isEmpty()) {
                Map<Integer, ArrayList<Object>> parameters = preparedStatementProxy.getParameters();
                final int rowSize = insertRows.size();
                int totalPlaceholderNum = -1;
                for (List<Object> row : insertRows) {
                    // oracle insert sql statement specify RETURN_GENERATED_KEYS will append :rowid on sql end
                    // insert parameter count will than the actual +1
                    if (row.isEmpty()) {
                        continue;
                    }
                    // Count '?' markers in this row; totalPlaceholderNum runs across rows
                    // so a pk '?' can be mapped to its JDBC parameter slot below.
                    int currentRowPlaceholderNum = -1;
                    for (Object r : row) {
                        if (PLACEHOLDER.equals(r)) {
                            totalPlaceholderNum += 1;
                            currentRowPlaceholderNum += 1;
                        }
                    }
                    String pkKey;
                    int pkIndex;
                    List<Object> pkValues;
                    for (Map.Entry<String, Integer> entry : pkIndexMap.entrySet()) {
                        pkKey = entry.getKey();
                        pkValues = pkValuesMap.get(pkKey);
                        if (Objects.isNull(pkValues)) {
                            pkValues = new ArrayList<>(rowSize);
                        }
                        pkIndex = entry.getValue();
                        Object pkValue = row.get(pkIndex);
                        if (PLACEHOLDER.equals(pkValue)) {
                            // The pk is a '?': convert its position in the row into the
                            // 1-based parameter index, skipping literal (non-'?') values
                            // that precede it in this row.
                            int currentRowNotPlaceholderNumBeforePkIndex = 0;
                            for (int n = 0, len = row.size(); n < len; n++) {
                                Object r = row.get(n);
                                if (n < pkIndex && !PLACEHOLDER.equals(r)) {
                                    currentRowNotPlaceholderNumBeforePkIndex++;
                                }
                            }
                            int idx = totalPlaceholderNum - currentRowPlaceholderNum + pkIndex - currentRowNotPlaceholderNumBeforePkIndex;
                            ArrayList<Object> parameter = parameters.get(idx + 1);
                            pkValues.addAll(parameter);
                        } else {
                            pkValues.add(pkValue);
                        }
                        // NOTE(review): values are fetched with the raw pkKey but stored
                        // under the un-escaped key; when the pk name carries escape
                        // characters the two differ — confirm rows after the first are
                        // not dropped in that case.
                        if (!pkValuesMap.containsKey(ColumnUtils.delEscape(pkKey, getDbType()))) {
                            pkValuesMap.put(ColumnUtils.delEscape(pkKey, getDbType()), pkValues);
                        }
                    }
                }
            }
        } else {
            ps = false;
            List<List<Object>> insertRows = recognizer.getInsertRows(pkIndexMap.values());
            for (List<Object> row : insertRows) {
                pkIndexMap.forEach((pkKey, pkIndex) -> {
                    List<Object> pkValues = pkValuesMap.get(pkKey);
                    if (Objects.isNull(pkValues)) {
                        pkValuesMap.put(ColumnUtils.delEscape(pkKey, getDbType()), Lists.newArrayList(row.get(pkIndex)));
                    } else {
                        pkValues.add(row.get(pkIndex));
                    }
                });
            }
        }
        if (pkValuesMap.isEmpty()) {
            throw new ShouldNeverHappenException();
        }
        // Reject pk value shapes (SQL functions, mixed null/value rows, ...) that
        // cannot be imaged reliably.
        boolean b = this.checkPkValues(pkValuesMap, ps);
        if (!b) {
            throw new NotSupportYetException(String.format("not support sql [%s]", sqlRecognizer.getOriginalSQL()));
        }
        return pkValuesMap;
    }

    /**
     * default get generated keys.
     * @return the auto-generated key values, one per inserted row
     * @throws SQLException if the generated-keys result set cannot be read
     */
    public List<Object> getGeneratedKeys() throws SQLException {
        // PK is just auto generated
        ResultSet genKeys = statementProxy.getGeneratedKeys();
        List<Object> pkValues = new ArrayList<>();
        while (genKeys.next()) {
            Object v = genKeys.getObject(1);
            pkValues.add(v);
        }
        if (pkValues.isEmpty()) {
            throw new NotSupportYetException(String.format("not support sql [%s]", sqlRecognizer.getOriginalSQL()));
        }
        try {
            // Rewind so callers can still consume the generated keys themselves;
            // not every driver supports a scrollable generated-keys result set.
            genKeys.beforeFirst();
        } catch (SQLException e) {
            LOGGER.warn("Fail to reset ResultSet cursor. can not get primary key value");
        }
        return pkValues;
    }

    /**
     * the modify for test
     *
     * @param expr the expr
     * @return the pk values by sequence
     * @throws SQLException the sql exception
     */
    protected List<Object> getPkValuesBySequence(SqlSequenceExpr expr) throws SQLException {
        List<Object> pkValues = null;
        try {
            // Prefer real generated keys when the driver provides them.
            pkValues = getGeneratedKeys();
        } catch (NotSupportYetException | SQLException ignore) {
        }
        if (!CollectionUtils.isEmpty(pkValues)) {
            return pkValues;
        }
        // Fall back to querying the sequence directly; this may advance/pollute it.
        Sequenceable sequenceable = (Sequenceable) this;
        final String sql = sequenceable.getSequenceSql(expr);
        LOGGER.warn("Fail to get auto-generated keys, use '{}' instead. Be cautious, statement could be polluted. Recommend you set the statement to return generated keys.", sql);

        ResultSet genKeys;
        genKeys = statementProxy.getConnection().createStatement().executeQuery(sql);
        pkValues = new ArrayList<>();
        while (genKeys.next()) {
            Object v = genKeys.getObject(1);
            pkValues.add(v);
        }
        return pkValues;
    }

    /**
     * check pk values for multi Pk
     * At most one null per row.
     * Method is not allowed.
     *
     * @param pkValues the pk values
     * @return boolean
     */
    protected boolean checkPkValuesForMultiPk(Map<String, List<Object>> pkValues) {
        Set<String> pkNames = pkValues.keySet();
        if (pkNames.isEmpty()) {
            throw new ShouldNeverHappenException();
        }
        int rowSize = pkValues.get(pkNames.iterator().next()).size();
        for (int i = 0; i < rowSize; i++) {
            // n = nulls in this row, m = SQL-method expressions in this row.
            int n = 0;
            int m = 0;
            for (String name : pkNames) {
                Object pkValue = pkValues.get(name).get(i);
                if (pkValue instanceof Null) {
                    n++;
                }
                if (pkValue instanceof SqlMethodExpr) {
                    m++;
                }
            }
            if (n > 1) {
                return false;
            }
            if (m > 0) {
                return false;
            }
        }
        return true;
    }

    /**
     * Check pk values boolean.
     *
     * @param pkValues the pk values
     * @param ps       the ps
     * @return the boolean
     */
    protected boolean checkPkValues(Map<String, List<Object>> pkValues, boolean ps) {
        Set<String> pkNames = pkValues.keySet();
        if (pkNames.size() == 1) {
            return checkPkValuesForSinglePk(pkValues.get(pkNames.iterator().next()), ps);
        } else {
            return checkPkValuesForMultiPk(pkValues);
        }
    }

    /**
     * check pk values for single pk
     * @param pkValues pkValues
     * @param ps true: is prepared statement. false: normal statement.
     * @return true: support. false: not support.
     */
    protected boolean checkPkValuesForSinglePk(List<Object> pkValues, boolean ps) {
        /*
        ps = true
        -----------------------------------------------
                        one      more
        null            O        O
        value           O        O
        method          O        O
        sequence        O        O
        default         O        O
        -----------------------------------------------
        ps = false
        -----------------------------------------------
                        one      more
        null            O        X
        value           O        O
        method          X        X
        sequence        O        X
        default         O        X
        -----------------------------------------------
        */
        // Tally the kinds of pk values: n=null, v=literal value, m=SQL method,
        // s=sequence expression, d=default expression.
        int n = 0, v = 0, m = 0, s = 0, d = 0;
        for (Object pkValue : pkValues) {
            if (pkValue instanceof Null) {
                n++;
                continue;
            }
            if (pkValue instanceof SqlMethodExpr) {
                m++;
                continue;
            }
            if (pkValue instanceof SqlSequenceExpr) {
                s++;
                continue;
            }
            if (pkValue instanceof SqlDefaultExpr) {
                d++;
                continue;
            }
            v++;
        }
        // Non-prepared statements: only homogeneous single-kind value sets (and at
        // most one null/sequence/default) are supported; methods never are.
        if (!ps) {
            if (m > 0) {
                return false;
            }
            if (n == 1 && v == 0 && m == 0 && s == 0 && d == 0) {
                return true;
            }
            if (n == 0 && v > 0 && m == 0 && s == 0 && d == 0) {
                return true;
            }
            if (n == 0 && v == 0 && m == 0 && s == 1 && d == 0) {
                return true;
            }
            if (n == 0 && v == 0 && m == 0 && s == 0 && d == 1) {
                return true;
            }
            return false;
        }
        // Prepared statements: any count is fine as long as all values are one kind.
        if (n > 0 && v == 0 && m == 0 && s == 0 && d == 0) {
            return true;
        }
        if (n == 0 && v > 0 && m == 0 && s == 0 && d == 0) {
            return true;
        }
        if (n == 0 && v == 0 && m > 0 && s == 0 && d == 0) {
            return true;
        }
        if (n == 0 && v == 0 && m == 0 && s > 0 && d == 0) {
            return true;
        }
        if (n == 0 && v == 0 && m == 0 && s == 0 && d > 0) {
            return true;
        }
        return false;
    }
}

View File

@@ -0,0 +1,417 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.exec;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import io.seata.common.exception.ShouldNeverHappenException;
import io.seata.common.util.CollectionUtils;
import io.seata.common.util.IOUtil;
import io.seata.common.util.StringUtils;
import io.seata.core.context.RootContext;
import io.seata.rm.datasource.ColumnUtils;
import io.seata.rm.datasource.ConnectionProxy;
import io.seata.rm.datasource.SqlGenerateUtils;
import io.seata.rm.datasource.StatementProxy;
import io.seata.rm.datasource.sql.struct.Field;
import io.seata.rm.datasource.sql.struct.TableMeta;
import io.seata.rm.datasource.sql.struct.TableMetaCacheFactory;
import io.seata.rm.datasource.sql.struct.TableRecords;
import io.seata.rm.datasource.undo.SQLUndoLog;
import io.seata.sqlparser.ParametersHolder;
import io.seata.sqlparser.SQLRecognizer;
import io.seata.sqlparser.SQLType;
import io.seata.sqlparser.WhereRecognizer;
/**
 * The type Base transactional executor.
 *
 * @param <T> the type parameter
 * @param <S> the type parameter
 * @author sharajava
 */
public abstract class BaseTransactionalExecutor<T, S extends Statement> implements Executor<T> {

    /**
     * The Statement proxy being executed.
     */
    protected StatementProxy<S> statementProxy;

    /**
     * The Statement callback that performs the real JDBC call.
     */
    protected StatementCallback<T, S> statementCallback;

    /**
     * The Sql recognizer for a single-statement SQL.
     */
    protected SQLRecognizer sqlRecognizer;

    /**
     * The Sql recognizers for a multi-statement SQL.
     */
    protected List<SQLRecognizer> sqlRecognizers;

    // Cached metadata of the table targeted by the recognized SQL.
    private TableMeta tableMeta;
/**
* Instantiates a new Base transactional executor.
*
* @param statementProxy the statement proxy
* @param statementCallback the statement callback
* @param sqlRecognizer the sql recognizer
*/
public BaseTransactionalExecutor(StatementProxy<S> statementProxy, StatementCallback<T, S> statementCallback,
SQLRecognizer sqlRecognizer) {
this.statementProxy = statementProxy;
this.statementCallback = statementCallback;
this.sqlRecognizer = sqlRecognizer;
}
/**
* Instantiates a new Base transactional executor.
*
* @param statementProxy the statement proxy
* @param statementCallback the statement callback
* @param sqlRecognizers the multi sql recognizer
*/
public BaseTransactionalExecutor(StatementProxy<S> statementProxy, StatementCallback<T, S> statementCallback,
List<SQLRecognizer> sqlRecognizers) {
this.statementProxy = statementProxy;
this.statementCallback = statementCallback;
this.sqlRecognizers = sqlRecognizers;
}
@Override
public T execute(Object... args) throws Throwable {
String xid = RootContext.getXID();
if (xid != null) {
statementProxy.getConnectionProxy().bind(xid);
}
statementProxy.getConnectionProxy().setGlobalLockRequire(RootContext.requireGlobalLock());
return doExecute(args);
}
/**
* Do execute object.
*
* @param args the args
* @return the object
* @throws Throwable the throwable
*/
protected abstract T doExecute(Object... args) throws Throwable;
/**
* build buildWhereCondition
*
* @param recognizer the recognizer
* @param paramAppenderList the param paramAppender list
* @return the string
*/
protected String buildWhereCondition(WhereRecognizer recognizer, ArrayList<List<Object>> paramAppenderList) {
String whereCondition = null;
if (statementProxy instanceof ParametersHolder) {
whereCondition = recognizer.getWhereCondition((ParametersHolder) statementProxy, paramAppenderList);
} else {
whereCondition = recognizer.getWhereCondition();
}
//process batch operation
if (StringUtils.isNotBlank(whereCondition) && CollectionUtils.isNotEmpty(paramAppenderList) && paramAppenderList.size() > 1) {
StringBuilder whereConditionSb = new StringBuilder();
whereConditionSb.append(" ( ").append(whereCondition).append(" ) ");
for (int i = 1; i < paramAppenderList.size(); i++) {
whereConditionSb.append(" or ( ").append(whereCondition).append(" ) ");
}
whereCondition = whereConditionSb.toString();
}
return whereCondition;
}
/**
* Gets column name in sql.
*
* @param columnName the column name
* @return the column name in sql
*/
protected String getColumnNameInSQL(String columnName) {
String tableAlias = sqlRecognizer.getTableAlias();
return tableAlias == null ? columnName : tableAlias + "." + columnName;
}
/**
* Gets several column name in sql.
*
* @param columnNameList the column name
* @return the column name in sql
*/
protected String getColumnNamesInSQL(List<String> columnNameList) {
if (Objects.isNull(columnNameList) || columnNameList.isEmpty()) {
return null;
}
StringBuilder columnNamesStr = new StringBuilder();
for (int i = 0; i < columnNameList.size(); i++) {
if (i > 0) {
columnNamesStr.append(" , ");
}
columnNamesStr.append(getColumnNameInSQL(columnNameList.get(i)));
}
return columnNamesStr.toString();
}
/**
* Gets from table in sql.
*
* @return the from table in sql
*/
protected String getFromTableInSQL() {
String tableName = sqlRecognizer.getTableName();
String tableAlias = sqlRecognizer.getTableAlias();
return tableAlias == null ? tableName : tableName + " " + tableAlias;
}
/**
* Gets table meta.
*
* @return the table meta
*/
protected TableMeta getTableMeta() {
return getTableMeta(sqlRecognizer.getTableName());
}
/**
* Gets table meta.
*
* @param tableName the table name
* @return the table meta
*/
protected TableMeta getTableMeta(String tableName) {
if (tableMeta != null) {
return tableMeta;
}
ConnectionProxy connectionProxy = statementProxy.getConnectionProxy();
tableMeta = TableMetaCacheFactory.getTableMetaCache(connectionProxy.getDbType())
.getTableMeta(connectionProxy.getTargetConnection(), tableName, connectionProxy.getDataSourceProxy().getResourceId());
return tableMeta;
}
/**
* the columns contains table meta pk
*
* @param columns the column name list
* @return true: contains pk false: not contains pk
*/
protected boolean containsPK(List<String> columns) {
if (columns == null || columns.isEmpty()) {
return false;
}
List<String> newColumns = ColumnUtils.delEscape(columns, getDbType());
return getTableMeta().containsPK(newColumns);
}
/**
* compare column name and primary key name
*
* @param columnName the primary key column name
* @return true: contain false: not contain
*/
protected boolean containPK(String columnName) {
String newColumnName = ColumnUtils.delEscape(columnName, getDbType());
return CollectionUtils.toUpperList(getTableMeta().getPrimaryKeyOnlyName()).contains(newColumnName.toUpperCase());
}
/**
* get standard pk column name from user sql column name
*
* @param userColumnName the user column name
* @return standard pk column name
*/
protected String getStandardPkColumnName(String userColumnName) {
String newUserColumnName = ColumnUtils.delEscape(userColumnName, getDbType());
for (String cn : getTableMeta().getPrimaryKeyOnlyName()) {
if (cn.toUpperCase().equals(newUserColumnName.toUpperCase())) {
return cn;
}
}
return null;
}
/**
* prepare undo log.
*
* @param beforeImage the before image
* @param afterImage the after image
* @throws SQLException the sql exception
*/
protected void prepareUndoLog(TableRecords beforeImage, TableRecords afterImage) throws SQLException {
if (beforeImage.getRows().isEmpty() && afterImage.getRows().isEmpty()) {
return;
}
if (SQLType.UPDATE == sqlRecognizer.getSQLType()) {
if (beforeImage.getRows().size() != afterImage.getRows().size()) {
throw new ShouldNeverHappenException("Before image size is not equaled to after image size, probably because you updated the primary keys.");
}
}
ConnectionProxy connectionProxy = statementProxy.getConnectionProxy();
TableRecords lockKeyRecords = sqlRecognizer.getSQLType() == SQLType.DELETE ? beforeImage : afterImage;
String lockKeys = buildLockKey(lockKeyRecords);
if (null != lockKeys) {
connectionProxy.appendLockKey(lockKeys);
SQLUndoLog sqlUndoLog = buildUndoItem(beforeImage, afterImage);
connectionProxy.appendUndoLog(sqlUndoLog);
}
}
/**
* build lockKey
*
* @param rowsIncludingPK the records
* @return the string as local key. the local key example(multi pk): "t_user:1_a,2_b"
*/
protected String buildLockKey(TableRecords rowsIncludingPK) {
if (rowsIncludingPK.size() == 0) {
return null;
}
StringBuilder sb = new StringBuilder();
sb.append(rowsIncludingPK.getTableMeta().getTableName());
sb.append(":");
int filedSequence = 0;
List<Map<String, Field>> pksRows = rowsIncludingPK.pkRows();
for (Map<String, Field> rowMap : pksRows) {
int pkSplitIndex = 0;
for (String pkName : getTableMeta().getPrimaryKeyOnlyName()) {
if (pkSplitIndex > 0) {
sb.append("_");
}
sb.append(rowMap.get(pkName).getValue());
pkSplitIndex++;
}
filedSequence++;
if (filedSequence < pksRows.size()) {
sb.append(",");
}
}
return sb.toString();
}
/**
* build a SQLUndoLog
*
* @param beforeImage the before image
* @param afterImage the after image
* @return sql undo log
*/
protected SQLUndoLog buildUndoItem(TableRecords beforeImage, TableRecords afterImage) {
SQLType sqlType = sqlRecognizer.getSQLType();
String tableName = sqlRecognizer.getTableName();
SQLUndoLog sqlUndoLog = new SQLUndoLog();
sqlUndoLog.setSqlType(sqlType);
sqlUndoLog.setTableName(tableName);
sqlUndoLog.setBeforeImage(beforeImage);
sqlUndoLog.setAfterImage(afterImage);
return sqlUndoLog;
}
/**
* build a BeforeImage
*
* @param tableMeta the tableMeta
* @param selectSQL the selectSQL
* @param paramAppenderList the paramAppender list
* @return a tableRecords
* @throws SQLException the sql exception
*/
protected TableRecords buildTableRecords(TableMeta tableMeta, String selectSQL, ArrayList<List<Object>> paramAppenderList) throws SQLException {
ResultSet rs = null;
try (PreparedStatement ps = statementProxy.getConnection().prepareStatement(selectSQL)) {
if (CollectionUtils.isNotEmpty(paramAppenderList)) {
for (int i = 0, ts = paramAppenderList.size(); i < ts; i++) {
List<Object> paramAppender = paramAppenderList.get(i);
for (int j = 0, ds = paramAppender.size(); j < ds; j++) {
ps.setObject(i * ds + j + 1, paramAppender.get(j));
}
}
}
rs = ps.executeQuery();
return TableRecords.buildRecords(tableMeta, rs);
} finally {
IOUtil.close(rs);
}
}
/**
* build TableRecords
*
* @param pkValuesMap the pkValuesMap
* @return return TableRecords;
* @throws SQLException the sql exception
*/
protected TableRecords buildTableRecords(Map<String, List<Object>> pkValuesMap) throws SQLException {
List<String> pkColumnNameList = getTableMeta().getPrimaryKeyOnlyName();
StringBuilder sql = new StringBuilder()
.append("SELECT * FROM ")
.append(getFromTableInSQL())
.append(" WHERE ");
// build check sql
String firstKey = pkValuesMap.keySet().stream().findFirst().get();
int rowSize = pkValuesMap.get(firstKey).size();
sql.append(SqlGenerateUtils.buildWhereConditionByPKs(pkColumnNameList, rowSize, getDbType()));
PreparedStatement ps = null;
ResultSet rs = null;
try {
ps = statementProxy.getConnection().prepareStatement(sql.toString());
int paramIndex = 1;
for (int r = 0; r < rowSize; r++) {
for (int c = 0; c < pkColumnNameList.size(); c++) {
List<Object> pkColumnValueList = pkValuesMap.get(pkColumnNameList.get(c));
int dataType = tableMeta.getColumnMeta(pkColumnNameList.get(c)).getDataType();
ps.setObject(paramIndex, pkColumnValueList.get(r), dataType);
paramIndex++;
}
}
rs = ps.executeQuery();
return TableRecords.buildRecords(getTableMeta(), rs);
} finally {
IOUtil.close(rs);
}
}
/**
* get db type
*
* @return db type
*/
protected String getDbType() {
return statementProxy.getConnectionProxy().getDbType();
}
}

View File

@@ -0,0 +1,91 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.exec;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import java.util.StringJoiner;
import io.seata.common.util.StringUtils;
import io.seata.rm.datasource.ColumnUtils;
import io.seata.rm.datasource.StatementProxy;
import io.seata.rm.datasource.sql.struct.TableMeta;
import io.seata.rm.datasource.sql.struct.TableRecords;
import io.seata.sqlparser.ParametersHolder;
import io.seata.sqlparser.SQLDeleteRecognizer;
import io.seata.sqlparser.SQLRecognizer;
/**
 * The type Delete executor.
 *
 * @author sharajava
 *
 * @param <T> the type parameter
 * @param <S> the type parameter
 */
public class DeleteExecutor<T, S extends Statement> extends AbstractDMLBaseExecutor<T, S> {

    /**
     * Instantiates a new Delete executor.
     *
     * @param statementProxy    the statement proxy
     * @param statementCallback the statement callback
     * @param sqlRecognizer     the sql recognizer
     */
    public DeleteExecutor(StatementProxy<S> statementProxy, StatementCallback<T,S> statementCallback,
                          SQLRecognizer sqlRecognizer) {
        super(statementProxy, statementCallback, sqlRecognizer);
    }

    @Override
    protected TableRecords beforeImage() throws SQLException {
        SQLDeleteRecognizer recognizer = (SQLDeleteRecognizer) sqlRecognizer;
        TableMeta meta = getTableMeta(recognizer.getTableName());
        ArrayList<List<Object>> paramAppenders = new ArrayList<>();
        // Select (and lock) the rows about to be deleted.
        String querySQL = buildBeforeImageSQL(recognizer, meta, paramAppenders);
        return buildTableRecords(meta, querySQL, paramAppenders);
    }

    /**
     * Build the locking SELECT that captures the rows targeted by this DELETE.
     *
     * @param recognizer        the delete recognizer
     * @param tableMeta         the table meta
     * @param paramAppenderList collects bound parameter values for the query
     * @return the select sql
     */
    private String buildBeforeImageSQL(SQLDeleteRecognizer recognizer, TableMeta tableMeta, ArrayList<List<Object>> paramAppenderList) {
        String whereCondition = buildWhereCondition(recognizer, paramAppenderList);
        StringBuilder suffix = new StringBuilder(" FROM ").append(getFromTableInSQL());
        if (StringUtils.isNotBlank(whereCondition)) {
            suffix.append(WHERE).append(whereCondition);
        }
        String orderBy = recognizer.getOrderBy();
        if (StringUtils.isNotBlank(orderBy)) {
            suffix.append(orderBy);
        }
        ParametersHolder holder;
        if (statementProxy instanceof ParametersHolder) {
            holder = (ParametersHolder) statementProxy;
        } else {
            holder = null;
        }
        String limit = recognizer.getLimit(holder, paramAppenderList);
        if (StringUtils.isNotBlank(limit)) {
            suffix.append(limit);
        }
        // Lock the selected rows until the local transaction ends.
        suffix.append(" FOR UPDATE");
        StringJoiner selectJoiner = new StringJoiner(", ", "SELECT ", suffix.toString());
        for (String column : tableMeta.getAllColumns().keySet()) {
            selectJoiner.add(getColumnNameInSQL(ColumnUtils.addEscape(column, getDbType())));
        }
        return selectJoiner.toString();
    }

    @Override
    protected TableRecords afterImage(TableRecords beforeImage) throws SQLException {
        // After a delete the rows are gone: the after image is always empty.
        return TableRecords.empty(getTableMeta());
    }
}

View File

@@ -0,0 +1,122 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.exec;
import io.seata.common.loader.EnhancedServiceLoader;
import io.seata.common.util.CollectionUtils;
import io.seata.core.context.RootContext;
import io.seata.core.model.BranchType;
import io.seata.rm.datasource.StatementProxy;
import io.seata.rm.datasource.sql.SQLVisitorFactory;
import io.seata.sqlparser.SQLRecognizer;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.List;
/**
 * The type Execute template. Static entry point that decides whether a statement
 * runs untouched or is wrapped by an AT-mode executor chosen from its SQL type.
 *
 * @author sharajava
 */
public class ExecuteTemplate {

    // Utility class with only static entry points: prevent instantiation.
    private ExecuteTemplate() {
    }

    /**
     * Execute t.
     *
     * @param <T>               the type parameter
     * @param <S>               the type parameter
     * @param statementProxy    the statement proxy
     * @param statementCallback the statement callback
     * @param args              the args
     * @return the t
     * @throws SQLException the sql exception
     */
    public static <T, S extends Statement> T execute(StatementProxy<S> statementProxy,
                                                     StatementCallback<T, S> statementCallback,
                                                     Object... args) throws SQLException {
        return execute(null, statementProxy, statementCallback, args);
    }

    /**
     * Execute t.
     *
     * @param <T>               the type parameter
     * @param <S>               the type parameter
     * @param sqlRecognizers    the sql recognizer list (may be null; recognized from the SQL then)
     * @param statementProxy    the statement proxy
     * @param statementCallback the statement callback
     * @param args              the args
     * @return the t
     * @throws SQLException the sql exception
     */
    public static <T, S extends Statement> T execute(List<SQLRecognizer> sqlRecognizers,
                                                     StatementProxy<S> statementProxy,
                                                     StatementCallback<T, S> statementCallback,
                                                     Object... args) throws SQLException {
        if (!RootContext.requireGlobalLock() && BranchType.AT != RootContext.getBranchType()) {
            // Just work as original statement
            return statementCallback.execute(statementProxy.getTargetStatement(), args);
        }
        String dbType = statementProxy.getConnectionProxy().getDbType();
        if (CollectionUtils.isEmpty(sqlRecognizers)) {
            sqlRecognizers = SQLVisitorFactory.get(
                statementProxy.getTargetSQL(),
                dbType);
        }
        Executor<T> executor;
        if (CollectionUtils.isEmpty(sqlRecognizers)) {
            // Unrecognized SQL: execute without image/undo handling.
            executor = new PlainExecutor<>(statementProxy, statementCallback);
        } else {
            if (sqlRecognizers.size() == 1) {
                SQLRecognizer sqlRecognizer = sqlRecognizers.get(0);
                switch (sqlRecognizer.getSQLType()) {
                    case INSERT:
                        // INSERT executors are db-specific and loaded via SPI by dbType.
                        executor = EnhancedServiceLoader.load(InsertExecutor.class, dbType,
                            new Class[]{StatementProxy.class, StatementCallback.class, SQLRecognizer.class},
                            new Object[]{statementProxy, statementCallback, sqlRecognizer});
                        break;
                    case UPDATE:
                        executor = new UpdateExecutor<>(statementProxy, statementCallback, sqlRecognizer);
                        break;
                    case DELETE:
                        executor = new DeleteExecutor<>(statementProxy, statementCallback, sqlRecognizer);
                        break;
                    case SELECT_FOR_UPDATE:
                        executor = new SelectForUpdateExecutor<>(statementProxy, statementCallback, sqlRecognizer);
                        break;
                    default:
                        executor = new PlainExecutor<>(statementProxy, statementCallback);
                        break;
                }
            } else {
                executor = new MultiExecutor<>(statementProxy, statementCallback, sqlRecognizers);
            }
        }
        T rs;
        try {
            rs = executor.execute(args);
        } catch (Throwable ex) {
            if (!(ex instanceof SQLException)) {
                // Turn other exception into SQLException
                ex = new SQLException(ex);
            }
            throw (SQLException) ex;
        }
        return rs;
    }
}

View File

@@ -0,0 +1,35 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.exec;
/**
 * The interface Executor. Smallest unit of statement execution in the datasource
 * proxy: implementations run the target statement, possibly adding image
 * capture and undo-log bookkeeping around it.
 *
 * @author sharajava
 *
 * @param <T> the type parameter
 */
public interface Executor<T> {

    /**
     * Execute t.
     *
     * @param args the args passed through to the underlying statement execution
     * @return the t
     * @throws Throwable the throwable
     */
    T execute(Object... args) throws Throwable;
}

View File

@@ -0,0 +1,46 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.exec;
import java.sql.SQLException;
import java.util.List;
import java.util.Map;
/**
 * The interface Insert executor. Extends {@link Executor} with primary-key
 * extraction, used to build the after image for INSERT statements.
 *
 * @param <T> the type parameter
 * @author jsbxyyx
 */
public interface InsertExecutor<T> extends Executor<T> {

    /**
     * get primary key values.
     *
     * @return The primary key value, keyed by pk column name.
     * @throws SQLException the sql exception
     */
    Map<String, List<Object>> getPkValues() throws SQLException;

    /**
     * get primary key values by insert column.
     *
     * @return pk values by column
     * @throws SQLException the sql exception
     */
    Map<String, List<Object>> getPkValuesByColumn() throws SQLException;
}

View File

@@ -0,0 +1,36 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.exec;
import java.sql.SQLException;
/**
 * The type Lock conflict exception. Thrown when a global lock cannot be
 * acquired because another global transaction currently holds it.
 *
 * @author sharajava
 */
public class LockConflictException extends SQLException {

    // Declared explicitly for serialization stability, consistent with the
    // sibling LockWaitTimeoutException.
    private static final long serialVersionUID = -2930281589799536265L;

    /**
     * Instantiates a new Lock conflict exception.
     */
    public LockConflictException() {
    }

    /**
     * Instantiates a new Lock conflict exception.
     *
     * @param message the detail message
     */
    public LockConflictException(String message) {
        super(message);
    }
}

View File

@@ -0,0 +1,133 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.exec;
import io.seata.common.DefaultValues;
import io.seata.common.util.NumberUtils;
import io.seata.config.Configuration;
import io.seata.config.ConfigurationCache;
import io.seata.config.ConfigurationChangeEvent;
import io.seata.config.ConfigurationChangeListener;
import io.seata.config.ConfigurationFactory;
import io.seata.core.constants.ConfigurationKeys;
import io.seata.core.context.GlobalLockConfigHolder;
import io.seata.core.model.GlobalLockConfig;
/**
 * Lock retry controller: bounds how many times and how often an executor
 * retries acquiring a global lock. Per-transaction config (GlobalLockConfig)
 * takes precedence over the globally configured defaults.
 *
 * @author sharajava
 */
public class LockRetryController {

    private static final GlobalConfig LISTENER = new GlobalConfig();

    static {
        // Keep the cached global defaults in sync with configuration changes.
        ConfigurationCache.addConfigListener(ConfigurationKeys.CLIENT_LOCK_RETRY_INTERVAL, LISTENER);
        ConfigurationCache.addConfigListener(ConfigurationKeys.CLIENT_LOCK_RETRY_TIMES, LISTENER);
    }

    // Millis to sleep between retries.
    private int lockRetryInternal;

    // Remaining retry budget; decremented on each sleep() call.
    private int lockRetryTimes;

    /**
     * Instantiates a new Lock retry controller, snapshotting the effective
     * retry interval and retry count at construction time.
     */
    public LockRetryController() {
        this.lockRetryInternal = getLockRetryInternal();
        this.lockRetryTimes = getLockRetryTimes();
    }

    /**
     * Sleep before the next lock-acquisition attempt, or fail once the retry
     * budget is exhausted.
     *
     * @param e the exception that triggered the retry
     * @throws LockWaitTimeoutException the lock wait timeout exception
     */
    public void sleep(Exception e) throws LockWaitTimeoutException {
        if (--lockRetryTimes < 0) {
            throw new LockWaitTimeoutException("Global lock wait timeout", e);
        }
        try {
            Thread.sleep(lockRetryInternal);
        } catch (InterruptedException ignore) {
            // Restore the interrupt status so callers can still observe the interruption.
            Thread.currentThread().interrupt();
        }
    }

    int getLockRetryInternal() {
        // get customized config first
        GlobalLockConfig config = GlobalLockConfigHolder.getCurrentGlobalLockConfig();
        if (config != null) {
            int configInternal = config.getLockRetryInternal();
            if (configInternal > 0) {
                return configInternal;
            }
        }
        // if there is no customized config, use global config instead
        return LISTENER.getGlobalLockRetryInternal();
    }

    int getLockRetryTimes() {
        // get customized config first
        GlobalLockConfig config = GlobalLockConfigHolder.getCurrentGlobalLockConfig();
        if (config != null) {
            int configTimes = config.getLockRetryTimes();
            if (configTimes >= 0) {
                return configTimes;
            }
        }
        // if there is no customized config, use global config instead
        return LISTENER.getGlobalLockRetryTimes();
    }

    /**
     * Caches the globally configured retry interval/count and refreshes them
     * on configuration change events.
     */
    static class GlobalConfig implements ConfigurationChangeListener {

        private volatile int globalLockRetryInternal;

        private volatile int globalLockRetryTimes;

        private final int defaultRetryInternal = DefaultValues.DEFAULT_CLIENT_LOCK_RETRY_INTERVAL;
        private final int defaultRetryTimes = DefaultValues.DEFAULT_CLIENT_LOCK_RETRY_TIMES;

        public GlobalConfig() {
            Configuration configuration = ConfigurationFactory.getInstance();
            globalLockRetryInternal = configuration.getInt(ConfigurationKeys.CLIENT_LOCK_RETRY_INTERVAL, defaultRetryInternal);
            globalLockRetryTimes = configuration.getInt(ConfigurationKeys.CLIENT_LOCK_RETRY_TIMES, defaultRetryTimes);
        }

        @Override
        public void onChangeEvent(ConfigurationChangeEvent event) {
            String dataId = event.getDataId();
            String newValue = event.getNewValue();
            if (ConfigurationKeys.CLIENT_LOCK_RETRY_INTERVAL.equals(dataId)) {
                globalLockRetryInternal = NumberUtils.toInt(newValue, defaultRetryInternal);
            }
            if (ConfigurationKeys.CLIENT_LOCK_RETRY_TIMES.equals(dataId)) {
                globalLockRetryTimes = NumberUtils.toInt(newValue, defaultRetryTimes);
            }
        }

        public int getGlobalLockRetryInternal() {
            return globalLockRetryInternal;
        }

        public int getGlobalLockRetryTimes() {
            return globalLockRetryTimes;
        }
    }
}

View File

@@ -0,0 +1,52 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.exec;
import java.sql.SQLException;
/**
 * The type Lock wait timeout exception. Thrown when retries for a global lock
 * are exhausted (see LockRetryController).
 *
 * @author sharajava
 */
public class LockWaitTimeoutException extends SQLException {

    private static final long serialVersionUID = -6754599774015964707L;

    /**
     * Instantiates a new Lock wait timeout exception.
     */
    public LockWaitTimeoutException() {
    }

    /**
     * Instantiates a new Lock wait timeout exception.
     *
     * @param reason the reason
     * @param cause  the cause that exhausted the retries
     */
    public LockWaitTimeoutException(String reason, Throwable cause) {
        super(reason, cause);
    }

    /**
     * Instantiates a new Lock wait timeout exception.
     *
     * @param e the underlying cause
     */
    public LockWaitTimeoutException(Throwable e) {
        super(e);
    }
}

View File

@@ -0,0 +1,96 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.exec;
import io.seata.common.exception.NotSupportYetException;
import io.seata.common.util.StringUtils;
import io.seata.rm.datasource.ColumnUtils;
import io.seata.rm.datasource.StatementProxy;
import io.seata.rm.datasource.sql.struct.TableMeta;
import io.seata.rm.datasource.sql.struct.TableRecords;
import io.seata.sqlparser.ParametersHolder;
import io.seata.sqlparser.SQLDeleteRecognizer;
import io.seata.sqlparser.SQLRecognizer;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import java.util.StringJoiner;
/**
 * The type MultiSql executor for DELETE statements: merges several deletes on
 * the same table into one locking SELECT for the before image.
 *
 * @param <T> the type parameter
 * @param <S> the type parameter
 * @author wangwei.ying
 */
public class MultiDeleteExecutor<T, S extends Statement> extends AbstractDMLBaseExecutor<T, S> {

    /**
     * Instantiates a new Multi delete executor.
     *
     * @param statementProxy    the statement proxy
     * @param statementCallback the statement callback
     * @param sqlRecognizers    the sql recognizers
     */
    public MultiDeleteExecutor(StatementProxy<S> statementProxy, StatementCallback<T, S> statementCallback, List<SQLRecognizer> sqlRecognizers) {
        super(statementProxy, statementCallback, sqlRecognizers);
    }

    @Override
    protected TableRecords beforeImage() throws SQLException {
        if (sqlRecognizers.size() == 1) {
            // Single statement: delegate to the plain delete executor.
            // Parameterized (was a raw-type instantiation).
            DeleteExecutor<T, S> executor = new DeleteExecutor<>(statementProxy, statementCallback, sqlRecognizers.get(0));
            return executor.beforeImage();
        }
        final TableMeta tmeta = getTableMeta(sqlRecognizers.get(0).getTableName());
        final ArrayList<List<Object>> paramAppenderList = new ArrayList<>();
        StringBuilder whereCondition = new StringBuilder();
        for (SQLRecognizer recognizer : sqlRecognizers) {
            sqlRecognizer = recognizer;
            SQLDeleteRecognizer visitor = (SQLDeleteRecognizer) recognizer;
            ParametersHolder parametersHolder = statementProxy instanceof ParametersHolder ? (ParametersHolder)statementProxy : null;
            if (StringUtils.isNotBlank(visitor.getLimit(parametersHolder, paramAppenderList))) {
                throw new NotSupportYetException("Multi delete SQL with limit condition is not support yet !");
            }
            if (StringUtils.isNotBlank(visitor.getOrderBy())) {
                throw new NotSupportYetException("Multi delete SQL with orderBy condition is not support yet !");
            }
            String whereConditionStr = buildWhereCondition(visitor, paramAppenderList);
            if (StringUtils.isBlank(whereConditionStr)) {
                // A delete without WHERE removes every row: select the whole
                // table and drop any conditions collected so far.
                whereCondition = new StringBuilder();
                paramAppenderList.clear();
                break;
            }
            if (whereCondition.length() > 0) {
                whereCondition.append(" OR ");
            }
            whereCondition.append(whereConditionStr);
        }
        StringBuilder suffix = new StringBuilder(" FROM ").append(getFromTableInSQL());
        if (whereCondition.length() > 0) {
            suffix.append(" WHERE ").append(whereCondition);
        }
        // Lock the selected rows until the local transaction ends.
        suffix.append(" FOR UPDATE");
        final StringJoiner selectSQLAppender = new StringJoiner(", ", "SELECT ", suffix.toString());
        for (String column : tmeta.getAllColumns().keySet()) {
            selectSQLAppender.add(getColumnNameInSQL(ColumnUtils.addEscape(column, getDbType())));
        }
        return buildTableRecords(tmeta, selectSQLAppender.toString(), paramAppenderList);
    }

    @Override
    protected TableRecords afterImage(TableRecords beforeImage) throws SQLException {
        // All targeted rows are gone after the deletes: the after image is empty.
        return TableRecords.empty(getTableMeta(sqlRecognizers.get(0).getTableName()));
    }
}

View File

@@ -0,0 +1,141 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.exec;
import io.seata.common.exception.ShouldNeverHappenException;
import io.seata.rm.datasource.StatementProxy;
import io.seata.rm.datasource.sql.struct.TableRecords;
import io.seata.sqlparser.SQLRecognizer;
import io.seata.sqlparser.SQLType;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* The type MultiSql executor. now just support same type
* ex.
* <pre>
* jdbcTemplate.update("update account_tbl set money = money - ? where user_id = ?;update account_tbl set money = money - ? where user_id = ?", new Object[] {money, userId,"U10000",money,"U1000"});
* </pre>
*
* @param <T> the type parameter
* @param <S> the type parameter
* @author wangwei.ying
*/
public class MultiExecutor<T, S extends Statement> extends AbstractDMLBaseExecutor<T, S> {
private Map<String, List<SQLRecognizer>> multiSqlGroup = new HashMap<>(4);
private Map<SQLRecognizer, TableRecords> beforeImagesMap = new HashMap<>(4);
private Map<SQLRecognizer, TableRecords> afterImagesMap = new HashMap<>(4);
/**
* Instantiates a new Abstract dml base executor.
*
* @param statementProxy the statement proxy
* @param statementCallback the statement callback
* @param sqlRecognizers the sql recognizers
*/
public MultiExecutor(StatementProxy<S> statementProxy, StatementCallback<T, S> statementCallback, List<SQLRecognizer> sqlRecognizers) {
super(statementProxy, statementCallback, sqlRecognizers);
}
/**
* Before image table records. only support update or deleted
*
* @return the table records
* @throws SQLException the sql exception
* @see io.seata.rm.datasource.sql.SQLVisitorFactory#get(String, String) validate sqlType
*/
@Override
protected TableRecords beforeImage() throws SQLException {
//group by sqlType
multiSqlGroup = sqlRecognizers.stream().collect(Collectors.groupingBy(t -> t.getTableName()));
AbstractDMLBaseExecutor<T, S> executor = null;
for (List<SQLRecognizer> value : multiSqlGroup.values()) {
switch (value.get(0).getSQLType()) {
case UPDATE:
executor = new MultiUpdateExecutor<T, S>(statementProxy, statementCallback, value);
break;
case DELETE:
executor = new MultiDeleteExecutor<T, S>(statementProxy, statementCallback, value);
break;
default:
throw new UnsupportedOperationException("not support sql" + value.get(0).getOriginalSQL());
}
TableRecords beforeImage = executor.beforeImage();
beforeImagesMap.put(value.get(0), beforeImage);
}
return null;
}
@Override
protected TableRecords afterImage(TableRecords beforeImage) throws SQLException {
AbstractDMLBaseExecutor<T, S> executor = null;
for (List<SQLRecognizer> value : multiSqlGroup.values()) {
switch (value.get(0).getSQLType()) {
case UPDATE:
executor = new MultiUpdateExecutor<T, S>(statementProxy, statementCallback, value);
break;
case DELETE:
executor = new MultiDeleteExecutor<T, S>(statementProxy, statementCallback, value);
break;
default:
throw new UnsupportedOperationException("not support sql" + value.get(0).getOriginalSQL());
}
beforeImage = beforeImagesMap.get(value.get(0));
TableRecords afterImage = executor.afterImage(beforeImage);
afterImagesMap.put(value.get(0), afterImage);
}
return null;
}
@Override
protected void prepareUndoLog(TableRecords beforeImage, TableRecords afterImage) throws SQLException {
if (beforeImagesMap == null || afterImagesMap == null) {
throw new IllegalStateException("images can not be null");
}
SQLRecognizer recognizer;
for (Map.Entry<SQLRecognizer, TableRecords> entry : beforeImagesMap.entrySet()) {
sqlRecognizer = recognizer = entry.getKey();
beforeImage = entry.getValue();
afterImage = afterImagesMap.get(recognizer);
if (SQLType.UPDATE == sqlRecognizer.getSQLType()) {
if (beforeImage.getRows().size() != afterImage.getRows().size()) {
throw new ShouldNeverHappenException("Before image size is not equaled to after image size, probably because you updated the primary keys.");
}
}
super.prepareUndoLog(beforeImage, afterImage);
}
}
public Map<String, List<SQLRecognizer>> getMultiSqlGroup() {
return multiSqlGroup;
}
public Map<SQLRecognizer, TableRecords> getBeforeImagesMap() {
return beforeImagesMap;
}
public Map<SQLRecognizer, TableRecords> getAfterImagesMap() {
return afterImagesMap;
}
}

View File

@@ -0,0 +1,179 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.exec;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.HashSet;
import java.util.StringJoiner;
import io.seata.common.exception.NotSupportYetException;
import io.seata.common.util.IOUtil;
import io.seata.common.util.StringUtils;
import io.seata.config.Configuration;
import io.seata.config.ConfigurationFactory;
import io.seata.core.constants.ConfigurationKeys;
import io.seata.common.DefaultValues;
import io.seata.rm.datasource.ColumnUtils;
import io.seata.rm.datasource.SqlGenerateUtils;
import io.seata.rm.datasource.StatementProxy;
import io.seata.rm.datasource.sql.struct.TableMeta;
import io.seata.rm.datasource.sql.struct.TableRecords;
import io.seata.sqlparser.ParametersHolder;
import io.seata.sqlparser.SQLRecognizer;
import io.seata.sqlparser.SQLUpdateRecognizer;
/**
 * Executor for a group of UPDATE statements that all target the same table.
 * Builds one combined before/after image by OR-ing together the WHERE clauses
 * of every statement in the group.
 *
 * @param <T> the type parameter
 * @param <S> the type parameter
 * @author wangwei-ying
 */
public class MultiUpdateExecutor<T, S extends Statement> extends AbstractDMLBaseExecutor<T, S> {
private static final Configuration CONFIG = ConfigurationFactory.getInstance();
// When true, the images record only the primary key plus the updated columns
// instead of every column of the table.
private static final boolean ONLY_CARE_UPDATE_COLUMNS = CONFIG.getBoolean(
ConfigurationKeys.TRANSACTION_UNDO_ONLY_CARE_UPDATE_COLUMNS, DefaultValues.DEFAULT_ONLY_CARE_UPDATE_COLUMNS);
/**
 * Instantiates a new Multi update executor.
 *
 * @param statementProxy the statement proxy
 * @param statementCallback the statement callback
 * @param sqlRecognizers the sql recognizers
 */
public MultiUpdateExecutor(StatementProxy<S> statementProxy, StatementCallback<T, S> statementCallback, List<SQLRecognizer> sqlRecognizers) {
super(statementProxy, statementCallback, sqlRecognizers);
}
/**
 * Selects (FOR UPDATE) the union of all rows that any of the grouped updates
 * will touch, before the updates run.
 *
 * @return the before image records
 * @throws SQLException the sql exception
 */
@Override
protected TableRecords beforeImage() throws SQLException {
// a single statement needs no merging: delegate to the plain update executor
if (sqlRecognizers.size() == 1) {
UpdateExecutor executor = new UpdateExecutor<>(statementProxy, statementCallback, sqlRecognizers.get(0));
return executor.beforeImage();
}
// all recognizers of the group refer to one table, so the first one's meta is used
final TableMeta tmeta = getTableMeta(sqlRecognizers.get(0).getTableName());
final ArrayList<List<Object>> paramAppenderList = new ArrayList<>();
Set<String> updateColumnsSet = new HashSet<>();
StringBuilder whereCondition = new StringBuilder();
boolean noWhereCondition = false;
for (SQLRecognizer recognizer : sqlRecognizers) {
sqlRecognizer = recognizer;
SQLUpdateRecognizer sqlUpdateRecognizer = (SQLUpdateRecognizer) recognizer;
ParametersHolder parametersHolder = statementProxy instanceof ParametersHolder ? (ParametersHolder)statementProxy : null;
// LIMIT / ORDER BY clauses cannot be merged into a single OR-ed query
if (StringUtils.isNotBlank(sqlUpdateRecognizer.getLimit(parametersHolder, paramAppenderList))) {
throw new NotSupportYetException("Multi update SQL with limit condition is not support yet !");
}
if (StringUtils.isNotBlank(sqlUpdateRecognizer.getOrderBy())) {
throw new NotSupportYetException("Multi update SQL with orderBy condition is not support yet !");
}
List<String> updateColumns = sqlUpdateRecognizer.getUpdateColumns();
updateColumnsSet.addAll(updateColumns);
// once one statement has no WHERE clause the whole table is affected,
// so building further conditions is pointless
if (noWhereCondition) {
continue;
}
String whereConditionStr = buildWhereCondition(sqlUpdateRecognizer, paramAppenderList);
if (StringUtils.isBlank(whereConditionStr)) {
noWhereCondition = true;
} else {
// combine the individual statements' conditions with OR
if (whereCondition.length() > 0) {
whereCondition.append(" OR ");
}
whereCondition.append(whereConditionStr);
}
}
StringBuilder prefix = new StringBuilder("SELECT ");
final StringBuilder suffix = new StringBuilder(" FROM ").append(getFromTableInSQL());
if (noWhereCondition) {
//select all rows
paramAppenderList.clear();
} else {
suffix.append(" WHERE ").append(whereCondition);
}
suffix.append(" FOR UPDATE");
final StringJoiner selectSQLAppender = new StringJoiner(", ", prefix, suffix.toString());
if (ONLY_CARE_UPDATE_COLUMNS) {
// always include the primary key columns so rows can be matched afterwards
if (!containsPK(new ArrayList<>(updateColumnsSet))) {
selectSQLAppender.add(getColumnNamesInSQL(tmeta.getEscapePkNameList(getDbType())));
}
for (String updateCol : updateColumnsSet) {
selectSQLAppender.add(updateCol);
}
} else {
for (String columnName : tmeta.getAllColumns().keySet()) {
selectSQLAppender.add(ColumnUtils.addEscape(columnName, getDbType()));
}
}
return buildTableRecords(tmeta, selectSQLAppender.toString(), paramAppenderList);
}
/**
 * Re-queries the previously captured rows by primary key after the updates ran.
 *
 * @param beforeImage the before image
 * @return the after image records
 * @throws SQLException the sql exception
 */
@Override
protected TableRecords afterImage(TableRecords beforeImage) throws SQLException {
if (sqlRecognizers.size() == 1) {
UpdateExecutor executor = new UpdateExecutor<>(statementProxy, statementCallback, sqlRecognizers.get(0));
return executor.afterImage(beforeImage);
}
// nothing was captured before, so there is nothing to re-query
if (beforeImage == null || beforeImage.size() == 0) {
return TableRecords.empty(getTableMeta(sqlRecognizers.get(0).getTableName()));
}
TableMeta tmeta = getTableMeta(sqlRecognizers.get(0).getTableName());
String selectSQL = buildAfterImageSQL(tmeta, beforeImage);
ResultSet rs = null;
try (PreparedStatement pst = statementProxy.getConnection().prepareStatement(selectSQL);) {
SqlGenerateUtils.setParamForPk(beforeImage.pkRows(), getTableMeta().getPrimaryKeyOnlyName(), pst);
rs = pst.executeQuery();
return TableRecords.buildRecords(tmeta, rs);
} finally {
IOUtil.close(rs);
}
}
// Builds "SELECT <pk + updated columns> FROM <table> WHERE <pk in (...)>"
// matching exactly the rows captured in the before image.
private String buildAfterImageSQL(TableMeta tableMeta, TableRecords beforeImage) throws SQLException {
Set<String> updateColumnsSet = new HashSet<>();
// collect the union of the updated columns over every statement of the group
for (SQLRecognizer recognizer : sqlRecognizers) {
sqlRecognizer = recognizer;
SQLUpdateRecognizer sqlUpdateRecognizer = (SQLUpdateRecognizer) sqlRecognizer;
updateColumnsSet.addAll(sqlUpdateRecognizer.getUpdateColumns());
}
StringBuilder prefix = new StringBuilder("SELECT ");
String suffix = " FROM " + getFromTableInSQL() + " WHERE " + SqlGenerateUtils.buildWhereConditionByPKs(tableMeta.getPrimaryKeyOnlyName(), beforeImage.pkRows().size(), getDbType());
StringJoiner selectSQLJoiner = new StringJoiner(", ", prefix.toString(), suffix);
if (ONLY_CARE_UPDATE_COLUMNS) {
if (!containsPK(new ArrayList<>(updateColumnsSet))) {
selectSQLJoiner.add(getColumnNamesInSQL(tableMeta.getEscapePkNameList(getDbType())));
}
for (String updateCol : updateColumnsSet) {
selectSQLJoiner.add(updateCol);
}
} else {
for (String columnName : tableMeta.getAllColumns().keySet()) {
selectSQLJoiner.add(ColumnUtils.addEscape(columnName, getDbType()));
}
}
return selectSQLJoiner.toString();
}
}

View File

@@ -0,0 +1,51 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.exec;
import java.sql.Statement;
import io.seata.rm.datasource.StatementProxy;
/**
 * The type Plain executor: runs the statement callback directly against the
 * target statement, with no before/after image processing at all.
 *
 * @author sharajava
 *
 * @param <T> the type parameter
 * @param <S> the type parameter
 */
public class PlainExecutor<T, S extends Statement> implements Executor<T> {

    /** Proxy providing the real JDBC statement to run against. */
    private final StatementProxy<S> statementProxy;

    /** Callback performing the actual JDBC call. */
    private final StatementCallback<T, S> statementCallback;

    /**
     * Instantiates a new Plain executor.
     *
     * @param statementProxy    the statement proxy
     * @param statementCallback the statement callback
     */
    public PlainExecutor(StatementProxy<S> statementProxy, StatementCallback<T, S> statementCallback) {
        this.statementProxy = statementProxy;
        this.statementCallback = statementCallback;
    }

    /**
     * Delegates straight to the callback with the underlying target statement.
     *
     * @param args the statement arguments
     * @return the callback's result
     * @throws Throwable any failure from the callback
     */
    @Override
    public T execute(Object... args) throws Throwable {
        return statementCallback.execute(statementProxy.getTargetStatement(), args);
    }
}

View File

@@ -0,0 +1,147 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.exec;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.SQLException;
import java.sql.Savepoint;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import io.seata.common.util.StringUtils;
import io.seata.core.context.RootContext;
import io.seata.rm.datasource.StatementProxy;
import io.seata.rm.datasource.sql.struct.TableRecords;
import io.seata.sqlparser.SQLRecognizer;
import io.seata.sqlparser.SQLSelectRecognizer;
import io.seata.sqlparser.util.JdbcConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * The type Select for update executor. Runs a SELECT ... FOR UPDATE and blocks
 * until the global lock on the selected rows can be acquired, retrying on lock
 * conflict while holding (or releasing via savepoint) the local db lock.
 *
 * @param <T> the type parameter
 * @param <S> the type parameter
 * @author sharajava
 */
public class SelectForUpdateExecutor<T, S extends Statement> extends BaseTransactionalExecutor<T, S> {
private static final Logger LOGGER = LoggerFactory.getLogger(SelectForUpdateExecutor.class);
/**
 * Instantiates a new Select for update executor.
 *
 * @param statementProxy the statement proxy
 * @param statementCallback the statement callback
 * @param sqlRecognizer the sql recognizer
 */
public SelectForUpdateExecutor(StatementProxy<S> statementProxy, StatementCallback<T, S> statementCallback,
SQLRecognizer sqlRecognizer) {
super(statementProxy, statementCallback, sqlRecognizer);
}
/**
 * Executes the statement and verifies the global lock of the selected rows,
 * retrying (after releasing the local lock) whenever the lock is held by
 * another global transaction.
 *
 * @param args the statement arguments
 * @return the raw result from the statement callback
 * @throws Throwable any failure from the callback or the lock check
 */
@Override
public T doExecute(Object... args) throws Throwable {
Connection conn = statementProxy.getConnection();
DatabaseMetaData dbmd = conn.getMetaData();
T rs; // assigned before every break below, so definitely set when the loop exits
Savepoint sp = null;
boolean originalAutoCommit = conn.getAutoCommit();
try {
if (originalAutoCommit) {
/*
 * In order to hold the local db lock during global lock checking
 * set auto commit value to false first if original auto commit was true
 */
conn.setAutoCommit(false);
} else if (dbmd.supportsSavepoints()) {
/*
 * In order to release the local db lock when global lock conflict
 * create a save point if original auto commit was false, then use the save point here to release db
 * lock during global lock checking if necessary
 */
sp = conn.setSavepoint();
} else {
throw new SQLException("not support savepoint. please check your db version");
}
LockRetryController lockRetryController = new LockRetryController();
ArrayList<List<Object>> paramAppenderList = new ArrayList<>();
String selectPKSQL = buildSelectSQL(paramAppenderList);
while (true) {
try {
// #870
// execute return Boolean
// executeQuery return ResultSet
rs = statementCallback.execute(statementProxy.getTargetStatement(), args);
// Try to get global lock of those rows selected
TableRecords selectPKRows = buildTableRecords(getTableMeta(), selectPKSQL, paramAppenderList);
String lockKeys = buildLockKey(selectPKRows);
if (StringUtils.isNullOrEmpty(lockKeys)) {
// no rows matched, hence nothing to lock
break;
}
if (RootContext.inGlobalTransaction() || RootContext.requireGlobalLock()) {
// Do the same thing under either @GlobalTransactional or @GlobalLock,
// that only check the global lock here.
statementProxy.getConnectionProxy().checkLock(lockKeys);
} else {
throw new RuntimeException("Unknown situation!");
}
break;
} catch (LockConflictException lce) {
// release the local db lock (rollback to the savepoint, or the whole
// local transaction) so other branches can proceed, then retry
if (sp != null) {
conn.rollback(sp);
} else {
conn.rollback();
}
// trigger retry
lockRetryController.sleep(lce);
}
}
} finally {
if (sp != null) {
try {
// releaseSavepoint is not supported on Oracle, which discards savepoints implicitly
if (!JdbcConstants.ORACLE.equalsIgnoreCase(getDbType())) {
conn.releaseSavepoint(sp);
}
} catch (SQLException e) {
LOGGER.error("{} release save point error.", getDbType(), e);
}
}
// restore the caller's original auto-commit mode
if (originalAutoCommit) {
conn.setAutoCommit(true);
}
}
return rs;
}
// Builds "SELECT <escaped pk columns> FROM <table> [WHERE ...] FOR UPDATE"
// used to discover the primary keys of the rows being locked.
private String buildSelectSQL(ArrayList<List<Object>> paramAppenderList) {
SQLSelectRecognizer recognizer = (SQLSelectRecognizer)sqlRecognizer;
StringBuilder selectSQLAppender = new StringBuilder("SELECT ");
selectSQLAppender.append(getColumnNamesInSQL(getTableMeta().getEscapePkNameList(getDbType())));
selectSQLAppender.append(" FROM ").append(getFromTableInSQL());
String whereCondition = buildWhereCondition(recognizer, paramAppenderList);
if (StringUtils.isNotBlank(whereCondition)) {
selectSQLAppender.append(" WHERE ").append(whereCondition);
}
selectSQLAppender.append(" FOR UPDATE");
return selectSQLAppender.toString();
}
}

View File

@@ -0,0 +1,40 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.exec;
import java.sql.SQLException;
import java.sql.Statement;
/**
 * The interface Statement callback: a single-method callback that performs the
 * actual JDBC operation on the target statement.
 *
 * @author sharajava
 *
 * @param <T> the type parameter
 * @param <S> the type parameter
 */
@FunctionalInterface
public interface StatementCallback<T, S extends Statement> {

    /**
     * Execute the raw JDBC operation on the given statement.
     *
     * @param statement the statement to run against
     * @param args      the args
     * @return the t
     * @throws SQLException the sql exception
     */
    T execute(S statement, Object... args) throws SQLException;
}

View File

@@ -0,0 +1,149 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.exec;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import java.util.StringJoiner;
import io.seata.common.util.IOUtil;
import io.seata.common.util.StringUtils;
import io.seata.config.Configuration;
import io.seata.config.ConfigurationFactory;
import io.seata.core.constants.ConfigurationKeys;
import io.seata.common.DefaultValues;
import io.seata.rm.datasource.ColumnUtils;
import io.seata.rm.datasource.SqlGenerateUtils;
import io.seata.rm.datasource.StatementProxy;
import io.seata.rm.datasource.sql.struct.TableMeta;
import io.seata.rm.datasource.sql.struct.TableRecords;
import io.seata.sqlparser.ParametersHolder;
import io.seata.sqlparser.SQLRecognizer;
import io.seata.sqlparser.SQLUpdateRecognizer;
/**
 * The type Update executor. Captures before/after images of the rows touched by
 * a single UPDATE statement so an undo log can be prepared.
 *
 * @param <T> the type parameter
 * @param <S> the type parameter
 * @author sharajava
 */
public class UpdateExecutor<T, S extends Statement> extends AbstractDMLBaseExecutor<T, S> {
private static final Configuration CONFIG = ConfigurationFactory.getInstance();
// When true, the images record only the primary key plus the updated columns
// instead of every column of the table.
private static final boolean ONLY_CARE_UPDATE_COLUMNS = CONFIG.getBoolean(
ConfigurationKeys.TRANSACTION_UNDO_ONLY_CARE_UPDATE_COLUMNS, DefaultValues.DEFAULT_ONLY_CARE_UPDATE_COLUMNS);
/**
 * Instantiates a new Update executor.
 *
 * @param statementProxy the statement proxy
 * @param statementCallback the statement callback
 * @param sqlRecognizer the sql recognizer
 */
public UpdateExecutor(StatementProxy<S> statementProxy, StatementCallback<T, S> statementCallback,
SQLRecognizer sqlRecognizer) {
super(statementProxy, statementCallback, sqlRecognizer);
}
/**
 * Selects (FOR UPDATE) the rows the UPDATE will modify, before it runs.
 *
 * @return the before image records
 * @throws SQLException the sql exception
 */
@Override
protected TableRecords beforeImage() throws SQLException {
ArrayList<List<Object>> paramAppenderList = new ArrayList<>();
TableMeta tmeta = getTableMeta();
String selectSQL = buildBeforeImageSQL(tmeta, paramAppenderList);
return buildTableRecords(tmeta, selectSQL, paramAppenderList);
}
// Builds the locking SELECT mirroring the UPDATE's WHERE / ORDER BY / LIMIT clauses.
private String buildBeforeImageSQL(TableMeta tableMeta, ArrayList<List<Object>> paramAppenderList) {
SQLUpdateRecognizer recognizer = (SQLUpdateRecognizer) sqlRecognizer;
List<String> updateColumns = recognizer.getUpdateColumns();
StringBuilder prefix = new StringBuilder("SELECT ");
StringBuilder suffix = new StringBuilder(" FROM ").append(getFromTableInSQL());
String whereCondition = buildWhereCondition(recognizer, paramAppenderList);
if (StringUtils.isNotBlank(whereCondition)) {
suffix.append(WHERE).append(whereCondition);
}
String orderBy = recognizer.getOrderBy();
// NOTE(review): orderBy/limit are appended verbatim — presumably the recognizer
// returns them with their leading keyword and whitespace; confirm against the parser.
if (StringUtils.isNotBlank(orderBy)) {
suffix.append(orderBy);
}
ParametersHolder parametersHolder = statementProxy instanceof ParametersHolder ? (ParametersHolder)statementProxy : null;
String limit = recognizer.getLimit(parametersHolder, paramAppenderList);
if (StringUtils.isNotBlank(limit)) {
suffix.append(limit);
}
suffix.append(" FOR UPDATE");
StringJoiner selectSQLJoin = new StringJoiner(", ", prefix.toString(), suffix.toString());
if (ONLY_CARE_UPDATE_COLUMNS) {
// always include the primary key columns so rows can be matched in the after image
if (!containsPK(updateColumns)) {
selectSQLJoin.add(getColumnNamesInSQL(tableMeta.getEscapePkNameList(getDbType())));
}
for (String columnName : updateColumns) {
selectSQLJoin.add(columnName);
}
} else {
for (String columnName : tableMeta.getAllColumns().keySet()) {
selectSQLJoin.add(ColumnUtils.addEscape(columnName, getDbType()));
}
}
return selectSQLJoin.toString();
}
/**
 * Re-queries the rows captured in the before image, by primary key, after the
 * UPDATE has run.
 *
 * @param beforeImage the before image
 * @return the after image records
 * @throws SQLException the sql exception
 */
@Override
protected TableRecords afterImage(TableRecords beforeImage) throws SQLException {
TableMeta tmeta = getTableMeta();
// nothing was captured before, so there is nothing to re-query
if (beforeImage == null || beforeImage.size() == 0) {
return TableRecords.empty(getTableMeta());
}
String selectSQL = buildAfterImageSQL(tmeta, beforeImage);
ResultSet rs = null;
try (PreparedStatement pst = statementProxy.getConnection().prepareStatement(selectSQL)) {
SqlGenerateUtils.setParamForPk(beforeImage.pkRows(), getTableMeta().getPrimaryKeyOnlyName(), pst);
rs = pst.executeQuery();
return TableRecords.buildRecords(tmeta, rs);
} finally {
IOUtil.close(rs);
}
}
// Builds "SELECT <pk + updated columns> FROM <table> WHERE <pk in (...)>".
private String buildAfterImageSQL(TableMeta tableMeta, TableRecords beforeImage) throws SQLException {
StringBuilder prefix = new StringBuilder("SELECT ");
String whereSql = SqlGenerateUtils.buildWhereConditionByPKs(tableMeta.getPrimaryKeyOnlyName(), beforeImage.pkRows().size(), getDbType());
String suffix = " FROM " + getFromTableInSQL() + " WHERE " + whereSql;
StringJoiner selectSQLJoiner = new StringJoiner(", ", prefix.toString(), suffix);
if (ONLY_CARE_UPDATE_COLUMNS) {
SQLUpdateRecognizer recognizer = (SQLUpdateRecognizer) sqlRecognizer;
List<String> updateColumns = recognizer.getUpdateColumns();
if (!containsPK(updateColumns)) {
selectSQLJoiner.add(getColumnNamesInSQL(tableMeta.getEscapePkNameList(getDbType())));
}
for (String columnName : updateColumns) {
selectSQLJoiner.add(columnName);
}
} else {
for (String columnName : tableMeta.getAllColumns().keySet()) {
selectSQLJoiner.add(ColumnUtils.addEscape(columnName, getDbType()));
}
}
return selectSQLJoiner.toString();
}
}

View File

@@ -0,0 +1,229 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.exec.mysql;
import io.seata.common.exception.NotSupportYetException;
import io.seata.common.exception.ShouldNeverHappenException;
import io.seata.common.loader.LoadLevel;
import io.seata.common.loader.Scope;
import io.seata.common.util.StringUtils;
import io.seata.rm.datasource.StatementProxy;
import io.seata.rm.datasource.exec.BaseInsertExecutor;
import io.seata.rm.datasource.exec.StatementCallback;
import io.seata.rm.datasource.sql.struct.ColumnMeta;
import io.seata.sqlparser.SQLRecognizer;
import io.seata.sqlparser.struct.Defaultable;
import io.seata.sqlparser.struct.Null;
import io.seata.sqlparser.struct.SqlMethodExpr;
import io.seata.sqlparser.util.JdbcConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.math.BigDecimal;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.HashSet;
import java.util.Set;
import java.util.ArrayList;
import java.util.concurrent.ConcurrentHashMap;
/**
 * The type MySQL insert executor.
 *
 * @author jsbxyyx
 */
@LoadLevel(name = JdbcConstants.MYSQL, scope = Scope.PROTOTYPE)
public class MySQLInsertExecutor extends BaseInsertExecutor implements Defaultable {

    private static final Logger LOGGER = LoggerFactory.getLogger(MySQLInsertExecutor.class);

    /**
     * the modify for test
     */
    public static final String ERR_SQL_STATE = "S1009";

    /**
     * The cache of the auto increment step of the database.
     * The key is the db's resource id, the value is the step.
     */
    public static final Map<String, BigDecimal> RESOURCE_ID_STEP_CACHE = new ConcurrentHashMap<>(8);

    /**
     * Instantiates a new MySQL insert executor.
     *
     * @param statementProxy    the statement proxy
     * @param statementCallback the statement callback
     * @param sqlRecognizer     the sql recognizer
     */
    public MySQLInsertExecutor(StatementProxy statementProxy, StatementCallback statementCallback,
                               SQLRecognizer sqlRecognizer) {
        super(statementProxy, statementCallback, sqlRecognizer);
    }

    /**
     * Resolves the primary key values used or generated by the INSERT.
     *
     * @return map from pk column name to its values
     * @throws SQLException the sql exception
     */
    @Override
    public Map<String, List<Object>> getPkValues() throws SQLException {
        Map<String, List<Object>> pkValuesMap = null;
        List<String> pkColumnNameList = getTableMeta().getPrimaryKeyOnlyName();
        Boolean isContainsPk = containsPK();
        // when there is only one pk in the table
        if (pkColumnNameList.size() == 1) {
            if (isContainsPk) {
                // the insert statement names the pk column explicitly
                pkValuesMap = getPkValuesByColumn();
            } else if (containsColumns()) {
                // a column list is present but omits the pk -> value is auto-generated
                pkValuesMap = getPkValuesByAuto();
            } else {
                // no column list: positional values include the pk
                pkValuesMap = getPkValuesByColumn();
            }
        } else {
            // when there are multiple pks in the table:
            // 1, all pk columns are filled value.
            // 2, the auto increment pk column value is null, and other pk value are not null.
            pkValuesMap = getPkValuesByColumn();
            for (String columnName : pkColumnNameList) {
                if (!pkValuesMap.containsKey(columnName)) {
                    ColumnMeta pkColumnMeta = getTableMeta().getColumnMeta(columnName);
                    if (Objects.nonNull(pkColumnMeta) && pkColumnMeta.isAutoincrement()) {
                        // 3, the auto increment pk column is not exits in sql, and other pk are exits also the value is not null.
                        pkValuesMap.putAll(getPkValuesByAuto());
                    }
                }
            }
        }
        return pkValuesMap;
    }

    /**
     * Reads the auto-generated key values, falling back to
     * {@code SELECT LAST_INSERT_ID()} when the statement was not created with
     * {@code Statement.RETURN_GENERATED_KEYS}.
     * the modify for test
     *
     * @return map from the auto-increment pk column name to the generated values
     * @throws SQLException the sql exception
     */
    public Map<String, List<Object>> getPkValuesByAuto() throws SQLException {
        // PK is just auto generated
        Map<String, List<Object>> pkValuesMap = new HashMap<>(8);
        Map<String, ColumnMeta> pkMetaMap = getTableMeta().getPrimaryKeyMap();
        String autoColumnName = null;
        for (Map.Entry<String, ColumnMeta> entry : pkMetaMap.entrySet()) {
            if (entry.getValue().isAutoincrement()) {
                autoColumnName = entry.getKey();
                break;
            }
        }
        if (StringUtils.isBlank(autoColumnName)) {
            // this path is only taken when an auto-increment pk is expected to exist
            throw new ShouldNeverHappenException();
        }
        ResultSet genKeys;
        try {
            genKeys = statementProxy.getGeneratedKeys();
        } catch (SQLException e) {
            // java.sql.SQLException: Generated keys not requested. You need to
            // specify Statement.RETURN_GENERATED_KEYS to
            // Statement.executeUpdate() or Connection.prepareStatement().
            if (ERR_SQL_STATE.equalsIgnoreCase(e.getSQLState())) {
                LOGGER.error("Fail to get auto-generated keys, use 'SELECT LAST_INSERT_ID()' instead. Be cautious, " +
                    "statement could be polluted. Recommend you set the statement to return generated keys.");
                int updateCount = statementProxy.getUpdateCount();
                ResultSet firstId = genKeys = statementProxy.getTargetStatement().executeQuery("SELECT LAST_INSERT_ID()");
                // If there is batch insert
                // do auto increment base LAST_INSERT_ID and variable `auto_increment_increment`
                if (updateCount > 1 && canAutoIncrement(pkMetaMap)) {
                    firstId.next();
                    return autoGeneratePks(new BigDecimal(firstId.getString(1)), autoColumnName, updateCount);
                }
            } else {
                throw e;
            }
        }
        // NOTE(review): genKeys is deliberately left open — the generated-keys result
        // set belongs to the statement and is rewound below for later readers; confirm
        // the LAST_INSERT_ID fallback result set does not leak.
        List<Object> pkValues = new ArrayList<>();
        while (genKeys.next()) {
            pkValues.add(genKeys.getObject(1));
        }
        try {
            // rewind so later readers of the generated-keys result set are unaffected
            genKeys.beforeFirst();
        } catch (SQLException e) {
            LOGGER.warn("Fail to reset ResultSet cursor. can not get primary key value");
        }
        pkValuesMap.put(autoColumnName, pkValues);
        return pkValuesMap;
    }

    /**
     * Parses the pk values from the statement itself, delegating to
     * {@link #getPkValuesByAuto()} for expression or null placeholders.
     *
     * @return map from pk column name to its values
     * @throws SQLException the sql exception
     */
    @Override
    public Map<String, List<Object>> getPkValuesByColumn() throws SQLException {
        Map<String, List<Object>> pkValuesMap = parsePkValuesFromStatement();
        // copy the key set: getPkValuesByAuto() may add entries while we iterate
        Set<String> keySet = new HashSet<>(pkValuesMap.keySet());
        for (String pkKey : keySet) {
            List<Object> pkValues = pkValuesMap.get(pkKey);
            // pk auto generated while single insert primary key is expression
            if (pkValues.size() == 1 && (pkValues.get(0) instanceof SqlMethodExpr)) {
                pkValuesMap.putAll(getPkValuesByAuto());
            }
            // pk auto generated while column exists and value is null
            else if (!pkValues.isEmpty() && pkValues.get(0) instanceof Null) {
                pkValuesMap.putAll(getPkValuesByAuto());
            }
        }
        return pkValuesMap;
    }

    @Override
    public List<Object> getPkValuesByDefault() throws SQLException {
        // mysql default keyword the logic not support. (sample: insert into test(id, name) values(default, 'xx'))
        throw new NotSupportYetException();
    }

    /**
     * Derives the key values of a batch insert from the first generated id and
     * the database's {@code auto_increment_increment} step.
     *
     * @param cursor         the first generated key (LAST_INSERT_ID)
     * @param autoColumnName the auto-increment pk column name
     * @param updateCount    the number of inserted rows
     * @return map from the pk column name to the derived key values
     * @throws SQLException if querying the increment step fails
     */
    protected Map<String, List<Object>> autoGeneratePks(BigDecimal cursor, String autoColumnName, Integer updateCount) throws SQLException {
        String resourceId = statementProxy.getConnectionProxy().getDataSourceProxy().getResourceId();
        BigDecimal step = RESOURCE_ID_STEP_CACHE.get(resourceId);
        if (step == null) {
            // query once per datasource and cache; close the result set to avoid a leak
            try (ResultSet increment = statementProxy.getTargetStatement().executeQuery("SHOW VARIABLES LIKE 'auto_increment_increment'")) {
                increment.next();
                step = new BigDecimal(increment.getString(2));
            }
            RESOURCE_ID_STEP_CACHE.put(resourceId, step);
        }
        List<Object> pkValues = new ArrayList<>(updateCount);
        for (int i = 0; i < updateCount; i++) {
            pkValues.add(cursor);
            cursor = cursor.add(step);
        }
        Map<String, List<Object>> pkValuesMap = new HashMap<>(1, 1.001f);
        pkValuesMap.put(autoColumnName, pkValues);
        return pkValuesMap;
    }

    /**
     * Whether keys can be derived arithmetically: requires exactly one pk column
     * and it must be auto-increment.
     *
     * @param primaryKeyMap the table's primary key metadata
     * @return true if arithmetic derivation is possible
     */
    protected boolean canAutoIncrement(Map<String, ColumnMeta> primaryKeyMap) {
        if (primaryKeyMap.size() != 1) {
            return false;
        }
        for (ColumnMeta pk : primaryKeyMap.values()) {
            return pk.isAutoincrement();
        }
        return false;
    }
}

View File

@@ -0,0 +1,99 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.exec.oracle;
import io.seata.common.exception.NotSupportYetException;
import io.seata.common.loader.LoadLevel;
import io.seata.common.loader.Scope;
import io.seata.rm.datasource.StatementProxy;
import io.seata.rm.datasource.exec.BaseInsertExecutor;
import io.seata.rm.datasource.exec.StatementCallback;
import io.seata.sqlparser.SQLRecognizer;
import io.seata.sqlparser.struct.Null;
import io.seata.sqlparser.struct.Sequenceable;
import io.seata.sqlparser.struct.SqlMethodExpr;
import io.seata.sqlparser.struct.SqlSequenceExpr;
import io.seata.sqlparser.util.JdbcConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.SQLException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
/**
 * The type Oracle insert executor.
 *
 * @author jsbxyyx
 */
@LoadLevel(name = JdbcConstants.ORACLE, scope = Scope.PROTOTYPE)
public class OracleInsertExecutor extends BaseInsertExecutor implements Sequenceable {

    private static final Logger LOGGER = LoggerFactory.getLogger(OracleInsertExecutor.class);

    /**
     * Instantiates a new Oracle insert executor.
     *
     * @param statementProxy    the statement proxy
     * @param statementCallback the statement callback
     * @param sqlRecognizer     the sql recognizer
     */
    public OracleInsertExecutor(StatementProxy statementProxy, StatementCallback statementCallback,
                                SQLRecognizer sqlRecognizer) {
        super(statementProxy, statementCallback, sqlRecognizer);
    }

    /**
     * Resolves the primary key values used by the INSERT.
     *
     * @return map from pk column name to its values
     * @throws SQLException the sql exception
     */
    @Override
    public Map<String, List<Object>> getPkValues() throws SQLException {
        // pk named in the column list -> read the values straight from the statement
        if (containsPK()) {
            return getPkValuesByColumn();
        }
        // a column list is present but without the pk -> the driver generated the keys
        if (containsColumns()) {
            String pkColumnName = getTableMeta().getPrimaryKeyOnlyName().get(0);
            return Collections.singletonMap(pkColumnName, getGeneratedKeys());
        }
        // no column list at all -> positional values include the pk
        return getPkValuesByColumn();
    }

    /**
     * Reads the pk values from the statement itself, resolving sequence and
     * function expressions where possible.
     *
     * @return map from pk column name to its values
     * @throws SQLException the sql exception
     */
    @Override
    public Map<String, List<Object>> getPkValuesByColumn() throws SQLException {
        Map<String, List<Object>> pkValuesMap = parsePkValuesFromStatement();
        String pkColumn = pkValuesMap.keySet().iterator().next();
        List<Object> values = pkValuesMap.get(pkColumn);
        Object head = values.isEmpty() ? null : values.get(0);
        if (head instanceof SqlSequenceExpr) {
            // a sequence expression: fetch the generated values from the sequence
            pkValuesMap.put(pkColumn, getPkValuesBySequence((SqlSequenceExpr) head));
        } else if (values.size() == 1 && head instanceof SqlMethodExpr) {
            // a function call produced the key -> rely on getGeneratedKeys()
            pkValuesMap.put(pkColumn, getGeneratedKeys());
        } else if (values.size() == 1 && head instanceof Null) {
            throw new NotSupportYetException("oracle not support null");
        }
        return pkValuesMap;
    }

    @Override
    public String getSequenceSql(SqlSequenceExpr expr) {
        return String.format("SELECT %s.currval FROM DUAL", expr.getSequence());
    }
}

View File

@@ -0,0 +1,121 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.exec.postgresql;
import io.seata.common.exception.ShouldNeverHappenException;
import io.seata.common.loader.LoadLevel;
import io.seata.common.loader.Scope;
import io.seata.common.util.StringUtils;
import io.seata.rm.datasource.StatementProxy;
import io.seata.rm.datasource.exec.BaseInsertExecutor;
import io.seata.rm.datasource.exec.StatementCallback;
import io.seata.rm.datasource.sql.struct.ColumnMeta;
import io.seata.sqlparser.SQLRecognizer;
import io.seata.sqlparser.struct.Defaultable;
import io.seata.sqlparser.struct.Sequenceable;
import io.seata.sqlparser.struct.SqlMethodExpr;
import io.seata.sqlparser.struct.SqlSequenceExpr;
import io.seata.sqlparser.struct.SqlDefaultExpr;
import io.seata.sqlparser.util.JdbcConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.SQLException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
/**
 * The type Postgresql insert executor.
 *
 * <p>Resolves the primary-key values of an INSERT so the before/after image
 * of the row can be built for AT-mode undo logs. Also resolves pk values for
 * {@code DEFAULT}-valued pk columns backed by a sequence.
 *
 * @author jsbxyyx
 */
@LoadLevel(name = JdbcConstants.POSTGRESQL, scope = Scope.PROTOTYPE)
public class PostgresqlInsertExecutor extends BaseInsertExecutor implements Sequenceable, Defaultable {

    private static final Logger LOGGER = LoggerFactory.getLogger(PostgresqlInsertExecutor.class);

    /**
     * Instantiates a new Postgresql insert executor.
     *
     * @param statementProxy    the statement proxy
     * @param statementCallback the statement callback
     * @param sqlRecognizer    the sql recognizer
     */
    public PostgresqlInsertExecutor(StatementProxy statementProxy, StatementCallback statementCallback,
                                    SQLRecognizer sqlRecognizer) {
        super(statementProxy, statementCallback, sqlRecognizer);
    }

    /**
     * Resolve the primary-key values used by this INSERT, keyed by pk column name.
     *
     * @return map of pk column name to the inserted pk values
     * @throws SQLException if reading generated keys or parsing fails
     */
    @Override
    public Map<String, List<Object>> getPkValues() throws SQLException {
        // primitive boolean: no reason to autobox the flag (was Boolean)
        boolean containsPk = containsPK();
        // when there is only one pk in the table
        if (containsPk) {
            // the INSERT lists the pk column explicitly: parse values from the SQL itself
            return getPkValuesByColumn();
        }
        if (containsColumns()) {
            // a column list exists but omits the pk: the value must come from generated keys
            String columnName = getTableMeta().getPrimaryKeyOnlyName().get(0);
            return Collections.singletonMap(columnName, getGeneratedKeys());
        }
        // no column list at all: fall back to positional parsing of the statement
        return getPkValuesByColumn();
    }

    /**
     * Parse pk values out of the statement text and resolve expression forms
     * (sequence reference, SQL function call, DEFAULT keyword) to concrete values.
     *
     * @return map of pk column name to resolved pk values
     * @throws SQLException if resolving the values fails
     */
    @Override
    public Map<String, List<Object>> getPkValuesByColumn() throws SQLException {
        Map<String, List<Object>> pkValuesMap = parsePkValuesFromStatement();
        String pkKey = pkValuesMap.keySet().iterator().next();
        List<Object> pkValues = pkValuesMap.get(pkKey);
        if (!pkValues.isEmpty() && pkValues.get(0) instanceof SqlSequenceExpr) {
            pkValuesMap.put(pkKey, getPkValuesBySequence((SqlSequenceExpr) pkValues.get(0)));
        } else if (!pkValues.isEmpty() && pkValues.get(0) instanceof SqlMethodExpr) {
            pkValuesMap.put(pkKey, getGeneratedKeys());
        } else if (!pkValues.isEmpty() && pkValues.get(0) instanceof SqlDefaultExpr) {
            pkValuesMap.put(pkKey, getPkValuesByDefault());
        }
        return pkValuesMap;
    }

    /**
     * Get primary key values when the statement used the DEFAULT keyword, by
     * extracting the backing sequence from the pk column's default expression.
     *
     * @return the pk values read from the sequence
     * @throws SQLException if the sequence cannot be queried
     * @throws ShouldNeverHappenException if no sequence name is found in the column default
     */
    @Override
    public List<Object> getPkValuesByDefault() throws SQLException {
        // current version 1.2 only support postgresql.
        Map<String, ColumnMeta> pkMetaMap = getTableMeta().getPrimaryKeyMap();
        ColumnMeta pkMeta = pkMetaMap.values().iterator().next();
        String columnDef = pkMeta.getColumnDef();
        // sample columnDef: nextval('test_id_seq'::regclass)
        // seq      -> the quoted sequence name, e.g. test_id_seq
        // function -> the text before '(', e.g. nextval
        String seq = org.apache.commons.lang.StringUtils.substringBetween(columnDef, "'", "'");
        String function = org.apache.commons.lang.StringUtils.substringBetween(columnDef, "", "(");
        if (StringUtils.isBlank(seq)) {
            throw new ShouldNeverHappenException("get primary key value failed, cause columnDef is " + columnDef);
        }
        // re-quote the sequence name so getSequenceSql emits currval('seq_name')
        return getPkValuesBySequence(new SqlSequenceExpr("'" + seq + "'", function));
    }

    /**
     * Build the SQL that reads the current value of the given sequence.
     * The sequence string is expected to already carry its quotes
     * (see {@link #getPkValuesByDefault()}).
     *
     * @param expr the parsed sequence expression
     * @return the currval query for the sequence
     */
    @Override
    public String getSequenceSql(SqlSequenceExpr expr) {
        return "SELECT currval(" + expr.getSequence() + ")";
    }
}

View File

@@ -0,0 +1,36 @@
package io.seata.rm.datasource.extend;
import io.seata.core.exception.TransactionException;
import io.seata.rm.datasource.ConnectionProxy;
import java.sql.Connection;
import java.sql.SQLException;
/**
 * Seata custom extension manager (FTB-specific hook points).
 *
 * <p>Implementations are discovered via Seata's SPI mechanism; see
 * {@code FtbSeataExtendManagerHolder}. Both hooks operate on undo logs
 * bound to a specific JDBC connection.
 *
 * @author wangchunxiang
 * @date 2024/08/07
 */
public interface FtbSeataExtendManager {
    /**
     * Flush the undo logs buffered on the given connection proxy for the
     * given global transaction.
     * NOTE(review): exact flush semantics (write vs. write-and-clear) depend
     * on the implementation — confirm against the SPI provider.
     *
     * @param cp  the connection proxy holding the undo logs
     * @param xid the global transaction id the undo logs belong to
     * @throws SQLException if writing the undo logs fails
     */
    void flushUndoLogsConnection(ConnectionProxy cp,String xid) throws SQLException;
    /**
     * Process the undo log of the given global transaction on the supplied
     * plain JDBC connection (presumably deletion/cleanup — confirm with the
     * SPI provider).
     *
     * @param xid  the global transaction id
     * @param conn the JDBC connection to operate on
     * @throws TransactionException if undo-log processing fails
     */
    void undoLogConnection(String xid, Connection conn) throws TransactionException;
}

View File

@@ -0,0 +1,73 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.extend;
import io.seata.common.exception.ShouldNeverHappenException;
import io.seata.common.loader.EnhancedServiceLoader;
import io.seata.core.model.TransactionManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Holder giving lazy singleton access to the SPI-loaded
 * {@link FtbSeataExtendManager} implementation.
 *
 * <p>Uses the initialization-on-demand holder idiom: the JVM guarantees the
 * nested class's static initializer runs exactly once, on first access.
 *
 * @author wangchunxiang
 * @date 2024/08/07
 */
public class FtbSeataExtendManagerHolder {

    // log under this holder's own class (was FtbSeataExtendManager.class, which
    // mislabeled every log line with the interface name)
    private static final Logger LOGGER = LoggerFactory.getLogger(FtbSeataExtendManagerHolder.class);

    private static class SingletonHolder {
        // non-final so set(...) can swap in a mock for tests.
        // NOTE(review): writes via set(...) after class init are not guaranteed
        // visible to other threads (field is not volatile) — acceptable for
        // test usage, confirm no production code calls set(...).
        private static FtbSeataExtendManager INSTANCE = null;

        static {
            try {
                INSTANCE = EnhancedServiceLoader.load(FtbSeataExtendManager.class);
                LOGGER.info("FtbSeataExtendManager Singleton {}", INSTANCE);
            } catch (Throwable anyEx) {
                // deliberately swallowed: leave INSTANCE null so get() fails fast later
                LOGGER.error("Failed to load FtbSeataExtendManager Singleton! ", anyEx);
            }
        }
    }

    /**
     * Get the FtbSeataExtend manager singleton.
     *
     * @return the loaded FtbSeataExtendManager, never null
     * @throws ShouldNeverHappenException if SPI loading failed at init time
     */
    public static FtbSeataExtendManager get() {
        if (SingletonHolder.INSTANCE == null) {
            throw new ShouldNeverHappenException("FtbSeataExtendManager is NOT ready!");
        }
        return SingletonHolder.INSTANCE;
    }

    /**
     * Replace the singleton instance.
     *
     * @param mock commonly used for test mocking
     */
    public static void set(FtbSeataExtendManager mock) {
        SingletonHolder.INSTANCE = mock;
    }

    /**
     * Static holder: no instances.
     */
    private FtbSeataExtendManagerHolder() {
    }
}

View File

@@ -0,0 +1,53 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.sql;
import io.seata.common.loader.EnhancedServiceLoader;
import io.seata.config.ConfigurationFactory;
import io.seata.core.constants.ConfigurationKeys;
import io.seata.sqlparser.SQLRecognizer;
import io.seata.sqlparser.SQLRecognizerFactory;
import io.seata.sqlparser.SqlParserType;
import java.util.List;
/**
 * Static factory that turns SQL text into {@link SQLRecognizer}s through the
 * SQL-parser SPI selected by configuration (defaults to the druid parser).
 *
 * @author ggndnn
 */
public class SQLVisitorFactory {
    /**
     * SQLRecognizerFactory implementation, chosen once at class-init time
     * from the {@code client.sqlParserType} configuration entry.
     */
    private static final SQLRecognizerFactory SQL_RECOGNIZER_FACTORY;

    static {
        String sqlParserType = ConfigurationFactory.getInstance()
            .getConfig(ConfigurationKeys.SQL_PARSER_TYPE, SqlParserType.SQL_PARSER_TYPE_DRUID);
        SQL_RECOGNIZER_FACTORY = EnhancedServiceLoader.load(SQLRecognizerFactory.class, sqlParserType);
    }

    /**
     * Utility class: no instances.
     */
    private SQLVisitorFactory() {
    }

    /**
     * Get sql recognizers for the given SQL text.
     *
     * @param sql    the sql
     * @param dbType the db type
     * @return the sql recognizers
     */
    public static List<SQLRecognizer> get(String sql, String dbType) {
        return SQL_RECOGNIZER_FACTORY.create(sql, dbType);
    }
}

View File

@@ -0,0 +1,189 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.sql.serial;
import javax.sql.rowset.serial.SerialBlob;
import javax.sql.rowset.serial.SerialClob;
import javax.sql.rowset.serial.SerialDatalink;
import javax.sql.rowset.serial.SerialException;
import javax.sql.rowset.serial.SerialJavaObject;
import java.net.URL;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.Map;
/**
* used for jdbc type is JDBCType.ARRAY serialize.
*
* @author jsbxyyx
*/
public class SerialArray implements java.sql.Array, java.io.Serializable {
static final long serialVersionUID = 1L;
private Object[] elements;
private int baseType;
private String baseTypeName;
private int len;
public SerialArray() {
}
public SerialArray(java.sql.Array array) throws SerialException, SQLException {
if (array == null) {
throw new SQLException("Cannot instantiate a SerialArray " +
"object with a null Array object");
}
if ((elements = (Object[]) array.getArray()) == null) {
throw new SQLException("Invalid Array object. Calls to Array.getArray() " +
"return null value which cannot be serialized");
}
baseType = array.getBaseType();
baseTypeName = array.getBaseTypeName();
len = elements.length;
switch (baseType) {
case java.sql.Types.BLOB:
for (int i = 0; i < len; i++) {
elements[i] = new SerialBlob((Blob) elements[i]);
}
break;
case java.sql.Types.CLOB:
for (int i = 0; i < len; i++) {
elements[i] = new SerialClob((Clob) elements[i]);
}
break;
case java.sql.Types.DATALINK:
for (int i = 0; i < len; i++) {
elements[i] = new SerialDatalink((URL) elements[i]);
}
break;
case java.sql.Types.JAVA_OBJECT:
for (int i = 0; i < len; i++) {
elements[i] = new SerialJavaObject(elements[i]);
}
break;
default:
break;
}
}
@Override
public String getBaseTypeName() throws SQLException {
return baseTypeName;
}
public void setBaseTypeName(String baseTypeName) {
this.baseTypeName = baseTypeName;
}
@Override
public int getBaseType() throws SQLException {
return baseType;
}
public void setBaseType(int baseType) {
this.baseType = baseType;
}
@Override
public Object getArray() throws SQLException {
return elements;
}
@Override
public Object getArray(Map<String, Class<?>> map) throws SQLException {
return elements;
}
@Override
public Object getArray(long index, int count) throws SQLException {
return elements;
}
@Override
public Object getArray(long index, int count, Map<String, Class<?>> map) throws SQLException {
return elements;
}
@Override
public ResultSet getResultSet() throws SQLException {
// don't throws exception.
return null;
}
@Override
public ResultSet getResultSet(Map<String, Class<?>> map) throws SQLException {
// don't throws exception.
return null;
}
@Override
public ResultSet getResultSet(long index, int count) throws SQLException {
// don't throws exception.
return null;
}
@Override
public ResultSet getResultSet(long index, int count, Map<String, Class<?>> map) throws SQLException {
// don't throws exception.
return null;
}
@Override
public void free() throws SQLException {
if (elements != null) {
elements = null;
baseTypeName = null;
}
}
public Object[] getElements() {
return elements;
}
public void setElements(Object[] elements) {
this.elements = elements;
this.len = elements != null ? elements.length : 0;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof SerialArray) {
SerialArray sa = (SerialArray) obj;
return baseType == sa.baseType &&
baseTypeName.equals(sa.baseTypeName) &&
Arrays.equals(elements, sa.elements);
}
return false;
}
@Override
public int hashCode() {
return (((31 + Arrays.hashCode(elements)) * 31 + len) * 31 +
baseType) * 31 + baseTypeName.hashCode();
}
}

View File

@@ -0,0 +1,496 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.sql.struct;
import java.util.Objects;
/**
* The type Column meta.
*
* @author sharajava
*/
public class ColumnMeta {
private String tableCat;
private String tableSchemaName;
private String tableName;
private String columnName;
private int dataType;
private String dataTypeName;
private int columnSize;
private int decimalDigits;
private int numPrecRadix;
private int nullAble;
private String remarks;
private String columnDef;
private int sqlDataType;
private int sqlDatetimeSub;
private Object charOctetLength;
private int ordinalPosition;
private String isNullAble;
private String isAutoincrement;
/**
* Instantiates a new Column meta.
*/
public ColumnMeta() {
}
@Override
public String toString() {
return "ColumnMeta{" +
"tableCat='" + tableCat + '\'' +
", tableSchemaName='" + tableSchemaName + '\'' +
", tableName='" + tableName + '\'' +
", columnName='" + columnName + '\'' +
", dataType=" + dataType +
", dataTypeName='" + dataTypeName + '\'' +
", columnSize=" + columnSize +
", decimalDigits=" + decimalDigits +
", numPrecRadix=" + numPrecRadix +
", nullAble=" + nullAble +
", remarks='" + remarks + '\'' +
", columnDef='" + columnDef + '\'' +
", sqlDataType=" + sqlDataType +
", sqlDatetimeSub=" + sqlDatetimeSub +
", charOctetLength=" + charOctetLength +
", ordinalPosition=" + ordinalPosition +
", isNullAble='" + isNullAble + '\'' +
", isAutoincrement='" + isAutoincrement + '\'' +
'}';
}
/**
* Is autoincrement boolean.
*
* @return the boolean
*/
public boolean isAutoincrement() {
return "YES".equalsIgnoreCase(isAutoincrement);
}
/**
* Gets table cat.
*
* @return the table cat
*/
public String getTableCat() {
return tableCat;
}
/**
* Sets table cat.
*
* @param tableCat the table cat
*/
public void setTableCat(String tableCat) {
this.tableCat = tableCat;
}
/**
* Sets table schema name.
*
* @param tableSchemaName the table schema name
*/
public void setTableSchemaName(String tableSchemaName) {
this.tableSchemaName = tableSchemaName;
}
/**
* Gets table schema name
*
* @return table schema name
*/
protected String getTableSchemaName() {
return tableSchemaName;
}
/**
* Sets table name.
*
* @param tableName the table name
*/
public void setTableName(String tableName) {
this.tableName = tableName;
}
/**
* Gets table name
*
* @return table name
*/
protected String getTableName() {
return tableName;
}
/**
* Gets column name.
*
* @return the column name
*/
public String getColumnName() {
return columnName;
}
/**
* Sets column name.
*
* @param columnName the column name
*/
public void setColumnName(String columnName) {
this.columnName = columnName;
}
/**
* Gets data type.
*
* @return the data type
*/
public int getDataType() {
return dataType;
}
/**
* Sets data type.
*
* @param dataType the data type
*/
public void setDataType(int dataType) {
this.dataType = dataType;
}
/**
* Gets data type name.
*
* @return the data type name
*/
public String getDataTypeName() {
return dataTypeName;
}
/**
* Sets data type name.
*
* @param dataTypeName the data type name
*/
public void setDataTypeName(String dataTypeName) {
this.dataTypeName = dataTypeName;
}
/**
* Gets column size.
*
* @return the column size
*/
public int getColumnSize() {
return columnSize;
}
/**
* Sets column size.
*
* @param columnSize the column size
*/
public void setColumnSize(int columnSize) {
this.columnSize = columnSize;
}
/**
* Gets decimal digits.
*
* @return the decimal digits
*/
public int getDecimalDigits() {
return decimalDigits;
}
/**
* Sets decimal digits.
*
* @param decimalDigits the decimal digits
*/
public void setDecimalDigits(int decimalDigits) {
this.decimalDigits = decimalDigits;
}
/**
* Gets num prec radix.
*
* @return the num prec radix
*/
public int getNumPrecRadix() {
return numPrecRadix;
}
/**
* Sets num prec radix.
*
* @param numPrecRadix the num prec radix
*/
public void setNumPrecRadix(int numPrecRadix) {
this.numPrecRadix = numPrecRadix;
}
/**
* Gets null able.
*
* @return the null able
*/
public int getNullAble() {
return nullAble;
}
/**
* Sets null able.
*
* @param nullAble the null able
*/
public void setNullAble(int nullAble) {
this.nullAble = nullAble;
}
/**
* Gets remarks.
*
* @return the remarks
*/
public String getRemarks() {
return remarks;
}
/**
* Sets remarks.
*
* @param remarks the remarks
*/
public void setRemarks(String remarks) {
this.remarks = remarks;
}
/**
* Gets column def.
*
* @return the column def
*/
public String getColumnDef() {
return columnDef;
}
/**
* Sets column def.
*
* @param columnDef the column def
*/
public void setColumnDef(String columnDef) {
this.columnDef = columnDef;
}
/**
* Gets sql data type.
*
* @return the sql data type
*/
public int getSqlDataType() {
return sqlDataType;
}
/**
* Sets sql data type.
*
* @param sqlDataType the sql data type
*/
public void setSqlDataType(int sqlDataType) {
this.sqlDataType = sqlDataType;
}
/**
* Gets sql datetime sub.
*
* @return the sql datetime sub
*/
public int getSqlDatetimeSub() {
return sqlDatetimeSub;
}
/**
* Sets sql datetime sub.
*
* @param sqlDatetimeSub the sql datetime sub
*/
public void setSqlDatetimeSub(int sqlDatetimeSub) {
this.sqlDatetimeSub = sqlDatetimeSub;
}
/**
* Gets char octet length.
*
* @return the char octet length
*/
public Object getCharOctetLength() {
return charOctetLength;
}
/**
* Sets char octet length.
*
* @param charOctetLength the char octet length
*/
public void setCharOctetLength(Object charOctetLength) {
this.charOctetLength = charOctetLength;
}
/**
* Gets ordinal position.
*
* @return the ordinal position
*/
public int getOrdinalPosition() {
return ordinalPosition;
}
/**
* Sets ordinal position.
*
* @param ordinalPosition the ordinal position
*/
public void setOrdinalPosition(int ordinalPosition) {
this.ordinalPosition = ordinalPosition;
}
/**
* Gets is null able.
*
* @return the is null able
*/
public String getIsNullAble() {
return isNullAble;
}
/**
* Sets is null able.
*
* @param isNullAble the is null able
*/
public void setIsNullAble(String isNullAble) {
this.isNullAble = isNullAble;
}
/**
* Gets is autoincrement.
*
* @return the is autoincrement
*/
public String getIsAutoincrement() {
return isAutoincrement;
}
/**
* Sets is autoincrement.
*
* @param isAutoincrement the is autoincrement
*/
public void setIsAutoincrement(String isAutoincrement) {
this.isAutoincrement = isAutoincrement;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof ColumnMeta)) {
return false;
}
ColumnMeta columnMeta = (ColumnMeta) o;
if (!Objects.equals(columnMeta.tableCat, this.tableCat)) {
return false;
}
if (!Objects.equals(columnMeta.tableSchemaName, this.tableSchemaName)) {
return false;
}
if (!Objects.equals(columnMeta.tableName, this.tableName)) {
return false;
}
if (!Objects.equals(columnMeta.columnName, this.columnName)) {
return false;
}
if (!Objects.equals(columnMeta.dataType, this.dataType)) {
return false;
}
if (!Objects.equals(columnMeta.dataTypeName, this.dataTypeName)) {
return false;
}
if (!Objects.equals(columnMeta.columnSize, this.columnSize)) {
return false;
}
if (!Objects.equals(columnMeta.decimalDigits, this.decimalDigits)) {
return false;
}
if (!Objects.equals(columnMeta.numPrecRadix, this.numPrecRadix)) {
return false;
}
if (!Objects.equals(columnMeta.nullAble, this.nullAble)) {
return false;
}
if (!Objects.equals(columnMeta.remarks, this.remarks)) {
return false;
}
if (!Objects.equals(columnMeta.columnDef, this.columnDef)) {
return false;
}
if (!Objects.equals(columnMeta.sqlDataType, this.sqlDataType)) {
return false;
}
if (!Objects.equals(columnMeta.sqlDatetimeSub, this.sqlDatetimeSub)) {
return false;
}
if (!Objects.equals(columnMeta.charOctetLength, this.charOctetLength)) {
return false;
}
if (!Objects.equals(columnMeta.ordinalPosition, this.ordinalPosition)) {
return false;
}
if (!Objects.equals(columnMeta.isNullAble, this.isNullAble)) {
return false;
}
if (!Objects.equals(columnMeta.isAutoincrement, this.isAutoincrement)) {
return false;
}
return true;
}
@Override
public int hashCode() {
int hash = Objects.hashCode(tableCat);
hash += Objects.hashCode(tableSchemaName);
hash += Objects.hashCode(tableName);
hash += Objects.hashCode(columnName);
hash += Objects.hashCode(dataType);
hash += Objects.hashCode(dataTypeName);
hash += Objects.hashCode(columnSize);
hash += Objects.hashCode(decimalDigits);
hash += Objects.hashCode(numPrecRadix);
hash += Objects.hashCode(nullAble);
hash += Objects.hashCode(remarks);
hash += Objects.hashCode(columnDef);
hash += Objects.hashCode(sqlDataType);
hash += Objects.hashCode(sqlDatetimeSub);
hash += Objects.hashCode(charOctetLength);
hash += Objects.hashCode(ordinalPosition);
hash += Objects.hashCode(isNullAble);
hash += Objects.hashCode(isAutoincrement);
return hash;
}
}

View File

@@ -0,0 +1,149 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.sql.struct;
/**
 * Field
 *
 * <p>One column inside a row image: its name, type code, key classification
 * and captured value.
 *
 * @author sharajava
 */
public class Field implements java.io.Serializable {

    private static final long serialVersionUID = -3489407607572041783L;

    /**
     * The Name.
     */
    private String name;

    // classification of this column; ordinary columns keep the default
    private KeyType keyType = KeyType.NULL;

    /**
     * The Type.
     */
    // presumably a java.sql.Types code — confirm against callers
    private int type;

    /**
     * The Value.
     */
    private Object value;

    /**
     * Creates an empty field; populate via setters.
     */
    public Field() {
    }

    /**
     * Creates a fully populated field.
     *
     * @param name  the name
     * @param type  the type
     * @param value the value
     */
    public Field(String name, int type, Object value) {
        this.name = name;
        this.type = type;
        this.value = value;
    }

    /**
     * Gets name.
     *
     * @return the name
     */
    public String getName() {
        return name;
    }

    /**
     * Sets name.
     *
     * @param fieldName the field name
     */
    public void setName(String fieldName) {
        this.name = fieldName;
    }

    /**
     * Gets key type.
     *
     * @return the key type
     */
    public KeyType getKeyType() {
        return keyType;
    }

    /**
     * Sets key type.
     *
     * @param fieldKeyType the key type
     */
    public void setKeyType(KeyType fieldKeyType) {
        this.keyType = fieldKeyType;
    }

    /**
     * Gets type.
     *
     * @return the type
     */
    public int getType() {
        return type;
    }

    /**
     * Sets type.
     *
     * @param fieldType the field type
     */
    public void setType(int fieldType) {
        this.type = fieldType;
    }

    /**
     * Gets value.
     *
     * @return the value
     */
    public Object getValue() {
        return value;
    }

    /**
     * Sets value.
     *
     * @param fieldValue the field value
     */
    public void setValue(Object fieldValue) {
        this.value = fieldValue;
    }

    /**
     * Tells whether this field's name matches the given primary-key name,
     * ignoring case.
     *
     * @param pkname the pkname
     * @return true when the names match case-insensitively
     */
    public boolean isKey(String pkname) {
        return name.equalsIgnoreCase(pkname);
    }

    @Override
    public String toString() {
        // same output as String.format("[%s,%s]", name, value)
        return "[" + name + "," + value + "]";
    }
}

View File

@@ -0,0 +1,261 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.sql.struct;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import org.apache.commons.lang.ArrayUtils;
/**
 * The type Index meta.
 *
 * <p>Value object describing one index as reported by JDBC
 * {@code DatabaseMetaData.getIndexInfo}, plus its participating columns.
 *
 * @author sharajava
 */
public class IndexMeta {

    private List<ColumnMeta> values = new ArrayList<>();

    private boolean nonUnique;

    private String indexQualifier;

    private String indexName;

    private short type;

    private IndexType indextype;

    private String ascOrDesc;

    private int cardinality;

    private int ordinalPosition;

    /**
     * Instantiates a new Index meta.
     */
    public IndexMeta() {
    }

    /**
     * Gets values.
     *
     * @return the columns participating in this index
     */
    public List<ColumnMeta> getValues() {
        return values;
    }

    /**
     * Sets values.
     *
     * @param values the values
     */
    public void setValues(List<ColumnMeta> values) {
        this.values = values;
    }

    /**
     * Is non unique boolean.
     *
     * @return the boolean
     */
    public boolean isNonUnique() {
        return nonUnique;
    }

    /**
     * Sets non unique.
     *
     * @param nonUnique the non unique
     */
    public void setNonUnique(boolean nonUnique) {
        this.nonUnique = nonUnique;
    }

    /**
     * Gets index qualifier.
     *
     * @return the index qualifier
     */
    public String getIndexQualifier() {
        return indexQualifier;
    }

    /**
     * Sets index qualifier.
     *
     * @param indexQualifier the index qualifier
     */
    public void setIndexQualifier(String indexQualifier) {
        this.indexQualifier = indexQualifier;
    }

    /**
     * Gets index name.
     *
     * @return the index name
     */
    public String getIndexName() {
        return indexName;
    }

    /**
     * Sets index name.
     *
     * @param indexName the index name
     */
    public void setIndexName(String indexName) {
        this.indexName = indexName;
    }

    /**
     * Gets type.
     *
     * @return the type
     */
    public short getType() {
        return type;
    }

    /**
     * Sets type.
     *
     * @param type the type
     */
    public void setType(short type) {
        this.type = type;
    }

    /**
     * Gets asc or desc.
     *
     * @return the asc or desc
     */
    public String getAscOrDesc() {
        return ascOrDesc;
    }

    /**
     * Sets asc or desc.
     *
     * @param ascOrDesc the asc or desc
     */
    public void setAscOrDesc(String ascOrDesc) {
        this.ascOrDesc = ascOrDesc;
    }

    /**
     * Gets cardinality.
     *
     * @return the cardinality
     */
    public int getCardinality() {
        return cardinality;
    }

    /**
     * Sets cardinality.
     *
     * @param cardinality the cardinality
     */
    public void setCardinality(int cardinality) {
        this.cardinality = cardinality;
    }

    /**
     * Gets ordinal position.
     *
     * @return the ordinal position
     */
    public int getOrdinalPosition() {
        return ordinalPosition;
    }

    /**
     * Sets ordinal position.
     *
     * @param ordinalPosition the ordinal position
     */
    public void setOrdinalPosition(int ordinalPosition) {
        this.ordinalPosition = ordinalPosition;
    }

    /**
     * Gets indextype.
     *
     * @return the indextype
     */
    public IndexType getIndextype() {
        return indextype;
    }

    /**
     * Sets indextype.
     *
     * @param indextype the indextype
     */
    public void setIndextype(IndexType indextype) {
        this.indextype = indextype;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof IndexMeta)) {
            return false;
        }
        IndexMeta indexMeta = (IndexMeta) o;
        // Objects.equals replaces the deprecated ArrayUtils.isEquals (which is
        // meant for arrays, not Lists) and is null-safe
        if (!Objects.equals(indexMeta.values, this.values)) {
            return false;
        }
        if (!Objects.equals(indexMeta.nonUnique, this.nonUnique)) {
            return false;
        }
        if (!Objects.equals(indexMeta.indexQualifier, this.indexQualifier)) {
            return false;
        }
        if (!Objects.equals(indexMeta.indexName, this.indexName)) {
            return false;
        }
        if (!Objects.equals(indexMeta.type, this.type)) {
            return false;
        }
        // enum identity equality; null-safe (the previous indextype.value()
        // dereference NPE'd when either side had a null indextype)
        if (!Objects.equals(indexMeta.indextype, this.indextype)) {
            return false;
        }
        if (!Objects.equals(indexMeta.ascOrDesc, this.ascOrDesc)) {
            return false;
        }
        if (!Objects.equals(indexMeta.ordinalPosition, this.ordinalPosition)) {
            return false;
        }
        return true;
    }

    @Override
    public int hashCode() {
        // NOTE(review): values and cardinality are deliberately excluded here,
        // matching the original; the equals/hashCode contract still holds since
        // equal objects agree on every hashed field.
        int hash = Objects.hashCode(nonUnique);
        hash += Objects.hashCode(indexQualifier);
        hash += Objects.hashCode(indexName);
        hash += Objects.hashCode(type);
        hash += Objects.hashCode(indextype);
        hash += Objects.hashCode(ascOrDesc);
        hash += Objects.hashCode(ordinalPosition);
        return hash;
    }

    @Override
    public String toString() {
        return "indexName:" + indexName + "->" + "type:" + type + "->" + "values:" + values;
    }
}

View File

@@ -0,0 +1,70 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.sql.struct;
/**
 * The enum Index type.
 *
 * <p>Each constant carries a stable numeric code used for serialization;
 * {@link #valueOf(int)} resolves the constant back from its code.
 *
 * @author sharajava
 */
public enum IndexType {
    /**
     * Primary index type.
     */
    PRIMARY(0),
    /**
     * Normal index type.
     */
    NORMAL(1),
    /**
     * Unique index type.
     */
    UNIQUE(2),
    /**
     * Full text index type.
     */
    FULL_TEXT(3);

    // immutable numeric code backing this constant (was non-final)
    private final int i;

    IndexType(int i) {
        this.i = i;
    }

    /**
     * Value int.
     *
     * @return the numeric code of this index type
     */
    public int value() {
        return this.i;
    }

    /**
     * Resolve an index type from its numeric code.
     *
     * @param i the numeric code
     * @return the index type
     * @throws IllegalArgumentException if no constant has the given code
     */
    public static IndexType valueOf(int i) {
        for (IndexType t : values()) {
            if (t.value() == i) {
                return t;
            }
        }
        throw new IllegalArgumentException("Invalid IndexType:" + i);
    }
}

View File

@@ -0,0 +1,36 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.sql.struct;
/**
 * The enum Key type: whether a field participates in the table's primary key.
 *
 * @author sharajava
 */
public enum KeyType {

    /**
     * The field is not part of any key.
     */
    NULL,

    /**
     * The field belongs to the primary key.
     */
    PRIMARY_KEY
}

View File

@@ -0,0 +1,95 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.sql.struct;
import java.util.ArrayList;
import java.util.List;
/**
 * The type Row: one row of a table snapshot, held as an ordered list of fields.
 *
 * @author sharajava
 */
public class Row implements java.io.Serializable {

    private static final long serialVersionUID = 6532477221179419451L;

    private List<Field> fields = new ArrayList<>();

    /**
     * Instantiates a new Row.
     */
    public Row() {
    }

    /**
     * Gets fields.
     *
     * @return the fields of this row
     */
    public List<Field> getFields() {
        return fields;
    }

    /**
     * Sets fields.
     *
     * @param fields the fields of this row
     */
    public void setFields(List<Field> fields) {
        this.fields = fields;
    }

    /**
     * Appends one field to this row.
     *
     * @param field the field to append
     */
    public void add(Field field) {
        fields.add(field);
    }

    /**
     * Collects the fields of this row that belong to the primary key.
     *
     * @return the primary key fields
     */
    public List<Field> primaryKeys() {
        List<Field> pks = new ArrayList<>(fields.size());
        for (Field candidate : fields) {
            if (candidate.getKeyType() == KeyType.PRIMARY_KEY) {
                pks.add(candidate);
            }
        }
        return pks;
    }

    /**
     * Collects the fields of this row that do not belong to the primary key.
     *
     * @return the non-primary-key fields
     */
    public List<Field> nonPrimaryKeys() {
        List<Field> nonPks = new ArrayList<>(fields.size());
        for (Field candidate : fields) {
            if (candidate.getKeyType() != KeyType.PRIMARY_KEY) {
                nonPks.add(candidate);
            }
        }
        return nonPks;
    }
}

View File

@@ -0,0 +1,209 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.sql.struct;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import io.seata.common.exception.NotSupportYetException;
import io.seata.common.util.CollectionUtils;
import io.seata.rm.datasource.ColumnUtils;
/**
 * The type Table meta: schema information of one table — its columns and indexes,
 * both keyed by name in insertion order.
 *
 * @author sharajava
 */
public class TableMeta {

    private String tableName;

    /**
     * key: column name
     */
    private Map<String, ColumnMeta> allColumns = new LinkedHashMap<>();

    /**
     * key: index name
     */
    private Map<String, IndexMeta> allIndexes = new LinkedHashMap<>();

    /**
     * Gets table name.
     *
     * @return the table name
     */
    public String getTableName() {
        return tableName;
    }

    /**
     * Sets table name.
     *
     * @param tableName the table name
     */
    public void setTableName(String tableName) {
        this.tableName = tableName;
    }

    /**
     * Gets column meta.
     *
     * @param colName the col name
     * @return the column meta, or {@code null} if the column is unknown
     */
    public ColumnMeta getColumnMeta(String colName) {
        return allColumns.get(colName);
    }

    /**
     * Gets all columns, keyed by column name.
     *
     * @return the all columns (live view, not a copy)
     */
    public Map<String, ColumnMeta> getAllColumns() {
        return allColumns;
    }

    /**
     * Gets all indexes, keyed by index name.
     *
     * @return the all indexes (live view, not a copy)
     */
    public Map<String, IndexMeta> getAllIndexes() {
        return allIndexes;
    }

    /**
     * Gets the first auto-increment column of this table.
     *
     * @return the auto increase column, or {@code null} when none exists
     */
    public ColumnMeta getAutoIncreaseColumn() {
        // TODO: how about auto increment but not pk?
        // Only the values are needed, so iterate values() instead of entrySet().
        for (ColumnMeta col : allColumns.values()) {
            if ("YES".equalsIgnoreCase(col.getIsAutoincrement())) {
                return col;
            }
        }
        return null;
    }

    /**
     * Gets primary key map, keyed by column name.
     *
     * @return the primary key columns
     * @throws NotSupportYetException if the table has no primary key index
     */
    public Map<String, ColumnMeta> getPrimaryKeyMap() {
        Map<String, ColumnMeta> pk = new HashMap<>();
        allIndexes.forEach((key, index) -> {
            if (index.getIndextype().value() == IndexType.PRIMARY.value()) {
                for (ColumnMeta col : index.getValues()) {
                    pk.put(col.getColumnName(), col);
                }
            }
        });

        if (pk.isEmpty()) {
            throw new NotSupportYetException(String.format("%s needs to contain the primary key.", tableName));
        }
        return pk;
    }

    /**
     * Gets the names of the primary key columns.
     *
     * @return the primary key column names
     */
    public List<String> getPrimaryKeyOnlyName() {
        // Copying the key set preserves the iteration order of getPrimaryKeyMap().
        return new ArrayList<>(getPrimaryKeyMap().keySet());
    }

    /**
     * Gets the primary key names escaped for the given database dialect.
     *
     * @param dbType the db type
     * @return escape pk name list
     */
    public List<String> getEscapePkNameList(String dbType) {
        return ColumnUtils.addEscape(getPrimaryKeyOnlyName(), dbType);
    }

    /**
     * Whether the given column names cover all primary key columns
     * (case-insensitively if the exact-case check fails).
     *
     * @param cols the cols
     * @return {@code true} if cols contains every pk column
     */
    public boolean containsPK(List<String> cols) {
        if (cols == null) {
            return false;
        }

        List<String> pk = getPrimaryKeyOnlyName();
        if (pk.isEmpty()) {
            return false;
        }

        //at least contain one pk
        if (cols.containsAll(pk)) {
            return true;
        } else {
            return CollectionUtils.toUpperList(cols).containsAll(CollectionUtils.toUpperList(pk));
        }
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof TableMeta)) {
            return false;
        }
        TableMeta tableMeta = (TableMeta) o;
        if (!Objects.equals(tableMeta.tableName, this.tableName)) {
            return false;
        }
        if (!Objects.equals(tableMeta.allColumns, this.allColumns)) {
            return false;
        }
        if (!Objects.equals(tableMeta.allIndexes, this.allIndexes)) {
            return false;
        }
        return true;
    }

    @Override
    public int hashCode() {
        // Sum of component hashes; consistent with equals() above.
        int hash = Objects.hashCode(tableName);
        hash += Objects.hashCode(allColumns);
        hash += Objects.hashCode(allIndexes);
        return hash;
    }
}

View File

@@ -0,0 +1,45 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.sql.struct;
import java.sql.Connection;
/**
 * The type Table meta cache: a per-resource cache of {@link TableMeta},
 * fetched through a live JDBC connection on demand.
 *
 * @author sharajava
 */
public interface TableMetaCache {

    /**
     * Gets table meta for the given table, using the connection to read the
     * schema when it is not already cached.
     *
     * @param connection the connection used to fetch the schema on a cache miss
     * @param tableName  the table name
     * @param resourceId the resource id (scopes the cache entry)
     * @return the table meta
     */
    TableMeta getTableMeta(Connection connection, String tableName, String resourceId);

    /**
     * Refresh the cached table meta belonging to the given resource, replacing
     * entries whose schema has changed with freshly fetched meta.
     *
     * @param connection the connection
     * @param resourceId the resource id
     */
    void refresh(Connection connection, String resourceId);
}

View File

@@ -0,0 +1,41 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.sql.struct;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import io.seata.common.loader.EnhancedServiceLoader;
import io.seata.common.util.CollectionUtils;
/**
 * Factory holding one {@link TableMetaCache} instance per database type,
 * loaded lazily via the SPI service loader.
 *
 * @author guoyao
 */
public class TableMetaCacheFactory {

    private static final Map<String, TableMetaCache> TABLE_META_CACHE_MAP = new ConcurrentHashMap<>();

    /**
     * Utility class: prevent instantiation.
     */
    private TableMetaCacheFactory() {
    }

    /**
     * get table meta cache
     *
     * @param dbType the db type
     * @return table meta cache
     */
    public static TableMetaCache getTableMetaCache(String dbType) {
        return CollectionUtils.computeIfAbsent(TABLE_META_CACHE_MAP, dbType,
            key -> EnhancedServiceLoader.load(TableMetaCache.class, dbType));
    }
}

View File

@@ -0,0 +1,283 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.sql.struct;
import java.sql.Array;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.NClob;
import java.sql.Ref;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Types;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.sql.rowset.serial.SerialBlob;
import javax.sql.rowset.serial.SerialClob;
import javax.sql.rowset.serial.SerialDatalink;
import javax.sql.rowset.serial.SerialJavaObject;
import javax.sql.rowset.serial.SerialRef;
import io.seata.common.exception.ShouldNeverHappenException;
import io.seata.rm.datasource.sql.serial.SerialArray;
/**
 * The type Table records: a snapshot of rows read from one table, used as a
 * before/after image. Serializable; the attached {@link TableMeta} is transient.
 *
 * @author sharajava
 */
public class TableRecords implements java.io.Serializable {

    private static final long serialVersionUID = 4441667803166771721L;

    // transient: the table meta is not part of the serialized form.
    private transient TableMeta tableMeta;

    private String tableName;

    private List<Row> rows = new ArrayList<>();

    /**
     * Gets table name.
     *
     * @return the table name
     */
    public String getTableName() {
        return tableName;
    }

    /**
     * Sets table name.
     *
     * @param tableName the table name
     */
    public void setTableName(String tableName) {
        this.tableName = tableName;
    }

    /**
     * Gets rows.
     *
     * @return the rows
     */
    public List<Row> getRows() {
        return rows;
    }

    /**
     * Sets rows.
     *
     * @param rows the rows
     */
    public void setRows(List<Row> rows) {
        this.rows = rows;
    }

    /**
     * Instantiates a new Table records.
     */
    public TableRecords() {
    }

    /**
     * Instantiates a new Table records bound to the given table meta.
     *
     * @param tableMeta the table meta
     */
    public TableRecords(TableMeta tableMeta) {
        setTableMeta(tableMeta);
    }

    /**
     * Sets table meta. May be called only once per instance.
     *
     * @param tableMeta the table meta
     * @throws ShouldNeverHappenException if a table meta is already attached
     */
    public void setTableMeta(TableMeta tableMeta) {
        if (this.tableMeta != null) {
            throw new ShouldNeverHappenException();
        }
        this.tableMeta = tableMeta;
        this.tableName = tableMeta.getTableName();
    }

    /**
     * Size int.
     *
     * @return the number of rows
     */
    public int size() {
        return rows.size();
    }

    /**
     * Add a row.
     *
     * @param row the row
     */
    public void add(Row row) {
        rows.add(row);
    }

    /**
     * Pk rows list.
     *
     * @return a list with one map per row; each map holds the pk column name as key and its field as value
     */
    public List<Map<String, Field>> pkRows() {
        final Map<String, ColumnMeta> primaryKeyMap = getTableMeta().getPrimaryKeyMap();
        List<Map<String, Field>> pkRows = new ArrayList<>();
        for (Row row : rows) {
            List<Field> fields = row.getFields();
            Map<String, Field> rowMap = new HashMap<>(3);
            for (Field field : fields) {
                if (primaryKeyMap.containsKey(field.getName())) {
                    rowMap.put(field.getName(), field);
                }
            }
            pkRows.add(rowMap);
        }
        return pkRows;
    }

    /**
     * Gets table meta.
     *
     * @return the table meta
     */
    public TableMeta getTableMeta() {
        return tableMeta;
    }

    /**
     * Empty table records.
     *
     * @param tableMeta the table meta
     * @return an immutable, empty table records
     */
    public static TableRecords empty(TableMeta tableMeta) {
        return new EmptyTableRecords(tableMeta);
    }

    /**
     * Build records from a JDBC result set, marking primary key fields and
     * wrapping LOB/array/ref values in their serializable counterparts.
     *
     * @param tmeta     the table meta of the queried table
     * @param resultSet the result set
     * @return the table records
     * @throws SQLException the sql exception
     */
    public static TableRecords buildRecords(TableMeta tmeta, ResultSet resultSet) throws SQLException {
        TableRecords records = new TableRecords(tmeta);
        ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
        int columnCount = resultSetMetaData.getColumnCount();

        // Computed lazily once per call: getPrimaryKeyMap() rebuilds its map on
        // every invocation, so calling it per column (as before) cost
        // O(rows * columns) map builds. Lazy init keeps the behavior for empty
        // result sets identical (the map is never requested).
        Map<String, ColumnMeta> primaryKeyMap = null;
        while (resultSet.next()) {
            if (primaryKeyMap == null) {
                primaryKeyMap = tmeta.getPrimaryKeyMap();
            }
            List<Field> fields = new ArrayList<>(columnCount);
            for (int i = 1; i <= columnCount; i++) {
                String colName = resultSetMetaData.getColumnName(i);
                ColumnMeta col = tmeta.getColumnMeta(colName);
                int dataType = col.getDataType();
                Field field = new Field();
                field.setName(col.getColumnName());
                if (primaryKeyMap.containsKey(colName)) {
                    field.setKeyType(KeyType.PRIMARY_KEY);
                }
                field.setType(dataType);
                // mysql will not run in this code
                // cause mysql does not use java.sql.Blob, java.sql.sql.Clob to process Blob and Clob column
                if (dataType == Types.BLOB) {
                    Blob blob = resultSet.getBlob(i);
                    if (blob != null) {
                        field.setValue(new SerialBlob(blob));
                    }
                } else if (dataType == Types.CLOB) {
                    Clob clob = resultSet.getClob(i);
                    if (clob != null) {
                        field.setValue(new SerialClob(clob));
                    }
                } else if (dataType == Types.NCLOB) {
                    NClob object = resultSet.getNClob(i);
                    if (object != null) {
                        field.setValue(new SerialClob(object));
                    }
                } else if (dataType == Types.ARRAY) {
                    Array array = resultSet.getArray(i);
                    if (array != null) {
                        field.setValue(new SerialArray(array));
                    }
                } else if (dataType == Types.REF) {
                    Ref ref = resultSet.getRef(i);
                    if (ref != null) {
                        field.setValue(new SerialRef(ref));
                    }
                } else if (dataType == Types.DATALINK) {
                    java.net.URL url = resultSet.getURL(i);
                    if (url != null) {
                        field.setValue(new SerialDatalink(url));
                    }
                } else if (dataType == Types.JAVA_OBJECT) {
                    Object object = resultSet.getObject(i);
                    if (object != null) {
                        field.setValue(new SerialJavaObject(object));
                    }
                } else {
                    // JDBCType.DISTINCT, JDBCType.STRUCT etc...
                    field.setValue(resultSet.getObject(i));
                }
                fields.add(field);
            }
            Row row = new Row();
            row.setFields(fields);
            records.add(row);
        }
        return records;
    }

    /**
     * An immutable, always-empty table records; mutation attempts fail fast.
     */
    public static class EmptyTableRecords extends TableRecords {

        public EmptyTableRecords() {}

        public EmptyTableRecords(TableMeta tableMeta) {
            this.setTableMeta(tableMeta);
        }

        @Override
        public int size() {
            return 0;
        }

        @Override
        public List<Map<String, Field>> pkRows() {
            return new ArrayList<>();
        }

        @Override
        public void add(Row row) {
            throw new UnsupportedOperationException("cannot add row to empty table records");
        }

        @Override
        public TableMeta getTableMeta() {
            throw new UnsupportedOperationException("empty table records has no table meta");
        }
    }
}

View File

@@ -0,0 +1,114 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.sql.struct.cache;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Map;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import io.seata.common.exception.ShouldNeverHappenException;
import io.seata.common.util.StringUtils;
import io.seata.core.context.RootContext;
import io.seata.rm.datasource.sql.struct.TableMeta;
import io.seata.rm.datasource.sql.struct.TableMetaCache;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * The type Table meta cache: shared Caffeine-backed cache of {@link TableMeta},
 * delegating key construction and schema fetching to dialect subclasses.
 *
 * @author sharajava
 */
public abstract class AbstractTableMetaCache implements TableMetaCache {

    private static final Logger LOGGER = LoggerFactory.getLogger(AbstractTableMetaCache.class);

    private static final long CACHE_SIZE = 100000;

    private static final long EXPIRE_TIME = 900 * 1000;

    private static final Cache<String, TableMeta> TABLE_META_CACHE = Caffeine.newBuilder().maximumSize(CACHE_SIZE)
        .expireAfterWrite(EXPIRE_TIME, TimeUnit.MILLISECONDS).softValues().build();

    @Override
    public TableMeta getTableMeta(final Connection connection, final String tableName, String resourceId) {
        if (StringUtils.isNullOrEmpty(tableName)) {
            throw new IllegalArgumentException("TableMeta cannot be fetched without tableName");
        }

        final String cacheKey = getCacheKey(connection, tableName, resourceId);
        // On a miss, fetch the schema; a fetch failure is logged and mapped to
        // null so it is not cached, then rethrown below as a hard error.
        TableMeta meta = TABLE_META_CACHE.get(cacheKey, key -> {
            try {
                return fetchSchema(connection, tableName);
            } catch (SQLException e) {
                LOGGER.error("get table meta of the table `{}` error: {}", tableName, e.getMessage(), e);
                return null;
            }
        });
        if (meta == null) {
            throw new ShouldNeverHappenException(String.format("[xid:%s]get table meta failed," +
                " please check whether the table `%s` exists.", RootContext.getXID(), tableName));
        }
        return meta;
    }

    @Override
    public void refresh(final Connection connection, String resourceId) {
        for (Map.Entry<String, TableMeta> entry : TABLE_META_CACHE.asMap().entrySet()) {
            // Only refresh entries that belong to this resource/connection.
            String freshKey = getCacheKey(connection, entry.getValue().getTableName(), resourceId);
            if (!entry.getKey().equals(freshKey)) {
                continue;
            }
            try {
                TableMeta freshMeta = fetchSchema(connection, entry.getValue().getTableName());
                if (!freshMeta.equals(entry.getValue())) {
                    TABLE_META_CACHE.put(entry.getKey(), freshMeta);
                    LOGGER.info("table meta change was found, update table meta cache automatically.");
                }
            } catch (SQLException e) {
                LOGGER.error("get table meta error:{}", e.getMessage(), e);
            }
        }
    }

    /**
     * generate cache key
     *
     * @param connection the connection
     * @param tableName  the table name
     * @param resourceId the resource id
     * @return cache key
     */
    protected abstract String getCacheKey(Connection connection, String tableName, String resourceId);

    /**
     * get scheme from datasource and tableName
     *
     * @param connection the connection
     * @param tableName  the table name
     * @return table meta
     * @throws SQLException the sql exception
     */
    protected abstract TableMeta fetchSchema(Connection connection, String tableName) throws SQLException;
}

View File

@@ -0,0 +1,178 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.sql.struct.cache;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import io.seata.common.exception.ShouldNeverHappenException;
import io.seata.common.loader.LoadLevel;
import io.seata.rm.datasource.ColumnUtils;
import io.seata.rm.datasource.sql.struct.ColumnMeta;
import io.seata.rm.datasource.sql.struct.IndexMeta;
import io.seata.rm.datasource.sql.struct.IndexType;
import io.seata.rm.datasource.sql.struct.TableMeta;
import io.seata.sqlparser.util.JdbcConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * The type Table meta cache for MySQL: fetches schema via a probing
 * {@code SELECT ... LIMIT 1} plus JDBC {@link DatabaseMetaData}.
 *
 * @author sharajava
 */
@LoadLevel(name = JdbcConstants.MYSQL)
public class MysqlTableMetaCache extends AbstractTableMetaCache {

    private static final Logger LOGGER = LoggerFactory.getLogger(MysqlTableMetaCache.class);

    @Override
    protected String getCacheKey(Connection connection, String tableName, String resourceId) {
        StringBuilder cacheKey = new StringBuilder(resourceId);
        cacheKey.append(".");

        //remove single quote and separate it to catalogName and tableName
        String[] tableNameWithCatalog = tableName.replace("`", "").split("\\.");
        String defaultTableName = tableNameWithCatalog.length > 1 ? tableNameWithCatalog[1] : tableNameWithCatalog[0];

        DatabaseMetaData databaseMetaData = null;
        try {
            databaseMetaData = connection.getMetaData();
        } catch (SQLException e) {
            // Fall back to the raw table name when metadata is unavailable.
            LOGGER.error("Could not get connection, use default cache key {}", e.getMessage(), e);
            return cacheKey.append(defaultTableName).toString();
        }

        try {
            //prevent duplicated cache key
            if (databaseMetaData.supportsMixedCaseIdentifiers()) {
                cacheKey.append(defaultTableName);
            } else {
                cacheKey.append(defaultTableName.toLowerCase());
            }
        } catch (SQLException e) {
            LOGGER.error("Could not get supportsMixedCaseIdentifiers in connection metadata, use default cache key {}", e.getMessage(), e);
            return cacheKey.append(defaultTableName).toString();
        }

        return cacheKey.toString();
    }

    @Override
    protected TableMeta fetchSchema(Connection connection, String tableName) throws SQLException {
        // A 1-row probe query yields ResultSetMetaData with the normalized
        // catalog/schema/table names, regardless of how the caller spelled them.
        String sql = "SELECT * FROM " + ColumnUtils.addEscape(tableName, JdbcConstants.MYSQL) + " LIMIT 1";
        try (Statement stmt = connection.createStatement();
             ResultSet rs = stmt.executeQuery(sql)) {
            return resultSetMetaToSchema(rs.getMetaData(), connection.getMetaData());
        } catch (SQLException sqlEx) {
            // Propagate SQL errors untouched; wrap anything else.
            throw sqlEx;
        } catch (Exception e) {
            throw new SQLException(String.format("Failed to fetch schema of %s", tableName), e);
        }
    }

    /**
     * Builds a {@link TableMeta} (columns + indexes) from JDBC metadata.
     *
     * @param rsmd metadata of the probe query's result set
     * @param dbmd database metadata of the same connection
     * @return the assembled table meta
     * @throws SQLException if metadata access fails
     */
    private TableMeta resultSetMetaToSchema(ResultSetMetaData rsmd, DatabaseMetaData dbmd)
        throws SQLException {
        //always "" for mysql
        String schemaName = rsmd.getSchemaName(1);
        String catalogName = rsmd.getCatalogName(1);
        /*
         * use ResultSetMetaData to get the pure table name
         * can avoid the problem below
         *
         * select * from account_tbl
         * select * from account_TBL
         * select * from `account_tbl`
         * select * from account.account_tbl
         */
        String tableName = rsmd.getTableName(1);

        TableMeta tm = new TableMeta();
        tm.setTableName(tableName);

        /*
         * here has two different type to get the data
         * make sure the table name was right
         * 1. show full columns from xxx from xxx(normal)
         * 2. select xxx from xxx where catalog_name like ? and table_name like ?(informationSchema=true)
         */
        try (ResultSet rsColumns = dbmd.getColumns(catalogName, schemaName, tableName, "%");
             ResultSet rsIndex = dbmd.getIndexInfo(catalogName, schemaName, tableName, false, true)) {
            while (rsColumns.next()) {
                ColumnMeta col = new ColumnMeta();
                col.setTableCat(rsColumns.getString("TABLE_CAT"));
                col.setTableSchemaName(rsColumns.getString("TABLE_SCHEM"));
                col.setTableName(rsColumns.getString("TABLE_NAME"));
                col.setColumnName(rsColumns.getString("COLUMN_NAME"));
                col.setDataType(rsColumns.getInt("DATA_TYPE"));
                col.setDataTypeName(rsColumns.getString("TYPE_NAME"));
                col.setColumnSize(rsColumns.getInt("COLUMN_SIZE"));
                col.setDecimalDigits(rsColumns.getInt("DECIMAL_DIGITS"));
                col.setNumPrecRadix(rsColumns.getInt("NUM_PREC_RADIX"));
                col.setNullAble(rsColumns.getInt("NULLABLE"));
                col.setRemarks(rsColumns.getString("REMARKS"));
                col.setColumnDef(rsColumns.getString("COLUMN_DEF"));
                col.setSqlDataType(rsColumns.getInt("SQL_DATA_TYPE"));
                col.setSqlDatetimeSub(rsColumns.getInt("SQL_DATETIME_SUB"));
                col.setCharOctetLength(rsColumns.getInt("CHAR_OCTET_LENGTH"));
                col.setOrdinalPosition(rsColumns.getInt("ORDINAL_POSITION"));
                col.setIsNullAble(rsColumns.getString("IS_NULLABLE"));
                col.setIsAutoincrement(rsColumns.getString("IS_AUTOINCREMENT"));

                tm.getAllColumns().put(col.getColumnName(), col);
            }

            while (rsIndex.next()) {
                String indexName = rsIndex.getString("INDEX_NAME");
                String colName = rsIndex.getString("COLUMN_NAME");
                ColumnMeta col = tm.getAllColumns().get(colName);

                if (tm.getAllIndexes().containsKey(indexName)) {
                    // Subsequent rows of a composite index: just append the column.
                    IndexMeta index = tm.getAllIndexes().get(indexName);
                    index.getValues().add(col);
                } else {
                    IndexMeta index = new IndexMeta();
                    // Fixed: setIndexName was previously called twice with the same value.
                    index.setIndexName(indexName);
                    index.setNonUnique(rsIndex.getBoolean("NON_UNIQUE"));
                    index.setIndexQualifier(rsIndex.getString("INDEX_QUALIFIER"));
                    index.setType(rsIndex.getShort("TYPE"));
                    index.setOrdinalPosition(rsIndex.getShort("ORDINAL_POSITION"));
                    index.setAscOrDesc(rsIndex.getString("ASC_OR_DESC"));
                    index.setCardinality(rsIndex.getInt("CARDINALITY"));
                    index.getValues().add(col);
                    // MySQL names the primary key index "PRIMARY".
                    if ("PRIMARY".equalsIgnoreCase(indexName)) {
                        index.setIndextype(IndexType.PRIMARY);
                    } else if (!index.isNonUnique()) {
                        index.setIndextype(IndexType.UNIQUE);
                    } else {
                        index.setIndextype(IndexType.NORMAL);
                    }
                    tm.getAllIndexes().put(indexName, index);
                }
            }
            if (tm.getAllIndexes().isEmpty()) {
                throw new ShouldNeverHappenException("Could not found any index in the table: " + tableName);
            }
        }
        return tm;
    }
}

View File

@@ -0,0 +1,161 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.sql.struct.cache;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
import io.seata.common.exception.ShouldNeverHappenException;
import io.seata.common.loader.LoadLevel;
import io.seata.common.util.StringUtils;
import io.seata.rm.datasource.sql.struct.ColumnMeta;
import io.seata.rm.datasource.sql.struct.IndexMeta;
import io.seata.rm.datasource.sql.struct.IndexType;
import io.seata.rm.datasource.sql.struct.TableMeta;
import io.seata.sqlparser.util.JdbcConstants;
/**
 * The type Table meta cache for Oracle: fetches schema via JDBC
 * {@link DatabaseMetaData}, resolving the primary key through
 * {@code getPrimaryKeys}.
 *
 * @author ygy
 */
@LoadLevel(name = JdbcConstants.ORACLE)
public class OracleTableMetaCache extends AbstractTableMetaCache {

    @Override
    protected String getCacheKey(Connection connection, String tableName, String resourceId) {
        StringBuilder cacheKey = new StringBuilder(resourceId);
        cacheKey.append(".");

        //separate it to schemaName and tableName
        String[] tableNameWithSchema = tableName.split("\\.");
        String defaultTableName = tableNameWithSchema.length > 1 ? tableNameWithSchema[1] : tableNameWithSchema[0];

        //oracle does not implement supportsMixedCaseIdentifiers in DatabaseMetadata
        if (defaultTableName.contains("\"")) {
            // Quoted identifiers keep their case; only the quotes are stripped.
            cacheKey.append(defaultTableName.replace("\"", ""));
        } else {
            // oracle default store in upper case
            cacheKey.append(defaultTableName.toUpperCase());
        }

        return cacheKey.toString();
    }

    @Override
    protected TableMeta fetchSchema(Connection connection, String tableName) throws SQLException {
        try {
            return resultSetMetaToSchema(connection.getMetaData(), tableName);
        } catch (SQLException sqlEx) {
            // Propagate SQL errors untouched; wrap anything else.
            throw sqlEx;
        } catch (Exception e) {
            throw new SQLException(String.format("Failed to fetch schema of %s", tableName), e);
        }
    }

    /**
     * Builds a {@link TableMeta} (columns, indexes, primary key) from JDBC metadata.
     *
     * @param dbmd      database metadata of the connection
     * @param tableName possibly schema-qualified and/or quoted table name
     * @return the assembled table meta
     * @throws SQLException if metadata access fails
     */
    private TableMeta resultSetMetaToSchema(DatabaseMetaData dbmd, String tableName) throws SQLException {
        TableMeta tm = new TableMeta();
        tm.setTableName(tableName);

        // Split "schema.table"; without a schema prefix, Oracle uses the user name.
        String[] schemaTable = tableName.split("\\.");
        String schemaName = schemaTable.length > 1 ? schemaTable[0] : dbmd.getUserName();
        tableName = schemaTable.length > 1 ? schemaTable[1] : tableName;

        // Unquoted identifiers are stored upper-case by Oracle; quoted ones keep their case.
        if (schemaName.contains("\"")) {
            schemaName = schemaName.replace("\"", "");
        } else {
            schemaName = schemaName.toUpperCase();
        }

        if (tableName.contains("\"")) {
            tableName = tableName.replace("\"", "");
        } else {
            tableName = tableName.toUpperCase();
        }

        // NOTE(review): getColumns uses "" as catalog while getIndexInfo/getPrimaryKeys
        // use null — presumably equivalent for Oracle; confirm before unifying.
        try (ResultSet rsColumns = dbmd.getColumns("", schemaName, tableName, "%");
             ResultSet rsIndex = dbmd.getIndexInfo(null, schemaName, tableName, false, true);
             ResultSet rsPrimary = dbmd.getPrimaryKeys(null, schemaName, tableName)) {
            while (rsColumns.next()) {
                ColumnMeta col = new ColumnMeta();
                col.setTableCat(rsColumns.getString("TABLE_CAT"));
                col.setTableSchemaName(rsColumns.getString("TABLE_SCHEM"));
                col.setTableName(rsColumns.getString("TABLE_NAME"));
                col.setColumnName(rsColumns.getString("COLUMN_NAME"));
                col.setDataType(rsColumns.getInt("DATA_TYPE"));
                col.setDataTypeName(rsColumns.getString("TYPE_NAME"));
                col.setColumnSize(rsColumns.getInt("COLUMN_SIZE"));
                col.setDecimalDigits(rsColumns.getInt("DECIMAL_DIGITS"));
                col.setNumPrecRadix(rsColumns.getInt("NUM_PREC_RADIX"));
                col.setNullAble(rsColumns.getInt("NULLABLE"));
                col.setRemarks(rsColumns.getString("REMARKS"));
                col.setColumnDef(rsColumns.getString("COLUMN_DEF"));
                col.setSqlDataType(rsColumns.getInt("SQL_DATA_TYPE"));
                col.setSqlDatetimeSub(rsColumns.getInt("SQL_DATETIME_SUB"));
                col.setCharOctetLength(rsColumns.getInt("CHAR_OCTET_LENGTH"));
                col.setOrdinalPosition(rsColumns.getInt("ORDINAL_POSITION"));
                col.setIsNullAble(rsColumns.getString("IS_NULLABLE"));

                tm.getAllColumns().put(col.getColumnName(), col);
            }

            while (rsIndex.next()) {
                String indexName = rsIndex.getString("INDEX_NAME");
                if (StringUtils.isNullOrEmpty(indexName)) {
                    continue;
                }
                String colName = rsIndex.getString("COLUMN_NAME");
                ColumnMeta col = tm.getAllColumns().get(colName);

                if (tm.getAllIndexes().containsKey(indexName)) {
                    // Subsequent rows of a composite index: just append the column.
                    IndexMeta index = tm.getAllIndexes().get(indexName);
                    index.getValues().add(col);
                } else {
                    IndexMeta index = new IndexMeta();
                    // Fixed: setIndexName was previously called twice with the same value.
                    index.setIndexName(indexName);
                    index.setNonUnique(rsIndex.getBoolean("NON_UNIQUE"));
                    index.setIndexQualifier(rsIndex.getString("INDEX_QUALIFIER"));
                    index.setType(rsIndex.getShort("TYPE"));
                    index.setOrdinalPosition(rsIndex.getShort("ORDINAL_POSITION"));
                    index.setAscOrDesc(rsIndex.getString("ASC_OR_DESC"));
                    index.setCardinality(rsIndex.getInt("CARDINALITY"));
                    index.getValues().add(col);
                    if (!index.isNonUnique()) {
                        index.setIndextype(IndexType.UNIQUE);
                    } else {
                        index.setIndextype(IndexType.NORMAL);
                    }
                    tm.getAllIndexes().put(indexName, index);
                }
            }

            // Re-tag the index backing the primary key constraint as PRIMARY.
            while (rsPrimary.next()) {
                String pkIndexName = rsPrimary.getString("PK_NAME");
                if (tm.getAllIndexes().containsKey(pkIndexName)) {
                    IndexMeta index = tm.getAllIndexes().get(pkIndexName);
                    index.setIndextype(IndexType.PRIMARY);
                }
            }
            if (tm.getAllIndexes().isEmpty()) {
                throw new ShouldNeverHappenException(String.format("Could not found any index in the table: %s", tableName));
            }
        }
        return tm;
    }
}

View File

@@ -0,0 +1,178 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.sql.struct.cache;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
import io.seata.common.exception.ShouldNeverHappenException;
import io.seata.common.loader.LoadLevel;
import io.seata.common.util.StringUtils;
import io.seata.rm.datasource.sql.struct.ColumnMeta;
import io.seata.rm.datasource.sql.struct.IndexMeta;
import io.seata.rm.datasource.sql.struct.IndexType;
import io.seata.rm.datasource.sql.struct.TableMeta;
import io.seata.sqlparser.util.JdbcConstants;
/**
 * Table meta cache for PostgreSQL: fetches column, index and primary-key metadata
 * through {@link DatabaseMetaData} and normalizes identifiers the way PostgreSQL
 * stores them (quoted names keep case, unquoted names are folded to lower case).
 *
 * @author jaspercloud
 */
@LoadLevel(name = JdbcConstants.POSTGRESQL)
public class PostgresqlTableMetaCache extends AbstractTableMetaCache {

    /**
     * Builds the cache key as {@code resourceId.tableName}.
     *
     * @param connection the connection (unused; PostgreSQL does not implement
     *                   supportsMixedCaseIdentifiers in DatabaseMetaData)
     * @param tableName  possibly schema-qualified and/or quoted table name
     * @param resourceId the resource id of the data source
     * @return the cache key
     */
    @Override
    protected String getCacheKey(Connection connection, String tableName, String resourceId) {
        StringBuilder cacheKey = new StringBuilder(resourceId);
        cacheKey.append(".");

        //separate it to schemaName and tableName
        String[] tableNameWithSchema = tableName.split("\\.");
        String defaultTableName = tableNameWithSchema.length > 1 ? tableNameWithSchema[1] : tableNameWithSchema[0];

        //postgres does not implement supportsMixedCaseIdentifiers in DatabaseMetadata
        if (defaultTableName.contains("\"")) {
            // quoted identifier: case is significant, only the quotes are stripped
            cacheKey.append(defaultTableName.replace("\"", ""));
        } else {
            //postgres default store in lower case
            cacheKey.append(defaultTableName.toLowerCase());
        }

        return cacheKey.toString();
    }

    /**
     * Fetches the table schema, wrapping any non-SQL failure in a {@link SQLException}.
     *
     * @param connection the connection
     * @param tableName  the table name
     * @return the table meta
     * @throws SQLException if metadata retrieval fails
     */
    @Override
    protected TableMeta fetchSchema(Connection connection, String tableName) throws SQLException {
        try {
            DatabaseMetaData dbmd = connection.getMetaData();
            return resultSetMetaToSchema(dbmd, tableName);
        } catch (SQLException sqlEx) {
            throw sqlEx;
        } catch (Exception e) {
            // preserve the original cause for diagnostics
            throw new SQLException("Failed to fetch schema of " + tableName, e);
        }
    }

    /**
     * Reads columns, indexes and primary keys from JDBC metadata into a {@link TableMeta}.
     *
     * @param dbmd      the database metadata
     * @param tableName possibly schema-qualified and/or quoted table name
     * @return the populated table meta
     * @throws SQLException if metadata retrieval fails or the table has no index at all
     */
    private TableMeta resultSetMetaToSchema(DatabaseMetaData dbmd, String tableName) throws SQLException {
        TableMeta tm = new TableMeta();
        tm.setTableName(tableName);
        String[] schemaTable = tableName.split("\\.");
        String schemaName = schemaTable.length > 1 ? schemaTable[0] : null;
        tableName = schemaTable.length > 1 ? schemaTable[1] : tableName;
        /*
         * use ResultSetMetaData to get the pure table name
         * can avoid the problem below
         *
         * select * from account_tbl
         * select * from account_TBL
         * select * from account_tbl
         * select * from account.account_tbl
         * select * from "select"
         * select * from "Select"
         * select * from "Sel""ect"
         * select * from "Sel'ect"
         * select * from TEST.test
         * select * from test.TEST
         * select * from "Test".test
         * select * from "Test"."Select"
         */
        if (schemaName != null) {
            if (schemaName.startsWith("\"") && schemaName.endsWith("\"")) {
                schemaName = schemaName.replaceAll("(^\")|(\"$)", "");
            } else {
                schemaName = schemaName.toLowerCase();
            }
        }
        if (tableName.startsWith("\"") && tableName.endsWith("\"")) {
            tableName = tableName.replaceAll("(^\")|(\"$)", "");
        } else {
            tableName = tableName.toLowerCase();
        }
        try (ResultSet rsColumns = dbmd.getColumns(null, schemaName, tableName, "%");
             ResultSet rsIndex = dbmd.getIndexInfo(null, schemaName, tableName, false, true);
             ResultSet rsPrimary = dbmd.getPrimaryKeys(null, schemaName, tableName)) {
            while (rsColumns.next()) {
                ColumnMeta col = new ColumnMeta();
                col.setTableCat(rsColumns.getString("TABLE_CAT"));
                col.setTableSchemaName(rsColumns.getString("TABLE_SCHEM"));
                col.setTableName(rsColumns.getString("TABLE_NAME"));
                col.setColumnName(rsColumns.getString("COLUMN_NAME"));
                col.setDataType(rsColumns.getInt("DATA_TYPE"));
                col.setDataTypeName(rsColumns.getString("TYPE_NAME"));
                col.setColumnSize(rsColumns.getInt("COLUMN_SIZE"));
                col.setDecimalDigits(rsColumns.getInt("DECIMAL_DIGITS"));
                col.setNumPrecRadix(rsColumns.getInt("NUM_PREC_RADIX"));
                col.setNullAble(rsColumns.getInt("NULLABLE"));
                col.setRemarks(rsColumns.getString("REMARKS"));
                col.setColumnDef(rsColumns.getString("COLUMN_DEF"));
                col.setSqlDataType(rsColumns.getInt("SQL_DATA_TYPE"));
                col.setSqlDatetimeSub(rsColumns.getInt("SQL_DATETIME_SUB"));
                col.setCharOctetLength(rsColumns.getObject("CHAR_OCTET_LENGTH"));
                col.setOrdinalPosition(rsColumns.getInt("ORDINAL_POSITION"));
                col.setIsNullAble(rsColumns.getString("IS_NULLABLE"));
                col.setIsAutoincrement(rsColumns.getString("IS_AUTOINCREMENT"));
                tm.getAllColumns().put(col.getColumnName(), col);
            }
            while (rsIndex.next()) {
                String indexName = rsIndex.getString("index_name");
                if (StringUtils.isNullOrEmpty(indexName)) {
                    // statistics rows (tableIndexStatistic) carry no index name; skip them
                    continue;
                }
                String colName = rsIndex.getString("column_name");
                ColumnMeta col = tm.getAllColumns().get(colName);
                if (tm.getAllIndexes().containsKey(indexName)) {
                    // subsequent column of an already-seen (composite) index
                    IndexMeta index = tm.getAllIndexes().get(indexName);
                    index.getValues().add(col);
                } else {
                    IndexMeta index = new IndexMeta();
                    // note: was set twice with the same value in the original; once is enough
                    index.setIndexName(indexName);
                    index.setNonUnique(rsIndex.getBoolean("non_unique"));
                    index.setIndexQualifier(rsIndex.getString("index_qualifier"));
                    index.setType(rsIndex.getShort("type"));
                    index.setOrdinalPosition(rsIndex.getShort("ordinal_position"));
                    index.setAscOrDesc(rsIndex.getString("asc_or_desc"));
                    index.setCardinality(rsIndex.getInt("cardinality"));
                    index.getValues().add(col);
                    if (!index.isNonUnique()) {
                        index.setIndextype(IndexType.UNIQUE);
                    } else {
                        index.setIndextype(IndexType.NORMAL);
                    }
                    tm.getAllIndexes().put(indexName, index);
                }
            }
            while (rsPrimary.next()) {
                // promote the index backing the primary key constraint
                String pkIndexName = rsPrimary.getString("pk_name");
                if (tm.getAllIndexes().containsKey(pkIndexName)) {
                    IndexMeta index = tm.getAllIndexes().get(pkIndexName);
                    index.setIndextype(IndexType.PRIMARY);
                }
            }
            if (tm.getAllIndexes().isEmpty()) {
                throw new ShouldNeverHappenException("Could not found any index in the table: " + tableName);
            }
        }
        return tm;
    }
}

View File

@@ -0,0 +1,387 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo;
import javax.sql.rowset.serial.SerialBlob;
import javax.sql.rowset.serial.SerialClob;
import javax.sql.rowset.serial.SerialDatalink;
import java.sql.Array;
import java.sql.Connection;
import java.sql.JDBCType;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import com.alibaba.fastjson.JSON;
import io.seata.common.util.BlobUtils;
import io.seata.common.util.IOUtil;
import io.seata.common.util.StringUtils;
import io.seata.config.ConfigurationFactory;
import io.seata.core.constants.ConfigurationKeys;
import io.seata.core.model.Result;
import io.seata.rm.datasource.ColumnUtils;
import io.seata.rm.datasource.DataCompareUtils;
import io.seata.rm.datasource.SqlGenerateUtils;
import io.seata.rm.datasource.sql.serial.SerialArray;
import io.seata.rm.datasource.sql.struct.Field;
import io.seata.rm.datasource.sql.struct.KeyType;
import io.seata.rm.datasource.sql.struct.Row;
import io.seata.rm.datasource.sql.struct.TableMeta;
import io.seata.rm.datasource.sql.struct.TableRecords;
import io.seata.rm.datasource.util.JdbcUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static io.seata.common.DefaultValues.DEFAULT_TRANSACTION_UNDO_DATA_VALIDATION;
import java.util.Map;
import java.util.stream.Collectors;
/**
 * The type Abstract undo executor: builds and executes the SQL that rolls a single
 * table back to its before-image, optionally validating first that no third party
 * dirtied the rows since the after-image was taken.
 *
 * @author sharajava
 * @author Geng Zhang
 */
public abstract class AbstractUndoExecutor {

    /**
     * Logger for AbstractUndoExecutor
     **/
    private static final Logger LOGGER = LoggerFactory.getLogger(AbstractUndoExecutor.class);

    /**
     * template of check sql
     * TODO support multiple primary key
     */
    private static final String CHECK_SQL_TEMPLATE = "SELECT * FROM %s WHERE %s FOR UPDATE";

    /**
     * Switch of undo data validation
     */
    public static final boolean IS_UNDO_DATA_VALIDATION_ENABLE = ConfigurationFactory.getInstance()
        .getBoolean(ConfigurationKeys.TRANSACTION_UNDO_DATA_VALIDATION, DEFAULT_TRANSACTION_UNDO_DATA_VALIDATION);

    /**
     * The Sql undo log.
     */
    protected SQLUndoLog sqlUndoLog;

    /**
     * Build undo sql string.
     *
     * @return the string
     */
    protected abstract String buildUndoSQL();

    /**
     * Instantiates a new Abstract undo executor.
     *
     * @param sqlUndoLog the sql undo log
     */
    public AbstractUndoExecutor(SQLUndoLog sqlUndoLog) {
        this.sqlUndoLog = sqlUndoLog;
    }

    /**
     * Gets sql undo log.
     *
     * @return the sql undo log
     */
    public SQLUndoLog getSqlUndoLog() {
        return sqlUndoLog;
    }

    /**
     * Execute the undo SQL on the given connection, one statement per undo row.
     *
     * @param conn the conn
     * @throws SQLException the sql exception
     */
    public void executeOn(Connection conn) throws SQLException {
        if (IS_UNDO_DATA_VALIDATION_ENABLE && !dataValidationAndGoOn(conn)) {
            return;
        }
        // try-with-resources: the original leaked the PreparedStatement on every call.
        // Exceptions thrown while preparing the statement are routed to the catch below too.
        try (PreparedStatement undoPST = conn.prepareStatement(buildUndoSQL())) {
            TableRecords undoRows = getUndoRows();
            for (Row undoRow : undoRows.getRows()) {
                ArrayList<Field> undoValues = new ArrayList<>();
                List<Field> pkValueList = getOrderedPkList(undoRows, undoRow, getDbType(conn));
                for (Field field : undoRow.getFields()) {
                    if (field.getKeyType() != KeyType.PRIMARY_KEY) {
                        undoValues.add(field);
                    }
                }
                undoPrepare(undoPST, undoValues, pkValueList);
                undoPST.executeUpdate();
            }
        } catch (Exception ex) {
            if (ex instanceof SQLException) {
                throw (SQLException) ex;
            } else {
                throw new SQLException(ex);
            }
        }
    }

    /**
     * Bind undo values (non-PK columns first, then PK columns) onto the prepared statement,
     * converting serial LOB/array wrappers back to JDBC-settable forms.
     *
     * @param undoPST     the undo pst
     * @param undoValues  the undo values
     * @param pkValueList the pk value
     * @throws SQLException the sql exception
     */
    protected void undoPrepare(PreparedStatement undoPST, ArrayList<Field> undoValues, List<Field> pkValueList)
        throws SQLException {
        int undoIndex = 0;
        for (Field undoValue : undoValues) {
            undoIndex++;
            int type = undoValue.getType();
            Object value = undoValue.getValue();
            if (type == JDBCType.BLOB.getVendorTypeNumber()) {
                SerialBlob serialBlob = (SerialBlob) value;
                if (serialBlob != null) {
                    undoPST.setBytes(undoIndex, BlobUtils.blob2Bytes(serialBlob));
                } else {
                    undoPST.setObject(undoIndex, null);
                }
            } else if (type == JDBCType.CLOB.getVendorTypeNumber()) {
                SerialClob serialClob = (SerialClob) value;
                if (serialClob != null) {
                    undoPST.setClob(undoIndex, serialClob.getCharacterStream());
                } else {
                    undoPST.setObject(undoIndex, null);
                }
            } else if (type == JDBCType.DATALINK.getVendorTypeNumber()) {
                SerialDatalink dataLink = (SerialDatalink) value;
                if (dataLink != null) {
                    undoPST.setURL(undoIndex, dataLink.getDatalink());
                } else {
                    undoPST.setObject(undoIndex, null);
                }
            } else if (type == JDBCType.ARRAY.getVendorTypeNumber()) {
                SerialArray array = (SerialArray) value;
                if (array != null) {
                    // re-materialize a driver-side Array from the serialized snapshot
                    Array arrayOf = undoPST.getConnection().createArrayOf(array.getBaseTypeName(), array.getElements());
                    undoPST.setArray(undoIndex, arrayOf);
                } else {
                    undoPST.setObject(undoIndex, null);
                }
            } else if (undoValue.getType() == JDBCType.OTHER.getVendorTypeNumber()) {
                undoPST.setObject(undoIndex, value);
            } else {
                // JDBCType.REF, JDBCType.JAVA_OBJECT etc...
                undoPST.setObject(undoIndex, value, type);
            }
        }
        // PK is always at last.
        // INSERT INTO a (x, y, z, pk1,pk2) VALUES (?, ?, ?, ? ,?)
        // UPDATE a SET x=?, y=?, z=? WHERE pk1 in (?) and pk2 in (?)
        // DELETE FROM a WHERE pk1 in (?) and pk2 in (?)
        for (Field pkField : pkValueList) {
            undoIndex++;
            undoPST.setObject(undoIndex, pkField.getValue(), pkField.getType());
        }
    }

    /**
     * Gets undo rows.
     *
     * @return the undo rows
     */
    protected abstract TableRecords getUndoRows();

    /**
     * Data validation.
     *
     * @param conn the conn
     * @return return true if data validation is ok and need continue undo, and return false if no need continue undo.
     * @throws SQLException the sql exception such as has dirty data
     */
    protected boolean dataValidationAndGoOn(Connection conn) throws SQLException {
        TableRecords beforeRecords = sqlUndoLog.getBeforeImage();
        TableRecords afterRecords = sqlUndoLog.getAfterImage();
        // Compare current data with before data
        // No need undo if the before data snapshot is equivalent to the after data snapshot.
        Result<Boolean> beforeEqualsAfterResult = DataCompareUtils.isRecordsEquals(beforeRecords, afterRecords);
        if (beforeEqualsAfterResult.getResult()) {
            if (LOGGER.isInfoEnabled()) {
                LOGGER.info("Stop rollback because there is no data change " +
                    "between the before data snapshot and the after data snapshot.");
            }
            // no need continue undo.
            return false;
        }
        // Validate if data is dirty.
        TableRecords currentRecords = queryCurrentRecords(conn);
        // compare with current data and after image.
        Result<Boolean> afterEqualsCurrentResult = DataCompareUtils.isRecordsEquals(afterRecords, currentRecords);
        if (!afterEqualsCurrentResult.getResult()) {
            // If current data is not equivalent to the after data, then compare the current data with the before
            // data, too. No need continue to undo if current data is equivalent to the before data snapshot
            Result<Boolean> beforeEqualsCurrentResult = DataCompareUtils.isRecordsEquals(beforeRecords, currentRecords);
            if (beforeEqualsCurrentResult.getResult()) {
                if (LOGGER.isInfoEnabled()) {
                    LOGGER.info("Stop rollback because there is no data change " +
                        "between the before data snapshot and the current data snapshot.");
                }
                // no need continue undo.
                return false;
            } else {
                if (LOGGER.isInfoEnabled()) {
                    if (StringUtils.isNotBlank(afterEqualsCurrentResult.getErrMsg())) {
                        LOGGER.info(afterEqualsCurrentResult.getErrMsg(), afterEqualsCurrentResult.getErrMsgParams());
                    }
                }
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug("check dirty datas failed, old and new data are not equal," +
                        "tableName:[" + sqlUndoLog.getTableName() + "]," +
                        "oldRows:[" + JSON.toJSONString(afterRecords.getRows()) + "]," +
                        "newRows:[" + JSON.toJSONString(currentRecords.getRows()) + "].");
                }
                throw new SQLException("Has dirty records when undo.");
            }
        }
        return true;
    }

    /**
     * Query current records by primary keys, locking them with SELECT ... FOR UPDATE.
     *
     * @param conn the conn
     * @return the table records
     * @throws SQLException the sql exception
     */
    protected TableRecords queryCurrentRecords(Connection conn) throws SQLException {
        TableRecords undoRecords = getUndoRows();
        TableMeta tableMeta = undoRecords.getTableMeta();
        //the order of element matters
        List<String> pkNameList = tableMeta.getPrimaryKeyOnlyName();
        // pares pk values
        Map<String, List<Field>> pkRowValues = parsePkValues(getUndoRows());
        if (pkRowValues.isEmpty()) {
            return TableRecords.empty(tableMeta);
        }
        // build check sql; every pk column maps to the same number of rows, so any
        // column's value list gives the row count
        String firstKey = pkRowValues.keySet().stream().findFirst().get();
        int pkRowSize = pkRowValues.get(firstKey).size();
        String checkSQL = String.format(CHECK_SQL_TEMPLATE, sqlUndoLog.getTableName(),
            SqlGenerateUtils.buildWhereConditionByPKs(pkNameList, pkRowSize, getDbType(conn)));
        PreparedStatement statement = null;
        ResultSet checkSet = null;
        TableRecords currentRecords;
        try {
            statement = conn.prepareStatement(checkSQL);
            int paramIndex = 1;
            // bind row-major: for each row, each pk column in declared order
            for (int r = 0; r < pkRowSize; r++) {
                for (int c = 0; c < pkNameList.size(); c++) {
                    List<Field> pkColumnValueList = pkRowValues.get(pkNameList.get(c));
                    Field field = pkColumnValueList.get(r);
                    int dataType = tableMeta.getColumnMeta(field.getName()).getDataType();
                    statement.setObject(paramIndex, field.getValue(), dataType);
                    paramIndex++;
                }
            }
            checkSet = statement.executeQuery();
            currentRecords = TableRecords.buildRecords(tableMeta, checkSet);
        } finally {
            IOUtil.close(checkSet, statement);
        }
        return currentRecords;
    }

    /**
     * Returns the row's primary-key fields ordered as in the table meta definition.
     *
     * @param image  the table records the row belongs to
     * @param row    the row
     * @param dbType the db type, used to strip identifier escapes
     * @return pk fields in table-meta order
     */
    protected List<Field> getOrderedPkList(TableRecords image, Row row, String dbType) {
        List<Field> pkFields = new ArrayList<>();
        // To ensure the order of the pk, the order should based on getPrimaryKeyOnlyName.
        List<String> pkColumnNameListByOrder = image.getTableMeta().getPrimaryKeyOnlyName();
        List<String> pkColumnNameListNoOrder = row.primaryKeys()
            .stream()
            .map(e -> ColumnUtils.delEscape(e.getName(), dbType))
            .collect(Collectors.toList());
        pkColumnNameListByOrder.forEach(pkName -> {
            int pkIndex = pkColumnNameListNoOrder.indexOf(pkName);
            if (pkIndex != -1) {
                // add PK to the last of the list.
                pkFields.add(row.primaryKeys().get(pkIndex));
            }
        });
        return pkFields;
    }

    /**
     * Parse pk values Field List.
     *
     * @param records the records
     * @return each element represents a row. And inside a row list contains pk columns(Field).
     */
    protected Map<String, List<Field>> parsePkValues(TableRecords records) {
        return parsePkValues(records.getRows(), records.getTableMeta().getPrimaryKeyOnlyName());
    }

    /**
     * Parse pk values Field List.
     *
     * @param rows       pk rows
     * @param pkNameList pk column name
     * @return each element represents a row. And inside a row list contains pk columns(Field).
     */
    protected Map<String, List<Field>> parsePkValues(List<Row> rows, List<String> pkNameList) {
        List<Field> pkFieldList = new ArrayList<>();
        for (Row row : rows) {
            List<Field> fields = row.getFields();
            if (fields != null) {
                for (Field field : fields) {
                    // case-insensitive match against the declared pk column names
                    if (pkNameList.stream().anyMatch(e -> field.getName().equalsIgnoreCase(e))) {
                        pkFieldList.add(field);
                    }
                }
            }
        }
        return pkFieldList.stream().collect(Collectors.groupingBy(Field::getName));
    }

    /**
     * Get db type
     *
     * @param conn the connection
     * @return the db type
     * @throws SQLException SQLException
     */
    protected String getDbType(Connection conn) throws SQLException {
        return JdbcUtils.getDbType(conn.getMetaData().getURL());
    }
}

View File

@@ -0,0 +1,443 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.SQLIntegrityConstraintViolationException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import io.seata.common.Constants;
import io.seata.common.util.CollectionUtils;
import io.seata.common.util.SizeUtil;
import io.seata.config.ConfigurationFactory;
import io.seata.core.compressor.CompressorFactory;
import io.seata.core.compressor.CompressorType;
import io.seata.core.constants.ClientTableColumnsName;
import io.seata.core.constants.ConfigurationKeys;
import io.seata.core.exception.BranchTransactionException;
import io.seata.core.exception.TransactionException;
import io.seata.rm.datasource.ConnectionContext;
import io.seata.rm.datasource.ConnectionProxy;
import io.seata.rm.datasource.DataSourceProxy;
import io.seata.rm.datasource.extend.FtbSeataExtendManagerHolder;
import io.seata.rm.datasource.sql.struct.TableMeta;
import io.seata.rm.datasource.sql.struct.TableMetaCacheFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.swing.*;
import static io.seata.common.DefaultValues.DEFAULT_TRANSACTION_UNDO_LOG_TABLE;
import static io.seata.common.DefaultValues.DEFAULT_CLIENT_UNDO_COMPRESS_ENABLE;
import static io.seata.common.DefaultValues.DEFAULT_CLIENT_UNDO_COMPRESS_TYPE;
import static io.seata.common.DefaultValues.DEFAULT_CLIENT_UNDO_COMPRESS_THRESHOLD;
import static io.seata.core.exception.TransactionExceptionCode.BranchRollbackFailed_Retriable;
/**
* @author jsbxyyx
*/
public abstract class AbstractUndoLogManager implements UndoLogManager {
private static final Logger LOGGER = LoggerFactory.getLogger(AbstractUndoLogManager.class);
protected enum State {
/**
* This state can be properly rolled back by services
*/
Normal(0),
/**
* This state prevents the branch transaction from inserting undo_log after the global transaction is rolled
* back.
*/
GlobalFinished(1);
private int value;
State(int value) {
this.value = value;
}
public int getValue() {
return value;
}
}
protected static final String UNDO_LOG_TABLE_NAME = ConfigurationFactory.getInstance().getConfig(
ConfigurationKeys.TRANSACTION_UNDO_LOG_TABLE, DEFAULT_TRANSACTION_UNDO_LOG_TABLE);
protected static final String SELECT_UNDO_LOG_SQL = "SELECT * FROM " + UNDO_LOG_TABLE_NAME + " WHERE "
+ ClientTableColumnsName.UNDO_LOG_BRANCH_XID + " = ? AND " + ClientTableColumnsName.UNDO_LOG_XID
+ " = ? FOR UPDATE";
protected static final String DELETE_UNDO_LOG_SQL = "DELETE FROM " + UNDO_LOG_TABLE_NAME + " WHERE "
+ ClientTableColumnsName.UNDO_LOG_BRANCH_XID + " = ? AND " + ClientTableColumnsName.UNDO_LOG_XID + " = ?";
protected static final boolean ROLLBACK_INFO_COMPRESS_ENABLE = ConfigurationFactory.getInstance().getBoolean(
ConfigurationKeys.CLIENT_UNDO_COMPRESS_ENABLE, DEFAULT_CLIENT_UNDO_COMPRESS_ENABLE);
protected static final CompressorType ROLLBACK_INFO_COMPRESS_TYPE = CompressorType.getByName(ConfigurationFactory.getInstance().getConfig(
ConfigurationKeys.CLIENT_UNDO_COMPRESS_TYPE, DEFAULT_CLIENT_UNDO_COMPRESS_TYPE));
protected static final long ROLLBACK_INFO_COMPRESS_THRESHOLD = SizeUtil.size2Long(ConfigurationFactory.getInstance().getConfig(
ConfigurationKeys.CLIENT_UNDO_COMPRESS_THRESHOLD, DEFAULT_CLIENT_UNDO_COMPRESS_THRESHOLD));
private static final ThreadLocal<String> SERIALIZER_LOCAL = new ThreadLocal<>();
public static String getCurrentSerializer() {
return SERIALIZER_LOCAL.get();
}
public static void setCurrentSerializer(String serializer) {
SERIALIZER_LOCAL.set(serializer);
}
public static void removeCurrentSerializer() {
SERIALIZER_LOCAL.remove();
}
/**
* Delete undo log.
*
* @param xid the xid
* @param branchId the branch id
* @param conn the conn
* @throws SQLException the sql exception
*/
@Override
public void deleteUndoLog(String xid, long branchId, Connection conn) throws SQLException {
try (PreparedStatement deletePST = conn.prepareStatement(DELETE_UNDO_LOG_SQL)) {
deletePST.setLong(1, branchId);
deletePST.setString(2, xid);
deletePST.executeUpdate();
} catch (Exception e) {
if (!(e instanceof SQLException)) {
e = new SQLException(e);
}
throw (SQLException) e;
}
}
/**
* batch Delete undo log.
*
* @param xids xid
* @param branchIds branch Id
* @param conn connection
*/
@Override
public void batchDeleteUndoLog(Set<String> xids, Set<Long> branchIds, Connection conn) throws SQLException {
if (CollectionUtils.isEmpty(xids) || CollectionUtils.isEmpty(branchIds)) {
return;
}
int xidSize = xids.size();
int branchIdSize = branchIds.size();
String batchDeleteSql = toBatchDeleteUndoLogSql(xidSize, branchIdSize);
try (PreparedStatement deletePST = conn.prepareStatement(batchDeleteSql)) {
int paramsIndex = 1;
for (Long branchId : branchIds) {
deletePST.setLong(paramsIndex++, branchId);
}
for (String xid : xids) {
// seata自定义扩展处理connection连接变更
FtbSeataExtendManagerHolder.get().undoLogConnection(xid,conn);
deletePST.setString(paramsIndex++, xid);
}
int deleteRows = deletePST.executeUpdate();
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("batch delete undo log size {}", deleteRows);
}
} catch (Exception e) {
if (!(e instanceof SQLException)) {
e = new SQLException(e);
}
throw (SQLException) e;
}
}
protected static String toBatchDeleteUndoLogSql(int xidSize, int branchIdSize) {
StringBuilder sqlBuilder = new StringBuilder(64);
sqlBuilder.append("DELETE FROM ").append(UNDO_LOG_TABLE_NAME).append(" WHERE ").append(
ClientTableColumnsName.UNDO_LOG_BRANCH_XID).append(" IN ");
appendInParam(branchIdSize, sqlBuilder);
sqlBuilder.append(" AND ").append(ClientTableColumnsName.UNDO_LOG_XID).append(" IN ");
appendInParam(xidSize, sqlBuilder);
return sqlBuilder.toString();
}
protected static void appendInParam(int size, StringBuilder sqlBuilder) {
sqlBuilder.append(" (");
for (int i = 0; i < size; i++) {
sqlBuilder.append("?");
if (i < (size - 1)) {
sqlBuilder.append(",");
}
}
sqlBuilder.append(") ");
}
protected static boolean canUndo(int state) {
return state == State.Normal.getValue();
}
protected String buildContext(String serializer, CompressorType compressorType) {
Map<String, String> map = new HashMap<>();
map.put(UndoLogConstants.SERIALIZER_KEY, serializer);
map.put(UndoLogConstants.COMPRESSOR_TYPE_KEY, compressorType.name());
return CollectionUtils.encodeMap(map);
}
protected Map<String, String> parseContext(String data) {
return CollectionUtils.decodeMap(data);
}
/**
* Flush undo logs.
*
* @param cp the cp
* @throws SQLException the sql exception
*/
@Override
public void flushUndoLogs(ConnectionProxy cp) throws SQLException {
ConnectionContext connectionContext = cp.getContext();
if (!connectionContext.hasUndoLog()) {
return;
}
String xid = connectionContext.getXid();
long branchId = connectionContext.getBranchId();
// seata自定义扩展处理connection连接变更
FtbSeataExtendManagerHolder.get().flushUndoLogsConnection(cp,xid);
BranchUndoLog branchUndoLog = new BranchUndoLog();
branchUndoLog.setXid(xid);
branchUndoLog.setBranchId(branchId);
branchUndoLog.setSqlUndoLogs(connectionContext.getUndoItems());
UndoLogParser parser = UndoLogParserFactory.getInstance();
byte[] undoLogContent = parser.encode(branchUndoLog);
CompressorType compressorType = CompressorType.NONE;
if (needCompress(undoLogContent)) {
compressorType = ROLLBACK_INFO_COMPRESS_TYPE;
undoLogContent = CompressorFactory.getCompressor(compressorType.getCode()).compress(undoLogContent);
}
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Flushing UNDO LOG: {}", new String(undoLogContent, Constants.DEFAULT_CHARSET));
}
insertUndoLogWithNormal(xid, branchId, buildContext(parser.getName(), compressorType), undoLogContent, cp.getTargetConnection());
}
/**
* Undo.
*
* @param dataSourceProxy the data source proxy
* @param xid the xid
* @param branchId the branch id
* @throws TransactionException the transaction exception
*/
@Override
public void undo(DataSourceProxy dataSourceProxy, String xid, long branchId) throws TransactionException {
Connection conn = null;
ResultSet rs = null;
PreparedStatement selectPST = null;
boolean originalAutoCommit = true;
for (; ; ) {
try {
conn = dataSourceProxy.getPlainConnection();
// seata自定义扩展处理connection连接变更
FtbSeataExtendManagerHolder.get().undoLogConnection(xid,conn);
// The entire undo process should run in a local transaction.
if (originalAutoCommit = conn.getAutoCommit()) {
conn.setAutoCommit(false);
}
// Find UNDO LOG
selectPST = conn.prepareStatement(SELECT_UNDO_LOG_SQL);
selectPST.setLong(1, branchId);
selectPST.setString(2, xid);
rs = selectPST.executeQuery();
boolean exists = false;
while (rs.next()) {
exists = true;
// It is possible that the server repeatedly sends a rollback request to roll back
// the same branch transaction to multiple processes,
// ensuring that only the undo_log in the normal state is processed.
int state = rs.getInt(ClientTableColumnsName.UNDO_LOG_LOG_STATUS);
if (!canUndo(state)) {
if (LOGGER.isInfoEnabled()) {
LOGGER.info("xid {} branch {}, ignore {} undo_log", xid, branchId, state);
}
return;
}
String contextString = rs.getString(ClientTableColumnsName.UNDO_LOG_CONTEXT);
Map<String, String> context = parseContext(contextString);
byte[] rollbackInfo = getRollbackInfo(rs);
String serializer = context == null ? null : context.get(UndoLogConstants.SERIALIZER_KEY);
UndoLogParser parser = serializer == null ? UndoLogParserFactory.getInstance()
: UndoLogParserFactory.getInstance(serializer);
BranchUndoLog branchUndoLog = parser.decode(rollbackInfo);
try {
// put serializer name to local
setCurrentSerializer(parser.getName());
List<SQLUndoLog> sqlUndoLogs = branchUndoLog.getSqlUndoLogs();
if (sqlUndoLogs.size() > 1) {
Collections.reverse(sqlUndoLogs);
}
for (SQLUndoLog sqlUndoLog : sqlUndoLogs) {
TableMeta tableMeta = TableMetaCacheFactory.getTableMetaCache(dataSourceProxy.getDbType()).getTableMeta(
conn, sqlUndoLog.getTableName(), dataSourceProxy.getResourceId());
sqlUndoLog.setTableMeta(tableMeta);
AbstractUndoExecutor undoExecutor = UndoExecutorFactory.getUndoExecutor(
dataSourceProxy.getDbType(), sqlUndoLog);
undoExecutor.executeOn(conn);
}
} finally {
// remove serializer name
removeCurrentSerializer();
}
}
// If undo_log exists, it means that the branch transaction has completed the first phase,
// we can directly roll back and clean the undo_log
// Otherwise, it indicates that there is an exception in the branch transaction,
// causing undo_log not to be written to the database.
// For example, the business processing timeout, the global transaction is the initiator rolls back.
// To ensure data consistency, we can insert an undo_log with GlobalFinished state
// to prevent the local transaction of the first phase of other programs from being correctly submitted.
// See https://github.com/seata/seata/issues/489
if (exists) {
deleteUndoLog(xid, branchId, conn);
conn.commit();
if (LOGGER.isInfoEnabled()) {
LOGGER.info("xid {} branch {}, undo_log deleted with {}", xid, branchId,
State.GlobalFinished.name());
}
} else {
insertUndoLogWithGlobalFinished(xid, branchId, UndoLogParserFactory.getInstance(), conn);
conn.commit();
if (LOGGER.isInfoEnabled()) {
LOGGER.info("xid {} branch {}, undo_log added with {}", xid, branchId,
State.GlobalFinished.name());
}
}
return;
} catch (SQLIntegrityConstraintViolationException e) {
// Possible undo_log has been inserted into the database by other processes, retrying rollback undo_log
if (LOGGER.isInfoEnabled()) {
LOGGER.info("xid {} branch {}, undo_log inserted, retry rollback", xid, branchId);
}
} catch (Throwable e) {
if (conn != null) {
try {
conn.rollback();
} catch (SQLException rollbackEx) {
LOGGER.warn("Failed to close JDBC resource while undo ... ", rollbackEx);
}
}
throw new BranchTransactionException(BranchRollbackFailed_Retriable, String
.format("Branch session rollback failed and try again later xid = %s branchId = %s %s", xid,
branchId, e.getMessage()), e);
} finally {
try {
if (rs != null) {
rs.close();
}
if (selectPST != null) {
selectPST.close();
}
if (conn != null) {
if (originalAutoCommit) {
conn.setAutoCommit(true);
}
conn.close();
}
} catch (SQLException closeEx) {
LOGGER.warn("Failed to close JDBC resource while undo ... ", closeEx);
}
}
}
}
/**
* insert uodo log when global finished
*
* @param xid the xid
* @param branchId the branchId
* @param undoLogParser the undoLogParse
* @param conn sql connection
* @throws SQLException SQLException
*/
protected abstract void insertUndoLogWithGlobalFinished(String xid, long branchId, UndoLogParser undoLogParser,
Connection conn) throws SQLException;
/**
* insert uodo log when normal
*
* @param xid the xid
* @param branchId the branchId
* @param rollbackCtx the rollbackContext
* @param undoLogContent the undoLogContent
* @param conn sql connection
* @throws SQLException SQLException
*/
protected abstract void insertUndoLogWithNormal(String xid, long branchId, String rollbackCtx, byte[] undoLogContent,
Connection conn) throws SQLException;
/**
* RollbackInfo to bytes
*
* @param rs
* @return
* @throws SQLException SQLException
*/
protected byte[] getRollbackInfo(ResultSet rs) throws SQLException {
byte[] rollbackInfo = rs.getBytes(ClientTableColumnsName.UNDO_LOG_ROLLBACK_INFO);
String rollbackInfoContext = rs.getString(ClientTableColumnsName.UNDO_LOG_CONTEXT);
Map<String, String> context = CollectionUtils.decodeMap(rollbackInfoContext);
CompressorType compressorType = CompressorType.getByName(context.getOrDefault(UndoLogConstants.COMPRESSOR_TYPE_KEY,
CompressorType.NONE.name()));
return CompressorFactory.getCompressor(compressorType.getCode()).decompress(rollbackInfo);
}
/**
* if the undoLogContent is big enough to be compress
* @param undoLogContent undoLogContent
* @return boolean
*/
protected boolean needCompress(byte[] undoLogContent) {
return ROLLBACK_INFO_COMPRESS_ENABLE && undoLogContent.length > ROLLBACK_INFO_COMPRESS_THRESHOLD;
}
}

View File

@@ -0,0 +1,88 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo;
import java.util.List;
/**
* The type Branch undo log.
*
* @author sharajava
*/
public class BranchUndoLog implements java.io.Serializable {

    private static final long serialVersionUID = -101750721633603671L;

    /** Global transaction id this undo log belongs to. */
    private String xid;

    /** Branch transaction id this undo log belongs to. */
    private long branchId;

    /** One undo record per SQL statement executed in the branch. */
    private List<SQLUndoLog> sqlUndoLogs;

    /**
     * Returns the global transaction id.
     *
     * @return the xid
     */
    public String getXid() {
        return this.xid;
    }

    /**
     * Sets the global transaction id.
     *
     * @param xid the xid
     */
    public void setXid(String xid) {
        this.xid = xid;
    }

    /**
     * Returns the branch transaction id.
     *
     * @return the branch id
     */
    public long getBranchId() {
        return this.branchId;
    }

    /**
     * Sets the branch transaction id.
     *
     * @param branchId the branch id
     */
    public void setBranchId(long branchId) {
        this.branchId = branchId;
    }

    /**
     * Returns the per-statement undo records.
     *
     * @return the sql undo logs
     */
    public List<SQLUndoLog> getSqlUndoLogs() {
        return this.sqlUndoLogs;
    }

    /**
     * Sets the per-statement undo records.
     *
     * @param sqlUndoLogs the sql undo logs
     */
    public void setSqlUndoLogs(List<SQLUndoLog> sqlUndoLogs) {
        this.sqlUndoLogs = sqlUndoLogs;
    }
}

View File

@@ -0,0 +1,40 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo;
/**
* The interface Keyword checker.
*
* @author Wu
*/
public interface KeywordChecker {
    /**
     * Checks whether the given field or table name is a reserved keyword of the
     * underlying database.
     *
     * @param fieldOrTableName the field or table name
     * @return boolean
     */
    boolean check(String fieldOrTableName);

    /**
     * Checks whether the given field or table name must be escaped; implementations
     * may apply database-specific rules beyond plain keyword matching.
     *
     * @param fieldOrTableName the field or table name
     * @return true: need to escape. false: no need to escape.
     */
    boolean checkEscape(String fieldOrTableName);
}

View File

@@ -0,0 +1,43 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo;
import io.seata.common.loader.EnhancedServiceLoader;
import io.seata.common.util.CollectionUtils;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
* The type Keyword checker factory.
*
* @author Wu
*/
public class KeywordCheckerFactory {

    /** Cache of checkers keyed by db type; each is loaded once via SPI. */
    private static final Map<String, KeywordChecker> KEYWORD_CHECKER_MAP = new ConcurrentHashMap<>();

    /** Static utility class — not instantiable (consistent with UndoLogParserFactory). */
    private KeywordCheckerFactory() {
    }

    /**
     * Gets the keyword checker for the given db type, loading and caching it on first use.
     *
     * @param dbType the db type
     * @return keyword checker
     */
    public static KeywordChecker getKeywordChecker(String dbType) {
        return CollectionUtils.computeIfAbsent(KEYWORD_CHECKER_MAP, dbType,
            key -> EnhancedServiceLoader.load(KeywordChecker.class, dbType));
    }
}

View File

@@ -0,0 +1,125 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo;
import io.seata.rm.datasource.sql.struct.TableMeta;
import io.seata.rm.datasource.sql.struct.TableRecords;
import io.seata.sqlparser.SQLType;
/**
* The type Sql undo log.
*
* @author sharajava
*/
public class SQLUndoLog implements java.io.Serializable {

    private static final long serialVersionUID = -4160065043902060730L;

    /** Type of the original statement (INSERT / UPDATE / DELETE). */
    private SQLType sqlType;

    /** Name of the table the statement touched. */
    private String tableName;

    /** Row snapshot taken before the statement ran. */
    private TableRecords beforeImage;

    /** Row snapshot taken after the statement ran. */
    private TableRecords afterImage;

    /**
     * Propagates the table meta to both images, skipping whichever image is absent.
     *
     * @param tableMeta the table meta
     */
    public void setTableMeta(TableMeta tableMeta) {
        if (this.beforeImage != null) {
            this.beforeImage.setTableMeta(tableMeta);
        }
        if (this.afterImage != null) {
            this.afterImage.setTableMeta(tableMeta);
        }
    }

    /**
     * Returns the type of the original statement.
     *
     * @return the sql type
     */
    public SQLType getSqlType() {
        return this.sqlType;
    }

    /**
     * Sets the type of the original statement.
     *
     * @param sqlType the sql type
     */
    public void setSqlType(SQLType sqlType) {
        this.sqlType = sqlType;
    }

    /**
     * Returns the name of the affected table.
     *
     * @return the table name
     */
    public String getTableName() {
        return this.tableName;
    }

    /**
     * Sets the name of the affected table.
     *
     * @param tableName the table name
     */
    public void setTableName(String tableName) {
        this.tableName = tableName;
    }

    /**
     * Returns the snapshot taken before the statement ran.
     *
     * @return the before image
     */
    public TableRecords getBeforeImage() {
        return this.beforeImage;
    }

    /**
     * Sets the snapshot taken before the statement ran.
     *
     * @param beforeImage the before image
     */
    public void setBeforeImage(TableRecords beforeImage) {
        this.beforeImage = beforeImage;
    }

    /**
     * Returns the snapshot taken after the statement ran.
     *
     * @return the after image
     */
    public TableRecords getAfterImage() {
        return this.afterImage;
    }

    /**
     * Sets the snapshot taken after the statement ran.
     *
     * @param afterImage the after image
     */
    public void setAfterImage(TableRecords afterImage) {
        this.afterImage = afterImage;
    }
}

View File

@@ -0,0 +1,52 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo;
import io.seata.common.exception.ShouldNeverHappenException;
/**
* The type Undo executor factory.
*
* @author sharajava
*/
public class UndoExecutorFactory {

    /** Static utility class — not instantiable. */
    private UndoExecutorFactory() {
    }

    /**
     * Gets the undo executor matching the SQL type recorded in the undo log, using the
     * db-type-specific holder resolved through SPI.
     *
     * @param dbType the db type
     * @param sqlUndoLog the sql undo log
     * @return the undo executor
     * @throws ShouldNeverHappenException if the sql type is not INSERT, UPDATE or DELETE
     */
    public static AbstractUndoExecutor getUndoExecutor(String dbType, SQLUndoLog sqlUndoLog) {
        UndoExecutorHolder holder = UndoExecutorHolderFactory.getUndoExecutorHolder(dbType.toLowerCase());
        // Return directly from each arm instead of threading a mutable local through the switch.
        switch (sqlUndoLog.getSqlType()) {
            case INSERT:
                return holder.getInsertExecutor(sqlUndoLog);
            case UPDATE:
                return holder.getUpdateExecutor(sqlUndoLog);
            case DELETE:
                return holder.getDeleteExecutor(sqlUndoLog);
            default:
                throw new ShouldNeverHappenException();
        }
    }
}

View File

@@ -0,0 +1,48 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo;
/**
* The Type UndoExecutorHolder
*
* @author: Zhibei Hao
*/
public interface UndoExecutorHolder {
    /**
     * Gets the database-specific executor that undoes an INSERT recorded in the given undo log.
     *
     * @param sqlUndoLog the sqlUndoLog
     * @return the specific UndoExecutor
     */
    AbstractUndoExecutor getInsertExecutor(SQLUndoLog sqlUndoLog);

    /**
     * Gets the database-specific executor that undoes an UPDATE recorded in the given undo log.
     *
     * @param sqlUndoLog the sqlUndoLog
     * @return the specific UndoExecutor
     */
    AbstractUndoExecutor getUpdateExecutor(SQLUndoLog sqlUndoLog);

    /**
     * Gets the database-specific executor that undoes a DELETE recorded in the given undo log.
     *
     * @param sqlUndoLog the sqlUndoLog
     * @return the specific UndoExecutor
     */
    AbstractUndoExecutor getDeleteExecutor(SQLUndoLog sqlUndoLog);
}

View File

@@ -0,0 +1,43 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import io.seata.common.loader.EnhancedServiceLoader;
import io.seata.common.util.CollectionUtils;
/**
* The Type UndoExecutorHolderFactory
*
* @author: Zhibei Hao
*/
public class UndoExecutorHolderFactory {

    /** Cache of holders keyed by db type; each is loaded once via SPI. */
    private static final Map<String, UndoExecutorHolder> UNDO_EXECUTOR_HOLDER_MAP = new ConcurrentHashMap<>();

    /** Static utility class — not instantiable (consistent with UndoLogParserFactory). */
    private UndoExecutorHolderFactory() {
    }

    /**
     * Gets the UndoExecutorHolder for the given db type, loading and caching it on first use.
     *
     * @param dbType the db type
     * @return the UndoExecutorHolder
     */
    public static UndoExecutorHolder getUndoExecutorHolder(String dbType) {
        return CollectionUtils.computeIfAbsent(UNDO_EXECUTOR_HOLDER_MAP, dbType,
            key -> EnhancedServiceLoader.load(UndoExecutorHolder.class, dbType));
    }
}

View File

@@ -0,0 +1,34 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo;
import io.seata.config.ConfigurationFactory;
import io.seata.core.constants.ConfigurationKeys;
import static io.seata.common.DefaultValues.DEFAULT_TRANSACTION_UNDO_LOG_SERIALIZATION;
/**
* @author Geng Zhang
*/
public interface UndoLogConstants {
    /** Context-map key whose value names the serializer used for the rollback info. */
    String SERIALIZER_KEY = "serializer";
    /**
     * Serializer name resolved once from configuration at class-initialization time,
     * falling back to the built-in default when not configured.
     */
    String DEFAULT_SERIALIZER = ConfigurationFactory.getInstance()
        .getConfig(ConfigurationKeys.TRANSACTION_UNDO_LOG_SERIALIZATION, DEFAULT_TRANSACTION_UNDO_LOG_SERIALIZATION);
    /** Context-map key whose value names the compressor applied to the rollback info. */
    String COMPRESSOR_TYPE_KEY = "compressorType";
}

View File

@@ -0,0 +1,82 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Date;
import java.util.Set;
import io.seata.core.exception.TransactionException;
import io.seata.rm.datasource.ConnectionProxy;
import io.seata.rm.datasource.DataSourceProxy;
/**
* The type Undo log manager.
*
* @author sharajava
* @author Geng Zhang
*/
public interface UndoLogManager {
    /**
     * Flushes the undo logs buffered on the connection proxy to the undo_log table.
     *
     * @param cp the connection proxy
     * @throws SQLException the sql exception
     */
    void flushUndoLogs(ConnectionProxy cp) throws SQLException;

    /**
     * Undoes a branch by replaying its persisted undo log against the data source.
     *
     * @param dataSourceProxy the data source proxy
     * @param xid the global transaction id
     * @param branchId the branch id
     * @throws TransactionException the transaction exception
     */
    void undo(DataSourceProxy dataSourceProxy, String xid, long branchId) throws TransactionException;

    /**
     * Deletes the undo log record for one branch.
     *
     * @param xid the global transaction id
     * @param branchId the branch id
     * @param conn the connection to delete through
     * @throws SQLException the sql exception
     */
    void deleteUndoLog(String xid, long branchId, Connection conn) throws SQLException;

    /**
     * Deletes the undo log records for a batch of branches in one statement.
     *
     * @param xids the xid set collections
     * @param branchIds the branch id set collections
     * @param conn the connection
     * @throws SQLException the sql exception
     */
    void batchDeleteUndoLog(Set<String> xids, Set<Long> branchIds, Connection conn) throws SQLException;

    /**
     * Deletes undo log records created on or before the given time, up to a row limit.
     *
     * @param logCreated the created-time upper bound (inclusive)
     * @param limitRows the maximum rows to delete in one call
     * @param conn the connection
     * @return the number of rows deleted
     * @throws SQLException the sql exception
     */
    int deleteUndoLogByLogCreated(Date logCreated, int limitRows, Connection conn) throws SQLException;
}

View File

@@ -0,0 +1,41 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import io.seata.common.loader.EnhancedServiceLoader;
import io.seata.common.util.CollectionUtils;
/**
* @author jsbxyyx
*/
public class UndoLogManagerFactory {

    /** Cache of managers keyed by db type; each is loaded once via SPI. */
    private static final Map<String, UndoLogManager> UNDO_LOG_MANAGER_MAP = new ConcurrentHashMap<>();

    /** Static utility class — not instantiable (consistent with UndoLogParserFactory). */
    private UndoLogManagerFactory() {
    }

    /**
     * Gets the undo log manager for the given db type, loading and caching it on first use.
     *
     * @param dbType the db type
     * @return undo log manager.
     */
    public static UndoLogManager getUndoLogManager(String dbType) {
        return CollectionUtils.computeIfAbsent(UNDO_LOG_MANAGER_MAP, dbType,
            key -> EnhancedServiceLoader.load(UndoLogManager.class, dbType));
    }
}

View File

@@ -0,0 +1,55 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo;
/**
* The interface Undo log parser.
*
* @author sharajava
* @author Geng Zhang
*/
public interface UndoLogParser {
    /**
     * Gets the name of this parser (used as the SPI lookup key and recorded in the
     * undo-log context so the same parser can decode the record later).
     *
     * @return the name of parser
     */
    String getName();

    /**
     * Gets the default content of this parser, stored when there is no real undo
     * log to persist (e.g. a GlobalFinished placeholder record).
     *
     * @return the default content if undo log is empty
     */
    byte[] getDefaultContent();

    /**
     * Encodes a branch undo log to a byte array.
     *
     * @param branchUndoLog the branch undo log
     * @return the byte array
     */
    byte[] encode(BranchUndoLog branchUndoLog);

    /**
     * Decodes a byte array back into a branch undo log.
     *
     * @param bytes the byte array
     * @return the branch undo log
     */
    BranchUndoLog decode(byte[] bytes);
}

View File

@@ -0,0 +1,64 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo;
import io.seata.common.loader.EnhancedServiceLoader;
import io.seata.common.util.CollectionUtils;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
* The type Undo log parser factory.
*
* @author sharajava
* @author Geng Zhang
*/
public class UndoLogParserFactory {

    /**
     * Parser instances cached by serializer name.
     */
    private static final ConcurrentMap<String, UndoLogParser> INSTANCES = new ConcurrentHashMap<>();

    private UndoLogParserFactory() {
    }

    /**
     * Lazily holds the parser for the configured default serializer.
     */
    private static class SingletonHolder {
        private static final UndoLogParser INSTANCE = getInstance(UndoLogConstants.DEFAULT_SERIALIZER);
    }

    /**
     * Gets the default UndoLogParser instance.
     *
     * @return the instance
     */
    public static UndoLogParser getInstance() {
        return SingletonHolder.INSTANCE;
    }

    /**
     * Gets the UndoLogParser registered under the given name, loading and caching
     * it on first use.
     *
     * @param name parser name
     * @return the UndoLogParser
     */
    public static UndoLogParser getInstance(String name) {
        return CollectionUtils.computeIfAbsent(INSTANCES, name,
            k -> EnhancedServiceLoader.load(UndoLogParser.class, name));
    }
}

View File

@@ -0,0 +1,87 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.mysql;
import io.seata.common.exception.ShouldNeverHappenException;
import io.seata.common.util.CollectionUtils;
import io.seata.rm.datasource.ColumnUtils;
import io.seata.rm.datasource.sql.struct.Field;
import io.seata.rm.datasource.sql.struct.Row;
import io.seata.rm.datasource.sql.struct.TableRecords;
import io.seata.rm.datasource.undo.AbstractUndoExecutor;
import io.seata.rm.datasource.undo.SQLUndoLog;
import io.seata.sqlparser.util.JdbcConstants;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/**
* The type My sql undo delete executor.
*
* @author sharajava
*/
public class MySQLUndoDeleteExecutor extends AbstractUndoExecutor {

    /**
     * Compensating statement template: INSERT INTO table (cols...) VALUES (?...)
     */
    private static final String INSERT_SQL_TEMPLATE = "INSERT INTO %s (%s) VALUES (%s)";

    /**
     * Instantiates a new My sql undo delete executor.
     *
     * @param sqlUndoLog the sql undo log
     */
    public MySQLUndoDeleteExecutor(SQLUndoLog sqlUndoLog) {
        super(sqlUndoLog);
    }

    /**
     * A DELETE is undone by re-inserting the before-image row.
     *
     * Notice: PK is at last one.
     * @see AbstractUndoExecutor#undoPrepare
     *
     * @return sql
     */
    @Override
    protected String buildUndoSQL() {
        TableRecords beforeImage = sqlUndoLog.getBeforeImage();
        List<Row> rows = beforeImage.getRows();
        if (CollectionUtils.isEmpty(rows)) {
            throw new ShouldNeverHappenException("Invalid UNDO LOG");
        }
        Row firstRow = rows.get(0);
        // non-PK columns first, ordered PK columns last (undoPrepare relies on this order)
        List<Field> fields = new ArrayList<>(firstRow.nonPrimaryKeys());
        fields.addAll(getOrderedPkList(beforeImage, firstRow, JdbcConstants.MYSQL));
        // before-image field names come from table meta, so they need escaping
        // see BaseTransactionalExecutor#buildTableRecords
        StringBuilder columns = new StringBuilder();
        StringBuilder placeholders = new StringBuilder();
        for (Field field : fields) {
            if (columns.length() > 0) {
                columns.append(", ");
                placeholders.append(", ");
            }
            columns.append(ColumnUtils.addEscape(field.getName(), JdbcConstants.MYSQL));
            placeholders.append("?");
        }
        return String.format(INSERT_SQL_TEMPLATE, sqlUndoLog.getTableName(), columns, placeholders);
    }

    @Override
    protected TableRecords getUndoRows() {
        return sqlUndoLog.getBeforeImage();
    }
}

View File

@@ -0,0 +1,46 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.mysql;
import io.seata.common.loader.LoadLevel;
import io.seata.rm.datasource.undo.AbstractUndoExecutor;
import io.seata.rm.datasource.undo.SQLUndoLog;
import io.seata.rm.datasource.undo.UndoExecutorHolder;
import io.seata.sqlparser.util.JdbcConstants;
/**
* The Type MySQLUndoExecutorHolder
*
* @author: Zhibei Hao
*/
@LoadLevel(name = JdbcConstants.MYSQL)
public class MySQLUndoExecutorHolder implements UndoExecutorHolder {
    // SPI-loaded holder for the "mysql" db type: each method wraps the undo log
    // in the MySQL-specific executor for the corresponding SQL type.
    @Override
    public AbstractUndoExecutor getInsertExecutor(SQLUndoLog sqlUndoLog) {
        return new MySQLUndoInsertExecutor(sqlUndoLog);
    }
    @Override
    public AbstractUndoExecutor getUpdateExecutor(SQLUndoLog sqlUndoLog) {
        return new MySQLUndoUpdateExecutor(sqlUndoLog);
    }
    @Override
    public AbstractUndoExecutor getDeleteExecutor(SQLUndoLog sqlUndoLog) {
        return new MySQLUndoDeleteExecutor(sqlUndoLog);
    }
}

View File

@@ -0,0 +1,91 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.mysql;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import io.seata.common.exception.ShouldNeverHappenException;
import io.seata.common.util.CollectionUtils;
import io.seata.rm.datasource.SqlGenerateUtils;
import io.seata.rm.datasource.sql.struct.Field;
import io.seata.rm.datasource.sql.struct.Row;
import io.seata.rm.datasource.sql.struct.TableRecords;
import io.seata.rm.datasource.undo.AbstractUndoExecutor;
import io.seata.rm.datasource.undo.SQLUndoLog;
import io.seata.sqlparser.util.JdbcConstants;
/**
* The type My sql undo insert executor.
*
* @author sharajava
*/
public class MySQLUndoInsertExecutor extends AbstractUndoExecutor {

    /**
     * Compensating statement template: DELETE FROM table WHERE pk = ?
     */
    private static final String DELETE_SQL_TEMPLATE = "DELETE FROM %s WHERE %s ";

    /**
     * Instantiates a new My sql undo insert executor.
     *
     * @param sqlUndoLog the sql undo log
     */
    public MySQLUndoInsertExecutor(SQLUndoLog sqlUndoLog) {
        super(sqlUndoLog);
    }

    /**
     * An INSERT is undone by deleting the after-image rows by primary key.
     *
     * @return sql
     */
    @Override
    protected String buildUndoSQL() {
        TableRecords afterImage = sqlUndoLog.getAfterImage();
        List<Row> rows = afterImage.getRows();
        if (CollectionUtils.isEmpty(rows)) {
            throw new ShouldNeverHappenException("Invalid UNDO LOG");
        }
        return generateDeleteSql(rows, afterImage);
    }

    /**
     * Only primary-key values are bound into the generated DELETE statement.
     */
    @Override
    protected void undoPrepare(PreparedStatement undoPST, ArrayList<Field> undoValues, List<Field> pkValueList)
        throws SQLException {
        int index = 0;
        for (Field pkField : pkValueList) {
            index++;
            undoPST.setObject(index, pkField.getValue(), pkField.getType());
        }
    }

    /** Builds DELETE ... WHERE <pk condition> from the ordered PK columns of the first row. */
    private String generateDeleteSql(List<Row> rows, TableRecords afterImage) {
        List<String> pkNameList = getOrderedPkList(afterImage, rows.get(0), JdbcConstants.MYSQL).stream()
            .map(Field::getName)
            .collect(Collectors.toList());
        String whereSql = SqlGenerateUtils.buildWhereConditionByPKs(pkNameList, JdbcConstants.MYSQL);
        return String.format(DELETE_SQL_TEMPLATE, sqlUndoLog.getTableName(), whereSql);
    }

    @Override
    protected TableRecords getUndoRows() {
        return sqlUndoLog.getAfterImage();
    }
}

View File

@@ -0,0 +1,99 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.mysql;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Date;
import io.seata.common.loader.LoadLevel;
import io.seata.core.compressor.CompressorType;
import io.seata.core.constants.ClientTableColumnsName;
import io.seata.rm.datasource.undo.AbstractUndoLogManager;
import io.seata.rm.datasource.undo.UndoLogParser;
import io.seata.sqlparser.util.JdbcConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* @author jsbxyyx
*/
@LoadLevel(name = JdbcConstants.MYSQL)
public class MySQLUndoLogManager extends AbstractUndoLogManager {

    private static final Logger LOGGER = LoggerFactory.getLogger(MySQLUndoLogManager.class);

    /**
     * branch_id, xid, context, rollback_info, log_status, log_created, log_modified
     */
    private static final String INSERT_UNDO_LOG_SQL = "INSERT INTO " + UNDO_LOG_TABLE_NAME +
        " (" + ClientTableColumnsName.UNDO_LOG_BRANCH_XID + ", " + ClientTableColumnsName.UNDO_LOG_XID + ", "
        + ClientTableColumnsName.UNDO_LOG_CONTEXT + ", " + ClientTableColumnsName.UNDO_LOG_ROLLBACK_INFO + ", "
        + ClientTableColumnsName.UNDO_LOG_LOG_STATUS + ", " + ClientTableColumnsName.UNDO_LOG_LOG_CREATED + ", "
        + ClientTableColumnsName.UNDO_LOG_LOG_MODIFIED + ")"
        + " VALUES (?, ?, ?, ?, ?, now(6), now(6))";

    private static final String DELETE_UNDO_LOG_BY_CREATE_SQL = "DELETE FROM " + UNDO_LOG_TABLE_NAME +
        " WHERE " + ClientTableColumnsName.UNDO_LOG_LOG_CREATED + " <= ? LIMIT ?";

    /**
     * Deletes up to {@code limitRows} undo log rows created on or before {@code logCreated}.
     *
     * @param logCreated the created-time upper bound (inclusive)
     * @param limitRows the maximum rows to delete
     * @param conn the connection
     * @return the number of rows deleted
     * @throws SQLException the sql exception
     */
    @Override
    public int deleteUndoLogByLogCreated(Date logCreated, int limitRows, Connection conn) throws SQLException {
        try (PreparedStatement deletePST = conn.prepareStatement(DELETE_UNDO_LOG_BY_CREATE_SQL)) {
            // Bind a Timestamp, not a java.sql.Date: java.sql.Date carries only the date
            // part (time normalized to zero), which would silently move the '<=' boundary
            // back to midnight and under-delete against the datetime(6) log_created column.
            deletePST.setTimestamp(1, new java.sql.Timestamp(logCreated.getTime()));
            deletePST.setInt(2, limitRows);
            int deleteRows = deletePST.executeUpdate();
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("batch delete undo log size {}", deleteRows);
            }
            return deleteRows;
        } catch (SQLException e) {
            throw e;
        } catch (Exception e) {
            // wrap non-SQL failures, preserving the cause
            throw new SQLException(e);
        }
    }

    @Override
    protected void insertUndoLogWithNormal(String xid, long branchId, String rollbackCtx, byte[] undoLogContent,
                                           Connection conn) throws SQLException {
        insertUndoLog(xid, branchId, rollbackCtx, undoLogContent, State.Normal, conn);
    }

    @Override
    protected void insertUndoLogWithGlobalFinished(String xid, long branchId, UndoLogParser parser, Connection conn) throws SQLException {
        // placeholder record: default parser content, marked GlobalFinished, no compression
        insertUndoLog(xid, branchId, buildContext(parser.getName(), CompressorType.NONE), parser.getDefaultContent(), State.GlobalFinished, conn);
    }

    /** Shared insert path; log_created/log_modified are set by the database via now(6). */
    private void insertUndoLog(String xid, long branchId, String rollbackCtx, byte[] undoLogContent,
                               State state, Connection conn) throws SQLException {
        try (PreparedStatement pst = conn.prepareStatement(INSERT_UNDO_LOG_SQL)) {
            pst.setLong(1, branchId);
            pst.setString(2, xid);
            pst.setString(3, rollbackCtx);
            pst.setBytes(4, undoLogContent);
            pst.setInt(5, state.getValue());
            pst.executeUpdate();
        } catch (SQLException e) {
            throw e;
        } catch (Exception e) {
            // wrap non-SQL failures, preserving the cause
            throw new SQLException(e);
        }
    }
}

View File

@@ -0,0 +1,85 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.mysql;
import java.util.List;
import java.util.stream.Collectors;
import io.seata.common.exception.ShouldNeverHappenException;
import io.seata.common.util.CollectionUtils;
import io.seata.rm.datasource.ColumnUtils;
import io.seata.rm.datasource.SqlGenerateUtils;
import io.seata.rm.datasource.sql.struct.Field;
import io.seata.rm.datasource.sql.struct.Row;
import io.seata.rm.datasource.sql.struct.TableRecords;
import io.seata.rm.datasource.undo.AbstractUndoExecutor;
import io.seata.rm.datasource.undo.SQLUndoLog;
import io.seata.sqlparser.util.JdbcConstants;
/**
* The type My sql undo update executor.
*
* @author sharajava
*/
public class MySQLUndoUpdateExecutor extends AbstractUndoExecutor {

    /**
     * Compensating statement template:
     * UPDATE table SET x = ?, y = ?, z = ? WHERE pk1 in (?) pk2 in (?)
     */
    private static final String UPDATE_SQL_TEMPLATE = "UPDATE %s SET %s WHERE %s ";

    /**
     * Instantiates a new My sql undo update executor.
     *
     * @param sqlUndoLog the sql undo log
     */
    public MySQLUndoUpdateExecutor(SQLUndoLog sqlUndoLog) {
        super(sqlUndoLog);
    }

    /**
     * An UPDATE is undone by writing the before-image values back, selected by primary key.
     *
     * @return sql
     */
    @Override
    protected String buildUndoSQL() {
        TableRecords beforeImage = sqlUndoLog.getBeforeImage();
        List<Row> rows = beforeImage.getRows();
        if (CollectionUtils.isEmpty(rows)) {
            throw new ShouldNeverHappenException("Invalid UNDO LOG");
        }
        Row firstRow = rows.get(0);
        // before-image field names come from table meta, so they need escaping
        // see BaseTransactionalExecutor#buildTableRecords
        String setClause = firstRow.nonPrimaryKeys().stream()
            .map(field -> ColumnUtils.addEscape(field.getName(), JdbcConstants.MYSQL) + " = ?")
            .collect(Collectors.joining(", "));
        List<String> pkNames = getOrderedPkList(beforeImage, firstRow, JdbcConstants.MYSQL).stream()
            .map(Field::getName)
            .collect(Collectors.toList());
        String whereClause = SqlGenerateUtils.buildWhereConditionByPKs(pkNames, JdbcConstants.MYSQL);
        return String.format(UPDATE_SQL_TEMPLATE, sqlUndoLog.getTableName(), setClause, whereClause);
    }

    @Override
    protected TableRecords getUndoRows() {
        return sqlUndoLog.getBeforeImage();
    }
}

View File

@@ -0,0 +1,79 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.oracle;
import io.seata.common.exception.ShouldNeverHappenException;
import io.seata.common.util.CollectionUtils;
import io.seata.rm.datasource.ColumnUtils;
import io.seata.rm.datasource.sql.struct.Field;
import io.seata.rm.datasource.sql.struct.Row;
import io.seata.rm.datasource.sql.struct.TableRecords;
import io.seata.rm.datasource.undo.AbstractUndoExecutor;
import io.seata.rm.datasource.undo.SQLUndoLog;
import io.seata.sqlparser.util.JdbcConstants;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/**
 * The type oracle undo delete executor.
 *
 * @author ccg
 */
public class OracleUndoDeleteExecutor extends AbstractUndoExecutor {

    /**
     * Undo a DELETE by re-inserting the deleted row:
     * INSERT INTO a (x, y, z, pk) VALUES (?, ?, ?, ?)
     */
    private static final String INSERT_SQL_TEMPLATE = "INSERT INTO %s (%s) VALUES (%s)";

    /**
     * Instantiates a new oracle undo delete executor.
     *
     * @param sqlUndoLog the sql undo log
     */
    public OracleUndoDeleteExecutor(SQLUndoLog sqlUndoLog) {
        super(sqlUndoLog);
    }

    @Override
    protected String buildUndoSQL() {
        TableRecords beforeImage = sqlUndoLog.getBeforeImage();
        List<Row> rows = beforeImage.getRows();
        if (CollectionUtils.isEmpty(rows)) {
            throw new ShouldNeverHappenException("Invalid UNDO LOG");
        }
        Row firstRow = rows.get(0);
        // Restore every column: non-PK columns first, then the PK columns in their defined order.
        List<Field> allFields = new ArrayList<>(firstRow.nonPrimaryKeys());
        allFields.addAll(getOrderedPkList(beforeImage, firstRow, JdbcConstants.ORACLE));
        // Column names in the before image come from table meta, so escaping is required.
        // see BaseTransactionalExecutor#buildTableRecords
        StringBuilder columns = new StringBuilder();
        StringBuilder placeholders = new StringBuilder();
        for (int i = 0; i < allFields.size(); i++) {
            if (i > 0) {
                columns.append(", ");
                placeholders.append(", ");
            }
            columns.append(ColumnUtils.addEscape(allFields.get(i).getName(), JdbcConstants.ORACLE));
            placeholders.append("?");
        }
        return String.format(INSERT_SQL_TEMPLATE, sqlUndoLog.getTableName(), columns, placeholders);
    }

    @Override
    protected TableRecords getUndoRows() {
        // A DELETE is undone from the before image.
        return sqlUndoLog.getBeforeImage();
    }
}

View File

@@ -0,0 +1,46 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.oracle;
import io.seata.common.loader.LoadLevel;
import io.seata.rm.datasource.undo.AbstractUndoExecutor;
import io.seata.rm.datasource.undo.SQLUndoLog;
import io.seata.rm.datasource.undo.UndoExecutorHolder;
import io.seata.sqlparser.util.JdbcConstants;
/**
 * The Type OracleUndoExecutorHolder.
 * <p>
 * Supplies the Oracle-specific undo executor for each DML type. Loaded via the
 * SPI mechanism, keyed on the Oracle db type.
 *
 * @author: Zhibei Hao
 */
@LoadLevel(name = JdbcConstants.ORACLE)
public class OracleUndoExecutorHolder implements UndoExecutorHolder {

    @Override
    public AbstractUndoExecutor getInsertExecutor(SQLUndoLog sqlUndoLog) {
        return new OracleUndoInsertExecutor(sqlUndoLog);
    }

    @Override
    public AbstractUndoExecutor getUpdateExecutor(SQLUndoLog sqlUndoLog) {
        return new OracleUndoUpdateExecutor(sqlUndoLog);
    }

    @Override
    public AbstractUndoExecutor getDeleteExecutor(SQLUndoLog sqlUndoLog) {
        return new OracleUndoDeleteExecutor(sqlUndoLog);
    }
}

View File

@@ -0,0 +1,86 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.oracle;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import io.seata.common.exception.ShouldNeverHappenException;
import io.seata.common.util.CollectionUtils;
import io.seata.rm.datasource.SqlGenerateUtils;
import io.seata.rm.datasource.sql.struct.Field;
import io.seata.rm.datasource.sql.struct.Row;
import io.seata.rm.datasource.sql.struct.TableRecords;
import io.seata.rm.datasource.undo.AbstractUndoExecutor;
import io.seata.rm.datasource.undo.SQLUndoLog;
import io.seata.sqlparser.util.JdbcConstants;
/**
 * The type Oracle undo insert executor.
 *
 * @author ccg
 */
public class OracleUndoInsertExecutor extends AbstractUndoExecutor {

    /**
     * Undo an INSERT by deleting the inserted row:
     * DELETE FROM a WHERE pk = ?
     */
    private static final String DELETE_SQL_TEMPLATE = "DELETE FROM %s WHERE %s ";

    /**
     * Instantiates a new oracle undo insert executor.
     *
     * @param sqlUndoLog the sql undo log
     */
    public OracleUndoInsertExecutor(SQLUndoLog sqlUndoLog) {
        super(sqlUndoLog);
    }

    @Override
    protected String buildUndoSQL() {
        TableRecords afterImage = sqlUndoLog.getAfterImage();
        List<Row> rows = afterImage.getRows();
        if (CollectionUtils.isEmpty(rows)) {
            throw new ShouldNeverHappenException("Invalid UNDO LOG");
        }
        return generateDeleteSql(rows, afterImage);
    }

    @Override
    protected void undoPrepare(PreparedStatement undoPST, ArrayList<Field> undoValues, List<Field> pkValueList)
        throws SQLException {
        // Only the primary-key values are needed to locate the row to delete;
        // undoValues is intentionally unused here.
        int paramIndex = 1;
        for (Field pkField : pkValueList) {
            undoPST.setObject(paramIndex, pkField.getValue(), pkField.getType());
            paramIndex++;
        }
    }

    private String generateDeleteSql(List<Row> rows, TableRecords afterImage) {
        // PK column names come from table meta; keep them in their defined order.
        List<String> pkNameList = getOrderedPkList(afterImage, rows.get(0), JdbcConstants.ORACLE)
            .stream()
            .map(Field::getName)
            .collect(Collectors.toList());
        String whereSql = SqlGenerateUtils.buildWhereConditionByPKs(pkNameList, JdbcConstants.ORACLE);
        return String.format(DELETE_SQL_TEMPLATE, sqlUndoLog.getTableName(), whereSql);
    }

    @Override
    protected TableRecords getUndoRows() {
        // An INSERT is undone from the after image.
        return sqlUndoLog.getAfterImage();
    }
}

View File

@@ -0,0 +1,100 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.oracle;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Date;
import io.seata.common.loader.LoadLevel;
import io.seata.core.compressor.CompressorType;
import io.seata.core.constants.ClientTableColumnsName;
import io.seata.rm.datasource.undo.AbstractUndoLogManager;
import io.seata.rm.datasource.undo.UndoLogParser;
import io.seata.sqlparser.util.JdbcConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Undo log manager for Oracle. Differs from the generic manager in that the
 * undo_log primary key is populated from the UNDO_LOG_SEQ sequence and batch
 * deletion is bounded with ROWNUM (Oracle has no LIMIT clause).
 *
 * @author jsbxyyx
 */
@LoadLevel(name = JdbcConstants.ORACLE)
public class OracleUndoLogManager extends AbstractUndoLogManager {

    private static final Logger LOGGER = LoggerFactory.getLogger(OracleUndoLogManager.class);

    // Oracle has no auto-increment column: the id comes from the UNDO_LOG_SEQ
    // sequence and the created/modified timestamps use sysdate.
    private static final String INSERT_UNDO_LOG_SQL = "INSERT INTO " + UNDO_LOG_TABLE_NAME +
        " (" + ClientTableColumnsName.UNDO_LOG_ID + "," + ClientTableColumnsName.UNDO_LOG_BRANCH_XID + ", "
        + ClientTableColumnsName.UNDO_LOG_XID + ", " + ClientTableColumnsName.UNDO_LOG_CONTEXT + ", "
        + ClientTableColumnsName.UNDO_LOG_ROLLBACK_INFO + ", " + ClientTableColumnsName.UNDO_LOG_LOG_STATUS + ", "
        + ClientTableColumnsName.UNDO_LOG_LOG_CREATED + ", " + ClientTableColumnsName.UNDO_LOG_LOG_MODIFIED + ")"
        + "VALUES (UNDO_LOG_SEQ.nextval, ?, ?, ?, ?, ?, sysdate, sysdate)";

    // ROWNUM bounds the number of rows deleted per batch.
    private static final String DELETE_UNDO_LOG_BY_CREATE_SQL = "DELETE FROM " + UNDO_LOG_TABLE_NAME +
        " WHERE " + ClientTableColumnsName.UNDO_LOG_LOG_CREATED + " <= ? and ROWNUM <= ?";

    /**
     * Deletes at most {@code limitRows} undo log records created on or before
     * {@code logCreated}.
     *
     * @param logCreated latest creation date (inclusive) of records to delete
     * @param limitRows  maximum number of rows to delete in this batch
     * @param conn       the database connection; not closed by this method
     * @return the number of rows actually deleted
     * @throws SQLException if the delete fails; non-SQL failures are wrapped
     */
    @Override
    public int deleteUndoLogByLogCreated(Date logCreated, int limitRows, Connection conn) throws SQLException {
        try (PreparedStatement deletePST = conn.prepareStatement(DELETE_UNDO_LOG_BY_CREATE_SQL)) {
            // NOTE(review): java.sql.Date drops the time-of-day component — confirm
            // that day-level precision is intended for the "<=" comparison.
            deletePST.setDate(1, new java.sql.Date(logCreated.getTime()));
            deletePST.setInt(2, limitRows);
            int deleteRows = deletePST.executeUpdate();
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("batch delete undo log size {}", deleteRows);
            }
            return deleteRows;
        } catch (Exception e) {
            // Wrap non-SQL failures so the declared contract (SQLException) holds.
            if (!(e instanceof SQLException)) {
                e = new SQLException(e);
            }
            throw (SQLException) e;
        }
    }

    /**
     * Inserts an undo log record in Normal state (rollback data present).
     */
    @Override
    protected void insertUndoLogWithNormal(String xid, long branchId, String rollbackCtx, byte[] undoLogContent,
                                           Connection conn) throws SQLException {
        insertUndoLog(xid, branchId, rollbackCtx, undoLogContent, State.Normal, conn);
    }

    /**
     * Inserts a placeholder undo log record in GlobalFinished state, using the
     * parser's default (empty) content and no compression.
     */
    @Override
    protected void insertUndoLogWithGlobalFinished(String xid, long branchId, UndoLogParser parser, Connection conn) throws SQLException {
        insertUndoLog(xid, branchId, buildContext(parser.getName(), CompressorType.NONE), parser.getDefaultContent(),
            State.GlobalFinished, conn);
    }

    // Shared insert path; parameter order matches INSERT_UNDO_LOG_SQL
    // (id and the two sysdate timestamps are supplied by the statement itself).
    private void insertUndoLog(String xid, long branchID, String rollbackCtx, byte[] undoLogContent,
                               State state, Connection conn) throws SQLException {
        try (PreparedStatement pst = conn.prepareStatement(INSERT_UNDO_LOG_SQL)) {
            pst.setLong(1, branchID);
            pst.setString(2, xid);
            pst.setString(3, rollbackCtx);
            pst.setBytes(4, undoLogContent);
            pst.setInt(5, state.getValue());
            pst.executeUpdate();
        } catch (Exception e) {
            // Wrap non-SQL failures so the declared contract (SQLException) holds.
            if (!(e instanceof SQLException)) {
                e = new SQLException(e);
            }
            throw (SQLException) e;
        }
    }
}

View File

@@ -0,0 +1,80 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.oracle;
import java.util.List;
import java.util.stream.Collectors;
import io.seata.common.exception.ShouldNeverHappenException;
import io.seata.common.util.CollectionUtils;
import io.seata.rm.datasource.ColumnUtils;
import io.seata.rm.datasource.SqlGenerateUtils;
import io.seata.rm.datasource.sql.struct.Field;
import io.seata.rm.datasource.sql.struct.Row;
import io.seata.rm.datasource.sql.struct.TableRecords;
import io.seata.rm.datasource.undo.AbstractUndoExecutor;
import io.seata.rm.datasource.undo.SQLUndoLog;
import io.seata.sqlparser.util.JdbcConstants;
/**
 * The type oracle undo update executor.
 *
 * @author ccg
 */
public class OracleUndoUpdateExecutor extends AbstractUndoExecutor {

    /**
     * Undo an UPDATE by restoring the before image:
     * UPDATE a SET x = ?, y = ?, z = ? WHERE pk1 = ? and pk2 = ?
     */
    private static final String UPDATE_SQL_TEMPLATE = "UPDATE %s SET %s WHERE %s ";

    /**
     * Instantiates a new oracle undo update executor.
     *
     * @param sqlUndoLog the sql undo log
     */
    public OracleUndoUpdateExecutor(SQLUndoLog sqlUndoLog) {
        super(sqlUndoLog);
    }

    @Override
    protected String buildUndoSQL() {
        TableRecords beforeImage = sqlUndoLog.getBeforeImage();
        List<Row> rows = beforeImage.getRows();
        if (CollectionUtils.isEmpty(rows)) {
            throw new ShouldNeverHappenException("Invalid UNDO LOG");
        }
        Row firstRow = rows.get(0);
        // Column names in the before image come from table meta, so escaping is required.
        // see BaseTransactionalExecutor#buildTableRecords
        StringBuilder setClause = new StringBuilder();
        for (Field field : firstRow.nonPrimaryKeys()) {
            if (setClause.length() > 0) {
                setClause.append(", ");
            }
            setClause.append(ColumnUtils.addEscape(field.getName(), JdbcConstants.ORACLE)).append(" = ?");
        }
        List<String> pkNames = getOrderedPkList(beforeImage, firstRow, JdbcConstants.ORACLE)
            .stream()
            .map(Field::getName)
            .collect(Collectors.toList());
        String whereSql = SqlGenerateUtils.buildWhereConditionByPKs(pkNames, JdbcConstants.ORACLE);
        return String.format(UPDATE_SQL_TEMPLATE, sqlUndoLog.getTableName(), setClause, whereSql);
    }

    @Override
    protected TableRecords getUndoRows() {
        // An UPDATE is undone from the before image.
        return sqlUndoLog.getBeforeImage();
    }
}

View File

@@ -0,0 +1,526 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.oracle.keyword;
import java.util.Arrays;
import java.util.Set;
import java.util.stream.Collectors;
import io.seata.common.loader.LoadLevel;
import io.seata.rm.datasource.undo.KeywordChecker;
import io.seata.sqlparser.util.JdbcConstants;
/**
 * The type oracle sql keyword checker.
 *
 * @author ccg
 */
@LoadLevel(name = JdbcConstants.ORACLE)
public class OracleKeywordChecker implements KeywordChecker {

    /**
     * All Oracle keyword names, in uppercase. The keyword list never changes,
     * so it is built once and shared across instances instead of being
     * recomputed for every checker instance.
     */
    private static final Set<String> KEYWORD_SET = Arrays.stream(OracleKeyword.values())
        .map(OracleKeyword::name)
        .collect(Collectors.toSet());

    /**
     * Oracle reserved words. Each constant name doubles as the keyword text
     * (the explicit {@code name} field mirrors it for readability at use sites).
     */
    private enum OracleKeyword {
        ACCESS("ACCESS"), ADD("ADD"), ALL("ALL"), ALTER("ALTER"), AND("AND"), ANY("ANY"),
        AS("AS"), ASC("ASC"), AUDIT("AUDIT"), BETWEEN("BETWEEN"), BY("BY"), CHAR("CHAR"),
        CHECK("CHECK"), CLUSTER("CLUSTER"), COLUMN("COLUMN"), COLUMN_VALUE("COLUMN_VALUE"),
        COMMENT("COMMENT"), COMPRESS("COMPRESS"), CONNECT("CONNECT"), CREATE("CREATE"),
        CURRENT("CURRENT"), DATE("DATE"), DECIMAL("DECIMAL"), DEFAULT("DEFAULT"),
        DELETE("DELETE"), DESC("DESC"), DISTINCT("DISTINCT"), DROP("DROP"), ELSE("ELSE"),
        EXCLUSIVE("EXCLUSIVE"), EXISTS("EXISTS"), FILE("FILE"), FLOAT("FLOAT"), FOR("FOR"),
        FROM("FROM"), GRANT("GRANT"), GROUP("GROUP"), HAVING("HAVING"),
        IDENTIFIED("IDENTIFIED"), IMMEDIATE("IMMEDIATE"), IN("IN"), INCREMENT("INCREMENT"),
        INDEX("INDEX"), INITIAL("INITIAL"), INSERT("INSERT"), INTEGER("INTEGER"),
        INTERSECT("INTERSECT"), INTO("INTO"), IS("IS"), LEVEL("LEVEL"), LIKE("LIKE"),
        LOCK("LOCK"), LONG("LONG"), MAXEXTENTS("MAXEXTENTS"), MINUS("MINUS"),
        MLSLABEL("MLSLABEL"), MODE("MODE"), MODIFY("MODIFY"),
        NESTED_TABLE_ID("NESTED_TABLE_ID"), NOAUDIT("NOAUDIT"), NOCOMPRESS("NOCOMPRESS"),
        NOT("NOT"), NOWAIT("NOWAIT"), NULL("NULL"), NUMBER("NUMBER"), OF("OF"),
        OFFLINE("OFFLINE"), ON("ON"), ONLINE("ONLINE"), OPTION("OPTION"), OR("OR"),
        ORDER("ORDER"), PCTFREE("PCTFREE"), PRIOR("PRIOR"), PUBLIC("PUBLIC"), RAW("RAW"),
        RENAME("RENAME"), RESOURCE("RESOURCE"), REVOKE("REVOKE"), ROW("ROW"), ROWID("ROWID"),
        ROWNUM("ROWNUM"), ROWS("ROWS"), SELECT("SELECT"), SESSION("SESSION"), SET("SET"),
        SHARE("SHARE"), SIZE("SIZE"), SMALLINT("SMALLINT"), START("START"),
        SUCCESSFUL("SUCCESSFUL"), SYNONYM("SYNONYM"), SYSDATE("SYSDATE"), TABLE("TABLE"),
        THEN("THEN"), TO("TO"), TRIGGER("TRIGGER"), UID("UID"), UNION("UNION"),
        UNIQUE("UNIQUE"), UPDATE("UPDATE"), USER("USER"), VALIDATE("VALIDATE"),
        VALUES("VALUES"), VARCHAR("VARCHAR"), VARCHAR2("VARCHAR2"), VIEW("VIEW"),
        WHENEVER("WHENEVER"), WHERE("WHERE"), WITH("WITH");

        /**
         * The keyword text.
         */
        public final String name;

        OracleKeyword(String name) {
            this.name = name;
        }
    }

    /**
     * Checks whether the given field or table name is an Oracle keyword.
     * The comparison is case-insensitive; a null name is never a keyword.
     *
     * @param fieldOrTableName the field or table name
     * @return true if the name is an Oracle keyword
     */
    @Override
    public boolean check(String fieldOrTableName) {
        if (fieldOrTableName == null) {
            return false;
        }
        // The keyword set holds uppercase names only, so a single lookup on the
        // uppercased input replaces the original redundant double lookup
        // (exact match followed by uppercase match).
        return KEYWORD_SET.contains(fieldOrTableName.toUpperCase());
    }

    /**
     * Checks whether the given name needs an escape symbol when used in SQL.
     *
     * @param fieldOrTableName the field or table name
     * @return false only for a fully-uppercase non-keyword name; true otherwise
     */
    @Override
    public boolean checkEscape(String fieldOrTableName) {
        boolean isKeyword = check(fieldOrTableName);
        // oracle
        // we are recommend table name and column name must uppercase.
        // if exists full uppercase, the table name or column name does't bundle escape symbol.
        if (!isKeyword && isUppercase(fieldOrTableName)) {
            return false;
        }
        return true;
    }

    /**
     * Returns true if the name contains no ASCII lowercase letters.
     * A null name is not considered uppercase.
     */
    private static boolean isUppercase(String fieldOrTableName) {
        if (fieldOrTableName == null) {
            return false;
        }
        char[] chars = fieldOrTableName.toCharArray();
        for (char ch : chars) {
            if (ch >= 'a' && ch <= 'z') {
                return false;
            }
        }
        return true;
    }
}

View File

@@ -0,0 +1,65 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.parser;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.alibaba.fastjson.serializer.SimplePropertyPreFilter;
import io.seata.common.Constants;
import io.seata.common.executor.Initialize;
import io.seata.common.loader.LoadLevel;
import io.seata.rm.datasource.undo.BranchUndoLog;
import io.seata.rm.datasource.undo.UndoLogParser;
/**
 * The type Json based undo log parser (fastjson implementation).
 *
 * @author sharajava
 */
@LoadLevel(name = FastjsonUndoLogParser.NAME)
public class FastjsonUndoLogParser implements UndoLogParser, Initialize {

    public static final String NAME = "fastjson";

    /**
     * Property filter applied on serialization; configured in {@link #init()}.
     */
    private final SimplePropertyPreFilter filter = new SimplePropertyPreFilter();

    @Override
    public void init() {
        // Exclude the tableMeta property from the serialized undo log.
        filter.getExcludes().add("tableMeta");
    }

    @Override
    public String getName() {
        return NAME;
    }

    @Override
    public byte[] getDefaultContent() {
        // An empty JSON object stands in when there is no rollback data.
        return "{}".getBytes(Constants.DEFAULT_CHARSET);
    }

    @Override
    public byte[] encode(BranchUndoLog branchUndoLog) {
        // WriteClassName embeds type information so values round-trip correctly.
        String jsonText = JSON.toJSONString(branchUndoLog, filter,
            SerializerFeature.WriteClassName, SerializerFeature.WriteDateUseDateFormat);
        return jsonText.getBytes(Constants.DEFAULT_CHARSET);
    }

    @Override
    public BranchUndoLog decode(byte[] bytes) {
        String jsonText = new String(bytes, Constants.DEFAULT_CHARSET);
        return JSON.parseObject(jsonText, BranchUndoLog.class);
    }
}

View File

@@ -0,0 +1,55 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.parser;
import java.sql.Timestamp;
import javax.sql.rowset.serial.SerialBlob;
import javax.sql.rowset.serial.SerialClob;
import org.nustaq.serialization.FSTConfiguration;
import org.nustaq.serialization.FSTObjectSerializer;
/**
 * Factory that configures and shares a single FST serialization configuration.
 *
 * @author funkye
 */
public class FstSerializerFactory {

    private static final FstSerializerFactory FACTORY = new FstSerializerFactory();

    /**
     * Shared FST configuration; classes registered up front (de)serialize faster.
     */
    private final FSTConfiguration conf = FSTConfiguration.createDefaultConfiguration();

    /**
     * Gets the shared default factory.
     *
     * @return the default factory instance
     */
    public static FstSerializerFactory getDefaultFactory() {
        return FACTORY;
    }

    public FstSerializerFactory() {
        // support clob and blob sql type
        conf.registerClass(SerialBlob.class, SerialClob.class, Timestamp.class);
        UndoLogSerializerClassRegistry.getRegisteredClasses().keySet().forEach(conf::registerClass);
    }

    /**
     * Registers a custom FST serializer for the given type.
     *
     * @param type                 the class handled by {@code ser}
     * @param ser                  the serializer implementation
     * @param alsoForAllSubclasses whether the serializer also applies to subclasses
     */
    public void registerSerializer(Class<?> type, FSTObjectSerializer ser, boolean alsoForAllSubclasses) {
        conf.registerSerializer(type, ser, alsoForAllSubclasses);
    }

    /**
     * Serializes the given object to its FST byte form.
     *
     * @param t   the object to serialize
     * @param <T> the object type
     * @return the serialized bytes
     */
    public <T> byte[] serialize(T t) {
        return conf.asByteArray(t);
    }

    /**
     * Deserializes the given bytes back into an object.
     *
     * @param bytes the serialized form
     * @param <T>   the expected type; the cast is unchecked and a mismatch
     *              surfaces as a ClassCastException at the caller
     * @return the deserialized object
     */
    @SuppressWarnings("unchecked")
    public <T> T deserialize(byte[] bytes) {
        return (T) conf.asObject(bytes);
    }
}

View File

@@ -0,0 +1,87 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.parser;
import io.seata.common.executor.Initialize;
import io.seata.common.loader.EnhancedServiceLoader;
import io.seata.common.loader.EnhancedServiceNotFoundException;
import io.seata.common.loader.LoadLevel;
import io.seata.common.util.CollectionUtils;
import io.seata.rm.datasource.undo.BranchUndoLog;
import io.seata.rm.datasource.undo.UndoLogParser;
import io.seata.rm.datasource.undo.parser.spi.FstSerializer;
import org.nustaq.serialization.FSTObjectSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
/**
 * fst serializer
 * @author funkye
 */
@LoadLevel(name = FstUndoLogParser.NAME)
public class FstUndoLogParser implements UndoLogParser, Initialize {

    private static final Logger LOGGER = LoggerFactory.getLogger(FstUndoLogParser.class);

    public static final String NAME = "fst";

    private FstSerializerFactory fstFactory = FstSerializerFactory.getDefaultFactory();

    @Override
    public void init() {
        try {
            // Register user-provided FST serializers discovered through SPI.
            List<FstSerializer> serializers = EnhancedServiceLoader.loadAll(FstSerializer.class);
            if (CollectionUtils.isEmpty(serializers)) {
                return;
            }
            for (FstSerializer serializer : serializers) {
                if (serializer == null) {
                    continue;
                }
                Class type = serializer.type();
                FSTObjectSerializer ser = serializer.ser();
                boolean alsoForAllSubclasses = serializer.alsoForAllSubclasses();
                if (type == null || ser == null) {
                    continue;
                }
                fstFactory.registerSerializer(type, ser, alsoForAllSubclasses);
                LOGGER.info("fst undo log parser load [{}].", serializer.getClass().getName());
            }
        } catch (EnhancedServiceNotFoundException e) {
            // No SPI implementations: proceed with the default configuration.
            LOGGER.warn("FstSerializer not found children class.", e);
        }
    }

    @Override
    public String getName() {
        return NAME;
    }

    @Override
    public byte[] getDefaultContent() {
        // An empty undo log stands in when there is no rollback data.
        return fstFactory.serialize(new BranchUndoLog());
    }

    @Override
    public byte[] encode(BranchUndoLog branchUndoLog) {
        return fstFactory.serialize(branchUndoLog);
    }

    @Override
    public BranchUndoLog decode(byte[] bytes) {
        return fstFactory.deserialize(bytes);
    }
}

View File

@@ -0,0 +1,304 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.parser;
import java.util.Arrays;
import java.io.IOException;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.List;
import javax.sql.rowset.serial.SerialBlob;
import javax.sql.rowset.serial.SerialClob;
import javax.sql.rowset.serial.SerialException;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.JsonToken;
import com.fasterxml.jackson.core.type.WritableTypeId;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.JsonDeserializer;
import com.fasterxml.jackson.databind.MapperFeature;
import com.fasterxml.jackson.databind.SerializerProvider;
import com.fasterxml.jackson.databind.JsonSerializer;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.deser.std.JsonNodeDeserializer;
import com.fasterxml.jackson.databind.jsontype.TypeSerializer;
import com.fasterxml.jackson.databind.module.SimpleModule;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.ser.std.ArraySerializerBase;
import io.seata.common.Constants;
import io.seata.common.executor.Initialize;
import io.seata.common.loader.EnhancedServiceLoader;
import io.seata.common.loader.EnhancedServiceNotFoundException;
import io.seata.common.loader.LoadLevel;
import io.seata.common.util.CollectionUtils;
import io.seata.rm.datasource.undo.BranchUndoLog;
import io.seata.rm.datasource.undo.UndoLogParser;
import io.seata.rm.datasource.undo.parser.spi.JacksonSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The type Json based undo log parser.
*
* @author jsbxyyx
*/
@LoadLevel(name = JacksonUndoLogParser.NAME)
public class JacksonUndoLogParser implements UndoLogParser, Initialize {
public static final String NAME = "jackson";
private static final Logger LOGGER = LoggerFactory.getLogger(JacksonUndoLogParser.class);
private final ObjectMapper mapper = new ObjectMapper();
private final SimpleModule module = new SimpleModule();
/**
* customize serializer for java.sql.Timestamp
*/
private final JsonSerializer timestampSerializer = new TimestampSerializer();
/**
* customize deserializer for java.sql.Timestamp
*/
private final JsonDeserializer timestampDeserializer = new TimestampDeserializer();
/**
* customize serializer of java.sql.Blob
*/
private final JsonSerializer blobSerializer = new BlobSerializer();
/**
* customize deserializer of java.sql.Blob
*/
private final JsonDeserializer blobDeserializer = new BlobDeserializer();
/**
* customize serializer of java.sql.Clob
*/
private final JsonSerializer clobSerializer = new ClobSerializer();
/**
* customize deserializer of java.sql.Clob
*/
private final JsonDeserializer clobDeserializer = new ClobDeserializer();
@Override
public void init() {
    try {
        // Register user-provided Jackson (de)serializers discovered through SPI first.
        List<JacksonSerializer> jacksonSerializers = EnhancedServiceLoader.loadAll(JacksonSerializer.class);
        if (CollectionUtils.isNotEmpty(jacksonSerializers)) {
            for (JacksonSerializer jacksonSerializer : jacksonSerializers) {
                Class type = jacksonSerializer.type();
                JsonSerializer ser = jacksonSerializer.ser();
                JsonDeserializer deser = jacksonSerializer.deser();
                if (type != null) {
                    // A serializer or deserializer may be contributed independently.
                    if (ser != null) {
                        module.addSerializer(type, ser);
                    }
                    if (deser != null) {
                        module.addDeserializer(type, deser);
                    }
                    LOGGER.info("jackson undo log parser load [{}].", jacksonSerializer.getClass().getName());
                }
            }
        }
    } catch (EnhancedServiceNotFoundException e) {
        // No SPI implementations found: fall through to the built-in handlers only.
        LOGGER.warn("JacksonSerializer not found children class.", e);
    }
    // Built-in handlers for JDBC types that Jackson cannot round-trip by default.
    module.addSerializer(Timestamp.class, timestampSerializer);
    module.addDeserializer(Timestamp.class, timestampDeserializer);
    module.addSerializer(SerialBlob.class, blobSerializer);
    module.addDeserializer(SerialBlob.class, blobDeserializer);
    module.addSerializer(SerialClob.class, clobSerializer);
    module.addDeserializer(SerialClob.class, clobDeserializer);
    mapper.registerModule(module);
    mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    // NOTE(review): enableDefaultTyping is deprecated (Jackson 2.10+) and permits
    // polymorphic deserialization of arbitrary classes; confirm the undo_log table
    // content can never carry attacker-controlled bytes before relying on this.
    mapper.enableDefaultTyping(ObjectMapper.DefaultTyping.NON_FINAL, JsonTypeInfo.As.PROPERTY);
    mapper.enable(MapperFeature.PROPAGATE_TRANSIENT_MARKER);
}
@Override
public String getName() {
    // SPI name under which this parser is registered.
    return NAME;
}
@Override
public byte[] getDefaultContent() {
    // An empty JSON object marks a record with no rollback data.
    return "{}".getBytes(Constants.DEFAULT_CHARSET);
}
@Override
public byte[] encode(BranchUndoLog branchUndoLog) {
    try {
        return mapper.writeValueAsBytes(branchUndoLog);
    } catch (JsonProcessingException e) {
        // Fail loudly: a partially-encoded undo log must not be persisted.
        LOGGER.error("json encode exception, {}", e.getMessage(), e);
        throw new RuntimeException(e);
    }
}
@Override
public BranchUndoLog decode(byte[] bytes) {
    try {
        BranchUndoLog branchUndoLog;
        // The default content "{}" maps to an empty undo log directly,
        // bypassing Jackson data binding.
        if (Arrays.equals(bytes, getDefaultContent())) {
            branchUndoLog = new BranchUndoLog();
        } else {
            branchUndoLog = mapper.readValue(bytes, BranchUndoLog.class);
        }
        return branchUndoLog;
    } catch (IOException e) {
        LOGGER.error("json decode exception, {}", e.getMessage(), e);
        throw new RuntimeException(e);
    }
}
/**
 * Serializer for {@code java.sql.Timestamp}: writes the value as a two-element
 * JSON array [epochMillis, nanos] so nanosecond precision survives a round trip.
 * if necessary
 * extend {@link ArraySerializerBase}
 */
private static class TimestampSerializer extends JsonSerializer<Timestamp> {

    @Override
    public void serializeWithType(Timestamp timestamp, JsonGenerator gen, SerializerProvider serializers,
                                  TypeSerializer typeSerializer) throws IOException {
        // Wrap the array payload with Jackson's polymorphic type id prefix/suffix.
        WritableTypeId typeId = typeSerializer.writeTypePrefix(gen,
            typeSerializer.typeId(timestamp, JsonToken.START_ARRAY));
        serialize(timestamp, gen, serializers);
        gen.writeTypeSuffix(typeId);
    }

    @Override
    public void serialize(Timestamp timestamp, JsonGenerator gen, SerializerProvider serializers) {
        try {
            // element 0: epoch milliseconds; element 1: nanosecond component
            gen.writeNumber(timestamp.getTime());
            gen.writeNumber(timestamp.getNanos());
        } catch (IOException e) {
            // NOTE(review): the IOException is swallowed here, which can leave a
            // partially written array in the generator — confirm this is intended.
            LOGGER.error("serialize java.sql.Timestamp error : {}", e.getMessage(), e);
        }
    }
}
/**
 * Deserializer for {@code java.sql.Timestamp}: expects a two-element JSON
 * array of [epochMillis, nanos] as written by TimestampSerializer.
 * Returns null (after logging) when the input token is not an array or the
 * tree cannot be read.
 * if necessary
 * extend {@link JsonNodeDeserializer}
 */
private static class TimestampDeserializer extends JsonDeserializer<Timestamp> {

    @Override
    public Timestamp deserialize(JsonParser p, DeserializationContext ctxt) {
        if (p.isExpectedStartArrayToken()) {
            try {
                ArrayNode arrayNode = p.getCodec().readTree(p);
                Timestamp result = new Timestamp(arrayNode.get(0).asLong());
                result.setNanos(arrayNode.get(1).asInt());
                return result;
            } catch (IOException e) {
                LOGGER.error("deserialize java.sql.Timestamp error : {}", e.getMessage(), e);
            }
        }
        LOGGER.error("deserialize java.sql.Timestamp type error.");
        return null;
    }
}
/**
 * Serializer for {@code java.sql.Blob} values (SerialBlob): writes the blob's
 * bytes as embedded binary content, preserving Jackson polymorphic type info.
 */
private static class BlobSerializer extends JsonSerializer<SerialBlob> {

    @Override
    public void serializeWithType(SerialBlob blob, JsonGenerator gen, SerializerProvider serializers,
                                  TypeSerializer typeSer) throws IOException {
        // Wrap the binary payload with the polymorphic type id prefix/suffix.
        WritableTypeId typeIdDef = typeSer.writeTypePrefix(gen,
            typeSer.typeId(blob, JsonToken.VALUE_EMBEDDED_OBJECT));
        serialize(blob, gen, serializers);
        typeSer.writeTypeSuffix(gen, typeIdDef);
    }

    @Override
    public void serialize(SerialBlob blob, JsonGenerator gen, SerializerProvider serializers) throws IOException {
        try {
            // Blob.getBytes is 1-based; the whole blob is written in one call.
            gen.writeBinary(blob.getBytes(1, (int)blob.length()));
        } catch (SerialException e) {
            // NOTE(review): SerialException is swallowed (nothing written);
            // IOException from the generator still propagates.
            LOGGER.error("serialize java.sql.Blob error : {}", e.getMessage(), e);
        }
    }
}
/**
 * Deserializer for {@code java.sql.Blob}: reads the embedded binary JSON
 * content back into a SerialBlob. Returns null (after logging) when the
 * blob cannot be reconstructed.
 */
private static class BlobDeserializer extends JsonDeserializer<SerialBlob> {

    @Override
    public SerialBlob deserialize(JsonParser p, DeserializationContext ctxt) throws IOException {
        byte[] binary = p.getBinaryValue();
        try {
            return new SerialBlob(binary);
        } catch (SQLException e) {
            LOGGER.error("deserialize java.sql.Blob error : {}", e.getMessage(), e);
            return null;
        }
    }
}
/**
* the class of serialize clob type
*/
private static class ClobSerializer extends JsonSerializer<SerialClob> {
@Override
public void serializeWithType(SerialClob clob, JsonGenerator gen, SerializerProvider serializers,
TypeSerializer typeSer) throws IOException {
WritableTypeId typeIdDef = typeSer.writeTypePrefix(gen,
typeSer.typeId(clob, JsonToken.VALUE_EMBEDDED_OBJECT));
serialize(clob, gen, serializers);
typeSer.writeTypeSuffix(gen, typeIdDef);
}
@Override
public void serialize(SerialClob clob, JsonGenerator gen, SerializerProvider serializers) throws IOException {
try {
gen.writeString(clob.getCharacterStream(), (int)clob.length());
} catch (SerialException e) {
LOGGER.error("serialize java.sql.Blob error : {}", e.getMessage(), e);
}
}
}
/**
 * Deserializer for {@code java.sql.Clob}: reads a JSON string back into a
 * SerialClob. Returns null (after logging) when reconstruction fails.
 */
private static class ClobDeserializer extends JsonDeserializer<SerialClob> {

    @Override
    public SerialClob deserialize(JsonParser p, DeserializationContext ctxt) throws IOException {
        char[] chars = p.getValueAsString().toCharArray();
        try {
            return new SerialClob(chars);
        } catch (SQLException e) {
            LOGGER.error("deserialize java.sql.Clob error : {}", e.getMessage(), e);
            return null;
        }
    }
}
}

View File

@@ -0,0 +1,56 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.parser;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.util.Objects;
/**
 * Thin wrapper that binds a borrowed {@link Kryo} instance to simple
 * byte[] serialize/deserialize operations. Instances are obtained from
 * KryoSerializerFactory and must be returned to it after use.
 *
 * @author jsbxyyx
 */
public class KryoSerializer {

    private final Kryo kryo;

    public KryoSerializer(Kryo kryo) {
        this.kryo = Objects.requireNonNull(kryo);
    }

    public Kryo getKryo() {
        return kryo;
    }

    /**
     * Serializes an object together with its class information.
     *
     * @param t   the object to serialize
     * @param <T> the object type
     * @return the kryo-encoded bytes
     */
    public <T> byte[] serialize(T t) {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        Output output = new Output(baos);
        try {
            kryo.writeClassAndObject(output, t);
        } finally {
            // close() flushes buffered bytes into baos; run even on failure
            output.close();
        }
        return baos.toByteArray();
    }

    /**
     * Deserializes a byte array produced by {@link #serialize(Object)}.
     *
     * @param bytes the kryo-encoded bytes
     * @param <T>   the expected object type
     * @return the decoded object
     */
    @SuppressWarnings("unchecked")
    public <T> T deserialize(byte[] bytes) {
        Input input = new Input(new ByteArrayInputStream(bytes));
        try {
            // BUGFIX: read before closing. The original closed the Input first,
            // which only worked by accident because ByteArrayInputStream.close()
            // is a no-op.
            return (T) kryo.readClassAndObject(input);
        } finally {
            input.close();
        }
    }
}

View File

@@ -0,0 +1,166 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.parser;
import java.lang.reflect.InvocationHandler;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import javax.sql.rowset.serial.SerialBlob;
import javax.sql.rowset.serial.SerialClob;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Serializer;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
import com.esotericsoftware.kryo.pool.KryoFactory;
import com.esotericsoftware.kryo.pool.KryoPool;
import de.javakaffee.kryoserializers.JdkProxySerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Singleton factory that maintains a soft-referenced {@link KryoPool} and
 * configures every created {@link Kryo} instance with the serializers needed
 * for undo log images: SPI-contributed type serializers, Blob/Clob/Timestamp
 * handlers, JDK dynamic proxies, and the commonly used classes from
 * {@link UndoLogSerializerClassRegistry}.
 *
 * @author jsbxyyx
 */
public class KryoSerializerFactory implements KryoFactory {

    private static final Logger LOGGER = LoggerFactory.getLogger(KryoSerializerFactory.class);

    private static final KryoSerializerFactory FACTORY = new KryoSerializerFactory();

    private KryoPool pool = new KryoPool.Builder(this).softReferences().build();

    // extra type -> serializer registrations contributed via registerSerializer()
    private static final Map<Class, Serializer> TYPE_MAP = new ConcurrentHashMap<>();

    private KryoSerializerFactory() {}

    public static KryoSerializerFactory getInstance() {
        return FACTORY;
    }

    /**
     * Borrows a kryo instance from the pool, wrapped in a {@link KryoSerializer}.
     * Callers must hand the wrapper back via {@link #returnKryo(KryoSerializer)}.
     */
    public KryoSerializer get() {
        return new KryoSerializer(pool.borrow());
    }

    /**
     * Returns a previously borrowed kryo instance to the pool.
     *
     * @param kryoSerializer the wrapper obtained from {@link #get()}
     */
    public void returnKryo(KryoSerializer kryoSerializer) {
        if (kryoSerializer == null) {
            throw new IllegalArgumentException("kryoSerializer is null");
        }
        pool.release(kryoSerializer.getKryo());
    }

    /**
     * Registers a custom serializer that will be applied to every kryo
     * instance created after this call. Null type or serializer is ignored.
     */
    public void registerSerializer(Class type, Serializer ser) {
        if (type != null && ser != null) {
            TYPE_MAP.put(type, ser);
        }
    }

    @Override
    public Kryo create() {
        Kryo kryo = new Kryo();
        kryo.setRegistrationRequired(false);
        for (Map.Entry<Class, Serializer> entry : TYPE_MAP.entrySet()) {
            kryo.register(entry.getKey(), entry.getValue());
        }
        // support clob and blob
        kryo.register(SerialBlob.class, new BlobSerializer());
        kryo.register(SerialClob.class, new ClobSerializer());
        // register sql type
        kryo.register(Timestamp.class, new TimestampSerializer());
        kryo.register(InvocationHandler.class, new JdkProxySerializer());
        // register commonly class
        UndoLogSerializerClassRegistry.getRegisteredClasses().forEach((clazz, ser) -> {
            if (ser == null) {
                kryo.register(clazz);
            } else {
                kryo.register(clazz, (Serializer)ser);
            }
        });
        return kryo;
    }

    /**
     * Writes a Blob as a varint length followed by its raw bytes.
     * SQLException while reading the blob is logged and swallowed.
     */
    private static class BlobSerializer extends Serializer<Blob> {

        @Override
        public void write(Kryo kryo, Output output, Blob object) {
            try {
                byte[] bytes = object.getBytes(1L, (int)object.length());
                output.writeInt(bytes.length, true);
                output.write(bytes);
            } catch (SQLException e) {
                LOGGER.error("kryo write java.sql.Blob error: {}", e.getMessage(), e);
            }
        }

        @Override
        public Blob read(Kryo kryo, Input input, Class<Blob> type) {
            int length = input.readInt(true);
            byte[] bytes = input.readBytes(length);
            try {
                return new SerialBlob(bytes);
            } catch (SQLException e) {
                LOGGER.error("kryo read java.sql.Blob error: {}", e.getMessage(), e);
            }
            return null;
        }
    }

    /**
     * Writes a Clob as a kryo string. SQLException is logged and swallowed.
     */
    private static class ClobSerializer extends Serializer<Clob> {

        @Override
        public void write(Kryo kryo, Output output, Clob object) {
            try {
                String s = object.getSubString(1, (int)object.length());
                output.writeString(s);
            } catch (SQLException e) {
                LOGGER.error("kryo write java.sql.Clob error: {}", e.getMessage(), e);
            }
        }

        @Override
        public Clob read(Kryo kryo, Input input, Class<Clob> type) {
            try {
                String s = input.readString();
                return new SerialClob(s.toCharArray());
            } catch (SQLException e) {
                LOGGER.error("kryo read java.sql.Clob error: {}", e.getMessage(), e);
            }
            return null;
        }
    }

    /**
     * Writes a Timestamp as varint epoch millis + varint nanos.
     * BUGFIX: declared static — the original was a non-static inner class that
     * needlessly captured a reference to the enclosing factory instance.
     */
    private static class TimestampSerializer extends Serializer<Timestamp> {

        @Override
        public void write(Kryo kryo, Output output, Timestamp object) {
            output.writeLong(object.getTime(), true);
            output.writeInt(object.getNanos(), true);
        }

        @Override
        public Timestamp read(Kryo kryo, Input input, Class<Timestamp> type) {
            Timestamp timestamp = new Timestamp(input.readLong(true));
            timestamp.setNanos(input.readInt(true));
            return timestamp;
        }
    }
}

View File

@@ -0,0 +1,99 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.parser;
import com.esotericsoftware.kryo.Serializer;
import io.seata.common.executor.Initialize;
import io.seata.common.loader.EnhancedServiceLoader;
import io.seata.common.loader.EnhancedServiceNotFoundException;
import io.seata.common.loader.LoadLevel;
import io.seata.common.util.CollectionUtils;
import io.seata.rm.datasource.undo.BranchUndoLog;
import io.seata.rm.datasource.undo.UndoLogParser;
import io.seata.rm.datasource.undo.parser.spi.KryoTypeSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
/**
 * Kryo-based undo log parser. On init it loads SPI-contributed
 * {@code KryoTypeSerializer}s into the shared {@link KryoSerializerFactory};
 * each encode/decode borrows a pooled kryo instance and always returns it.
 *
 * @author jsbxyyx
 */
@LoadLevel(name = KryoUndoLogParser.NAME)
public class KryoUndoLogParser implements UndoLogParser, Initialize {

    private static final Logger LOGGER = LoggerFactory.getLogger(KryoUndoLogParser.class);

    public static final String NAME = "kryo";

    @Override
    public void init() {
        try {
            List<KryoTypeSerializer> customSerializers = EnhancedServiceLoader.loadAll(KryoTypeSerializer.class);
            if (CollectionUtils.isNotEmpty(customSerializers)) {
                for (KryoTypeSerializer customSerializer : customSerializers) {
                    if (customSerializer == null) {
                        continue;
                    }
                    Class type = customSerializer.type();
                    if (type == null) {
                        continue;
                    }
                    Serializer ser = customSerializer.serializer();
                    KryoSerializerFactory.getInstance().registerSerializer(type, ser);
                    LOGGER.info("kryo undo log parser load [{}].", customSerializer.getClass().getName());
                }
            }
        } catch (EnhancedServiceNotFoundException e) {
            LOGGER.warn("KryoTypeSerializer not found children class.", e);
        }
    }

    @Override
    public String getName() {
        return NAME;
    }

    /**
     * @return the kryo encoding of an empty BranchUndoLog
     */
    @Override
    public byte[] getDefaultContent() {
        KryoSerializerFactory factory = KryoSerializerFactory.getInstance();
        KryoSerializer serializer = factory.get();
        try {
            return serializer.serialize(new BranchUndoLog());
        } finally {
            factory.returnKryo(serializer);
        }
    }

    /**
     * Serializes a branch undo log with a pooled kryo instance.
     */
    @Override
    public byte[] encode(BranchUndoLog branchUndoLog) {
        KryoSerializerFactory factory = KryoSerializerFactory.getInstance();
        KryoSerializer serializer = factory.get();
        try {
            return serializer.serialize(branchUndoLog);
        } finally {
            factory.returnKryo(serializer);
        }
    }

    /**
     * Deserializes kryo bytes back into a branch undo log.
     */
    @Override
    public BranchUndoLog decode(byte[] bytes) {
        KryoSerializerFactory factory = KryoSerializerFactory.getInstance();
        KryoSerializer serializer = factory.get();
        try {
            return serializer.deserialize(bytes);
        } finally {
            factory.returnKryo(serializer);
        }
    }
}

View File

@@ -0,0 +1,254 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.parser;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.sql.Timestamp;
import java.util.List;
import io.protostuff.Input;
import io.protostuff.LinkedBuffer;
import io.protostuff.Output;
import io.protostuff.Pipe;
import io.protostuff.ProtostuffIOUtil;
import io.protostuff.Schema;
import io.protostuff.WireFormat.FieldType;
import io.protostuff.runtime.DefaultIdStrategy;
import io.protostuff.runtime.Delegate;
import io.protostuff.runtime.RuntimeEnv;
import io.protostuff.runtime.RuntimeSchema;
import io.seata.common.executor.Initialize;
import io.seata.common.loader.EnhancedServiceLoader;
import io.seata.common.loader.EnhancedServiceNotFoundException;
import io.seata.common.loader.LoadLevel;
import io.seata.common.util.CollectionUtils;
import io.seata.rm.datasource.undo.BranchUndoLog;
import io.seata.rm.datasource.undo.UndoLogParser;
import io.seata.rm.datasource.undo.parser.spi.ProtostuffDelegate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * The type protostuff based undo log parser. Registers SPI-contributed
 * delegates plus built-in date/time delegates on the runtime id strategy,
 * and (de)serializes BranchUndoLog through a runtime schema.
 *
 * @author Geng Zhang
 */
@LoadLevel(name = ProtostuffUndoLogParser.NAME)
public class ProtostuffUndoLogParser implements UndoLogParser, Initialize {

    private static final Logger LOGGER = LoggerFactory.getLogger(ProtostuffUndoLogParser.class);

    public static final String NAME = "protostuff";

    private final DefaultIdStrategy idStrategy = (DefaultIdStrategy) RuntimeEnv.ID_STRATEGY;

    private final Schema<BranchUndoLog> schema = RuntimeSchema.getSchema(BranchUndoLog.class, idStrategy);

    @Override
    public void init() {
        try {
            List<ProtostuffDelegate> delegates = EnhancedServiceLoader.loadAll(ProtostuffDelegate.class);
            if (CollectionUtils.isNotEmpty(delegates)) {
                for (ProtostuffDelegate delegate : delegates) {
                    idStrategy.registerDelegate(delegate.create());
                    LOGGER.info("protostuff undo log parser load [{}].", delegate.getClass().getName());
                }
            }
        } catch (EnhancedServiceNotFoundException e) {
            LOGGER.warn("ProtostuffDelegate not found children class.", e);
        }
        // built-in delegates for java.util/java.sql date and time types
        idStrategy.registerDelegate(new DateDelegate());
        idStrategy.registerDelegate(new TimestampDelegate());
        idStrategy.registerDelegate(new SqlDateDelegate());
        idStrategy.registerDelegate(new TimeDelegate());
    }

    @Override
    public String getName() {
        return ProtostuffUndoLogParser.NAME;
    }

    /**
     * @return the protostuff encoding of an empty BranchUndoLog
     */
    @Override
    public byte[] getDefaultContent() {
        return encode(new BranchUndoLog());
    }

    /**
     * Serializes a branch undo log to protostuff bytes.
     */
    @Override
    public byte[] encode(BranchUndoLog branchUndoLog) {
        // Re-use (manage) this buffer to avoid allocating on every serialization
        LinkedBuffer buffer = LinkedBuffer.allocate(512);
        // ser
        try {
            return ProtostuffIOUtil.toByteArray(branchUndoLog, schema, buffer);
        } finally {
            buffer.clear();
        }
    }

    /**
     * Deserializes protostuff bytes back into a branch undo log; empty input
     * yields an empty BranchUndoLog.
     */
    @Override
    public BranchUndoLog decode(byte[] bytes) {
        if (bytes.length == 0) {
            return new BranchUndoLog();
        }
        BranchUndoLog fooParsed = schema.newMessage();
        ProtostuffIOUtil.mergeFrom(bytes, fooParsed, schema);
        return fooParsed;
    }

    /**
     * Delegate for java.sql.Timestamp: encoded as 12 bytes —
     * 8-byte epoch millis followed by 4-byte nanos.
     *
     * @author zhangsen
     */
    public static class TimestampDelegate implements Delegate<java.sql.Timestamp> {

        @Override
        public FieldType getFieldType() {
            return FieldType.BYTES;
        }

        @Override
        public Class<?> typeClass() {
            return java.sql.Timestamp.class;
        }

        @Override
        public java.sql.Timestamp readFrom(Input input) throws IOException {
            ByteBuffer buffer = input.readByteBuffer();
            long time = buffer.getLong();
            int nanos = buffer.getInt();
            // CLEANUP: removed a stray buffer.flip() here — the buffer is fully
            // consumed and discarded, so the flip was dead code.
            java.sql.Timestamp timestamp = new Timestamp(time);
            timestamp.setNanos(nanos);
            return timestamp;
        }

        @Override
        public void writeTo(Output output, int number, java.sql.Timestamp value, boolean repeated) throws IOException {
            ByteBuffer buffer = ByteBuffer.allocate(12);
            buffer.putLong(value.getTime());
            buffer.putInt(value.getNanos());
            // flip switches the buffer from write to read mode before output
            buffer.flip();
            output.writeBytes(number, buffer, repeated);
        }

        @Override
        public void transfer(Pipe pipe, Input input, Output output, int number, boolean repeated) throws IOException {
            output.writeBytes(number, input.readByteBuffer(), repeated);
        }
    }

    /**
     * Delegate for java.sql.Date: encoded as fixed64 epoch millis.
     *
     * @author zhangsen
     */
    public static class SqlDateDelegate implements Delegate<java.sql.Date> {

        @Override
        public FieldType getFieldType() {
            return FieldType.FIXED64;
        }

        @Override
        public Class<?> typeClass() {
            return java.sql.Date.class;
        }

        @Override
        public java.sql.Date readFrom(Input input) throws IOException {
            return new java.sql.Date(input.readFixed64());
        }

        @Override
        public void transfer(Pipe pipe, Input input, Output output, int number, boolean repeated) throws IOException {
            output.writeFixed64(number, input.readFixed64(), repeated);
        }

        @Override
        public void writeTo(Output output, int number, java.sql.Date value, boolean repeated) throws IOException {
            output.writeFixed64(number, value.getTime(), repeated);
        }
    }

    /**
     * Delegate for java.sql.Time: encoded as fixed64 epoch millis.
     *
     * @author zhangsen
     */
    public static class TimeDelegate implements Delegate<java.sql.Time> {

        @Override
        public FieldType getFieldType() {
            return FieldType.FIXED64;
        }

        @Override
        public Class<?> typeClass() {
            return java.sql.Time.class;
        }

        @Override
        public java.sql.Time readFrom(Input input) throws IOException {
            return new java.sql.Time(input.readFixed64());
        }

        @Override
        public void transfer(Pipe pipe, Input input, Output output, int number, boolean repeated) throws IOException {
            output.writeFixed64(number, input.readFixed64(), repeated);
        }

        @Override
        public void writeTo(Output output, int number, java.sql.Time value, boolean repeated) throws IOException {
            output.writeFixed64(number, value.getTime(), repeated);
        }
    }

    /**
     * Delegate for java.util.Date: encoded as fixed64 epoch millis.
     *
     * @author zhangsen
     */
    public static class DateDelegate implements Delegate<java.util.Date> {

        @Override
        public FieldType getFieldType() {
            return FieldType.FIXED64;
        }

        @Override
        public Class<?> typeClass() {
            return java.util.Date.class;
        }

        @Override
        public java.util.Date readFrom(Input input) throws IOException {
            return new java.util.Date(input.readFixed64());
        }

        @Override
        public void transfer(Pipe pipe, Input input, Output output, int number, boolean repeated) throws IOException {
            output.writeFixed64(number, input.readFixed64(), repeated);
        }

        @Override
        public void writeTo(Output output, int number, java.util.Date value, boolean repeated) throws IOException {
            output.writeFixed64(number, value.getTime(), repeated);
        }
    }
}

View File

@@ -0,0 +1,127 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.parser;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.net.URI;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.Calendar;
import java.util.Collections;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.TreeSet;
import java.util.UUID;
import java.util.Vector;
import java.util.concurrent.ConcurrentHashMap;
import java.util.regex.Pattern;
import com.esotericsoftware.kryo.serializers.DefaultSerializers;
import de.javakaffee.kryoserializers.ArraysAsListSerializer;
import de.javakaffee.kryoserializers.BitSetSerializer;
import de.javakaffee.kryoserializers.GregorianCalendarSerializer;
import de.javakaffee.kryoserializers.RegexSerializer;
import de.javakaffee.kryoserializers.URISerializer;
import de.javakaffee.kryoserializers.UUIDSerializer;
import io.seata.rm.datasource.undo.BranchUndoLog;
/**
 * Provide a unified serialization registry, this class used for {@code seata-serializer-fst}
 * and {@code seata-serializer-kryo}, it will register some classes at startup time (for example {@link KryoSerializerFactory#create})
 * @author funkye
 */
public class UndoLogSerializerClassRegistry {

    // Insertion-ordered map of class -> optional custom serializer (null means
    // "register with the backend's default serializer").
    private static final Map<Class<?>, Object> REGISTRATIONS = new LinkedHashMap<>();

    /** Utility class: static registry only. BUGFIX: was instantiable before. */
    private UndoLogSerializerClassRegistry() {
    }

    static {
        // register serializer
        registerClass(Collections.singletonList("").getClass(), new ArraysAsListSerializer());
        registerClass(GregorianCalendar.class, new GregorianCalendarSerializer());
        registerClass(BigDecimal.class, new DefaultSerializers.BigDecimalSerializer());
        registerClass(BigInteger.class, new DefaultSerializers.BigIntegerSerializer());
        registerClass(Pattern.class, new RegexSerializer());
        registerClass(BitSet.class, new BitSetSerializer());
        registerClass(URI.class, new URISerializer());
        registerClass(UUID.class, new UUIDSerializer());
        // register commonly class
        registerClass(HashMap.class);
        registerClass(ArrayList.class);
        registerClass(LinkedList.class);
        registerClass(HashSet.class);
        registerClass(TreeSet.class);
        registerClass(Hashtable.class);
        registerClass(Date.class);
        registerClass(Calendar.class);
        registerClass(ConcurrentHashMap.class);
        registerClass(SimpleDateFormat.class);
        registerClass(GregorianCalendar.class);
        registerClass(Vector.class);
        registerClass(BitSet.class);
        registerClass(StringBuffer.class);
        registerClass(StringBuilder.class);
        registerClass(Object.class);
        registerClass(Object[].class);
        registerClass(String[].class);
        registerClass(byte[].class);
        registerClass(char[].class);
        registerClass(int[].class);
        registerClass(float[].class);
        registerClass(double[].class);
        // register branchUndoLog
        registerClass(BranchUndoLog.class);
    }

    /**
     * only supposed to be called at startup time
     *
     * @param clazz object type
     */
    public static void registerClass(Class<?> clazz) {
        registerClass(clazz, null);
    }

    /**
     * only supposed to be called at startup time
     *
     * @param clazz object type
     * @param serializer object serializer, or null for the backend default
     */
    public static void registerClass(Class<?> clazz, Object serializer) {
        if (clazz == null) {
            throw new IllegalArgumentException("Class registered cannot be null!");
        }
        REGISTRATIONS.put(clazz, serializer);
    }

    /**
     * get registered classes
     *
     * @return the live internal class -> serializer map (callers only iterate it)
     */
    public static Map<Class<?>, Object> getRegisteredClasses() {
        return REGISTRATIONS;
    }
}

View File

@@ -0,0 +1,49 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.parser.spi;
import org.nustaq.serialization.FSTObjectSerializer;
/**
 * The interface Fst serializer: SPI hook for contributing a custom
 * FST serializer for one class when parsing undo logs.
 *
 * @author jsbxyyx
 */
public interface FstSerializer {

    /**
     * The class handled by this custom serializer.
     *
     * @return class
     */
    Class type();

    /**
     * The custom FSTObjectSerializer applied to {@link #type()}.
     *
     * @return fst object serializer
     */
    FSTObjectSerializer ser();

    /**
     * Whether the serializer also applies to all subclasses of {@link #type()}.
     *
     * @return boolean
     */
    boolean alsoForAllSubclasses();
}

View File

@@ -0,0 +1,51 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.parser.spi;
import com.fasterxml.jackson.databind.JsonDeserializer;
import com.fasterxml.jackson.databind.JsonSerializer;
/**
 * The interface Jackson serializer: SPI hook for contributing a custom
 * Jackson serializer/deserializer pair for one class when parsing undo logs.
 *
 * @param <T> the type parameter
 * @author jsbxyyx
 */
public interface JacksonSerializer<T> {

    /**
     * The class handled by this serializer/deserializer pair.
     *
     * @return class
     */
    Class<T> type();

    /**
     * Jackson custom serializer for {@link #type()}; may be null to skip.
     *
     * @return json serializer
     */
    JsonSerializer<T> ser();

    /**
     * Jackson custom deserializer for {@link #type()}; may be null to skip.
     *
     * @return json deserializer
     */
    JsonDeserializer<? extends T> deser();
}

View File

@@ -0,0 +1,43 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.parser.spi;
import com.esotericsoftware.kryo.Serializer;
/**
 * The interface Kryo type serializer: SPI hook for contributing a custom
 * kryo serializer for one class when parsing undo logs.
 *
 * @param <T> the type parameter
 * @author jsbxyyx
 */
public interface KryoTypeSerializer<T> {

    /**
     * The class handled by this custom serializer.
     *
     * @return class
     */
    Class<T> type();

    /**
     * The kryo custom serializer registered for {@link #type()}.
     *
     * @return serializer
     */
    Serializer serializer();
}

View File

@@ -0,0 +1,36 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.parser.spi;
import io.protostuff.runtime.Delegate;
/**
 * The interface Protostuff delegate: SPI hook for contributing a custom
 * protostuff {@link Delegate} registered on the runtime id strategy.
 *
 * @param <T> the type parameter
 * @author jsbxyyx
 */
public interface ProtostuffDelegate<T> {

    /**
     * Creates the delegate instance to register.
     *
     * @return delegate
     */
    Delegate<T> create();
}

View File

@@ -0,0 +1,79 @@
/*
* Copyright 1999-2019 Seata.io Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.seata.rm.datasource.undo.postgresql;
import io.seata.common.exception.ShouldNeverHappenException;
import io.seata.common.util.CollectionUtils;
import io.seata.rm.datasource.ColumnUtils;
import io.seata.rm.datasource.sql.struct.Field;
import io.seata.rm.datasource.sql.struct.Row;
import io.seata.rm.datasource.sql.struct.TableRecords;
import io.seata.rm.datasource.undo.AbstractUndoExecutor;
import io.seata.rm.datasource.undo.SQLUndoLog;
import io.seata.sqlparser.util.JdbcConstants;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/**
 * The type postgresql undo delete executor: compensates a DELETE by
 * re-inserting the deleted row captured in the before image.
 *
 * @author japsercloud
 */
public class PostgresqlUndoDeleteExecutor extends AbstractUndoExecutor {

    /**
     * Template for the compensating insert:
     * INSERT INTO a (x, y, z, pk) VALUES (?, ?, ?, ?)
     */
    private static final String INSERT_SQL_TEMPLATE = "INSERT INTO %s (%s) VALUES (%s)";

    /**
     * Instantiates a new postgresql undo delete executor.
     *
     * @param sqlUndoLog the sql undo log
     */
    public PostgresqlUndoDeleteExecutor(SQLUndoLog sqlUndoLog) {
        super(sqlUndoLog);
    }

    /**
     * Builds the compensating INSERT statement from the first before-image row:
     * non-PK columns first, then the ordered primary-key columns.
     */
    @Override
    protected String buildUndoSQL() {
        TableRecords beforeImage = sqlUndoLog.getBeforeImage();
        List<Row> imageRows = beforeImage.getRows();
        if (CollectionUtils.isEmpty(imageRows)) {
            throw new ShouldNeverHappenException("Invalid UNDO LOG");
        }
        Row firstRow = imageRows.get(0);
        List<Field> columns = new ArrayList<>(firstRow.nonPrimaryKeys());
        columns.addAll(getOrderedPkList(beforeImage, firstRow, JdbcConstants.POSTGRESQL));
        // delete sql undo log before image all field come from table meta, need add escape.
        // see BaseTransactionalExecutor#buildTableRecords
        StringBuilder columnList = new StringBuilder();
        StringBuilder placeholders = new StringBuilder();
        for (Field column : columns) {
            if (columnList.length() > 0) {
                columnList.append(", ");
                placeholders.append(", ");
            }
            columnList.append(ColumnUtils.addEscape(column.getName(), JdbcConstants.POSTGRESQL));
            placeholders.append("?");
        }
        return String.format(INSERT_SQL_TEMPLATE, sqlUndoLog.getTableName(), columnList, placeholders);
    }

    @Override
    protected TableRecords getUndoRows() {
        return sqlUndoLog.getBeforeImage();
    }
}

Some files were not shown because too many files have changed in this diff Show More