├── .gitignore ├── src ├── main │ └── java │ │ └── uk │ │ └── co │ │ └── omegaprime │ │ └── mdbi │ │ ├── MapEntryAppender.java │ │ ├── RetryNothing.java │ │ ├── SQLAction.java │ │ ├── ContextWrite.java │ │ ├── Time.java │ │ ├── ContextRead.java │ │ ├── StatementlikeBatchRead.java │ │ ├── Statementlike.java │ │ ├── Write.java │ │ ├── StatementlikeBatchReads.java │ │ ├── Read.java │ │ ├── IndexRef.java │ │ ├── Retries.java │ │ ├── Retry.java │ │ ├── PreparedStatementlike.java │ │ ├── CollectionBatchRead.java │ │ ├── UnpreparedStatementlike.java │ │ ├── BoundRead.java │ │ ├── BatchRead.java │ │ ├── TupleWriteBuilder.java │ │ ├── SnocList.java │ │ ├── MatrixBatchRead.java │ │ ├── TupleWrite.java │ │ ├── MapBatchRead.java │ │ ├── BoundWrite.java │ │ ├── RetryDeadlocks.java │ │ ├── LabelledMatrixBatchRead.java │ │ ├── TupleRead.java │ │ ├── Primitives.java │ │ ├── BeanRead.java │ │ ├── BeanWrite.java │ │ ├── SegmentedMapBatchRead.java │ │ ├── Transactionally.java │ │ ├── RowReadBuilder.java │ │ ├── PeekedResultSet.java │ │ ├── DropColumnsResultSetMetaData.java │ │ ├── ContiguouslyFilteredResultSet.java │ │ ├── FunctionRead.java │ │ ├── Context.java │ │ ├── MatrixBatchReadBuilder.java │ │ ├── Reflection.java │ │ ├── SQLBuilder.java │ │ ├── BatchReads.java │ │ ├── SQL.java │ │ ├── MDBI.java │ │ ├── Writes.java │ │ └── Reads.java └── test │ └── java │ ├── uk │ └── co │ │ └── omegaprime │ │ └── mdbi │ │ ├── SnocListTest.java │ │ └── SQLInterfaceConformanceTest.java │ └── userpackage │ ├── RetryDeadlocksTest.java │ ├── TransactionallyTest.java │ ├── MDBIRetryTest.java │ └── Sample.java ├── .travis.yml ├── README.md ├── jdbi-comparison.txt └── LICENSE /.gitignore: -------------------------------------------------------------------------------- 1 | *.iml 2 | lib/ 3 | .idea/ 4 | out/ 5 | .DS_Store 6 | build/ 7 | .gradle/ 8 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/MapEntryAppender.java: 
/**
 * A {@link Retry} strategy that never retries: every exception passed to
 * {@link #consider(Exception)} is immediately rethrown, terminating the query.
 *
 * <p>Obtained via {@code Retries.nothing()}.
 */
class RetryNothing<T extends Exception> implements Retry<T> {
    @Override
    public void consider(T e) throws T {
        // Always propagate — this strategy performs no retries at all.
        throw e;
    }
}
| } 15 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # mdbi 2 | 3 | Max's DataBase Interface: a simple but powerful JDBC wrapper inspired by JDBI 4 | 5 | To get started with MDBI, please see the [website](http://batterseapower.github.io/mdbi/). If you just want some JARs, check out [Maven Central](http://mvnrepository.com/artifact/uk.co.omega-prime/mdbi/). 6 | 7 | [![Build Status](https://travis-ci.org/batterseapower/mdbi.svg?branch=master)](https://travis-ci.org/batterseapower/mdbi) 8 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/Time.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import java.time.ZoneId; 4 | import java.util.Calendar; 5 | import java.util.TimeZone; 6 | 7 | class Time { 8 | private Time() {} 9 | 10 | public static final TimeZone UTC_ZONE = TimeZone.getTimeZone("UTC"); 11 | public static final ThreadLocal UTC_CALENDAR = ThreadLocal.withInitial(() -> Calendar.getInstance(UTC_ZONE)); 12 | public static final ZoneId UTC_ZONE_ID = ZoneId.of("UTC"); 13 | } 14 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/ContextRead.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | class ContextRead implements Read { 4 | private final Class klass; 5 | 6 | public ContextRead(Class klass) { 7 | this.klass = klass; 8 | } 9 | 10 | @Override 11 | public Class getElementClass() { 12 | return klass; 13 | } 14 | 15 | @Override 16 | public BoundRead bind(Read.Context ctxt) { 17 | return ctxt.get(klass).bind(ctxt); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- 
/src/main/java/uk/co/omegaprime/mdbi/StatementlikeBatchRead.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import javax.annotation.ParametersAreNonnullByDefault; 4 | import java.sql.SQLException; 5 | 6 | /** 7 | * A function that turns the entire result of executing some SQL into a single Java object. 8 | *

/** The whole point of this interface is to give {@link Statement} and {@link PreparedStatement} a common API for use in {@link BatchRead}. */
public interface Statementlike {
    /** Executes the statement, draining and discarding any results it produces. */
    void execute() throws SQLException;

    /** Executes the statement as an update, returning the affected row count. */
    int executeUpdate() throws SQLException;

    /** As {@link #executeUpdate()}, but for row counts that may exceed {@link Integer#MAX_VALUE}. */
    long executeLargeUpdate() throws SQLException;

    /** Executes the statement as a query, returning its {@code ResultSet}. */
    ResultSet executeQuery() throws SQLException;
}

7 | * Useful instances of this can be obtained from {@link Writes}. 8 | */ 9 | public interface Write { 10 | BoundWrite bind(Context ctxt); 11 | 12 | /** An immutable description of how to obtain specific SQL types from Java. */ 13 | interface Context { 14 | Write get(Class klass); 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/StatementlikeBatchReads.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import javax.annotation.ParametersAreNonnullByDefault; 4 | import java.util.*; 5 | 6 | /** Functions for creating useful instances of {@link StatementlikeBatchRead}. */ 7 | @ParametersAreNonnullByDefault 8 | public final class StatementlikeBatchReads { 9 | private StatementlikeBatchReads() {} 10 | 11 | /** Creates a {@code BatchRead} that just processes a {@code ResultSet} via the {@link BatchRead} interface. */ 12 | public static StatementlikeBatchRead fromBatchRead(BatchRead rsbr) { 13 | return (ctxt, ps) -> rsbr.get(ctxt, ps.executeQuery()); 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/Read.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | /** 4 | * Given a context describing how to obtain arbitrary Java types from SQL ones, describes how a particular 5 | * Java type should be derived from a SQL type. 6 | *

/** Mutable cursor indicating which JDBC column is the next to be consumed. */
public class IndexRef {
    // Next column index to hand out; JDBC columns are 1-based.
    private int next = 1;

    private IndexRef() {}

    /** Returns an instance where the first {@link #take()} will return 1, the first column index in any {@code ResultSet}. */
    public static IndexRef create() {
        return new IndexRef();
    }

    /** Returns the next column index to consume, and simultaneously advances to the column after it. */
    public int take() {
        final int current = next;
        next = current + 1;
        return current;
    }

    /** Returns the next item that {@link #take()} would return, without actually advancing the index. */
    public int peek() {
        return next;
    }
}
don't retry anything */ 10 | public static Retry nothing() { 11 | return new RetryNothing(); 12 | } 13 | 14 | /** Retry exceptions that seem to be caused by deadlocks using a reasonable default backoff strategy */ 15 | public static Retry deadlocks() { 16 | return new RetryDeadlocks(); 17 | } 18 | 19 | /** Retry exceptions that seem to be caused by deadlocks using up to the given number of times, using the supplied initial backoff period */ 20 | public static Retry deadlocks(int maxRetries, int backoffInterval, TimeUnit backoffIntervalUnit) { 21 | return new RetryDeadlocks(maxRetries, backoffInterval, backoffIntervalUnit); 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/Retry.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | /** 4 | * A function that decides how to handle an exception that is encountered in the course of executing a query. 5 | *

6 | * When MDBI performs a query, it constructs a {@code Retry} instance. If an error is encountered during 7 | * execution of the query, the exception is supplied to the instance. The {@code Retry} may choose to rethrow 8 | * the exception, in which case execution of the query terminates, or it may choose to discard the exception, 9 | * in which case the query will be retried. 10 | *

11 | * By keeping mutable state in the {@code Retry} instance it's possible to implement elaborate retry strategies 12 | * that e.g. retry a fixed number of times before giving up, or have arbitrarily interesting backoff schemes. 13 | */ 14 | public interface Retry { 15 | /** Rethrows the supplied exception if necessary */ 16 | void consider(T e) throws T; 17 | } 18 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/PreparedStatementlike.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import java.sql.PreparedStatement; 4 | import java.sql.ResultSet; 5 | import java.sql.SQLException; 6 | 7 | class PreparedStatementlike implements Statementlike { 8 | private final PreparedStatement ps; 9 | 10 | public PreparedStatementlike(PreparedStatement ps) { 11 | this.ps = ps; 12 | } 13 | 14 | @Override 15 | public void execute() throws SQLException { 16 | if (ps.execute()) { 17 | while (ps.getMoreResults()) {} 18 | } 19 | } 20 | 21 | @Override 22 | public int executeUpdate() throws SQLException { 23 | return ps.executeUpdate(); 24 | } 25 | 26 | @Override 27 | public long executeLargeUpdate() throws SQLException { 28 | return ps.executeLargeUpdate(); 29 | } 30 | 31 | @Override 32 | public ResultSet executeQuery() throws SQLException { 33 | return ps.executeQuery(); 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/CollectionBatchRead.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import javax.annotation.Nonnull; 4 | import java.sql.ResultSet; 5 | import java.sql.SQLException; 6 | import java.util.Collection; 7 | import java.util.function.Supplier; 8 | 9 | class CollectionBatchRead> implements BatchRead { 10 | private final Supplier factory; 11 | private final Read 
read; 12 | 13 | public CollectionBatchRead(Supplier factory, Read read) { 14 | this.factory = factory; 15 | this.read = read; 16 | } 17 | 18 | @Override 19 | public CollectionT get(@Nonnull Read.Context ctxt, @Nonnull ResultSet rs) throws SQLException { 20 | final BoundRead boundRead = read.bind(ctxt); 21 | 22 | final CollectionT result = factory.get(); 23 | while (rs.next()) { 24 | result.add(boundRead.get(rs, IndexRef.create())); 25 | } 26 | return result; 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/UnpreparedStatementlike.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import java.sql.ResultSet; 4 | import java.sql.SQLException; 5 | import java.sql.Statement; 6 | 7 | class UnpreparedStatementlike implements Statementlike { 8 | private final Statement s; 9 | private final String sql; 10 | 11 | public UnpreparedStatementlike(Statement s, String sql) { 12 | this.s = s; 13 | this.sql = sql; 14 | } 15 | 16 | @Override 17 | public void execute() throws SQLException { 18 | if (s.execute(sql)) { 19 | while (s.getMoreResults()) {} 20 | } 21 | } 22 | 23 | @Override 24 | public int executeUpdate() throws SQLException { 25 | return s.executeUpdate(sql); 26 | } 27 | 28 | @Override 29 | public long executeLargeUpdate() throws SQLException { 30 | return s.executeLargeUpdate(sql); 31 | } 32 | 33 | @Override 34 | public ResultSet executeQuery() throws SQLException { 35 | return s.executeQuery(sql); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /src/test/java/uk/co/omegaprime/mdbi/SQLInterfaceConformanceTest.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import org.junit.Test; 4 | 5 | import java.lang.reflect.Method; 6 | import java.lang.reflect.Modifier; 7 | import 
java.util.Arrays; 8 | 9 | public class SQLInterfaceConformanceTest { 10 | @Test 11 | public void sqlReturningMethodsHaveStaticEquivalent() throws NoSuchMethodException { 12 | for (Method m : SQL.class.getDeclaredMethods()) { 13 | if ((m.getModifiers() & Modifier.STATIC) != 0) { 14 | continue; 15 | } 16 | 17 | if ((m.getModifiers() & Modifier.PUBLIC) == 0) { 18 | continue; 19 | } 20 | 21 | if (m.getReturnType() != SQL.class) { 22 | continue; 23 | } 24 | 25 | // A static version of SQL#sql(SQL) would be pointless! 26 | if (m.getName().equals("sql") && Arrays.equals(m.getParameterTypes(), new Class[] { SQL.class })) { 27 | continue; 28 | } 29 | 30 | MDBI.class.getMethod(m.getName(), m.getParameterTypes()); 31 | } 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/BoundRead.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import javax.annotation.Nonnull; 4 | import java.sql.ResultSet; 5 | import java.sql.SQLException; 6 | 7 | /** 8 | * A function that turns a consecutive subset of columns in one row of a {@code ResultSet} into a Java object. 9 | *

10 | * You should only have to worry about this interface if you are creating your own implementations 11 | * of the {@link Read} interface. Users who are just using the built-in type mappers can ignore it. 12 | */ 13 | public interface BoundRead { 14 | /** Returns the number of parameters that we will consume from a row of the {@code ResultSet}. */ 15 | int arity(); 16 | 17 | /** 18 | * Consumes data in zero or more columns of the current {@code ResultSet} row, beginning with 19 | * that indicated by {@code ix}. 20 | *

21 | * Please do not advance the {@code ResultSet} from within your implementation of this method! 22 | * However, feel free to advance the {@code IndexRef} as many times as you need. 23 | */ 24 | T get(@Nonnull ResultSet rs, @Nonnull IndexRef ix) throws SQLException; 25 | } 26 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/BatchRead.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import java.sql.ResultSet; 4 | import java.sql.SQLException; 5 | 6 | /** 7 | * A slightly simplified version of {@link StatementlikeBatchRead} that consumes a {@code ResultSet}. 8 | *

9 | * Useful instances of this can be obtained from {@link BatchReads}. 10 | */ 11 | public interface BatchRead { 12 | /** 13 | * Consume one or more row from the {@code ResultSet}, returning a corresponding Java object. 14 | *

15 | * When it arrives, the {@code ResultSet} will be on the row before the first one that you can consume -- i.e. 16 | * it will not be on a valid row. Consequently you need to call {@link ResultSet#next()} before you access any column. 17 | *

18 | * If you cease consuming the {@code ResultSet} early, you should leave the {@code ResultSet} 19 | * on the last row that you have decided to consume. This means that there is no difference between 20 | * leaving the {@code ResultSet} on the final row and leaving it after the end of the whole {@code ResultSet}. 21 | */ 22 | T get(Read.Context ctxt, ResultSet rs) throws SQLException; 23 | } 24 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/TupleWriteBuilder.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import java.util.AbstractMap; 4 | import java.util.ArrayList; 5 | import java.util.List; 6 | import java.util.Map; 7 | import java.util.function.Function; 8 | 9 | /** Allows the construction of {@link Write} instances that can turn compound Java objects into sequences of SQL values. */ 10 | public class TupleWriteBuilder { 11 | private final List> args = new ArrayList<>(); 12 | 13 | private TupleWriteBuilder() { } 14 | public static TupleWriteBuilder create() { return new TupleWriteBuilder<>(); } 15 | 16 | /** Turns the property specified by {@code f} into a SQL object using the {@link Write} registered in the {@link Context} for the supplied class. */ 17 | public TupleWriteBuilder add(Class klass, Function f) { 18 | return add(new ContextWrite(klass), f); 19 | } 20 | 21 | /** Turns the property specified by {@code f} into a SQL object using the supplied {@code Write} instance. 
/**
 * An immutable list supporting O(1) append ("snoc" — "cons" backwards) and
 * front-to-back iteration. Each cell holds the last element plus an optional
 * reference to the list of everything before it.
 */
class SnocList<T> implements Iterable<T> {
    private final Optional<SnocList<T>> init;  // everything but the last element, if any
    private final T last;

    public SnocList(Optional<SnocList<T>> init, T last) {
        this.init = init;
        this.last = last;
    }

    /** Returns a one-element list. */
    public static <T> SnocList<T> singleton(T last) {
        return new SnocList<>(Optional.empty(), last);
    }

    /** Returns a new list with {@code last} appended; this list is unchanged. */
    public SnocList<T> snoc(T last) {
        return new SnocList<>(Optional.of(this), last);
    }

    /** Returns a new list with every element of {@code xs} appended in order. */
    public SnocList<T> snocs(SnocList<T> xs) {
        SnocList<T> acc = this;
        for (T x : xs) {
            acc = acc.snoc(x);
        }
        return acc;
    }

    @Override
    public Iterator<T> iterator() {
        // The chain links backwards, so push elements onto a deque to reverse
        // them into front-to-back order before iterating.
        final ArrayDeque<T> elements = new ArrayDeque<>();
        Optional<SnocList<T>> cursor = Optional.of(this);
        while (cursor.isPresent()) {
            final SnocList<T> cell = cursor.get();
            elements.push(cell.last);
            cursor = cell.init;
        }
        return elements.iterator();
    }
}
4 | - Transaction support that doesn't blow up in interesting ways when you try to nest it 5 | - Native support for Java 8 time/date types 6 | - No attempt to parse the SQL, so no need to worry about escaping it 7 | - Positional rather than named argument binding, eliminating a whole class of errors 8 | - More powerful resultset mapping than just row-by-row -- can also do whole-resultset transformations (e.g. the matrix mapper) 9 | - Compositional compound mappers -- JDBI's ReflectionBeanMapper must consume the whole row, but the MDBI Readers.bean() can 10 | read just a fragment of the row and so be used with e.g. BatchReads.asMap to retrieve just the key/value part of the row 11 | - User-transparent ability to toggle between prepared and unprepared modes (unprepared statements are mandatory if you want to 12 | e.g. create a temp table using the SQL Server JDBC driver) 13 | - Symmetric treatment of statement parameters and results: you can register mappers for input to statements as well as their outputs 14 | - No dependencies 15 | - No wrapping of exceptions into weird custom types: just uses SQLException 16 | -------------------------------------------------------------------------------- /src/test/java/userpackage/RetryDeadlocksTest.java: -------------------------------------------------------------------------------- 1 | package userpackage; 2 | 3 | import org.junit.Test; 4 | import uk.co.omegaprime.mdbi.Retries; 5 | import uk.co.omegaprime.mdbi.Retry; 6 | 7 | import java.sql.SQLException; 8 | import java.util.concurrent.TimeUnit; 9 | 10 | public class RetryDeadlocksTest { 11 | private final SQLException DEADLOCK_EXCEPTION = new SQLException("deadlocked with another process"); 12 | 13 | @Test 14 | public void retryTwiceIfRetryCountIsTwo() throws SQLException { 15 | final Retry r = Retries.deadlocks(2, 0, TimeUnit.MILLISECONDS); 16 | r.consider(DEADLOCK_EXCEPTION); 17 | // First retry happens here.. 
18 | r.consider(DEADLOCK_EXCEPTION); 19 | // Second retry happens here.. 20 | } 21 | 22 | @Test(expected = SQLException.class) 23 | public void dontRetryThrifeIfRetryCountIsTwo() throws SQLException { 24 | final Retry r = Retries.deadlocks(2, 0, TimeUnit.MILLISECONDS); 25 | r.consider(DEADLOCK_EXCEPTION); 26 | // First retry happens here.. 27 | r.consider(DEADLOCK_EXCEPTION); 28 | // Second retry happens here.. 29 | r.consider(DEADLOCK_EXCEPTION); 30 | } 31 | 32 | @Test(expected = SQLException.class) 33 | public void rethrowNonDeadlockyExceptions() throws SQLException { 34 | final Retry r = Retries.deadlocks(2, 0, TimeUnit.MILLISECONDS); 35 | r.consider(new SQLException("you got a column name wrong")); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/MatrixBatchRead.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import javax.annotation.Nonnull; 4 | import java.sql.ResultSet; 5 | import java.sql.SQLException; 6 | import java.util.*; 7 | import java.util.stream.Collectors; 8 | 9 | class MatrixBatchRead implements BatchRead { 10 | private final Collection> reads; 11 | 12 | public MatrixBatchRead(Collection> reads) { 13 | this.reads = reads; 14 | } 15 | 16 | @Override 17 | @SuppressWarnings("unchecked") 18 | public Object[] get(@Nonnull Read.Context ctxt, @Nonnull ResultSet rs) throws SQLException { 19 | final List> boundReads = reads.stream().map(read -> read.bind(ctxt)).collect(Collectors.toList()); 20 | 21 | final List[] columnLists = new List[boundReads.size()]; 22 | for (int i = 0; i < columnLists.length; i++) { 23 | columnLists[i] = new ArrayList<>(); 24 | } 25 | 26 | while (rs.next()) { 27 | final IndexRef ix = IndexRef.create(); 28 | for (int i = 0; i < columnLists.length; i++) { 29 | ((List)columnLists[i]).add(boundReads.get(i).get(rs, ix)); 30 | } 31 | } 32 | 33 | final Object[] columns = 
new Object[columnLists.length]; 34 | final Iterator> readsIt = reads.iterator(); 35 | for (int i = 0; i < columnLists.length; i++) { 36 | final List list = columnLists[i]; 37 | columns[i] = Primitives.listToArray(readsIt.next().getElementClass(), list); 38 | } 39 | 40 | return columns; 41 | } 42 | 43 | } 44 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/TupleWrite.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import javax.annotation.Nonnull; 4 | import java.sql.PreparedStatement; 5 | import java.sql.SQLException; 6 | import java.util.AbstractMap; 7 | import java.util.ArrayList; 8 | import java.util.List; 9 | import java.util.function.Function; 10 | 11 | class TupleWrite implements Write { 12 | private final List> args; 13 | 14 | public TupleWrite(List> args) { 15 | this.args = args; 16 | } 17 | 18 | @Override 19 | public BoundWrite bind(Write.Context ctxt) { 20 | final List> boundWrites = new ArrayList<>(args.size()); 21 | for (Write write : args) { 22 | boundWrites.add(write.bind(ctxt)); 23 | } 24 | return new BoundWrite() { 25 | @Override 26 | public int arity() { 27 | return boundWrites.stream().mapToInt(BoundWrite::arity).sum(); 28 | } 29 | 30 | @Override 31 | public void set(@Nonnull PreparedStatement s, @Nonnull IndexRef ix, T x) throws SQLException { 32 | for (BoundWrite write : boundWrites) { 33 | write.set(s, ix, x); 34 | } 35 | } 36 | 37 | @Nonnull 38 | @Override 39 | public List asSQL(T x) { 40 | final List result = new ArrayList<>(); 41 | for (BoundWrite write : boundWrites) { 42 | result.addAll(write.asSQL(x)); 43 | } 44 | return result; 45 | } 46 | }; 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/MapBatchRead.java: -------------------------------------------------------------------------------- 1 | package 
uk.co.omegaprime.mdbi; 2 | 3 | import javax.annotation.ParametersAreNonnullByDefault; 4 | import java.sql.ResultSet; 5 | import java.sql.SQLException; 6 | import java.util.Map; 7 | import java.util.function.Supplier; 8 | 9 | @ParametersAreNonnullByDefault 10 | class MapBatchRead> implements BatchRead { 11 | private final Supplier factory; 12 | private final MapEntryAppender append; 13 | private final Read readKey; 14 | private final Read readValue; 15 | 16 | public MapBatchRead(Supplier factory, MapEntryAppender append, Read readKey, Read readValue) { 17 | this.factory = factory; 18 | this.append = append; 19 | this.readKey = readKey; 20 | this.readValue = readValue; 21 | } 22 | 23 | @Override 24 | public MapT get(Read.Context ctxt, ResultSet rs) throws SQLException { 25 | final BoundRead boundReadKey = readKey.bind(ctxt); 26 | final BoundRead boundReadValue = readValue.bind(ctxt); 27 | final MapT result = factory.get(); 28 | while (rs.next()) { 29 | final IndexRef ix = IndexRef.create(); 30 | final K key = boundReadKey.get(rs, ix); 31 | final V value = boundReadValue.get(rs, ix); 32 | 33 | final V consolidatedValue; 34 | if (!result.containsKey(key)) { 35 | consolidatedValue = value; 36 | } else { 37 | consolidatedValue = append.append(key, result.get(key), value); 38 | } 39 | 40 | result.put(key, consolidatedValue); 41 | } 42 | 43 | return result; 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/BoundWrite.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import javax.annotation.Nonnull; 4 | import javax.annotation.Nullable; 5 | import java.sql.PreparedStatement; 6 | import java.sql.SQLException; 7 | import java.util.List; 8 | 9 | /** 10 | * A function that turns a Java object into data in a consecutive subset of parameters of a SQL query. 11 | *
/**
 * A function that turns a Java object into data in a consecutive subset of parameters of a SQL query.
 * <p>
 * You should only have to worry about this interface if you are creating your own implementations
 * of the {@link Read} interface. Users who are just using the built-in type mappers can ignore it.
 *
 * @param <T> the Java type this instance knows how to write into a statement
 */
public interface BoundWrite<T> {
    /** Returns the number of parameters that we will require to represent the Java object. */
    int arity();

    /**
     * Inserts data into zero or more columns of the {@code PreparedStatement}, beginning with that
     * indicated by {@code ix}. It is expected that this data will be based on the value of {@code x}.
     * <p>
     * Please do not e.g. execute the {@code PreparedStatement} from within your implementation of this method!
     * You should only be setting data into the statement. You are also expected to advance the {@code IndexRef}
     * exactly {@link #arity()} times.
     *
     * @param s  the statement to populate (never executed here)
     * @param ix mutable cursor over the statement's parameter indexes
     * @param x  the value being written; may be null
     */
    void set(@Nonnull PreparedStatement s, @Nonnull IndexRef ix, @Nullable T x) throws SQLException;

    /**
     * Returns a sequence of SQL expressions presenting the data that would have been set on the
     * {@code PreparedStatement} by the equivalent {@link #set(PreparedStatement, IndexRef, Object)} call.
     * <p>
     * The returned list should be exactly {@link #arity()} elements in length.
     * <p>
     * This method is used when the user of MDBI has elected not to use {@code PreparedStatement}s.
     */
    @Nonnull List<String> asSQL(@Nullable T x);
}
0 : jitterSource.nextInt(maxJitterMillis); 55 | 56 | // 1 <= retry <= maxRetries 57 | Thread.sleep(jitterMillis + (backoffMillis * pow(2, retry - 1))); 58 | } catch (InterruptedException e1) { 59 | Thread.currentThread().interrupt(); 60 | } 61 | } else { 62 | throw e; 63 | } 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/LabelledMatrixBatchRead.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import java.sql.ResultSet; 4 | import java.sql.ResultSetMetaData; 5 | import java.sql.SQLException; 6 | import java.util.*; 7 | import java.util.stream.Collectors; 8 | 9 | class LabelledMatrixBatchRead implements BatchRead> { 10 | private final Collection> reads; 11 | 12 | public LabelledMatrixBatchRead(Collection> reads) { 13 | this.reads = reads; 14 | } 15 | 16 | @SuppressWarnings("unchecked") 17 | @Override 18 | public Map get(Read.Context ctxt, ResultSet rs) throws SQLException { 19 | final List> boundReads = reads.stream().map(read -> read.bind(ctxt)).collect(Collectors.toList()); 20 | 21 | final List[] columnLists = new List[reads.size()]; 22 | for (int i = 0; i < columnLists.length; i++) { 23 | columnLists[i] = new ArrayList<>(); 24 | } 25 | 26 | final String[] keys = new String[reads.size()]; 27 | { 28 | final ResultSetMetaData rsmd = rs.getMetaData(); 29 | 30 | int ix = 1; 31 | int i = 0; 32 | for (BoundRead boundRead : boundReads) { 33 | keys[i++] = rsmd.getColumnName(ix); 34 | ix += boundRead.arity(); 35 | } 36 | } 37 | 38 | while (rs.next()) { 39 | final IndexRef ix = IndexRef.create(); 40 | for (int i = 0; i < columnLists.length; i++) { 41 | ((List)columnLists[i]).add(boundReads.get(i).get(rs, ix)); 42 | } 43 | } 44 | 45 | final Map result = new LinkedHashMap<>(); 46 | final Iterator> readsIt = reads.iterator(); 47 | for (int i = 0; i < columnLists.length; i++) { 48 | if 
(result.containsKey(keys[i])) { 49 | throw new IllegalArgumentException("Column " + keys[i] + " occurs in ResultSet twice"); 50 | } 51 | 52 | final List list = columnLists[i]; 53 | result.put(keys[i], Primitives.listToArray(readsIt.next().getElementClass(), list)); 54 | } 55 | 56 | return result; 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/TupleRead.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import javax.annotation.Nonnull; 4 | import java.lang.reflect.Constructor; 5 | import java.sql.ResultSet; 6 | import java.sql.SQLException; 7 | import java.util.Arrays; 8 | import java.util.Collection; 9 | import java.util.List; 10 | import java.util.stream.Collectors; 11 | 12 | class TupleRead implements Read { 13 | private final Class klass; 14 | private final Constructor constructor; 15 | private final Collection> reads; 16 | 17 | public TupleRead(Class klass) { 18 | this.klass = klass; 19 | this.constructor = Reflection.getUniqueConstructor(klass); 20 | this.reads = Arrays.asList(constructor.getParameterTypes()).stream().map(ContextRead::new).collect(Collectors.toList()); 21 | 22 | this.constructor.setAccessible(true); 23 | } 24 | 25 | public TupleRead(Class klass, Collection> reads) { 26 | this.klass = klass; 27 | this.constructor = Reflection.getCompatibleConstructor(klass, reads.stream().map(Read::getElementClass).collect(Collectors.toList())); 28 | this.reads = reads; 29 | 30 | this.constructor.setAccessible(true); 31 | } 32 | 33 | @Override 34 | public Class getElementClass() { 35 | return klass; 36 | } 37 | 38 | @Override 39 | public BoundRead bind(Read.Context ctxt) { 40 | final List> boundReads = reads.stream().map(r -> r.bind(ctxt)).collect(Collectors.toList()); 41 | return new BoundRead() { 42 | @Override 43 | public int arity() { 44 | return 
boundReads.stream().mapToInt(BoundRead::arity).sum(); 45 | } 46 | 47 | @Override 48 | public T get(@Nonnull ResultSet rs, @Nonnull IndexRef ix) throws SQLException { 49 | final Object[] arguments = new Object[boundReads.size()]; 50 | for (int i = 0; i < arguments.length; i++) { 51 | arguments[i] = boundReads.get(i).get(rs, ix); 52 | } 53 | return Reflection.constructUnchecked(constructor, arguments); 54 | } 55 | }; 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/Primitives.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import java.lang.reflect.Array; 4 | import java.util.List; 5 | 6 | class Primitives { 7 | private Primitives() {} 8 | 9 | static Object listToArray(Class klass, List list) { 10 | if (klass == boolean.class) { 11 | final boolean[] result = new boolean[list.size()]; 12 | for (int i = 0; i < list.size(); i++) result[i] = (boolean)(Boolean)list.get(i); 13 | return result; 14 | } else if (klass == byte.class) { 15 | final byte[] result = new byte[list.size()]; 16 | for (int i = 0; i < list.size(); i++) result[i] = (byte)(Byte)list.get(i); 17 | return result; 18 | } else if (klass == char.class) { 19 | final char[] result = new char[list.size()]; 20 | for (int i = 0; i < list.size(); i++) result[i] = (char)(Character)list.get(i); 21 | return result; 22 | } else if (klass == short.class) { 23 | final short[] result = new short[list.size()]; 24 | for (int i = 0; i < list.size(); i++) result[i] = (short)(Short)list.get(i); 25 | return result; 26 | } else if (klass == int.class) { 27 | final int[] result = new int[list.size()]; 28 | for (int i = 0; i < list.size(); i++) result[i] = (int)(Integer)list.get(i); 29 | return result; 30 | } else if (klass == long.class) { 31 | final long[] result = new long[list.size()]; 32 | for (int i = 0; i < list.size(); i++) result[i] = 
(long)(Long)list.get(i); 33 | return result; 34 | } else if (klass == float.class) { 35 | final float[] result = new float[list.size()]; 36 | for (int i = 0; i < list.size(); i++) result[i] = (float)(Float)list.get(i); 37 | return result; 38 | } else if (klass == double.class) { 39 | final double[] result = new double[list.size()]; 40 | for (int i = 0; i < list.size(); i++) result[i] = (double)(Double)list.get(i); 41 | return result; 42 | } else if (Object.class.isAssignableFrom(klass)) { 43 | final Object[] result = (Object[]) Array.newInstance(klass, list.size()); 44 | for (int i = 0; i < list.size(); i++) result[i] = list.get(i); 45 | return result; 46 | } else { 47 | throw new IllegalStateException("Please add support for primitive type " + klass); 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/BeanRead.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import javax.annotation.Nonnull; 4 | import java.lang.reflect.Constructor; 5 | import java.lang.reflect.Method; 6 | import java.sql.ResultSet; 7 | import java.sql.SQLException; 8 | import java.util.Arrays; 9 | import java.util.Collection; 10 | import java.util.List; 11 | import java.util.stream.Collectors; 12 | 13 | class BeanRead implements Read { 14 | private final Class klass; 15 | private final Constructor constructor; 16 | private final Method[] setters; 17 | private final Collection> reads; 18 | 19 | public BeanRead(Class klass, String... 
fields) { 20 | this.klass = klass; 21 | this.constructor = Reflection.getBeanConstructor(klass); 22 | this.setters = Reflection.lookupBeanSetters(klass, Arrays.asList(fields)); 23 | this.reads = Arrays.asList(this.setters).stream().map(m -> new ContextRead<>(m.getParameterTypes()[0])).collect(Collectors.toList()); 24 | } 25 | 26 | public BeanRead(Class klass, Collection fields, Collection> reads) { 27 | this.klass = klass; 28 | this.constructor = Reflection.getBeanConstructor(klass); 29 | this.setters = Reflection.lookupBeanSetters(klass, fields); 30 | this.reads = reads; 31 | 32 | Reflection.checkReadsConformance("Fields " + fields, Arrays.asList(setters).stream().map(m -> m.getParameterTypes()[0]).collect(Collectors.toList()), reads); 33 | } 34 | 35 | @Override 36 | public Class getElementClass() { 37 | return klass; 38 | } 39 | 40 | @Override 41 | public BoundRead bind(Read.Context ctxt) { 42 | final List boundReads = reads.stream().map(r -> r.bind(ctxt)).collect(Collectors.toList()); 43 | return new BoundRead() { 44 | @Override 45 | public int arity() { 46 | return boundReads.stream().mapToInt(BoundRead::arity).sum(); 47 | } 48 | 49 | @Override 50 | public T get(@Nonnull ResultSet rs, @Nonnull IndexRef ix) throws SQLException { 51 | final T x = Reflection.constructUnchecked(constructor, new Object[0]); 52 | for (int i = 0; i < setters.length; i++) { 53 | Reflection.invokeUnchecked(setters[i], x, new Object[]{boundReads.get(i).get(rs, ix)}); 54 | } 55 | return x; 56 | } 57 | }; 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/BeanWrite.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import javax.annotation.Nonnull; 4 | import java.lang.reflect.Method; 5 | import java.sql.PreparedStatement; 6 | import java.sql.SQLException; 7 | import java.util.ArrayList; 8 | import java.util.Arrays; 9 | import 
java.util.Collection; 10 | import java.util.List; 11 | import java.util.stream.Collectors; 12 | 13 | class BeanWrite implements Write { 14 | private final Method[] getters; 15 | private final Collection> writes; 16 | 17 | public BeanWrite(Class klass, String... fields) { 18 | this.getters = Reflection.lookupBeanGetters(klass, Arrays.asList(fields)); 19 | this.writes = Arrays.asList(getters).stream().map(r -> new ContextWrite<>(r.getReturnType())).collect(Collectors.toList()); 20 | } 21 | 22 | public BeanWrite(Class klass, Collection fields, Collection> writes) { 23 | this.getters = Reflection.lookupBeanSetters(klass, fields); 24 | this.writes = writes; 25 | Reflection.checkWritesConformance("Fields " + fields, Arrays.asList(getters).stream().map(Method::getReturnType).collect(Collectors.toList()), writes); 26 | } 27 | 28 | @Override 29 | public BoundWrite bind(Context ctxt) { 30 | final List> boundWrites = writes.stream().map(w -> w.bind(ctxt)).collect(Collectors.toList()); 31 | return new BoundWrite() { 32 | @Override 33 | @SuppressWarnings("unchecked") 34 | public void set(@Nonnull PreparedStatement s, @Nonnull IndexRef ix, T x) throws SQLException { 35 | for (int i = 0; i < getters.length; i++) { 36 | ((BoundWrite)boundWrites.get(i)).set(s, ix, Reflection.invokeUnchecked(getters[i], x, new Object[0])); 37 | } 38 | } 39 | 40 | @Nonnull 41 | @Override 42 | @SuppressWarnings("unchecked") 43 | public List asSQL(T x) { 44 | final List result = new ArrayList<>(); 45 | for (int i = 0; i < getters.length; i++) { 46 | final Object o; 47 | try { 48 | o = Reflection.invokeUnchecked(getters[i], x, new Object[0]); 49 | } catch (SQLException e) { 50 | throw new RuntimeException(e); 51 | } 52 | result.addAll(((BoundWrite) boundWrites.get(i)).asSQL(o)); 53 | } 54 | return result; 55 | } 56 | 57 | @Override 58 | public int arity() { 59 | return boundWrites.stream().mapToInt(BoundWrite::arity).sum(); 60 | } 61 | }; 62 | } 63 | } 64 | 
package userpackage;

import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import uk.co.omegaprime.mdbi.*;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static uk.co.omegaprime.mdbi.MDBI.sql;

/**
 * Exercises {@link Transactionally#run}: commit on success, rollback on failure, and the
 * "join the existing transaction" behavior when calls are nested on the same connection.
 */
public class TransactionallyTest {
    private Connection conn;
    private MDBI m;

    // Fresh in-memory SQLite database per test, seeded with a single row whose value is 0.
    @Before
    public void setUp() throws SQLException {
        conn = DriverManager.getConnection("jdbc:sqlite::memory:");
        m = MDBI.of(conn);
        m.execute(sql("create table tab (value integer)"));
        m.execute(sql("insert into tab (value) values (0)"));
    }

    // A successful action returns its result and leaves the connection back in auto-commit mode.
    @Test
    public void transactionallySucceed() throws SQLException {
        Assert.assertEquals(0, Transactionally.run(conn, () -> m.queryFirst(sql("select value from tab"), int.class)).intValue());
        assertTrue(conn.getAutoCommit());
    }

    // A nested run() joins the outer transaction rather than opening a second one.
    @Test
    public void transactionallySucceedNested() throws SQLException {
        Assert.assertEquals(0, Transactionally.run(conn, () -> Transactionally.run(conn, () -> m.queryFirst(sql("select value from tab"), int.class))).intValue());
        assertTrue(conn.getAutoCommit());
    }

    // A failing action must roll back the earlier update in the same transaction.
    @Test
    public void transactionallyFail() throws SQLException {
        boolean thrown = false;
        try {
            Transactionally.run(conn, () -> {
                m.execute(sql("update tab set value = value + 1"));
                m.execute(sql("i'm a bogus query lol"));
                return null;
            });
        } catch (SQLException e) {
            // Only swallow the deliberately-provoked syntax error; rethrow anything unexpected.
            if (!e.getMessage().contains("near \"i\"")) {
                throw e;
            } else {
                thrown = true;
            }
        }

        assertTrue(conn.getAutoCommit());
        assertTrue(thrown);
        // The update inside the failed transaction must not be visible.
        Assert.assertEquals(0, m.queryFirst(sql("select value from tab"), int.class).intValue());
    }

    // A failure inside a nested run() rolls back the whole (single, joined) transaction.
    @Test
    public void transactionallyFailNested() throws SQLException {
        boolean thrown = false;
        try {
            Transactionally.run(conn, () -> {
                m.execute(sql("update tab set value = value + 1"));
                return Transactionally.run(conn, () -> {
                    m.execute(sql("update tab set value = value + 1"));
                    m.execute(sql("i'm a bogus query lol"));
                    return null;
                });
            });
        } catch (SQLException e) {
            if (!e.getMessage().contains("near \"i\"")) {
                throw e;
            } else {
                thrown = true;
            }
        }

        assertTrue(conn.getAutoCommit());
        assertTrue(thrown);
        // Both updates (outer and nested) must have been rolled back together.
        Assert.assertEquals(0, m.queryFirst(sql("select value from tab"), int.class).intValue());
    }
}
import javax.annotation.ParametersAreNonnullByDefault;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Map;
import java.util.Objects;
import java.util.function.Supplier;

// Reads a ResultSet as a Map by splitting it into contiguous "segments" of rows that share the
// same key (read from the leading columns) and handing each segment — with the key columns
// dropped — to an inner BatchRead that produces the value for that key.
@ParametersAreNonnullByDefault
class SegmentedMapBatchRead<K, V, MapT extends Map<K, V>> implements BatchRead<MapT> {
    private final Supplier<MapT> factory;          // creates the (empty) result map
    private final Read<K> readKey;                 // reads the segment key from a row's leading columns
    private final BatchRead<V> readValue;          // consumes the non-key columns of a whole segment
    private final MapEntryAppender<K, V> append;   // combines values when a key recurs in a later segment

    public SegmentedMapBatchRead(Supplier<MapT> factory, MapEntryAppender<K, V> append, Read<K> readKey, BatchRead<V> readValue) {
        this.factory = factory;
        this.append = append;
        this.readKey = readKey;
        this.readValue = readValue;
    }

    @Override
    public MapT get(Read.Context ctxt, ResultSet rs) throws SQLException {
        final MapT result = factory.get();
        final BoundRead<K> boundReadKey = readKey.bind(ctxt);
        if (rs.next()) {
            while (true) {
                final IndexRef ix = IndexRef.create();
                final K key = boundReadKey.get(rs, ix);

                // Present the inner reader with a view that (a) re-exposes the current, already
                // consumed row (PeekedResultSet), (b) stops at the first row whose key differs
                // (ContiguouslyFilteredResultSet), and (c) hides the key columns
                // (DropColumnsResultSet — ix.peek() is the first non-key column index).
                final PeekedResultSet prs = new PeekedResultSet(rs);
                final ContiguouslyFilteredResultSet srs = new ContiguouslyFilteredResultSet(prs, () -> Objects.equals(key, boundReadKey.get(prs, IndexRef.create())));
                final V newValue = readValue.get(ctxt, new DropColumnsResultSet(ix.peek(), srs));

                final V consolidatedValue;
                if (result.containsKey(key)) {
                    consolidatedValue = append.append(key, result.get(key), newValue);
                } else {
                    consolidatedValue = newValue;
                }

                result.put(key, consolidatedValue);

                // NOTE(review): if readValue never consumes the peeked row, this loop re-reads the
                // same row's key on the next iteration without advancing — it presumably relies on
                // BatchRead implementations always consuming at least one row; confirm that contract.
                if (prs.isUsedPeekedRow()) {
                    // 1. If readValue never called next(), it left "srs" on the row before where "rs" is right now, and so
                    //    we still need to treat the current row of "rs" as unconsumed, so don't next() over it.
                    //
                    // 2. Equally, if readValue() called next() but did so leaving the ResultSet after the last row in the filtered
                    //    set, we should *not* treat that as meaning that the current row of "rs" is unconsumed, since the user
                    //    of "srs" never actually got a chance to see it!
                    //
                    // Note that if readValue called next() and then previous() then "srs" will be on row 0, exactly as in case 1.
                    // However, *unlike* in that case "rs" will have been moved back to the row prior to what it was on before the
                    // readValue call, so doing next() here just takes us back to where we were before the readValue call, which is fine.
                    if (srs.isAfterLast()) {
                        if (rs.isAfterLast()) break;
                    } else {
                        if (!rs.next()) break;
                    }
                }
            }
        }

        return result;
    }
}

38 | * If the action throws an exception, the transaction will be rolled back. If the action completes 39 | * without throwing, the transaction will be committed. 40 | *

41 | * If a transaction is already in progress then the action will just be executed with no special 42 | * handling, essentially joining the transaction that is already in progress. 43 | */ 44 | public static T run(Connection c, SQLAction action) throws SQLException { 45 | if (!c.getAutoCommit()) { 46 | // Already in transaction, join that one 47 | return action.run(); 48 | } else { 49 | try (TransactionHelper th = new TransactionHelper(c)) { 50 | final T result = action.run(); 51 | th.success = true; 52 | return result; 53 | } 54 | } 55 | } 56 | 57 | /** 58 | * Runs the action (which presumably performs some SQL queries) in the context of a transaction. 59 | *

60 | * If the action throws an exception, the transaction will be retried assuming that the supplied 61 | * {@link Retry} instance does not rethrow the exception. 62 | * 63 | * @throws IllegalArgumentException if a transaction is already in progress on the supplied connection. 64 | */ 65 | public static T runWithRetry(Connection c, Retry retry, SQLAction action) throws SQLException { 66 | if (!c.getAutoCommit()) { 67 | throw new IllegalArgumentException("The supplied connection must be in auto-commit mode (retrying an action " + 68 | "on a connection with an open connection probably won't do what you expect!)"); 69 | } 70 | 71 | while (true) { 72 | try { 73 | return Transactionally.run(c, action); 74 | } catch (RuntimeException e) { 75 | retry.consider(e); 76 | } catch (SQLException e) { 77 | retry.consider(e); 78 | } 79 | } 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/RowReadBuilder.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import java.util.ArrayList; 4 | import java.util.Collection; 5 | import java.util.List; 6 | import java.util.function.*; 7 | 8 | /** 9 | * A convenience wrapper around the {@link Reads#list(Collection)} functionality that avoids you having to track column indexes. 10 | *

11 | *

 12 |  * RowReadBuilder rrb = RowReadBuilder.create();
 13 |  * Supplier<String> names = rrb.add(sql("name"), String.class);
 14 |  * IntSupplier ages = rrb.addInt(sql("age"));
 15 |  *
 16 |  * for (List<Object> row : mdbi.queryList(sql("select ", columns, " from people"), rrb.build())) {
 17 |  *     rrb.bindSuppliers(row);
 18 |  *     System.out.println("Hello " + names.get() + " of age " + ages.get());
 19 |  * }
 20 |  * 
21 | */ 22 | public class RowReadBuilder { 23 | private final List columns = new ArrayList<>(); 24 | private final List> reads = new ArrayList<>(); 25 | private final List> suppliers = new ArrayList<>(); 26 | 27 | private static class CompletableSupplier implements Supplier { 28 | public T value; 29 | 30 | @Override 31 | public T get() { 32 | if (value == null) { 33 | throw new IllegalStateException("You must bindSuppliers on the corresponding RowReadBuilder before invoking a Supplier that it returns"); 34 | } 35 | 36 | return value; 37 | } 38 | } 39 | 40 | private RowReadBuilder() {} 41 | 42 | public static RowReadBuilder create() { 43 | return new RowReadBuilder(); 44 | } 45 | 46 | /** Use the supplied row to bind all the {@code Supplier} objects that we have returned. */ 47 | @SuppressWarnings("unchecked") 48 | public void bindSuppliers(List row) { 49 | for (int i = 0; i < row.size(); i++) { 50 | ((CompletableSupplier)suppliers.get(i)).value = row.get(i); 51 | } 52 | } 53 | 54 | /** Returns comma delimited column list */ 55 | public SQL buildColumns() { 56 | return SQL.commaSeparate(columns.iterator()); 57 | } 58 | 59 | /** Returns how to interpret a {@code ResultSet} as a row */ 60 | public Read> build() { 61 | return Reads.list(reads); 62 | } 63 | 64 | public Supplier add(SQL column, Class klass) { 65 | return add(column, Reads.useContext(klass)); 66 | } 67 | 68 | private CompletableSupplier add(SQL column, Read read) { 69 | columns.add(column); 70 | reads.add(read); 71 | 72 | final CompletableSupplier supplier = new CompletableSupplier<>(); 73 | suppliers.add(supplier); 74 | return supplier; 75 | } 76 | 77 | // Very boring repetitive code below this line to deal with each prim type 78 | 79 | public BooleanSupplier addBoolean(SQL column) { 80 | return addBoolean(column, Reads.useContext(boolean.class)); 81 | } 82 | 83 | private BooleanSupplier addBoolean(SQL column, Read read) { 84 | return add(column, read)::get; 85 | } 86 | 87 | public IntSupplier addInt(SQL 
column) { 88 | return addInt(column, Reads.useContext(int.class)); 89 | } 90 | 91 | private IntSupplier addInt(SQL column, Read read) { 92 | return add(column, read)::get; 93 | } 94 | 95 | public LongSupplier addLong(SQL column) { 96 | return addLong(column, Reads.useContext(long.class)); 97 | } 98 | 99 | private LongSupplier addLong(SQL column, Read read) { 100 | return add(column, read)::get; 101 | } 102 | 103 | public DoubleSupplier addDouble(SQL column) { 104 | return addDouble(column, Reads.useContext(double.class)); 105 | } 106 | 107 | private DoubleSupplier addDouble(SQL column, Read read) { 108 | return add(column, read)::get; 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/PeekedResultSet.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import java.sql.ResultSet; 4 | import java.sql.SQLException; 5 | 6 | // A ResultSet that initially pretends to be on row N-1, when the underlying ResultSet is actually on row N. 7 | // NB: the supplied ResultSet is assumed to be on a valid row initially. 8 | class PeekedResultSet 9 | // NB: just delegating all the update/get methods is not strictly right, because if someone tries to 10 | // use them before we have used the peeked row, we would need to forceUsePeekedRow() first. 11 | // This doesn't matter for my application (because we ask nicely in the Read docs not to do this :-) 12 | // so I conveniently forget about it. 
13 | extends DelegatingResultSet { 14 | private boolean usedPeekedRow = false; 15 | 16 | public PeekedResultSet(ResultSet rs) { 17 | super(rs); 18 | } 19 | 20 | public boolean isUsedPeekedRow() { 21 | return usedPeekedRow; 22 | } 23 | 24 | @Override 25 | public boolean next() throws SQLException { 26 | if (!usedPeekedRow) { 27 | usedPeekedRow = true; 28 | return true; 29 | } else { 30 | return rs.next(); 31 | } 32 | } 33 | 34 | // Forces usedPeekedRow to true. We only want to do this at the last possible 35 | // moment when there is no alternative, because moving back in the ResultSet 36 | // might not be supported (forward-only ResultSets are the common case). 37 | // 38 | // If returns false, we are certainly not on a valid row. If returns true, we might be. 39 | private boolean forceUsePeekedRow() throws SQLException { 40 | if (!usedPeekedRow) { 41 | usedPeekedRow = true; 42 | if (!rs.previous()) return false; 43 | } 44 | 45 | return true; 46 | } 47 | 48 | @Override 49 | public boolean previous() throws SQLException { 50 | return forceUsePeekedRow() && rs.previous(); 51 | 52 | } 53 | 54 | @Override 55 | public boolean isBeforeFirst() throws SQLException { 56 | return usedPeekedRow ? 
rs.isBeforeFirst() : rs.isFirst(); 57 | } 58 | 59 | @Override 60 | public boolean isAfterLast() throws SQLException { 61 | return usedPeekedRow && rs.isAfterLast(); 62 | } 63 | 64 | @Override 65 | public boolean isFirst() throws SQLException { 66 | return forceUsePeekedRow() && rs.isFirst(); 67 | } 68 | 69 | @Override 70 | public boolean isLast() throws SQLException { 71 | return usedPeekedRow && rs.isLast(); 72 | } 73 | 74 | @Override 75 | public void beforeFirst() throws SQLException { 76 | usedPeekedRow = true; 77 | rs.beforeFirst(); 78 | } 79 | 80 | @Override 81 | public void afterLast() throws SQLException { 82 | usedPeekedRow = true; 83 | rs.afterLast(); 84 | } 85 | 86 | @Override 87 | public boolean first() throws SQLException { 88 | usedPeekedRow = true; 89 | return rs.first(); 90 | } 91 | 92 | @Override 93 | public boolean last() throws SQLException { 94 | usedPeekedRow = true; 95 | return rs.last(); 96 | } 97 | 98 | @Override 99 | public int getRow() throws SQLException { 100 | return usedPeekedRow ? 
rs.getRow() : rs.getRow() - 1; 101 | } 102 | 103 | @Override 104 | public boolean absolute(int row) throws SQLException { 105 | usedPeekedRow = true; 106 | return rs.absolute(row); 107 | } 108 | 109 | @Override 110 | public boolean relative(int rows) throws SQLException { 111 | if (usedPeekedRow) { 112 | return rs.relative(rows); 113 | } else { 114 | usedPeekedRow = true; 115 | if (rows == 0) { 116 | return relative(0); 117 | } else if (rows > 0) { 118 | return relative(rows - 1); 119 | } else { 120 | return relative(rows + 1); 121 | } 122 | } 123 | } 124 | } -------------------------------------------------------------------------------- /src/test/java/userpackage/MDBIRetryTest.java: -------------------------------------------------------------------------------- 1 | package userpackage; 2 | 3 | import org.junit.Rule; 4 | import org.junit.Test; 5 | import org.junit.rules.TemporaryFolder; 6 | import uk.co.omegaprime.mdbi.*; 7 | 8 | import java.io.File; 9 | import java.io.IOException; 10 | import java.lang.reflect.UndeclaredThrowableException; 11 | import java.sql.Connection; 12 | import java.sql.DriverManager; 13 | import java.sql.SQLException; 14 | import java.util.Arrays; 15 | import java.util.HashSet; 16 | import java.util.concurrent.ExecutionException; 17 | import java.util.concurrent.ExecutorService; 18 | import java.util.concurrent.Executors; 19 | import java.util.concurrent.Future; 20 | import java.util.function.Supplier; 21 | 22 | import static org.junit.Assert.assertEquals; 23 | import static org.junit.Assert.assertTrue; 24 | import static uk.co.omegaprime.mdbi.MDBI.sql; 25 | 26 | public class MDBIRetryTest { 27 | @Rule 28 | public final TemporaryFolder temporaryFolder = new TemporaryFolder(); 29 | 30 | @Test(expected=SQLException.class) 31 | public void failIfDeadlockyAndNoRetry() throws SQLException, IOException, ExecutionException, InterruptedException { 32 | tryDeadlockyTransaction(Retries::nothing); 33 | } 34 | 35 | @Test 36 | public void 
package userpackage;

import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import uk.co.omegaprime.mdbi.*;

import java.io.File;
import java.io.IOException;
import java.lang.reflect.UndeclaredThrowableException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.function.Supplier;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static uk.co.omegaprime.mdbi.MDBI.sql;

/**
 * Verifies MDBI's retry-policy plumbing against a real lock conflict: one connection holds an
 * open write transaction on a file-backed SQLite database while a second connection tries to
 * insert, which fails with a "database is locked" error until the first commits.
 */
public class MDBIRetryTest {
    @Rule
    public final TemporaryFolder temporaryFolder = new TemporaryFolder();

    // With the no-op retry policy the locked insert must surface as an SQLException.
    @Test(expected=SQLException.class)
    public void failIfDeadlockyAndNoRetry() throws SQLException, IOException, ExecutionException, InterruptedException {
        tryDeadlockyTransaction(Retries::nothing);
    }

    // With a policy that sleeps and swallows "is locked" errors, the insert eventually succeeds
    // once the background thread releases the blocking transaction.
    @Test
    public void succeedIfDeadlockyAndRetry() throws SQLException, IOException, ExecutionException, InterruptedException {
        tryDeadlockyTransaction(() -> new Retry() {
            @Override
            public <T extends Exception> void consider(T e) throws T {
                // NOTE(review): e.getMessage() can be null for some drivers, which would NPE
                // here before the instanceof guard's message check — consider a null guard.
                if (e instanceof SQLException && e.getMessage().contains("is locked")) {
                    try {
                        Thread.sleep(100);
                    } catch (InterruptedException _interrupted) {
                        Thread.currentThread().interrupt();
                    }
                } else {
                    throw e;
                }
            }
        });
    }

    // Shared scenario: conn1 opens a transaction that locks the database; a background thread
    // releases it after 5s; meanwhile conn2 attempts an insert under the supplied retry policy.
    private void tryDeadlockyTransaction(Supplier<Retry> retryPolicy) throws SQLException, IOException, ExecutionException, InterruptedException {
        // Must be file-backed: two connections to ":memory:" would see different databases.
        final File tempFile = temporaryFolder.newFile();
        final Context ctxt = Context.DEFAULT;

        try (final Connection conn1 = DriverManager.getConnection("jdbc:sqlite:" + tempFile.toString());
             final Connection conn2 = DriverManager.getConnection("jdbc:sqlite:" + tempFile.toString())) {
            final MDBI m1 = MDBI.of(ctxt, conn1);
            // Using unprepared statement here because of a bug in the SQLite library:
            // https://github.com/xerial/sqlite-jdbc/pull/72
            final MDBI m2 = MDBI.of(ctxt, conn2).withPrepared(false);

            m1.execute(sql("create table tab (id int)"));
            m1.execute(sql("insert into tab (id) values (1)"));

            // Open a transaction on conn1 that holds the database lock.
            conn1.setAutoCommit(false);
            m1.execute(sql("insert into tab (id) values (2)"));

            final ExecutorService executor = Executors.newSingleThreadExecutor();
            try {
                // Release the lock (by re-enabling auto-commit, which commits) after 5 seconds.
                final Future<?> future = executor.submit(() -> {
                    try {
                        Thread.sleep(5000);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    } finally {
                        try {
                            conn1.setAutoCommit(true);
                        } catch (SQLException e) {
                            throw new UndeclaredThrowableException(e);
                        }
                    }
                });

                // Using "update" rather than the more natural "execute" because of a bug in the SQLite library:
                // https://github.com/xerial/sqlite-jdbc/pull/72
                m2.withRetryPolicy(retryPolicy).update(sql("insert into tab (id) values (3)"));

                // Deliver any background thread exceptions to the main thread
                future.get();
            } finally {
                executor.shutdown();
            }

            // All three inserts (two from conn1's committed transaction, one retried from conn2) are visible.
            assertEquals(new HashSet<>(Arrays.asList(1, 2, 3)),
                    m2.query(sql("select id from tab"), BatchReads.asSet(Reads.useContext(int.class))));
        }
    }
}
isCurrency(int column) throws SQLException { 47 | return rsmd.isCurrency(ix(column)); 48 | } 49 | 50 | @Override 51 | public int isNullable(int column) throws SQLException { 52 | return rsmd.isNullable(ix(column)); 53 | } 54 | 55 | @Override 56 | public boolean isSigned(int column) throws SQLException { 57 | return rsmd.isSigned(ix(column)); 58 | } 59 | 60 | @Override 61 | public int getColumnDisplaySize(int column) throws SQLException { 62 | return rsmd.getColumnDisplaySize(ix(column)); 63 | } 64 | 65 | @Override 66 | public String getColumnLabel(int column) throws SQLException { 67 | return rsmd.getColumnLabel(ix(column)); 68 | } 69 | 70 | @Override 71 | public String getColumnName(int column) throws SQLException { 72 | return rsmd.getColumnName(ix(column)); 73 | } 74 | 75 | @Override 76 | public String getSchemaName(int column) throws SQLException { 77 | return rsmd.getSchemaName(ix(column)); 78 | } 79 | 80 | @Override 81 | public int getPrecision(int column) throws SQLException { 82 | return rsmd.getPrecision(ix(column)); 83 | } 84 | 85 | @Override 86 | public int getScale(int column) throws SQLException { 87 | return rsmd.getScale(ix(column)); 88 | } 89 | 90 | @Override 91 | public String getTableName(int column) throws SQLException { 92 | return rsmd.getTableName(ix(column)); 93 | } 94 | 95 | @Override 96 | public String getCatalogName(int column) throws SQLException { 97 | return rsmd.getCatalogName(ix(column)); 98 | } 99 | 100 | @Override 101 | public int getColumnType(int column) throws SQLException { 102 | return rsmd.getColumnType(ix(column)); 103 | } 104 | 105 | @Override 106 | public String getColumnTypeName(int column) throws SQLException { 107 | return rsmd.getColumnTypeName(ix(column)); 108 | } 109 | 110 | @Override 111 | public boolean isReadOnly(int column) throws SQLException { 112 | return rsmd.isReadOnly(ix(column)); 113 | } 114 | 115 | @Override 116 | public boolean isWritable(int column) throws SQLException { 117 | return 
rsmd.isWritable(ix(column)); 118 | } 119 | 120 | @Override 121 | public boolean isDefinitelyWritable(int column) throws SQLException { 122 | return rsmd.isDefinitelyWritable(ix(column)); 123 | } 124 | 125 | @Override 126 | public String getColumnClassName(int column) throws SQLException { 127 | return rsmd.getColumnClassName(ix(column)); 128 | } 129 | 130 | @Override 131 | public T unwrap(Class iface) throws SQLException { 132 | return rsmd.unwrap(iface); 133 | } 134 | 135 | @Override 136 | public boolean isWrapperFor(Class iface) throws SQLException { 137 | return rsmd.isWrapperFor(iface); 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/ContiguouslyFilteredResultSet.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import java.sql.ResultSet; 4 | import java.sql.SQLException; 5 | 6 | class ContiguouslyFilteredResultSet extends DelegatingResultSet { 7 | public interface ShouldContinue { 8 | boolean ok() throws SQLException; 9 | } 10 | 11 | // NB: this predicate will in general make use of "rs", so make sure that's in the right state before calling this 12 | private final ShouldContinue rowMatches; 13 | 14 | private int row = 0; 15 | 16 | // 0: not started 17 | // 1: started 18 | // 2: finished 19 | private byte state = 0; 20 | 21 | public ContiguouslyFilteredResultSet(ResultSet rs, ShouldContinue rowMatches) { 22 | super(rs); 23 | this.rowMatches = rowMatches; 24 | } 25 | 26 | private boolean rowOK() throws SQLException { 27 | // Alas, it is *not* sufficient to just use rowMatches to delimit the valid region 28 | // of the ResultSet. Reason: in some pathological cases we might have some rows *before* 29 | // our initial row which match the predicate, and we don't want to include those. This 30 | // is the purpose of this row index, which can otherwise get away without. 
31 | return row > 0 && rowMatches.ok(); 32 | } 33 | 34 | @Override 35 | public boolean next() throws SQLException { 36 | if (state == 2) return false; 37 | 38 | row++; 39 | if (!rs.next() || !rowOK()) { 40 | state = 2; 41 | return false; 42 | } else { 43 | return true; 44 | } 45 | } 46 | 47 | @Override 48 | public boolean previous() throws SQLException { 49 | if (state == 0) return false; 50 | 51 | row--; 52 | if (!rs.previous() || !rowOK()) { 53 | state = 0; 54 | return false; 55 | } else { 56 | return true; 57 | } 58 | } 59 | 60 | @Override 61 | public boolean isBeforeFirst() throws SQLException { 62 | return state == 0; 63 | } 64 | 65 | @Override 66 | public boolean isAfterLast() throws SQLException { 67 | return state == 2; 68 | } 69 | 70 | @Override 71 | public boolean isFirst() throws SQLException { 72 | if (state == 1) { 73 | try { 74 | return !previous(); 75 | } finally { 76 | if (!next()) { 77 | throw new SQLException("Failed to move back to original position after isFirst() test"); 78 | } 79 | } 80 | } else { 81 | return false; 82 | } 83 | } 84 | 85 | @Override 86 | public boolean isLast() throws SQLException { 87 | if (state == 1) { 88 | try { 89 | return !next(); 90 | } finally { 91 | if (!previous()) { 92 | throw new SQLException("Failed to move back to original position after isLast() test"); 93 | } 94 | } 95 | } else { 96 | return false; 97 | } 98 | } 99 | 100 | @SuppressWarnings("StatementWithEmptyBody") 101 | @Override 102 | public void beforeFirst() throws SQLException { 103 | while (previous()); 104 | } 105 | 106 | @SuppressWarnings("StatementWithEmptyBody") 107 | @Override 108 | public void afterLast() throws SQLException { 109 | while (next()); 110 | } 111 | 112 | @Override 113 | public boolean first() throws SQLException { 114 | beforeFirst(); 115 | return next(); 116 | } 117 | 118 | @Override 119 | public boolean last() throws SQLException { 120 | afterLast(); 121 | return previous(); 122 | } 123 | 124 | @Override 125 | public int getRow() 
throws SQLException { 126 | if (state != 1) throw new SQLException("ResultSet.getRow() on ResultSet with no current row"); 127 | 128 | return row; 129 | } 130 | 131 | @Override 132 | public boolean absolute(int row) throws SQLException { 133 | if (row == 0) { 134 | beforeFirst(); 135 | return false; 136 | } else if (row > 0) { 137 | // If target row is row+1 we need to go forward 1. If target row is row-1 we need to go backward 1. 138 | return relative(row - this.row); 139 | } else { 140 | // If row is -1 then we need to skip backward 0 additional rows after the first 141 | return last() && relative(row + 1); 142 | } 143 | } 144 | 145 | @Override 146 | public boolean relative(int rows) throws SQLException { 147 | if (rows == 0) { 148 | return state == 1; 149 | } else if (rows > 0) { 150 | for (int i = 0; i < rows; i++) { 151 | if (!next()) return false; 152 | } 153 | return true; 154 | } else { 155 | for (int i = 0; i < -rows; i++) { 156 | if (!previous()) return false; 157 | } 158 | return true; 159 | } 160 | } 161 | } 162 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/FunctionRead.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import javax.annotation.Nonnull; 4 | import java.lang.reflect.*; 5 | import java.sql.ResultSet; 6 | import java.sql.SQLException; 7 | import java.util.ArrayList; 8 | import java.util.Arrays; 9 | import java.util.Collection; 10 | import java.util.List; 11 | import java.util.stream.Collectors; 12 | 13 | class FunctionRead implements Read { 14 | private static Method getMethod(Object x) { 15 | final List candidates = new ArrayList<>(); 16 | for (Method method : x.getClass().getDeclaredMethods()) { 17 | final int modifiers = method.getModifiers(); 18 | if (Modifier.isPublic(modifiers) && !Modifier.isStatic(modifiers)) { 19 | candidates.add(method); 20 | } 21 | } 22 | 23 | if 
(candidates.size() != 1) { 24 | throw new IllegalArgumentException("The object you supply to Reads.ofFunction must have " + 25 | "exactly one public instance method, but we found " + candidates); 26 | } 27 | 28 | return candidates.get(0); 29 | } 30 | 31 | private final Method method; 32 | private final Class klass; // INVARIANT: equal to the return type of "method" 33 | private final Object receiver; 34 | private final Collection> reads; 35 | 36 | public FunctionRead(Class klass, Object receiver) { 37 | this.method = getMethod(receiver); 38 | this.klass = checkExtends(method.getGenericReturnType(), klass); 39 | this.receiver = receiver; 40 | this.reads = Arrays.asList(method.getParameterTypes()).stream().map(ContextRead::new).collect(Collectors.toList()); 41 | 42 | // You might be surprised that I have do to this, but it's necessary if e.g. the receiver is a package-private class 43 | // (which it normally will be, if it's an instance of an anonymous inner class!) 44 | method.setAccessible(true); 45 | } 46 | 47 | public FunctionRead(Class klass, Object receiver, Collection> reads) { 48 | this.method = getMethod(receiver); 49 | this.klass = checkExtends(method.getGenericReturnType(), klass); 50 | this.receiver = receiver; 51 | this.reads = reads; 52 | 53 | method.setAccessible(true); 54 | Reflection.checkReadsConformance("Method " + method, Arrays.asList(method.getParameterTypes()), reads); 55 | } 56 | 57 | private static Class checkExtends(Type type, Class mustExtend) { 58 | if (type instanceof Class) { 59 | return checkExtends((Class)type, mustExtend); 60 | } else if (type instanceof ParameterizedType) { 61 | return checkExtends(((ParameterizedType)type).getRawType(), mustExtend); 62 | } else if (type instanceof GenericArrayType) { 63 | final GenericArrayType gat = (GenericArrayType)type; 64 | if (!mustExtend.equals(Object.class) && !mustExtend.isArray()) { 65 | throw new IllegalArgumentException("Found type " + gat + " must extend supplied class " + mustExtend); 
66 | } 67 | } else { 68 | // i.e. WildcardType/TypeVariable: not sure there are any sensible extra checks we can do 69 | } 70 | 71 | // In the generic case we will just assume that our method will return exactly the type that the user 72 | // supplied. We will check this condition below (using Class.cast) so it's not dangerous, just prevents 73 | // the error from being detected earlier. 74 | return mustExtend; 75 | } 76 | 77 | private static Class checkExtends(Class klass, Class mustExtend) { 78 | if (!mustExtend.isAssignableFrom(klass)) { 79 | throw new IllegalArgumentException("Found class " + klass + " must extend supplied class " + mustExtend); 80 | } else { 81 | // We know this is safe because mustExtend is assignable from klass, so klass must extend mustExtend 82 | //noinspection unchecked 83 | return (Class)klass; 84 | } 85 | } 86 | 87 | @Override 88 | public Class getElementClass() { 89 | return klass; 90 | } 91 | 92 | @Override 93 | public BoundRead bind(Read.Context ctxt) { 94 | final List> boundReads = reads.stream().map(r -> r.bind(ctxt)).collect(Collectors.toList()); 95 | return new BoundRead() { 96 | @Override 97 | public int arity() { 98 | return boundReads.stream().mapToInt(BoundRead::arity).sum(); 99 | } 100 | 101 | @Override 102 | public T get(@Nonnull ResultSet rs, @Nonnull IndexRef ix) throws SQLException { 103 | final Object[] arguments = new Object[boundReads.size()]; 104 | for (int i = 0; i < arguments.length; i++) { 105 | arguments[i] = boundReads.get(i).get(rs, ix); 106 | } 107 | return klass.cast(Reflection.invokeUnchecked(method, receiver, arguments)); 108 | } 109 | }; 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/Context.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import java.math.BigDecimal; 4 | import java.time.LocalDate; 5 | import java.time.LocalDateTime; 
6 | import java.time.LocalTime; 7 | import java.util.Map; 8 | 9 | /** 10 | * An immutable object describing how to map Java types to SQL ones, and vice versa. 11 | *

12 | * In order to construct one of these, you probably want to either just use {@link #DEFAULT} 13 | * or (if you want some customization), to use {@link Context.Builder}. 14 | */ 15 | public class Context { 16 | public static final Context DEFAULT = Builder.createDefault().build(); 17 | 18 | /** A mutable builder allowing you to construct an immutable {@link Context} object. */ 19 | public static class Builder { 20 | private final Reads.Map readers; 21 | private final Writes.Map writers; 22 | 23 | /** Returns a builder with no type mappings whatsoever — not even the built-in ones. */ 24 | public static Builder createEmpty() { 25 | return new Builder(new Reads.Map(), new Writes.Map()); 26 | } 27 | 28 | /** Returns a builder with the default set of type mappings, covering basic JDK types. */ 29 | public static Builder createDefault() { 30 | final Builder context = createEmpty(); 31 | context.register(boolean.class, Writes.PRIM_BOOLEAN, Reads.PRIM_BOOLEAN); 32 | context.register(Boolean.class, Writes.BOOLEAN, Reads.BOOLEAN); 33 | context.register(byte.class, Writes.PRIM_BYTE, Reads.PRIM_BYTE); 34 | context.register(Byte.class, Writes.BYTE, Reads.BYTE); 35 | context.register(char.class, Writes.PRIM_CHAR, Reads.PRIM_CHAR); 36 | context.register(Character.class, Writes.CHARACTER, Reads.CHARACTER); 37 | context.register(short.class, Writes.PRIM_SHORT, Reads.PRIM_SHORT); 38 | context.register(Short.class, Writes.SHORT, Reads.SHORT); 39 | context.register(int.class, Writes.PRIM_INT, Reads.PRIM_INT); 40 | context.register(Integer.class, Writes.INTEGER, Reads.INTEGER); 41 | context.register(long.class, Writes.PRIM_LONG, Reads.PRIM_LONG); 42 | context.register(Long.class, Writes.LONG, Reads.LONG); 43 | context.register(float.class, Writes.PRIM_FLOAT, Reads.PRIM_FLOAT); 44 | context.register(Float.class, Writes.FLOAT, Reads.FLOAT); 45 | context.register(double.class, Writes.PRIM_DOUBLE, Reads.PRIM_DOUBLE); 46 | context.register(Double.class, Writes.DOUBLE, Reads.DOUBLE); 47 | 
context.register(String.class, Writes.STRING, Reads.STRING); 48 | context.register(LocalDate.class, Writes.LOCAL_DATE, Reads.LOCAL_DATE); 49 | context.register(LocalTime.class, Writes.LOCAL_TIME, Reads.LOCAL_TIME); 50 | context.register(LocalDateTime.class, Writes.LOCAL_DATE_TIME, Reads.LOCAL_DATE_TIME); 51 | context.register(byte[].class, Writes.BYTE_ARRAY, Reads.BYTE_ARRAY); 52 | context.register(BigDecimal.class, Writes.BIG_DECIMAL, Reads.BIG_DECIMAL); 53 | return context; 54 | } 55 | 56 | public static Builder createFromContext(Context context) { 57 | return new Builder(context.readers, context.writers); 58 | } 59 | 60 | private Builder(Reads.Map readers, Writes.Map writers) { 61 | this.readers = readers; 62 | this.writers = writers; 63 | } 64 | 65 | /** Tells the builder how you want to map a Java type to SQL, both going in to SQL ({@code Write}) and out of SQL ({@code Read}) */ 66 | public Builder register(Class klass, Write write, Read read) { 67 | readers.put(klass, read); 68 | writers.put(klass, write); 69 | return this; 70 | } 71 | 72 | /** Tells the builder how you want to construct a Java value from a SQL value */ 73 | public Builder registerRead(Class klass, Read read) { 74 | readers.put(klass, read); 75 | return this; 76 | } 77 | 78 | /** Tells the builder how you want to construct a SQL value from a Java value */ 79 | public Builder registerWrite(Class klass, Write write) { 80 | writers.put(klass, write); 81 | return this; 82 | } 83 | 84 | /** Convenience for registered a JavaBean for both reading and writing simultaneously */ 85 | public Builder registerBean(Class klass, String... 
fields) { 86 | return register(klass, Writes.bean(klass, fields), Reads.bean(klass, fields)); 87 | } 88 | 89 | public Context build() { 90 | return new Context(new Reads.Map(readers), new Writes.Map(writers)); 91 | } 92 | } 93 | 94 | private final Reads.Map readers; 95 | private final Writes.Map writers; 96 | 97 | private Context(Reads.Map readers, Writes.Map writers) { 98 | this.readers = readers; 99 | this.writers = writers; 100 | } 101 | 102 | public Read.Context readContext() { return readers; } 103 | public Write.Context writeContext() { return writers; } 104 | } 105 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/MatrixBatchReadBuilder.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import java.lang.reflect.Array; 4 | import java.sql.SQLException; 5 | import java.util.ArrayList; 6 | import java.util.List; 7 | import java.util.function.Function; 8 | import java.util.function.Supplier; 9 | 10 | /** 11 | * A convenience wrapper around the {@link BatchReads#matrix(Class[])} functionality that avoids you having to track column indexes. 12 | *

13 | *

 14 |  * MatrixBatchReadBuilder mrb = MatrixBatchReadBuilder.create();
 15 |  * Supplier<String[]> names = mrb.add(sql("name"), String.class);
 16 |  * Supplier<int[]> ages = mrb.addInt(sql("age"));
 17 |  *
 18 |  * int n = mrb.buildAndExecute(mdbi, columns -> sql("select ", columns, " from people"));
 19 |  *
 20 |  * for (int i = 0; i < n; i++) {
 21 |  *     System.out.println("Hello " + names.get()[i] + " of age " + ages.get()[i]);
 22 |  * }
 23 |  * 
24 | */ 25 | public class MatrixBatchReadBuilder { 26 | private final List columns = new ArrayList<>(); 27 | private final List> reads = new ArrayList<>(); 28 | private final List> suppliers = new ArrayList<>(); 29 | 30 | private static class CompletableSupplier implements Supplier { 31 | public T value; 32 | 33 | @Override 34 | public T get() { 35 | if (value == null) { 36 | throw new IllegalStateException("You must bindSuppliers on the corresponding MatrixBatchReadBuilder before invoking a Supplier that it returns"); 37 | } 38 | 39 | return value; 40 | } 41 | } 42 | 43 | private MatrixBatchReadBuilder() {} 44 | 45 | public static MatrixBatchReadBuilder create() { return new MatrixBatchReadBuilder(); } 46 | 47 | /** A convenience that builds the SQL query and executes it with an appropriate reader all in one go. Returns the row count. */ 48 | public int buildAndExecute(MDBI mdbi, Function mkSelect) throws SQLException { 49 | return bindSuppliers(mdbi.query(mkSelect.apply(buildColumns()), build())); 50 | } 51 | 52 | /** Use the supplied matrix to bind all the {@code Supplier} objects that we have returned. Returns the row count. 
*/ 53 | @SuppressWarnings("unchecked") 54 | public int bindSuppliers(Object[] matrix) { 55 | // For now I'm intentionally going to allow this to be completed > 1 time, just in case anyone 56 | // wants to use this in a scenario where there is > 1 matrix returned from a particular query 57 | for (int i = 0; i < matrix.length; i++) { 58 | ((CompletableSupplier)suppliers.get(i)).value = matrix[i]; 59 | } 60 | 61 | return Array.getLength(matrix[0]); 62 | } 63 | 64 | /** Returns comma delimited column list */ 65 | public SQL buildColumns() { 66 | return SQL.commaSeparate(columns.iterator()); 67 | } 68 | 69 | /** Returns how to interpret a {@code ResultSet} as a matrix */ 70 | public BatchRead build() { 71 | return BatchReads.matrix(reads); 72 | } 73 | 74 | private Supplier addInternal(SQL column, Read read) { 75 | columns.add(column); 76 | reads.add(read); 77 | 78 | final CompletableSupplier supplier = new CompletableSupplier<>(); 79 | suppliers.add(supplier); 80 | return supplier; 81 | } 82 | 83 | public Supplier add(SQL column, Class klass) { 84 | return add(column, new ContextRead<>(klass)); 85 | } 86 | 87 | public Supplier add(SQL column, Read read) { 88 | return addInternal(column, read); 89 | } 90 | 91 | // Very boring repetitive code below this line to deal with each prim type 92 | 93 | public Supplier addBoolean(SQL column) { 94 | return addBoolean(column, Reads.useContext(boolean.class)); 95 | } 96 | 97 | public Supplier addBoolean(SQL column, Read read) { 98 | return addInternal(column, read); 99 | } 100 | 101 | public Supplier addByte(SQL column) { 102 | return addByte(column, Reads.useContext(byte.class)); 103 | } 104 | 105 | public Supplier addByte(SQL column, Read read) { 106 | return addInternal(column, read); 107 | } 108 | 109 | public Supplier addChar(SQL column) { 110 | return addChar(column, Reads.useContext(char.class)); 111 | } 112 | 113 | public Supplier addChar(SQL column, Read read) { 114 | return addInternal(column, read); 115 | } 116 | 117 | 
public Supplier addShort(SQL column) { 118 | return addShort(column, Reads.useContext(short.class)); 119 | } 120 | 121 | public Supplier addShort(SQL column, Read read) { 122 | return addInternal(column, read); 123 | } 124 | 125 | public Supplier addInt(SQL column) { 126 | return addInt(column, Reads.useContext(int.class)); 127 | } 128 | 129 | public Supplier addInt(SQL column, Read read) { 130 | return addInternal(column, read); 131 | } 132 | 133 | public Supplier addLong(SQL column) { 134 | return addLong(column, Reads.useContext(long.class)); 135 | } 136 | 137 | public Supplier addLong(SQL column, Read read) { 138 | return addInternal(column, read); 139 | } 140 | 141 | public Supplier addFloat(SQL column) { 142 | return addFloat(column, Reads.useContext(float.class)); 143 | } 144 | 145 | public Supplier addFloat(SQL column, Read read) { 146 | return addInternal(column, read); 147 | } 148 | 149 | public Supplier addDouble(SQL column) { 150 | return addDouble(column, Reads.useContext(double.class)); 151 | } 152 | 153 | public Supplier addDouble(SQL column, Read read) { 154 | return addInternal(column, read); 155 | } 156 | } 157 | -------------------------------------------------------------------------------- /src/test/java/userpackage/Sample.java: -------------------------------------------------------------------------------- 1 | package userpackage; 2 | 3 | import uk.co.omegaprime.mdbi.*; 4 | 5 | import java.sql.Connection; 6 | import java.sql.DriverManager; 7 | import java.sql.SQLException; 8 | import java.time.LocalDate; 9 | import java.util.Arrays; 10 | import java.util.List; 11 | import java.util.Map; 12 | import java.util.function.Supplier; 13 | 14 | import static uk.co.omegaprime.mdbi.MDBI.sql; 15 | 16 | public class Sample { 17 | public static void main(String[] args) throws SQLException { 18 | // Getting started with MDBI is easy: all you need is a javax.sql.DataSource or Connection 19 | final Connection conn = 
DriverManager.getConnection("jdbc:sqlite::memory:"); 20 | MDBI.of(conn).execute(sql("create table people (name text, age int)")); 21 | 22 | // As well as executing raw SQL, it is of course possible to interpolate Java objects into the query 23 | final String name = "Michael O'Hare"; 24 | MDBI.of(conn).execute(sql("insert into people (name, age) values (").$(name).sql(", 30)")); 25 | 26 | // To get back Java objects from the database, simply use a "query" method rather than "execute" 27 | final int age = MDBI.of(conn).queryFirst(sql("select age from people"), int.class); 28 | println(age); // 30 29 | 30 | // Most people who work with JDBC have been burned at some point by the fact that it silently 31 | // turns NULLs in the database into zeroes, which is almost never what you want. MDBI removes 32 | // this misfeature, so the following query actually throws a NullPointerException: 33 | MDBI.of(conn).queryFirst(sql("select null"), int.class); 34 | 35 | // Of course, you can still retrieve nulls if you explicitly ask for them: 36 | final Integer nully = MDBI.of(conn).queryFirst(sql("select null"), Integer.class); 37 | println(nully); // null 38 | 39 | // Note that database NULLs *are* supported when retrieving primitive doubles and floats, where 40 | // they can be cleanly mapped to NaNs 41 | final double nullyDouble = MDBI.of(conn).queryFirst(sql("select null"), double.class); 42 | println(nullyDouble); // NaN 43 | 44 | // Batch insert is fully supported 45 | { 46 | final List names = Arrays.asList("Fry", "Leela"); 47 | final List ages = Arrays.asList(1025, 25); 48 | MDBI.of(conn).updateBatch(sql("insert into people (name, age) values (").$s(names).sql(",").$s(ages).sql(")")); 49 | } 50 | 51 | // You can even mix batched and non-batched bits of the query: 52 | final List moreNames = Arrays.asList("Foo", "Bar"); 53 | final int anotherAge = 13; 54 | MDBI.of(conn).updateBatch(sql("insert into people (name, age) values (").$s(moreNames).sql(",").$(anotherAge).sql(")")); 
55 | 56 | // MDBI has built-in support for IN clauses 57 | final List foundAges = MDBI.of(conn).queryList(sql("select age from people where name ").in("Fry", "Foo"), int.class); 58 | println(foundAges); // [1025, 13] 59 | 60 | // These IN clauses work properly with empty argument lists, even if the database does not normally support 61 | // nullary IN clauses (most databases don't -- SQLite is the only one I know of that supports them) 62 | final int count = MDBI.of(conn).queryFirst(sql("select count(*) from people where name not ").in(), int.class); 63 | println(count); // 5 64 | 65 | // There is transaction support that's really easy to use (no messing around with the confusing setAutocommit interface) 66 | try { 67 | Transactionally.run(conn, () -> { 68 | MDBI.of(conn).execute(sql("insert into people (name, age) values ('foo', 1)")); 69 | throw new IllegalArgumentException("Changed my mind!"); 70 | }); 71 | } catch (IllegalArgumentException _ignored) {} 72 | 73 | final int postTransactionCount = MDBI.of(conn).queryFirst(sql("select count(*) from people"), int.class); 74 | println(postTransactionCount); // 5 75 | 76 | // You can get structured types out of the database, not just primitives: 77 | final Map ageMap = MDBI.of(conn).queryMap(sql("select name, age from people"), String.class, int.class); 78 | println(ageMap.get("Fry")); // 1025 79 | 80 | // One that is particularly handy is the "matrix": 81 | final Object[] matrix = MDBI.of(conn).query(sql("select name, age from people order by name"), 82 | BatchReads.matrix(String.class, int.class)); 83 | final String[] nameColumn = (String[])matrix[0]; 84 | final int[] ageColumn = (int[])matrix[1]; 85 | println(nameColumn[0] + ": " + ageColumn[1]); // Bar: 13 86 | 87 | // Alternatively, that can be written as: 88 | { 89 | final MatrixBatchReadBuilder mrb = MatrixBatchReadBuilder.create(); 90 | final Supplier names = mrb.add(sql("name"), String.class); 91 | final Supplier ages = mrb.addInt(sql("int")); 92 | 
mrb.buildAndExecute(MDBI.of(conn), columns -> sql("select ", columns, " from people order by name")); 93 | println(names.get()[0] + ": " + ages.get()[1]); // Bar: 13 94 | } 95 | 96 | // MDBI has great support for Java primitive types, but it can also be extended with support for your own. Let's say 97 | // you have a bean, PersonBean, representing one row of the table. This works: 98 | final Context ctxt0 = Context.Builder.createDefault() 99 | .registerRead(PersonBean.class, Reads.bean(PersonBean.class, "Name", "Age")) 100 | .build(); 101 | final PersonBean bean = MDBI.of(ctxt0, conn).queryFirst(sql("select name, age from people order by name"), PersonBean.class); 102 | println(bean.getName()); // Bar 103 | 104 | // If you don't like beans, that's no problem. There are also strongly-typed interfaces suitable for immutable data types: 105 | // public class Person { 106 | // public final String name; 107 | // public final int age; 108 | // 109 | // public Person(String name, int age) { 110 | // this.name = name; 111 | // this.age = age; 112 | // } 113 | // } 114 | final Context ctxt1 = Context.Builder.createDefault() 115 | .registerRead(Person.class, Reads.tuple(Person.class)) 116 | .build(); 117 | final Person person = MDBI.of(ctxt1, conn).queryFirst(sql("select name, age from people order by name"), Person.class); 118 | println(person.name); // Bar 119 | 120 | // Custom types are also usable when you are constructing SQL queries -- you just use registerWrite instead of registerRead: 121 | final Context ctxt2 = Context.Builder.createDefault() 122 | .registerWrite(Person.class, TupleWriteBuilder.create() 123 | .add(String.class, p -> p.name) 124 | .add(int.class, p -> p.age) 125 | .build()) 126 | .build(); 127 | final Person personToSave = new Person("Max", 29); 128 | MDBI.of(ctxt2, conn).execute(sql("insert into people (name, age) values (").$(personToSave).sql(")")); 129 | 130 | // And there are lots more features besides: 131 | // * Deadlocks are automatically 
retried. (The retry policy is fully customizable.) 132 | // * Java 8 date and time types are fully supported 133 | // * Support for both PreparedStatement and Statement. This can be useful when working with a database 134 | // that e.g. scopes the lifetime of temp tables to a prepared statement. 135 | // All of this comes with no runtime dependencies at all -- you only need the JDK. 136 | } 137 | 138 | public static void println(Object x) { 139 | System.out.println(x); 140 | } 141 | 142 | public static class Person { 143 | public final String name; 144 | public final int age; 145 | 146 | public Person(String name, int age) { 147 | this.name = name; 148 | this.age = age; 149 | } 150 | } 151 | 152 | public static class PersonBean { 153 | private String name; 154 | private int age; 155 | 156 | public String getName() { return name; } 157 | public int getAge() { return age; } 158 | 159 | public void setName(String name) { this.name = name; } 160 | public void setAge (int age) { this.age = age; } 161 | } 162 | } 163 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/Reflection.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import java.lang.reflect.Constructor; 4 | import java.lang.reflect.InvocationTargetException; 5 | import java.lang.reflect.Method; 6 | import java.lang.reflect.UndeclaredThrowableException; 7 | import java.sql.SQLException; 8 | import java.util.*; 9 | 10 | class Reflection { 11 | private Reflection() {} 12 | 13 | public static Constructor getUniqueConstructor(Class klass) { 14 | final Constructor[] constructors = klass.getConstructors(); 15 | return chooseUniqueConstructor(klass, constructors); 16 | } 17 | 18 | @SuppressWarnings("unchecked") 19 | private static Constructor chooseUniqueConstructor(Class klass, Constructor[] constructors) { 20 | if (constructors.length == 0) { 21 | throw new 
IllegalArgumentException("No public constructors for " + klass); 22 | } else if (constructors.length > 1) { 23 | throw new IllegalArgumentException("Ambiguous public constructor for " + klass); 24 | } else { 25 | return (Constructor)constructors[0]; 26 | } 27 | } 28 | 29 | public static Constructor getCompatibleConstructor(Class klass, List> klasses) { 30 | final Constructor[] constructors = klass.getConstructors(); 31 | final List> candidates = new ArrayList<>(); 32 | 33 | // Sort of a crummy implementation of the overload rules: http://docs.oracle.com/javase/specs/jls/se7/html/jls-15.html#jls-15.12.2 34 | // It might be worth taking a dependency just to get this right. It's hard to do, and it's not our core competency. 35 | for (Constructor constructor : constructors) { 36 | final Class[] params = constructor.getParameterTypes(); 37 | if (params.length != klasses.size()) continue; 38 | 39 | boolean compatible = true; 40 | for (int i = 0; i < params.length; i++) { 41 | if (!params[i].isAssignableFrom(klasses.get(i))) { 42 | compatible = false; 43 | break; 44 | } 45 | } 46 | 47 | if (!compatible) continue; 48 | 49 | candidates.add(constructor); 50 | } 51 | 52 | boolean changed = true; 53 | while (changed) { 54 | changed = false; 55 | 56 | final Iterator> it = candidates.iterator(); 57 | while (it.hasNext()) { 58 | final Constructor constructor = it.next(); 59 | final Class[] params = constructor.getParameterTypes(); 60 | 61 | boolean atLeastOneOtherConstructorIsAtLeastAsSpecific = false; 62 | for (Constructor otherConstructor : candidates) { 63 | if (otherConstructor == constructor) continue; 64 | 65 | final Class[] otherParams = otherConstructor.getParameterTypes(); 66 | if (otherParams.length != params.length) continue; 67 | 68 | boolean otherConstructorIsAtLeastAsSpecific = true; 69 | for (int i = 0; i < params.length; i++) { 70 | if (!params[i].isAssignableFrom(otherParams[i])) { 71 | // We can accept something that "other" could not, so we are more specific 72 
| otherConstructorIsAtLeastAsSpecific = false; 73 | break; 74 | } 75 | } 76 | 77 | if (otherConstructorIsAtLeastAsSpecific) { 78 | // All of params[i] are assignable from otherParams[i] 79 | atLeastOneOtherConstructorIsAtLeastAsSpecific = true; 80 | break; 81 | } 82 | } 83 | 84 | if (atLeastOneOtherConstructorIsAtLeastAsSpecific) { 85 | changed = true; 86 | it.remove(); 87 | } 88 | } 89 | } 90 | 91 | return chooseUniqueConstructor(klass, candidates.toArray(new Constructor[candidates.size()])); 92 | } 93 | 94 | public static Constructor getBeanConstructor(Class klass) { 95 | try { 96 | return klass.getConstructor(new Class[0]); 97 | } catch (NoSuchMethodException e) { 98 | throw new IllegalArgumentException("Class " + klass + " must have a public no-args constructor"); 99 | } 100 | } 101 | 102 | public static T constructUnchecked(Constructor constructor, Object[] arguments) throws SQLException { 103 | try { 104 | return constructor.newInstance(arguments); 105 | } catch (InstantiationException e) { 106 | throw new IllegalStateException("Constructor " + constructor + " was not callable, though we should have already checked that", e); 107 | } catch (IllegalAccessException e) { 108 | throw new IllegalStateException("Constructor " + constructor + " was not accessible, though we should have already checked that", e); 109 | } catch (InvocationTargetException e) { 110 | throw rethrowInvocationTargetException(e); 111 | } 112 | } 113 | 114 | public static Object invokeUnchecked(Method method, Object receiver, Object[] arguments) throws SQLException { 115 | try { 116 | return method.invoke(receiver, arguments); 117 | } catch (IllegalAccessException e) { 118 | throw new IllegalStateException("Method " + method + " was not accessible, though we should have already checked that", e); 119 | } catch (InvocationTargetException e) { 120 | throw rethrowInvocationTargetException(e); 121 | } 122 | } 123 | 124 | private static RuntimeException 
rethrowInvocationTargetException(InvocationTargetException e) throws SQLException { 125 | final Throwable cause = e.getCause(); 126 | if (cause instanceof RuntimeException) { 127 | throw (RuntimeException)cause; 128 | } else if (cause instanceof Error) { 129 | throw (Error)cause; 130 | } else if (cause instanceof SQLException) { 131 | throw (SQLException)cause; 132 | } else { 133 | return new UndeclaredThrowableException(cause); 134 | } 135 | } 136 | 137 | public static void checkReadsConformance(String context, List> types, Collection> reads) { 138 | if (reads.size() != types.size()) { 139 | throw new IllegalArgumentException(context + " has " + types.size() + " elements but you supplied " + reads.size() + " readers"); 140 | } 141 | 142 | final Iterator> readsIt = reads.iterator(); 143 | for (int i = 0; i < reads.size(); i++) { 144 | final Read read = readsIt.next(); 145 | if (!types.get(i).isAssignableFrom(read.getElementClass())) { 146 | throw new IllegalArgumentException(context + " element " + i + " is of type " + types.get(i) + " but you supplied a reader for " + read.getElementClass()); 147 | } 148 | } 149 | } 150 | 151 | public static void checkWritesConformance(String context, List> types, Collection> writes) { 152 | if (writes.size() != types.size()) { 153 | throw new IllegalArgumentException(context + " has " + types.size() + " elements but you supplied " + writes.size() + " writers"); 154 | } 155 | } 156 | 157 | public static Method[] lookupBeanSetters(Class klass, Collection fields) { 158 | final HashMap setters = new HashMap<>(); 159 | for (Method m : klass.getMethods()) { 160 | if (m.getName().startsWith("set") && m.getParameterCount() == 1) { 161 | if (setters.put(m.getName().substring(3), m) != null) { 162 | throw new IllegalArgumentException("Class " + klass + " has multiple 1-arg methods called " + m.getName()); 163 | } 164 | } 165 | } 166 | 167 | final Method[] methods = new Method[fields.size()]; 168 | final Iterator fieldsIt = 
fields.iterator(); 169 | for (int i = 0; i < fields.size(); i++) { 170 | final String field = fieldsIt.next(); 171 | final Method setter = setters.get(field); 172 | if (setter == null) { 173 | throw new IllegalArgumentException("Class " + klass + " doesn't have a setter for " + field); 174 | } 175 | methods[i] = setter; 176 | } 177 | 178 | return methods; 179 | } 180 | 181 | public static Method[] lookupBeanGetters(Class klass, List fields) { 182 | final HashMap getters = new HashMap<>(); 183 | for (Method m : klass.getMethods()) { 184 | if (m.getName().startsWith("is") && m.getParameterCount() == 0) { 185 | final String property = m.getName().substring(2); 186 | if (getters.put(property, m) != null) { 187 | throw new IllegalArgumentException("Class " + klass + " has multiple getters for property " + property); 188 | } 189 | } else if (m.getName().startsWith("get") && m.getParameterCount() == 0) { 190 | final String property = m.getName().substring(3); 191 | if (getters.put(property, m) != null) { 192 | throw new IllegalArgumentException("Class " + klass + " has multiple getters for property " + property); 193 | } 194 | } 195 | } 196 | 197 | final Method[] methods = new Method[fields.size()]; 198 | for (int i = 0; i < fields.size(); i++) { 199 | final String field = fields.get(i); 200 | final Method setter = getters.get(field); 201 | if (setter == null) { 202 | throw new IllegalArgumentException("Class " + klass + " doesn't have a getter for " + field); 203 | } 204 | methods[i] = setter; 205 | } 206 | 207 | return methods; 208 | } 209 | } 210 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/SQLBuilder.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import javax.annotation.ParametersAreNonnullByDefault; 4 | import java.sql.Connection; 5 | import java.sql.PreparedStatement; 6 | import java.sql.SQLException; 7 | 
import java.util.*;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Collectors;

/** Accumulates SQL text, delegating each non-literal "hole" to the supplied visitor. */
class SQLBuilder {
    private final Consumer<Object> visitHole;
    private final StringBuilder stringBuilder = new StringBuilder();

    public SQLBuilder(Consumer<Object> visitHole) {
        this.visitHole = visitHole;
    }

    public void visitSQL(SQL sql) {
        for (Object arg : sql.args) {
            if (arg instanceof String) {
                visitSQLLiteral((String) arg);
            } else {
                visitHole.accept(arg);
            }
        }
    }

    public void visitSQLLiteral(String sql) {
        stringBuilder.append(sql);
    }

    public String build() {
        return stringBuilder.toString();
    }
}

/** Builds a {@link PreparedStatement}, rendering each hole as a comma-separated run of {@code ?} markers. */
class PreparedSQLBuilder {
    private final SQLBuilder builder;

    /** @param visitHole returns the arity (number of {@code ?} markers) contributed by the hole */
    public PreparedSQLBuilder(Function<Object, Integer> visitHole) {
        this.builder = new SQLBuilder(arg -> {
            final int arity = visitHole.apply(arg);
            for (int i = 0; i < arity; i++) {
                if (i != 0) builder.visitSQLLiteral(",");
                builder.visitSQLLiteral("?");
            }
        });
    }

    public void visitSQL(SQL sql) {
        builder.visitSQL(sql);
    }

    public PreparedStatement build(Connection connection) throws SQLException {
        return connection.prepareStatement(builder.build());
    }
}

/** Builds plain SQL text, rendering each hole as comma-separated literal SQL fragments. */
class UnpreparedSQLBuilder {
    private final SQLBuilder builder;

    /** @param visitHole returns the literal SQL fragments contributed by the hole */
    public UnpreparedSQLBuilder(Function<Object, List<String>> visitHole) {
        this.builder = new SQLBuilder(arg -> {
            final List<String> xs = visitHole.apply(arg);
            for (int i = 0; i < xs.size(); i++) {
                if (i != 0) builder.visitSQLLiteral(",");
                builder.visitSQLLiteral(xs.get(i));
            }
        });
    }

    public void visitSQL(SQL sql) {
        builder.visitSQL(sql);
    }

    public String build() {
        return builder.build();
    }
}

/**
 * Collects, for every hole in a batch statement, the per-row values to bind: scalar holes are
 * expanded to {@code size} copies, batch holes contribute their own collection.
 */
class BatchBuilder {
    private final int size;
    private final Write.Context wm;

    private final List<Collection<?>> collections = new ArrayList<>();

    public BatchBuilder(int size, Write.Context wm) {
        this.size = size;
        this.wm = wm;
    }

    public BoundWrite<?> visitHole(Object arg) {
        if (arg instanceof SQL.Hole) {
            final SQL.Hole<?> hole = (SQL.Hole<?>) arg;
            // A non-batched hole is repeated identically for every row of the batch
            collections.add(Collections.nCopies(size, hole.object));
            return hole.write.bind(wm);
        } else if (arg instanceof SQL.BatchHole) {
            final SQL.BatchHole<?> hole = (SQL.BatchHole<?>) arg;
            collections.add(hole.objects);
            return hole.write.bind(wm);
        } else {
            throw new IllegalStateException("Not expecting " + arg);
        }
    }

    public List<Collection<?>> build() {
        return collections;
    }
}

@ParametersAreNonnullByDefault
class BatchUnpreparedSQLBuilder {
    private BatchUnpreparedSQLBuilder() {}

    /**
     * Renders a batch statement as text, returning the batch size and an iterator producing
     * one fully-substituted SQL string per row.
     */
    public static Map.Entry<Integer, Iterator<String>> build(SQL sql, Write.Context wm) {
        final BatchBuilder batchBuilder = new BatchBuilder(sql.size(), wm);

        final List<Map.Entry<BoundWrite<?>, List<String>>> boundWrites = new ArrayList<>();
        final UnpreparedSQLBuilder sqlBuilder = new UnpreparedSQLBuilder(arg -> {
            final BoundWrite<?> boundWrite = batchBuilder.visitHole(arg);

            // Render each hole as globally-unique placeholder strings, which we textually
            // replace per-row in next() below
            final List<String> result = new ArrayList<>();
            for (int i = 0; i < boundWrite.arity(); i++) {
                result.add(UUID.randomUUID().toString());
            }

            boundWrites.add(new AbstractMap.SimpleImmutableEntry<>(boundWrite, result));
            return result;
        });

        sqlBuilder.visitSQL(sql);

        final String sqlString = sqlBuilder.build();
        final List<Collection<?>> batchBuilt = batchBuilder.build();

        final List<Iterator<?>> iterators = batchBuilt.stream().map(Collection::iterator).collect(Collectors.toList());
        return new AbstractMap.SimpleImmutableEntry<>(
            sql.size(),
            new Iterator<String>() {
                private int i = 0;

                @Override
                public boolean hasNext() {
                    return i < sql.size();
                }

                @Override
                @SuppressWarnings("unchecked")
                public String next() {
                    String result = sqlString;
                    for (int j = 0; j < boundWrites.size(); j++) {
                        final Map.Entry<BoundWrite<?>, List<String>> boundWrite = boundWrites.get(j);
                        final List<String> replacements = ((BoundWrite<Object>)boundWrite.getKey()).asSQL(iterators.get(j).next());
                        for (int k = 0; k < boundWrite.getValue().size(); k++) {
                            result = result.replace(boundWrite.getValue().get(k), replacements.get(k));
                        }
                    }

                    i++;
                    return result;
                }
            });
    }
}

@ParametersAreNonnullByDefault
class BatchPreparedSQLBuilder {
    private interface Action {
        void write(PreparedStatement stmt, IndexRef ref, Object arg) throws SQLException;
    }

    private BatchPreparedSQLBuilder() {}

    /** Builds a {@link PreparedStatement} with one {@code addBatch()} entry per row of the batch. */
    @SuppressWarnings("unchecked")
    public static PreparedStatement build(SQL sql, Write.Context wm, Connection connection) throws SQLException {
        final int size = sql.size();
        final BatchBuilder batch = new BatchBuilder(size, wm);

        final List<Action> actions = new ArrayList<>();
        final PreparedSQLBuilder sqlBuilder = new PreparedSQLBuilder(arg -> {
            final BoundWrite<?> write = batch.visitHole(arg);
            actions.add((s, ix, x) -> ((BoundWrite<Object>)write).set(s, ix, x));
            return write.arity();
        });

        sqlBuilder.visitSQL(sql);

        final PreparedStatement stmt = sqlBuilder.build(connection);
        final List<Collection<?>> batchBuilt = batch.build();

        final List<Iterator<?>> iterators = batchBuilt.stream().map(Collection::iterator).collect(Collectors.toList());
        for (int i = 0; i < size; i++) {
            final IndexRef ref = IndexRef.create();
            for (int j = 0; j < iterators.size(); j++) {
                actions.get(j).write(stmt, ref, iterators.get(j).next());
            }
            stmt.addBatch();
        }

        return stmt;
    }
}

class BespokeUnpreparedSQLBuilder {
    private BespokeUnpreparedSQLBuilder() {}

    /** Renders a non-batched statement as literal SQL text. */
    @SuppressWarnings("unchecked")
    public static String build(SQL sql, Write.Context wm) {
        final UnpreparedSQLBuilder sqlBuilder = new UnpreparedSQLBuilder(arg -> {
            final SQL.Hole<?> hole = BespokePreparedSQLBuilder.unwrapHole(arg);
            return ((SQL.Hole<Object>)hole).write.bind(wm).asSQL(hole.object);
        });

        sqlBuilder.visitSQL(sql);
        return sqlBuilder.build();
    }
}

class BespokePreparedSQLBuilder {
    private interface Action {
        void write(PreparedStatement stmt, IndexRef ref) throws SQLException;
    }

    private BespokePreparedSQLBuilder() {}

    /** Builds a non-batched {@link PreparedStatement} with all hole values bound. */
    @SuppressWarnings("unchecked")
    public static PreparedStatement build(SQL sql, Write.Context wm, Connection connection) throws SQLException {
        final PreparedSQLBuilder sqlBuilder;
        final List<Action> actions = new ArrayList<>();

        sqlBuilder = new PreparedSQLBuilder(arg -> {
            final SQL.Hole<?> hole = unwrapHole(arg);
            final BoundWrite<?> boundWrite = hole.write.bind(wm);
            actions.add((stmt, ref) -> ((BoundWrite<Object>) boundWrite).set(stmt, ref, hole.object));
            return boundWrite.arity();
        });

        sqlBuilder.visitSQL(sql);
        final PreparedStatement stmt = sqlBuilder.build(connection);

        final IndexRef ref = IndexRef.create();
        for (Action action : actions) {
            action.write(stmt, ref);
        }

        return stmt;
    }

    static SQL.Hole<?> unwrapHole(Object arg) {
        if (arg instanceof SQL.Hole) {
            return (SQL.Hole<?>)arg;
        } else if (arg instanceof SQL.BatchHole) {
            throw new IllegalArgumentException("This SQL statement has some batched parts, but you are trying to execute it in unbatched mode");
        } else {
            throw new IllegalStateException("Not expecting " + arg);
        }
    }
}
--------------------------------------------------------------------------------
/src/main/java/uk/co/omegaprime/mdbi/BatchReads.java:
--------------------------------------------------------------------------------
package uk.co.omegaprime.mdbi;

import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;

/** Functions for creating useful instances of {@link BatchRead}. */
public class BatchReads {
    private BatchReads() {}

    /** Returns the first row of the {@code ResultSet}, and throws {@code NoSuchElementException} if no such row exists. */
    public static <T> BatchRead<T> first(Class<T> klass) {
        return first(new ContextRead<>(klass));
    }

    /** Returns the first row of the {@code ResultSet}, and throws {@code NoSuchElementException} if no such row exists. */
    public static <T> BatchRead<T> first(Read<T> read) {
        return (ctxt, rs) -> {
            if (rs.next()) {
                return read.bind(ctxt).get(rs, IndexRef.create());
            } else {
                throw new NoSuchElementException();
            }
        };
    }

    /** Returns the first row of the {@code ResultSet}, or null if no such row exists. */
    public static <T> BatchRead<T> firstOrNull(Class<T> klass) {
        return firstOrNull(new ContextRead<>(klass));
    }

    /** Returns the first row of the {@code ResultSet}, or null if no such row exists. */
    public static <T> BatchRead<T> firstOrNull(Read<T> read) {
        return (ctxt, rs) -> rs.next() ? read.bind(ctxt).get(rs, IndexRef.create()) : null;
    }

    public static <T> BatchRead<List<T>> asList(Class<T> klass) {
        return asList(new ContextRead<>(klass));
    }

    public static <T> BatchRead<List<T>> asList(Read<T> read) {
        return new CollectionBatchRead<>(ArrayList::new, read);
    }

    public static <T> BatchRead<Set<T>> asSet(Class<T> klass) {
        return asSet(new ContextRead<>(klass));
    }

    public static <T> BatchRead<Set<T>> asSet(Read<T> read) {
        return new CollectionBatchRead<>(LinkedHashSet::new, read);
    }

    /** Return the {@code ResultSet} as a map, failing if any key occurs more than once */
    public static <K, V> BatchRead<Map<K, V>> asMap(Class<K> keyClass, Class<V> valueClass) {
        return asMap(new ContextRead<>(keyClass), new ContextRead<>(valueClass));
    }

    /** Return the {@code ResultSet} as a map, failing if any key occurs more than once */
    public static <K, V> BatchRead<Map<K, V>> asMap(Read<K> readKey, Read<V> readValue) {
        return new MapBatchRead<>(LinkedHashMap::new, BatchReads::appendFail, readKey, readValue);
    }

    private static <K, V> V appendFail(K key, V oldValue, V newValue) {
        throw new IllegalArgumentException("Key " + key + " occurs more than once in result, associated with both " + oldValue + " and " + newValue);
    }

    /** Return the {@code ResultSet} as a map, using the first value encountered for any given key */
    public static <K, V> BatchRead<Map<K, V>> asMapFirst(Class<K> keyClass, Class<V> valueClass) {
        return asMapFirst(new ContextRead<>(keyClass), new ContextRead<>(valueClass));
    }

    /** Return the {@code ResultSet} as a map, using the first value encountered for any given key */
    public static <K, V> BatchRead<Map<K, V>> asMapFirst(Read<K> readKey, Read<V> readValue) {
        return new MapBatchRead<>(LinkedHashMap::new, (_key, od, _nw) -> od, readKey, readValue);
    }

    /** Return the {@code ResultSet} as a map, using the last value encountered for any given key */
    public static <K, V> BatchRead<Map<K, V>> asMapLast(Class<K> keyClass, Class<V> valueClass) {
        return asMapLast(new ContextRead<>(keyClass), new ContextRead<>(valueClass));
    }

    /** Return the {@code ResultSet} as a map, using the last value encountered for any given key */
    public static <K, V> BatchRead<Map<K, V>> asMapLast(Read<K> readKey, Read<V> readValue) {
        return new MapBatchRead<>(LinkedHashMap::new, (_key, _od, nw) -> nw, readKey, readValue);
    }

    /** Return the {@code ResultSet} as a map, allowing multiple values for any given key */
    public static <K, V> BatchRead<Map<K, List<V>>> asMultiMap(Class<K> keyClass, Class<V> valueClass) {
        return asMultiMap(new ContextRead<>(keyClass), new ContextRead<>(valueClass));
    }

    /** Return the {@code ResultSet} as a map, allowing multiple values for any given key */
    @SuppressWarnings("unchecked")
    public static <K, V> BatchRead<Map<K, List<V>>> asMultiMap(Read<K> readKey, Read<V> readValue) {
        return new MapBatchRead<>(LinkedHashMap::new, BatchReads::appendListHack, readKey, (Read<List<V>>)(Read<?>)Reads.map(List.class, readValue, (V v) -> new ArrayList<V>(Collections.singletonList(v))));
    }

    /** Return the {@code ResultSet} as a {@code NavigableMap}, allowing multiple values for any given key */
    @SuppressWarnings("unchecked")
    public static <K, V> BatchRead<NavigableMap<K, List<V>>> asNavigableMultiMap(Class<K> keyClass, Class<V> valueClass) {
        return asNavigableMultiMap(new ContextRead<>(keyClass), new ContextRead<>(valueClass));
    }

    /** Return the {@code ResultSet} as a {@code NavigableMap}, allowing multiple values for any given key */
    @SuppressWarnings("unchecked")
    public static <K, V> BatchRead<NavigableMap<K, List<V>>> asNavigableMultiMap(Read<K> readKey, Read<V> readValue) {
        return new MapBatchRead<>(TreeMap::new, BatchReads::appendListHack, readKey, (Read<List<V>>)(Read<?>)Reads.map(List.class, readValue, (V v) -> new ArrayList<V>(Collections.singletonList(v))));
    }

    // Bit dodgy because correctness depends crucially on how we are called: the "new" list is
    // always the freshly-read singleton, so we can just fold it into the accumulated "old" list
    private static <K, V> List<V> appendListHack(K key, List<V> od, List<V> nw) {
        if (nw.size() != 1) throw new IllegalStateException("This really shouldn't happen..");
        od.add(nw.get(0));
        return od;
    }

    /** As {@link #asMap(Read, BatchRead)} but simply reads the key using the {@code Context}-default read instance for the class */
    public static <K, V> BatchRead<Map<K, V>> asMap(Class<K> readKey, BatchRead<V> readValue) {
        return asMap(Reads.useContext(readKey), readValue);
    }

    /**
     * Splits the {@code ResultSet} into contiguous runs based on equality of the supplied key type. Reads the remaining
     * columns in each segment using the supplied {@code BatchRead}.
     * <p>
     * So for example, {@code asMap(key, value)} is equivalent to {@code segmented(key, first(value))}, and {@code asMultiMap(key, value)}
     * is equivalent to {@code segmented(key, asList(value))} in the case where the {@code ResultSet} is sorted by the key columns.
     * <p>
     * If a key occurs non-contiguously then {@code IllegalArgumentException} will be thrown.
     */
    public static <K, V> BatchRead<Map<K, V>> asMap(Read<K> readKey, BatchRead<V> readValue) {
        return new SegmentedMapBatchRead<>(LinkedHashMap::new, BatchReads::appendFail, readKey, readValue);
    }

    /** As {@link #asMapFirst(Read, BatchRead)} but simply reads the key using the {@code Context}-default read instance for the class */
    public static <K, V> BatchRead<Map<K, V>> asMapFirst(Class<K> readKey, BatchRead<V> readValue) {
        return asMapFirst(Reads.useContext(readKey), readValue);
    }

    /** As {@link #asMap(Read, BatchRead)} but returns the value associated with the first occurrence of a given key instead of failing. */
    public static <K, V> BatchRead<Map<K, V>> asMapFirst(Read<K> readKey, BatchRead<V> readValue) {
        return new SegmentedMapBatchRead<>(LinkedHashMap::new, (_key, od, _nw) -> od, readKey, readValue);
    }

    /** As {@link #asMapLast(Read, BatchRead)} but simply reads the key using the {@code Context}-default read instance for the class */
    public static <K, V> BatchRead<Map<K, V>> asMapLast(Class<K> readKey, BatchRead<V> readValue) {
        return asMapLast(Reads.useContext(readKey), readValue);
    }

    /** As {@link #asMap(Read, BatchRead)} but returns the value associated with the last occurrence of a given key instead of failing. */
    public static <K, V> BatchRead<Map<K, V>> asMapLast(Read<K> readKey, BatchRead<V> readValue) {
        return new SegmentedMapBatchRead<>(LinkedHashMap::new, (_key, _od, nw) -> nw, readKey, readValue);
    }

    /** As {@link #asMultiMap(Read, BatchRead)} but simply reads the key using the {@code Context}-default read instance for the class */
    public static <K, V> BatchRead<Map<K, List<V>>> asMultiMap(Class<K> readKey, BatchRead<V> readValue) {
        return asMultiMap(Reads.useContext(readKey), readValue);
    }

    /** As {@link #asMap(Read, BatchRead)} but returns all values associated with a given key instead of failing. */
    @SuppressWarnings("unchecked")
    public static <K, V> BatchRead<Map<K, List<V>>> asMultiMap(Read<K> readKey, BatchRead<V> readValue) {
        return new SegmentedMapBatchRead<>(LinkedHashMap::new, BatchReads::appendListHack, readKey, (BatchRead<List<V>>)(BatchRead<?>)BatchReads.map(readValue, (V v) -> new ArrayList<V>(Collections.singletonList(v))));
    }

    /** Post-processes the result of another {@code BatchRead} with the supplied function. */
    public static <T, U> BatchRead<U> map(BatchRead<T> read, Function<T, U> f) {
        return (ctxt, rs) -> f.apply(read.get(ctxt, rs));
    }

    /**
     * Returns the {@code ResultSet} interpreted as an array of column vectors.
     * <p>
     * The classes specify the element types of the column vectors. So if you call {@code matrix(String.class, int.class)}
     * then your {@code ResultSet} will be turned into an {@code Object[]} with two elements: a {@code String[]} and a {@code int[]}.
     */
    public static BatchRead<Object[]> matrix(Class<?>... klasses) {
        return matrix(Arrays.asList(klasses).stream().map(ContextRead::new).collect(Collectors.toList()));
    }

    /** As {@link #matrix(Class[])}, but for the case where you want to be explicit about how the columns are constructed. */
    public static BatchRead<Object[]> matrix(Collection<Read<?>> reads) {
        return new MatrixBatchRead(reads);
    }

    /**
     * Returns the {@code ResultSet} interpreted as a map of column names to column vectors.
     * <p>
     * The classes specify the element types of the column vectors. So if you call {@code matrix(String.class, int.class)}
     * then your {@code ResultSet} will be turned into a {@code Map} with two elements: a {@code String[]} and a {@code int[]}.
     * <p>
     * If a {@code Read} instance spans more than one column, the name chosen will be that of the first column.
     */
    public static BatchRead<Map<String, Object>> labelledMatrix(Class<?>... klasses) {
        return labelledMatrix(Arrays.asList(klasses).stream().map(ContextRead::new).collect(Collectors.toList()));
    }

    /** As {@link #labelledMatrix(Class[])}, but for the case where you want to be explicit about how the columns are constructed. */
    public static BatchRead<Map<String, Object>> labelledMatrix(Collection<Read<?>> reads) {
        return new LabelledMatrixBatchRead(reads);
    }
}
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/SQL.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import javax.annotation.Nonnull; 4 | import javax.annotation.Nullable; 5 | import javax.annotation.ParametersAreNonnullByDefault; 6 | import java.sql.PreparedStatement; 7 | import java.sql.SQLException; 8 | import java.util.*; 9 | import java.util.function.BiFunction; 10 | 11 | /** An immutable type representing a SQL statement with zero or more holes that are filled by Java objects */ 12 | @ParametersAreNonnullByDefault 13 | public final class SQL { 14 | static class Hole { 15 | public final @Nullable T object; 16 | public final Write write; 17 | 18 | Hole(@Nullable T object, Write write) { 19 | this.object = object; 20 | this.write = write; 21 | } 22 | } 23 | 24 | static class BatchHole { 25 | public final Collection objects; 26 | public final 
Write write; 27 | 28 | BatchHole(Collection objects, Write write) { 29 | this.objects = objects; 30 | this.write = write; 31 | } 32 | } 33 | 34 | // Elements of list are either Hole, BatchHole, or String 35 | final SnocList args; 36 | final @Nullable Integer size; 37 | 38 | SQL(SnocList args, @Nullable Integer size) { 39 | this.args = args; 40 | this.size = size; 41 | } 42 | 43 | int size() { 44 | return this.size == null ? 0 : this.size; 45 | } 46 | 47 | /** Interpolate a Java object into the SQL query using a {@link Write} instance that is appropriate for its runtime type */ 48 | @SuppressWarnings("unchecked") 49 | public SQL $(@Nullable Object x) { 50 | return x == null ? $(Writes.nullReference(), null) 51 | : $((Class) x.getClass(), x); 52 | } 53 | 54 | /** Interpolate a Java object into the SQL query using a {@link Write} instance suitable for the supplied class */ 55 | public SQL $(Class klass, @Nullable T x) { 56 | return new SQL(args.snoc(new Hole<>(x, new ContextWrite<>(klass))), size); 57 | } 58 | 59 | /** Interpolate a Java object into the SQL query using the supplied {@link Write} instance */ 60 | public SQL $(Write write, @Nullable T x) { 61 | return new SQL(args.snoc(new Hole<>(x, write)), size); 62 | } 63 | 64 | /** 65 | * Interpolate a series of Java objects into a batch SQL query using a {@link Write} instance inferred from the 66 | * runtime type of the first non-null item in the collection. 67 | */ 68 | @SuppressWarnings("unchecked") 69 | public SQL $s(Collection arg) { 70 | final Class klass; 71 | if (arg.size() > 0) { 72 | final Iterator it = arg.iterator(); 73 | T example = it.next(); 74 | while (example == null && it.hasNext()) { 75 | example = it.next(); 76 | } 77 | 78 | klass = example == null ? null : example.getClass(); 79 | } else { 80 | klass = null; 81 | } 82 | 83 | if (klass == null) { 84 | // We know for sure that all elements of the column are null, this is the best we can do.. 
85 | return $s(Writes.nullReference(), arg); 86 | } else { 87 | return $s((Class)klass, arg); 88 | } 89 | } 90 | 91 | /** Interpolate a series of Java objects into a batch SQL query using a {@link Write} instance suitable for the supplied class. */ 92 | public SQL $s(Class klass, Collection x) { 93 | return $s(Writes.useContext(klass), x); 94 | } 95 | 96 | /** Interpolate a series of Java objects into a batch SQL query using the supplied {@link Write} instance */ 97 | public SQL $s(Write write, Collection x) { 98 | if (size != null && size != x.size()) { 99 | throw new IllegalArgumentException("All collections supplied to a batch SQL statement must be of the same size, but you had both sizes " + size + " and " + x.size()); 100 | } 101 | 102 | return new SQL(args.snoc(new BatchHole<>(x, write)), x.size()); 103 | } 104 | 105 | /** Append a SQL literal */ 106 | public SQL sql(SQL x) { 107 | final Integer size; 108 | if (x.size == null) { 109 | size = this.size; 110 | } else if (this.size == null) { 111 | size = x.size; 112 | } else if (x.size.intValue() != this.size) { 113 | throw new IllegalArgumentException("All collections supplied to a batch SQL statement must be of the same size, but you had both sizes " + this.size + " and " + x.size); 114 | } else { 115 | size = x.size; 116 | } 117 | 118 | return new SQL(args.snocs(x.args), size); 119 | } 120 | 121 | /** Append a SQL literal */ 122 | public SQL sql(String x) { 123 | return sql(MDBI.sql(x)); 124 | } 125 | 126 | /** 127 | * Appends several bits of SQL. 128 | *

129 | * The arguments must either be {@code SQL} instances, or Strings (in which case they will be 130 | * assumed to represent SQL fragments, rather than string parameters to the query). 131 | */ 132 | public SQL sql(Object... xs) { 133 | SQL result = sql(""); 134 | for (int i = 0; i < xs.length; i++) { 135 | final Object x = xs[i]; 136 | if (x == null) { 137 | throw new NullPointerException("The argument at index " + i + " was null"); 138 | } 139 | 140 | if (x instanceof String) { 141 | result = result.sql((String)x); 142 | } else if (x instanceof SQL) { 143 | result = result.sql((SQL)x); 144 | } else { 145 | throw new IllegalArgumentException("Supplied argument " + x + " at index " + i + " is neither a String nor a SQL instance"); 146 | } 147 | } 148 | 149 | return result; 150 | } 151 | 152 | /** Append a SQL literal */ 153 | @SafeVarargs 154 | public final SQL in(T... xs) { 155 | return in(Arrays.asList(xs)); 156 | } 157 | 158 | /** 159 | * Append an "IN" clause based on the supplied collection 160 | * 161 | * The input objects are turned into SQL using a {@link Write} based on their runtime type, similar to how 162 | * {@link #$(Object)} works. 163 | */ 164 | public SQL in(Iterable xs) { 165 | return inCore(xs, SQL::$); 166 | } 167 | 168 | /** Append an "IN" clause based on the supplied collection, turning objects into SQL using the {@link Write} instance for the supplied class. */ 169 | public SQL in(Class klass, Iterable xs) { 170 | return in(Writes.useContext(klass), xs); 171 | } 172 | 173 | /** Append an "IN" clause based on the supplied collection, turning objects into SQL using the supplied {@link Write} instance. 
*/ 174 | public SQL in(Write write, Iterable xs) { 175 | return inCore(xs, (sql, x) -> sql.$(write, x)); 176 | } 177 | 178 | private SQL inCore(Iterable xs, BiFunction f) { 179 | final Iterator it = xs.iterator(); 180 | if (!it.hasNext()) { 181 | // I used to get clever in this case and generate "in (null)" on the basis that nothing is equal 182 | // to null... unfortunately it seems that e.g. on SQLite, all of these queries return 0 results: 183 | // select 1 where 1 in (null) 184 | // select 1 where 1 not in (null) 185 | // select 1 where not (1 in (null)) 186 | // select 1 where not (1 not in (null)) 187 | // See also http://stackoverflow.com/questions/129077/not-in-clause-and-null-values 188 | // 189 | // Then I tried generating "in ('e0afa0da0e3444d5ae3b34202b759e0c')", where that value is just a 190 | // random GUID. Unfortunately some systems (e.g. SQL Server) insist on coercing the values in the 191 | // 'in' clause to the type of the LHS, which obviously fails in this case if e.g. LHS is an int. 
192 | // 193 | // So now I use a cheeky sub-query: 194 | return sql(" in (select null where 1 = 0)"); 195 | } else { 196 | SQL result = f.apply(sql(" in ("), it.next()); 197 | while (it.hasNext()) { 198 | result = f.apply(result.sql(","), it.next()); 199 | } 200 | return result.sql(") "); 201 | } 202 | } 203 | 204 | static SQL commaSeparate(Iterator it) { 205 | if (!it.hasNext()) throw new IllegalStateException("You must add at least one column"); 206 | 207 | SQL result = it.next(); 208 | while (it.hasNext()) { 209 | result = result.sql(", ").sql(it.next()); 210 | } 211 | 212 | return result; 213 | } 214 | 215 | @SuppressWarnings("unchecked") 216 | @Override 217 | public String toString() { 218 | boolean isBatch = false; 219 | for (Object arg : args) { 220 | if (arg instanceof BatchHole) { 221 | isBatch = true; 222 | break; 223 | } 224 | } 225 | 226 | // We try to be a bit clever to make things easier for the humans looking at SQL objects: we show holes using 227 | // the proper SQL interpretation if possible. If this fails for whatever reason we splice them in using a made-up syntax. 
228 | final Write.Context tolerantWriteContext = new Write.Context() { 229 | @Override 230 | public Write get(Class klass) { 231 | Write unbound_ = null; 232 | try { 233 | unbound_ = Context.DEFAULT.writeContext().get(klass); 234 | } catch (Exception _ignored) { 235 | // Will just fallback on default printing method 236 | } 237 | 238 | final Write unbound = unbound_; 239 | return ctxt -> { 240 | BoundWrite bound_ = null; 241 | if (unbound != null) { 242 | try { 243 | bound_ = unbound.bind(ctxt); 244 | } catch (Exception _ignored) { 245 | // Will just fallback on default printing method 246 | } 247 | } 248 | 249 | final BoundWrite bound = bound_; 250 | return new BoundWrite() { 251 | private Integer reportedArity; 252 | 253 | @Override 254 | public int arity() { 255 | if (reportedArity == null) { 256 | if (bound != null) { 257 | try { 258 | reportedArity = bound.arity(); 259 | } catch (Exception _ignored) { 260 | // Will just fallback on default printing method 261 | } 262 | } 263 | 264 | if (reportedArity == null) { 265 | reportedArity = 1; 266 | } 267 | } 268 | 269 | return reportedArity; 270 | } 271 | 272 | @Override 273 | public void set(@Nonnull PreparedStatement s, @Nonnull IndexRef ix, @Nullable T x) throws SQLException { 274 | throw new UnsupportedOperationException("This code should be unreachable"); 275 | } 276 | 277 | @Nonnull 278 | @Override 279 | public List asSQL(@Nullable T x) { 280 | List result = null; 281 | if (bound != null) { 282 | try { 283 | result = bound.asSQL(x); 284 | } catch (Exception _ignored) { 285 | // Will just fallback on default printing method for this param 286 | } 287 | } 288 | 289 | if (result != null && (reportedArity == null || reportedArity == result.size())) { 290 | // Common case 291 | return result; 292 | } else { 293 | final int needArity = reportedArity == null ? 
1 : reportedArity; 294 | final List modifiedResult = new ArrayList<>(); 295 | int i = 0; 296 | while (modifiedResult.size() < needArity) { 297 | if (result != null && i < result.size()) { 298 | modifiedResult.add(result.get(i++)); 299 | } else { 300 | modifiedResult.add("${" + x + (needArity == 1 ? "" : ":" + modifiedResult.size()) + "}"); 301 | } 302 | } 303 | 304 | return modifiedResult; 305 | } 306 | } 307 | }; 308 | }; 309 | } 310 | }; 311 | 312 | if (isBatch) { 313 | final StringBuilder result = new StringBuilder(); 314 | final Iterator it = BatchUnpreparedSQLBuilder.build(this, tolerantWriteContext).getValue(); 315 | while (it.hasNext()) { 316 | if (result.length() != 0) result.append("\n"); 317 | result.append(it.next()); 318 | } 319 | 320 | return result.toString(); 321 | } else { 322 | return BespokeUnpreparedSQLBuilder.build(this, tolerantWriteContext); 323 | } 324 | } 325 | } 326 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/MDBI.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import javax.annotation.Nullable; 4 | import javax.sql.DataSource; 5 | import java.sql.*; 6 | import java.util.Collection; 7 | import java.util.Iterator; 8 | import java.util.List; 9 | import java.util.Map; 10 | import java.util.function.Supplier; 11 | 12 | /** 13 | * The most important class in the library: a wrapper around a {@link Connection} or {@link DataSource} that gives it superpowers. 14 | *

15 | * To get hold of one of these, you'll probably want to use either {@link #of(Connection)} or {@link #of(DataSource)}. 16 | */ 17 | public class MDBI { 18 | private interface ConnectionObtainer { 19 | T with(ConnectionUser user) throws SQLException; 20 | 21 | static ConnectionObtainer fromDataSource(DataSource dataSource) { 22 | return new ConnectionObtainer() { 23 | @Override 24 | public T with(ConnectionUser user) throws SQLException { 25 | try (final Connection c = dataSource.getConnection()) { 26 | return user.consume(c); 27 | } 28 | } 29 | }; 30 | } 31 | 32 | static ConnectionObtainer fromConnection(Connection connection) { 33 | return new ConnectionObtainer() { 34 | @Override 35 | public T with(ConnectionUser user) throws SQLException { 36 | return user.consume(connection); 37 | } 38 | }; 39 | } 40 | } 41 | 42 | private interface ConnectionUser { 43 | T consume(Connection c) throws SQLException; 44 | } 45 | 46 | private final Context context; 47 | private final ConnectionObtainer connectionObtainer; 48 | private final boolean prepared, transactional; 49 | private final Supplier retryPolicy; 50 | 51 | // TODO: support generated keys? Bit awkward because we need to know we need the feature when we prepare the stmt. 52 | 53 | /** Creates a {@code MDBI} where all queries are executed against this connection. */ 54 | public static MDBI of(Connection connection) { 55 | return MDBI.of(Context.DEFAULT, connection); 56 | } 57 | /** Creates a {@code MDBI} where all queries are executed against fresh connections retrieved from the {@code DataSource}. 
*/ 58 | public static MDBI of(DataSource dataSource) { 59 | return MDBI.of(Context.DEFAULT, dataSource); 60 | } 61 | public static MDBI of(Context context, Connection connection) { 62 | return new MDBI(context, ConnectionObtainer.fromConnection(connection)); 63 | } 64 | public static MDBI of(Context context, DataSource dataSource) { 65 | return new MDBI(context, ConnectionObtainer.fromDataSource(dataSource)); 66 | } 67 | 68 | private MDBI(Context context, ConnectionObtainer connectionObtainer) { 69 | this(context, connectionObtainer, true, true, Retries::deadlocks); 70 | } 71 | 72 | private MDBI(Context context, ConnectionObtainer connectionObtainer, 73 | boolean prepared, boolean transactional, Supplier retryPolicy) { 74 | this.context = context; 75 | this.connectionObtainer = connectionObtainer; 76 | this.prepared = prepared; 77 | this.transactional = transactional; 78 | this.retryPolicy = retryPolicy; 79 | } 80 | 81 | /** Should we use {@link PreparedStatement}s to execute SQL (the default)? Or should we instead construct SQL strings for use with {@link Statement}? */ 82 | public boolean isPrepared() { return prepared; } 83 | public MDBI withPrepared(boolean prepared) { 84 | return new MDBI(context, connectionObtainer, prepared, transactional, retryPolicy); 85 | } 86 | 87 | /** 88 | * Should we execute the SQL statement in a transaction? 89 | *

90 | * Note that if transactions are disabled then the retry policy ({@link #getRetryPolicy()}) will never be used. 91 | */ 92 | public boolean isTransactional() { return transactional; } 93 | public MDBI withTransactional(boolean transactional) { 94 | return new MDBI(context, connectionObtainer, prepared, transactional, retryPolicy); 95 | } 96 | 97 | /** 98 | * Specifies how exceptions that occur during querying should be handled. By default, we'll retry 99 | * deadlocks using the {@link Retries#deadlocks()} strategy. 100 | *

101 | * For more about how to construct a policy, see the documentation of {@link Retry}. 102 | *

103 | * Important note: the retry policy will only be used if both: 104 | *

    105 | *
  1. Transactions are enabled in MDBI (the default)
  2. 106 | *
  3. You are executing a query against a 107 | * connection with no open transaction. i.e. it will always come into play if you constructed this 108 | * {@code MDBI} using a {@code DataSource}, but won't be used if you constructed it from a {@code Connection} 109 | * with autocommit set to false. You might think this is a strange restriction, but if we didn't have it 110 | * then users of transactions might get just part of their transaction retried, which would be really surprising. 111 | *
  4. 112 | *
113 | */ 114 | public Supplier getRetryPolicy() { return retryPolicy; } 115 | public MDBI withRetryPolicy(Supplier retryPolicy) { 116 | return new MDBI(context, connectionObtainer, prepared, transactional, retryPolicy); 117 | } 118 | 119 | public Context getContext() { return context; } 120 | public MDBI withContext(Context context) { 121 | return new MDBI(context, connectionObtainer, prepared, transactional, retryPolicy); 122 | } 123 | 124 | /** Executes a query and throws away the result, if any. */ 125 | public void execute(SQL sql) throws SQLException { 126 | query(sql, (StatementlikeBatchRead) (ctxt, s) -> { 127 | s.execute(); 128 | return null; 129 | }); 130 | } 131 | 132 | /** Executes a batch query, and returns the number of rows affected by each statement in the batch. */ 133 | public long[] updateBatch(SQL sql) throws SQLException { 134 | if (prepared) { 135 | return connectionObtainer.with(c -> { 136 | try (final PreparedStatement ps = BatchPreparedSQLBuilder.build(sql, context.writeContext(), c)) { 137 | return retry(c, () -> { 138 | try { 139 | return ps.executeLargeBatch(); 140 | } catch (UnsupportedOperationException _unsupported) { 141 | final int[] ints = ps.executeBatch(); 142 | final long[] longs = new long[ints.length]; 143 | for (int i = 0; i < ints.length; i++) { 144 | longs[i] = ints[i]; 145 | } 146 | return longs; 147 | } 148 | }); 149 | } 150 | }); 151 | } else { 152 | return connectionObtainer.with(c -> { 153 | try (final Statement s = c.createStatement()) { 154 | final Map.Entry> e = BatchUnpreparedSQLBuilder.build(sql, context.writeContext()); 155 | final Iterator it = e.getValue(); 156 | 157 | return Transactionally.run(c, () -> retry(c, () -> { 158 | final long[] result = new long[e.getKey()]; 159 | boolean supportsLargeUpdate = true; 160 | int i = 0; 161 | while (it.hasNext()) { 162 | final String x = it.next(); 163 | if (!supportsLargeUpdate) { 164 | result[i] = s.executeUpdate(x); 165 | } else { 166 | try { 167 | result[i] = 
s.executeLargeUpdate(x); 168 | } catch (UnsupportedOperationException _unsupported) { 169 | supportsLargeUpdate = false; 170 | result[i] = s.executeUpdate(x); 171 | } 172 | } 173 | i++; 174 | } 175 | 176 | return result; 177 | })); 178 | } 179 | }); 180 | } 181 | } 182 | 183 | /** Executes a query and returns the number of rows affected */ 184 | public long update(SQL sql) throws SQLException { 185 | return query(sql, (StatementlikeBatchRead) (ctxt, s) -> { 186 | try { 187 | return s.executeLargeUpdate(); 188 | } catch (UnsupportedOperationException _unsupported) { 189 | return (long)s.executeUpdate(); 190 | } 191 | }); 192 | } 193 | 194 | /** Executes a query and interprets each row of the result as an instance of the supplied class. */ 195 | public List queryList(SQL sql, Class klass) throws SQLException { 196 | return queryList(sql, new ContextRead<>(klass)); 197 | } 198 | 199 | /** Executes a query and interprets each row using the supplied {@code Read} instance. */ 200 | public List queryList(SQL sql, Read read) throws SQLException { 201 | return query(sql, BatchReads.asList(read)); 202 | } 203 | 204 | /** Executes a query and interprets each row of the result as an entry in a map using the supplied classes. */ 205 | public Map queryMap(SQL sql, Class keyKlass, Class valueKlass) throws SQLException { 206 | return queryMap(sql, new ContextRead<>(keyKlass), new ContextRead<>(valueKlass)); 207 | } 208 | 209 | /** Executes a query and interprets each row of the result as an entry in a map using the supplied {@code Read} instances. */ 210 | public Map queryMap(SQL sql, Read keyRead, Read valueRead) throws SQLException { 211 | return query(sql, BatchReads.asMap(keyRead, valueRead)); 212 | } 213 | 214 | /** 215 | * Executes a query and returns the first row of the result interpreted as the supplied class. 216 | * 217 | * @throws java.util.NoSuchElementException if the result is empty. 
218 | */ 219 | public T queryFirst(SQL sql, Class klass) throws SQLException { 220 | return queryFirst(sql, new ContextRead<>(klass)); 221 | } 222 | 223 | /** 224 | * Executes a query and returns the first row of the result interpreted using the supplied {@code Read} instance. 225 | * 226 | * @throws java.util.NoSuchElementException if the result is empty. 227 | */ 228 | public T queryFirst(SQL sql, Read read) throws SQLException { 229 | return query(sql, BatchReads.first(read)); 230 | } 231 | 232 | /** 233 | * Executes a query and returns the first row of the result interpreted as the supplied class, or null if there is no such row. 234 | */ 235 | public T queryFirstOrNull(SQL sql, Class klass) throws SQLException { 236 | return queryFirstOrNull(sql, new ContextRead<>(klass)); 237 | } 238 | 239 | /** 240 | * Executes a query and returns the first row of the result interpreted using the supplied {@code Read} instance, or null if there is no such row. 241 | */ 242 | public T queryFirstOrNull(SQL sql, Read read) throws SQLException { 243 | return query(sql, BatchReads.firstOrNull(read)); 244 | } 245 | 246 | /** Executes a query and interprets the result in a fully customizable way using the {@code BatchRead} instance. */ 247 | public T query(SQL sql, BatchRead batchRead) throws SQLException { 248 | return query(sql, StatementlikeBatchReads.fromBatchRead(batchRead)); 249 | } 250 | 251 | /** Executes a query and interprets the result in a fully customizable way using the {@code StatementlikeBatchRead} instance. 
*/ 252 | public T query(SQL sql, StatementlikeBatchRead batchRead) throws SQLException { 253 | if (prepared) { 254 | return connectionObtainer.with(c -> { 255 | try (final PreparedStatement ps = BespokePreparedSQLBuilder.build(sql, context.writeContext(), c)) { 256 | return retry(c, () -> batchRead.get(context.readContext(), new PreparedStatementlike(ps))); 257 | } 258 | }); 259 | } else { 260 | return connectionObtainer.with(c -> { 261 | try (final Statement s = c.createStatement()) { 262 | return retry(c, () -> batchRead.get(context.readContext(), new UnpreparedStatementlike(s, BespokeUnpreparedSQLBuilder.build(sql, context.writeContext())))); 263 | } 264 | }); 265 | } 266 | } 267 | 268 | private T retry(Connection c, SQLAction act) throws SQLException { 269 | if (!transactional || !c.getAutoCommit()) { 270 | // Already in transaction, we can't safely retry because failure of the SQL action we 271 | // are trying to do might cause rollback. Example: what if we execute these 3 one after another: 272 | // update tab set x = 1 273 | // begin tran; update tab set x = 2 274 | // select x from tab // <-- if this deadlocks then the rollback causes the update to be lost. If we just retry this statement we'll return 1 (unexpected). 275 | return act.run(); 276 | } else { 277 | return Transactionally.runWithRetry(c, retryPolicy.get(), act); 278 | } 279 | } 280 | 281 | // Static versions of SQL.* methods below this line: 282 | 283 | /** See {@link SQL#$(Object)}. */ 284 | public static SQL $(@Nullable Object x) { 285 | return sql("").$(x); 286 | } 287 | 288 | /** See {@link SQL#$(Class, Object)}. */ 289 | public static SQL $(Class klass, @Nullable T x) { 290 | return sql("").$(klass, x); 291 | } 292 | 293 | /** See {@link SQL#$(Write, Object)}. */ 294 | public static SQL $(Write write, @Nullable T x) { 295 | return sql("").$(write, x); 296 | } 297 | 298 | /** See {@link SQL#$s(Collection)}. 
*/ 299 | public static SQL $s(Collection arg) { 300 | return sql("").$s(arg); 301 | } 302 | 303 | /** See {@link SQL#$s(Class, Collection)}. */ 304 | public static SQL $s(Class klass, Collection x) { 305 | return sql("").$s(klass, x); 306 | } 307 | 308 | /** See {@link SQL#$s(Write, Collection)}. */ 309 | public static SQL $s(Write write, Collection x) { 310 | return sql("").$s(write, x); 311 | } 312 | 313 | /** 314 | * Constructs a simple {@link SQL} object representing just the supplied SQL fragment. See {@link SQL#sql(String)}. 315 | *

316 | * For the best ergonomics, we suggest that you import this method using a static import. 317 | */ 318 | public static SQL sql(String x) { 319 | return new SQL(SnocList.singleton(x), null); 320 | } 321 | 322 | /** See {@link SQL#sql(Object...)}. */ 323 | public static SQL sql(Object... bits) { 324 | return sql("").sql(bits); 325 | } 326 | 327 | /** See {@link SQL#in(Object...)}. */ 328 | @SafeVarargs 329 | public static SQL in(Object... xs) { 330 | return sql("").in(xs); 331 | } 332 | 333 | /** See {@link SQL#in(Iterable)}. */ 334 | public static SQL in(Iterable xs) { 335 | return sql("").in(xs); 336 | } 337 | 338 | /** See {@link SQL#in(Class, Iterable)}. */ 339 | public static SQL in(Class klass, Iterable xs) { 340 | return sql("").in(klass, xs); 341 | } 342 | 343 | /** See {@link SQL#in(Write, Iterable)}. */ 344 | public static SQL in(Write write, Iterable xs) { 345 | return sql("").in(write, xs); 346 | } 347 | } 348 | -------------------------------------------------------------------------------- /src/main/java/uk/co/omegaprime/mdbi/Writes.java: -------------------------------------------------------------------------------- 1 | package uk.co.omegaprime.mdbi; 2 | 3 | import javax.annotation.Nonnull; 4 | import javax.annotation.Nullable; 5 | import java.io.UnsupportedEncodingException; 6 | import java.math.BigDecimal; 7 | import java.sql.PreparedStatement; 8 | import java.sql.SQLException; 9 | import java.sql.Timestamp; 10 | import java.sql.Types; 11 | import java.time.LocalDate; 12 | import java.time.LocalDateTime; 13 | import java.time.LocalTime; 14 | import java.util.*; 15 | import java.util.function.Function; 16 | import java.util.stream.Collectors; 17 | 18 | /** Construct useful instances of the {@link Write} interface. 
public class Writes {
    // PRIM_* writers are for primitive-typed bean properties / tuple slots: they assert the
    // boxed value is non-null. The corresponding boxed writers map a Java null to SQL NULL.
    public static final Write<Boolean> PRIM_BOOLEAN = new AbstractUnaryWrite<Boolean>() {
        @Override String asSQL(@Nullable Boolean x) { assert x != null; return Boolean.toString(x); }
        @Override void set(PreparedStatement s, int ix, @Nullable Boolean x) throws SQLException { assert x != null; s.setBoolean(ix, x); }
    };
    public static final Write<Boolean> BOOLEAN = new AbstractUnaryWrite<Boolean>() {
        @Override String asSQL(@Nullable Boolean x) { return x == null ? "null" : Boolean.toString(x); }
        @Override void set(PreparedStatement s, int ix, @Nullable Boolean x) throws SQLException {
            if (x == null) s.setNull(ix, Types.BOOLEAN); else s.setBoolean(ix, x);
        }
    };
    public static final Write<Byte> PRIM_BYTE = new AbstractUnaryWrite<Byte>() {
        @Override String asSQL(@Nullable Byte x) { assert x != null; return Byte.toString(x); }
        @Override void set(PreparedStatement s, int ix, @Nullable Byte x) throws SQLException { assert x != null; s.setByte(ix, x); }
    };
    public static final Write<Byte> BYTE = new AbstractUnaryWrite<Byte>() {
        @Override String asSQL(@Nullable Byte x) { return x == null ? "null" : Byte.toString(x); }
        @Override void set(PreparedStatement s, int ix, @Nullable Byte x) throws SQLException {
            if (x == null) s.setNull(ix, Types.TINYINT); else s.setByte(ix, x);
        }
    };
    // Characters travel over JDBC as single-character strings.
    public static final Write<Character> PRIM_CHAR = new AbstractUnaryWrite<Character>() {
        @Override String asSQL(@Nullable Character x) { assert x != null; return Character.toString(x); }
        @Override void set(PreparedStatement s, int ix, @Nullable Character x) throws SQLException { assert x != null; s.setString(ix, Character.toString(x)); }
    };
    public static final Write<Character> CHARACTER = new AbstractUnaryWrite<Character>() {
        @Override String asSQL(@Nullable Character x) { return x == null ? "null" : Character.toString(x); }
        @Override void set(PreparedStatement s, int ix, @Nullable Character x) throws SQLException { s.setString(ix, x == null ? null : Character.toString(x)); }
    };
    public static final Write<Short> PRIM_SHORT = new AbstractUnaryWrite<Short>() {
        @Override String asSQL(@Nullable Short x) { assert x != null; return Short.toString(x); }
        @Override void set(PreparedStatement s, int ix, @Nullable Short x) throws SQLException { assert x != null; s.setShort(ix, x); }
    };
    public static final Write<Short> SHORT = new AbstractUnaryWrite<Short>() {
        @Override String asSQL(@Nullable Short x) { return x == null ? "null" : Short.toString(x); }
        @Override void set(PreparedStatement s, int ix, @Nullable Short x) throws SQLException {
            if (x == null) s.setNull(ix, Types.SMALLINT); else s.setShort(ix, x);
        }
    };
    public static final Write<Integer> PRIM_INT = new AbstractUnaryWrite<Integer>() {
        @Override String asSQL(@Nullable Integer x) { assert x != null; return Integer.toString(x); }
        @Override void set(PreparedStatement s, int ix, @Nullable Integer x) throws SQLException { assert x != null; s.setInt(ix, x); }
    };
    public static final Write<Integer> INTEGER = new AbstractUnaryWrite<Integer>() {
        @Override String asSQL(@Nullable Integer x) { return x == null ? "null" : Integer.toString(x); }
        @Override void set(PreparedStatement s, int ix, @Nullable Integer x) throws SQLException {
            if (x == null) s.setNull(ix, Types.INTEGER); else s.setInt(ix, x);
        }
    };
    public static final Write<Long> PRIM_LONG = new AbstractUnaryWrite<Long>() {
        @Override String asSQL(@Nullable Long x) { assert x != null; return Long.toString(x); }
        @Override void set(PreparedStatement s, int ix, @Nullable Long x) throws SQLException { assert x != null; s.setLong(ix, x); }
    };
    public static final Write<Long> LONG = new AbstractUnaryWrite<Long>() {
        @Override String asSQL(@Nullable Long x) { return x == null ? "null" : Long.toString(x); }
        @Override void set(PreparedStatement s, int ix, @Nullable Long x) throws SQLException {
            if (x == null) s.setNull(ix, Types.BIGINT); else s.setLong(ix, x);
        }
    };
    // For floating point, NaN is deliberately identified with SQL NULL in both directions
    // (see the matching PRIM_FLOAT/PRIM_DOUBLE readers in Reads, which return NaN for NULL).
    public static final Write<Float> PRIM_FLOAT = new AbstractUnaryWrite<Float>() {
        @Override String asSQL(@Nullable Float x) { assert x != null; return Float.isNaN(x) ? "null" : Float.toString(x); }
        @Override void set(PreparedStatement s, int ix, @Nullable Float x) throws SQLException {
            assert x != null;
            if (Float.isNaN(x)) s.setNull(ix, Types.FLOAT); else s.setFloat(ix, x);
        }
    };
    public static final Write<Float> FLOAT = new AbstractUnaryWrite<Float>() {
        @Override String asSQL(@Nullable Float x) { return (x == null || Float.isNaN(x)) ? "null" : Float.toString(x); }
        @Override void set(PreparedStatement s, int ix, @Nullable Float x) throws SQLException {
            if (x == null || Float.isNaN(x)) s.setNull(ix, Types.FLOAT); else s.setFloat(ix, x);
        }
    };
    public static final Write<Double> PRIM_DOUBLE = new AbstractUnaryWrite<Double>() {
        @Override String asSQL(@Nullable Double x) { assert x != null; return Double.isNaN(x) ? "null" : Double.toString(x); }
        @Override void set(PreparedStatement s, int ix, @Nullable Double x) throws SQLException {
            assert x != null;
            if (Double.isNaN(x)) s.setNull(ix, Types.DOUBLE); else s.setDouble(ix, x);
        }
    };
    public static final Write<Double> DOUBLE = new AbstractUnaryWrite<Double>() {
        @Override String asSQL(@Nullable Double x) { return (x == null || Double.isNaN(x)) ? "null" : Double.toString(x); }
        @Override void set(PreparedStatement s, int ix, @Nullable Double x) throws SQLException {
            if (x == null || Double.isNaN(x)) s.setNull(ix, Types.DOUBLE); else s.setDouble(ix, x);
        }
    };
    public static final Write<String> STRING = new AbstractUnaryWrite<String>() {
        // FIX: previously returned a Java null for a null value; every other writer returns the
        // SQL literal "null", and a Java null here breaks consumers that join the fragments.
        @Override String asSQL(@Nullable String x) { return x == null ? "null" : "'" + x.replace("'", "''") + "'"; }
        @Override void set(PreparedStatement s, int ix, @Nullable String x) throws SQLException { s.setString(ix, x); }
    };
    // Date/time writers pin the JDBC driver to UTC via the shared calendar in Time, so that
    // values round-trip independently of the JVM's default time zone.
    public static final Write<LocalDate> LOCAL_DATE = new AbstractUnaryWrite<LocalDate>() {
        @Override String asSQL(@Nullable LocalDate x) { return x == null ? "null" : "'" + x.toString() + "'"; }
        @Override void set(PreparedStatement s, int ix, @Nullable LocalDate x) throws SQLException {
            s.setTimestamp(ix, x == null ? null : new Timestamp(x.atTime(0, 0).atZone(Time.UTC_ZONE_ID).toInstant().toEpochMilli()), Time.UTC_CALENDAR.get());
        }
    };
    public static final Write<LocalTime> LOCAL_TIME = new AbstractUnaryWrite<LocalTime>() {
        @Override String asSQL(@Nullable LocalTime x) { return x == null ? "null" : "'" + x.toString() + "'"; }
        @Override void set(PreparedStatement s, int ix, @Nullable LocalTime x) throws SQLException {
            s.setTime(ix, x == null ? null : new java.sql.Time(x.atDate(LocalDate.of(1970, 1, 1)).atZone(Time.UTC_ZONE_ID).toInstant().toEpochMilli()), Time.UTC_CALENDAR.get());
        }
    };
    public static final Write<LocalDateTime> LOCAL_DATE_TIME = new AbstractUnaryWrite<LocalDateTime>() {
        @Override String asSQL(@Nullable LocalDateTime x) { return x == null ? "null" : "'" + x.toString() + "'"; }
        @Override void set(PreparedStatement s, int ix, @Nullable LocalDateTime x) throws SQLException {
            s.setTimestamp(ix, x == null ? null : new Timestamp(x.atZone(Time.UTC_ZONE_ID).toInstant().toEpochMilli()), Time.UTC_CALENDAR.get());
        }
    };
    public static final Write<byte[]> BYTE_ARRAY = new AbstractUnaryWrite<byte[]>() {
        @Override
        String asSQL(@Nullable byte[] x) {
            // Use the Charset overload: unlike new String(byte[], String) it cannot throw the
            // checked UnsupportedEncodingException, so no impossible catch block is needed.
            return x == null ? "null" : "'" + new String(x, java.nio.charset.StandardCharsets.US_ASCII).replace("'", "''") + "'";
        }

        @Override
        void set(PreparedStatement s, int ix, @Nullable byte[] x) throws SQLException {
            s.setBytes(ix, x);
        }
    };
    public static final Write<BigDecimal> BIG_DECIMAL = new AbstractUnaryWrite<BigDecimal>() {
        @Override String asSQL(@Nullable BigDecimal x) { return x == null ? "null" : x.toPlainString(); }
        @Override void set(PreparedStatement s, int ix, @Nullable BigDecimal x) throws SQLException { s.setBigDecimal(ix, x); }
    };

    /** A {@code Write} instance that simply defers to the {@link Context} to decide how to turn instances of the given class into SQL. */
    public static <T> Write<T> useContext(Class<T> klass) {
        return new ContextWrite<>(klass);
    }

    /** Turns a bean into SQL by serializing the named fields in order using the {@link Write} registered in the context for the respective property type. */
    public static <T> Write<T> bean(Class<T> klass, String... fields) {
        return new BeanWrite<>(klass, fields);
    }

    /** Turns a bean into SQL by serializing the named fields in order using the contextual {@link Write} instances for the supplied classes. */
    public static <T> Write<T> beanWithFieldClasses(Class<T> klass, Collection<String> fields, Collection<Class<?>> klasses) {
        return new BeanWrite<>(klass, fields, klasses.stream().<Write<?>>map(argklass -> new ContextWrite<>(argklass)).collect(Collectors.toList()));
    }

    /** Turns a bean into SQL by serializing the named fields in order using the supplied {@link Write} instances. */
    public static <T> Write<T> bean(Class<T> klass, Collection<String> fields, Collection<Write<?>> writes) {
        return new BeanWrite<>(klass, fields, writes);
    }

    /** Always turns the argument into a SQL null. Useful internally as a last-ditch fallback when we've been asked to serialize a null. */
    public static <T> Write<T> nullReference() {
        return new AbstractUnaryWrite<T>() {
            @Override String asSQL(@Nullable T x) { return "null"; }

            @Override
            void set(PreparedStatement s, int ix, @Nullable T x) throws SQLException {
                s.setObject(ix, null);
            }
        };
    }

    /** Mapping treating {@code Write} as a co-functor: values of type {@code U} are converted to {@code T} before being written via the context's {@code Write} for {@code klass}. */
    public static <T, U> Write<U> map(Class<T> klass, Function<U, T> f) {
        return map(new ContextWrite<>(klass), f);
    }

    /** Mapping treating {@code Write} as a co-functor. */
    public static <T, U> Write<U> map(Write<T> write, Function<U, T> f) {
        return ctxt -> {
            final BoundWrite<T> boundWrite = write.bind(ctxt);
            return new BoundWrite<U>() {
                @Override
                public int arity() {
                    return boundWrite.arity();
                }

                @Override
                public void set(@Nonnull PreparedStatement s, @Nonnull IndexRef ix, @Nullable U x) throws SQLException {
                    boundWrite.set(s, ix, f.apply(x));
                }

                @Nonnull
                @Override
                public List<String> asSQL(@Nullable U x) {
                    return boundWrite.asSQL(f.apply(x));
                }
            };
        };
    }

    /** Writes the given classes one after another into SQL, picking them from the supplied {@code List}. */
    public static Write<List<Object>> listWithClasses(Collection<Class<?>> klasses) {
        return list(klasses.stream().<Write<?>>map(klass -> new ContextWrite<>(klass)).collect(Collectors.toList()));
    }

    /** Variadic version of {@link #listWithClasses(Collection)} */
    @SafeVarargs
    public static Write<List<Object>> listWithClasses(Class<?>... klasses) {
        return listWithClasses(Arrays.asList(klasses));
    }

    /** Writes a fixed number of elements into the result */
    public static Write<List<Object>> list(Collection<Write<?>> writes) {
        return ctxt -> {
            final List<BoundWrite<?>> bounds = writes.stream().<BoundWrite<?>>map(write -> write.bind(ctxt)).collect(Collectors.toList());

            return new BoundWrite<List<Object>>() {
                @Override
                public int arity() {
                    // The list consumes one statement slot per constituent writer's slot.
                    return bounds.stream().mapToInt(BoundWrite::arity).sum();
                }

                @SuppressWarnings("unchecked")
                @Override
                public void set(@Nonnull PreparedStatement s, @Nonnull IndexRef ix, @Nullable List<Object> x) throws SQLException {
                    if (x == null) throw new NullPointerException();
                    int i = 0;
                    for (BoundWrite<?> bound : bounds) {
                        ((BoundWrite<Object>) bound).set(s, ix, x.get(i++));
                    }
                }

                @SuppressWarnings("unchecked")
                @Nonnull
                @Override
                public List<String> asSQL(@Nullable List<Object> x) {
                    if (x == null) throw new NullPointerException();

                    final List<String> result = new ArrayList<>();
                    int i = 0;
                    for (BoundWrite<?> bound : bounds) {
                        result.addAll(((BoundWrite<Object>) bound).asSQL(x.get(i++)));
                    }
                    return result;
                }
            };
        };
    }

    /** Variadic version of {@link #list(Collection)} */
    @SafeVarargs
    public static Write<List<Object>> list(Write<?>... writes) {
        return list(Arrays.asList(writes));
    }

    private Writes() {}

    /** Writes the name of the enum constant into the database */
    public static <T extends Enum<T>> Write<T> enumAsString() {
        return map(Writes.STRING, x -> x == null ? null : x.name());
    }

    /** Writes the ordinal of the enum constant into the database */
    public static <T extends Enum<T>> Write<T> enumAsOrdinal() {
        return map(Writes.INTEGER, x -> x == null ? null : x.ordinal());
    }

    /** Shared skeleton for writers that occupy exactly one JDBC parameter slot. */
    private static abstract class AbstractUnaryWrite<T> implements Write<T> {
        abstract String asSQL(@Nullable T x);
        abstract void set(PreparedStatement s, int ix, @Nullable T x) throws SQLException;

        @Override
        public BoundWrite<T> bind(Context ctxt) {
            return new BoundWrite<T>() {
                @Override
                public int arity() {
                    return 1;
                }

                @Override
                public void set(@Nonnull PreparedStatement s, @Nonnull IndexRef ix, T x) throws SQLException {
                    AbstractUnaryWrite.this.set(s, ix.take(), x);
                }

                @Nonnull
                @Override
                public List<String> asSQL(T x) {
                    return Collections.singletonList(AbstractUnaryWrite.this.asSQL(x));
                }
            };
        }
    }

    /** A mutable {@link Write.Context} backed by a per-class {@code HashMap}. */
    static class Map implements Write.Context {
        private final HashMap<Class<?>, Write<?>> map = new HashMap<>();

        public Map() {}

        /** Copy constructor: snapshots the registrations of {@code that}. */
        public Map(Map that) {
            map.putAll(that.map);
        }

        public <T> void put(Class<T> klass, Write<T> write) {
            map.put(klass, write);
        }

        @SuppressWarnings("unchecked")
        public <T> Write<T> get(Class<T> klass) {
            // Cast is safe: put() guarantees the value stored under a Class<T> key is a Write<T>.
            final Write<T> result = (Write<T>) map.get(klass);
            if (result == null) {
                throw new IllegalArgumentException("Don't know how to transfer " + klass + " objects to JDBC");
            } else {
                return result;
            }
        }
    }
}

// ============================================================================
// src/main/java/uk/co/omegaprime/mdbi/Reads.java
// ============================================================================

package uk.co.omegaprime.mdbi;

import javax.annotation.Nonnull;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.math.BigDecimal;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.time.Instant;
import java.time.LocalDate;
java.time.LocalDateTime; 13 | import java.time.LocalTime; 14 | import java.util.*; 15 | import java.util.function.Function; 16 | import java.util.stream.Collectors; 17 | 18 | /** Useful methods for constructing instances of the {@link Read} interface. */ 19 | public class Reads { 20 | public static final Read PRIM_BOOLEAN = new AbstractUnaryRead(boolean.class) { 21 | @Override 22 | public Boolean get(ResultSet rs, int ix) throws SQLException { 23 | boolean result = rs.getBoolean(ix); 24 | if (rs.wasNull()) throw new NullPointerException("Found null in result"); 25 | return result; 26 | } 27 | }; 28 | public static final Read BOOLEAN = new AbstractUnaryRead(Boolean.class) { 29 | @Override 30 | public Boolean get(ResultSet rs, int ix) throws SQLException { 31 | boolean result = rs.getBoolean(ix); 32 | if (rs.wasNull()) return null; 33 | return result; 34 | } 35 | }; 36 | public static final Read PRIM_BYTE = new AbstractUnaryRead(byte.class) { 37 | @Override 38 | protected Byte get(ResultSet rs, int ix) throws SQLException { 39 | byte result = rs.getByte(ix); 40 | if (rs.wasNull()) throw new NullPointerException("Found null in result"); 41 | return result; 42 | } 43 | }; 44 | public static final Read BYTE = new AbstractUnaryRead(Byte.class) { 45 | @Override 46 | public Byte get(ResultSet rs, int ix) throws SQLException { 47 | byte result = rs.getByte(ix); 48 | if (rs.wasNull()) return null; 49 | return result; 50 | } 51 | }; 52 | public static final Read PRIM_CHAR = new AbstractUnaryRead(char.class) { 53 | @Override 54 | protected Character get(ResultSet rs, int ix) throws SQLException { 55 | String result = rs.getString(ix); 56 | if (rs.wasNull()) throw new NullPointerException("Found null in result"); 57 | if (result.length() != 1) throw new IllegalArgumentException("Found string " + result + " but was expecting single char"); 58 | return result.charAt(0); 59 | } 60 | }; 61 | public static final Read CHARACTER = new AbstractUnaryRead(Character.class) { 62 | 
@Override 63 | public Character get(ResultSet rs, int ix) throws SQLException { 64 | String result = rs.getString(ix); 65 | if (rs.wasNull()) return null; 66 | if (result.length() != 1) throw new IllegalArgumentException("Found string " + result + " but was expecting single char"); 67 | return result.charAt(0); 68 | } 69 | }; 70 | public static final Read PRIM_SHORT = new AbstractUnaryRead(short.class) { 71 | @Override 72 | protected Short get(ResultSet rs, int ix) throws SQLException { 73 | short result = rs.getShort(ix); 74 | if (rs.wasNull()) throw new NullPointerException("Found null in result"); 75 | return result; 76 | } 77 | }; 78 | public static final Read SHORT = new AbstractUnaryRead(Short.class) { 79 | @Override 80 | public Short get(ResultSet rs, int ix) throws SQLException { 81 | short result = rs.getShort(ix); 82 | if (rs.wasNull()) return null; 83 | return result; 84 | } 85 | }; 86 | public static final Read PRIM_INT = new AbstractUnaryRead(int.class) { 87 | @Override 88 | public Integer get(ResultSet rs, int ix) throws SQLException { 89 | final int result = rs.getInt(ix); 90 | if (rs.wasNull()) throw new NullPointerException("Found null in result"); 91 | return result; 92 | } 93 | }; 94 | public static final Read INTEGER = new AbstractUnaryRead(Integer.class) { 95 | @Override 96 | public Integer get(ResultSet rs, int ix) throws SQLException { 97 | final int result = rs.getInt(ix); 98 | if (rs.wasNull()) return null; 99 | return result; 100 | } 101 | }; 102 | public static final Read PRIM_LONG = new AbstractUnaryRead(long.class) { 103 | @Override 104 | protected Long get(ResultSet rs, int ix) throws SQLException { 105 | long result = rs.getLong(ix); 106 | if (rs.wasNull()) throw new NullPointerException("Found null in result"); 107 | return result; 108 | } 109 | }; 110 | public static final Read LONG = new AbstractUnaryRead(Long.class) { 111 | @Override 112 | public Long get(ResultSet rs, int ix) throws SQLException { 113 | long result = 
rs.getLong(ix); 114 | if (rs.wasNull()) return null; 115 | return result; 116 | } 117 | }; 118 | public static final Read PRIM_FLOAT = new AbstractUnaryRead(float.class) { 119 | @Override 120 | public Float get(ResultSet rs, int ix) throws SQLException { 121 | final float result = rs.getFloat(ix); 122 | if (rs.wasNull()) return Float.NaN; 123 | return result; 124 | } 125 | }; 126 | public static final Read FLOAT = new AbstractUnaryRead(Float.class) { 127 | @Override 128 | public Float get(ResultSet rs, int ix) throws SQLException { 129 | final float result = rs.getFloat(ix); 130 | if (rs.wasNull()) return null; 131 | return result; 132 | } 133 | }; 134 | public static final Read PRIM_DOUBLE = new AbstractUnaryRead(double.class) { 135 | @Override 136 | public Double get(ResultSet rs, int ix) throws SQLException { 137 | final double result = rs.getDouble(ix); 138 | if (rs.wasNull()) return Double.NaN; 139 | return result; 140 | } 141 | }; 142 | public static final Read DOUBLE = new AbstractUnaryRead(Double.class) { 143 | @Override 144 | public Double get(ResultSet rs, int ix) throws SQLException { 145 | final double result = rs.getDouble(ix); 146 | if (rs.wasNull()) return null; 147 | return result; 148 | } 149 | }; 150 | public static final Read STRING = new AbstractUnaryRead(String.class) { 151 | @Override public String get(ResultSet rs, int ix) throws SQLException { return rs.getString(ix); } 152 | }; 153 | public static final Read LOCAL_DATE = new AbstractUnaryRead(LocalDate.class) { 154 | @Override 155 | public LocalDate get(ResultSet rs, int ix) throws SQLException { 156 | final Timestamp ts = rs.getTimestamp(ix, Time.UTC_CALENDAR.get()); 157 | return ts == null ? 
null : Instant.ofEpochMilli(ts.getTime()).atZone(Time.UTC_ZONE_ID).toLocalDate(); 158 | } 159 | }; 160 | public static final Read LOCAL_TIME = new AbstractUnaryRead(LocalTime.class) { 161 | @Override 162 | public LocalTime get(ResultSet rs, int ix) throws SQLException { 163 | final Timestamp ts = rs.getTimestamp(ix, Time.UTC_CALENDAR.get()); 164 | return ts == null ? null : Instant.ofEpochMilli(ts.getTime()).atZone(Time.UTC_ZONE_ID).toLocalTime(); 165 | } 166 | }; 167 | public static final Read LOCAL_DATE_TIME = new AbstractUnaryRead(LocalDateTime.class) { 168 | @Override 169 | public LocalDateTime get(ResultSet rs, int ix) throws SQLException { 170 | final Timestamp ts = rs.getTimestamp(ix, Time.UTC_CALENDAR.get()); 171 | return ts == null ? null : Instant.ofEpochMilli(ts.getTime()).atZone(Time.UTC_ZONE_ID).toLocalDateTime(); 172 | } 173 | }; 174 | public static final Read BYTE_ARRAY = new AbstractUnaryRead(byte[].class) { 175 | @Override protected byte[] get(ResultSet rs, int ix) throws SQLException { return rs.getBytes(ix); } 176 | }; 177 | public static final Read BIG_DECIMAL = new AbstractUnaryRead(BigDecimal.class) { 178 | @Override protected BigDecimal get(ResultSet rs, int ix) throws SQLException { return rs.getBigDecimal(ix); } 179 | }; 180 | 181 | /** A {@code Read} instance that simply defers to the {@link Context} to decide how to construct an instance of the given class. */ 182 | public static Read useContext(Class klass) { 183 | return new ContextRead<>(klass); 184 | } 185 | 186 | /** Constructs a type that has only one public constructor. Constructor arguments are recursively constructed using the {@link Context}. */ 187 | public static Read tuple(Class klass) { 188 | return new TupleRead(klass); 189 | } 190 | 191 | /** Constructs a type that has only one public constructor. Constructor arguments are constructed using the context-default {@code Read} instance for the supplied classes. 
*/ 192 | public static Read tupleWithFieldClasses(Class klass, Collection> klasses) { 193 | return tuple(klass, klasses.stream().map(argklass -> new ContextRead<>(argklass)).collect(Collectors.toList())); 194 | } 195 | 196 | /** Variadic version of {@link #tupleWithFieldClasses(Class, Collection)} */ 197 | public static Read tupleWithFieldClasses(Class klass, Class... klasses) { 198 | return tupleWithFieldClasses(klass, Arrays.asList(klasses)); 199 | } 200 | 201 | /** Constructs a type that has only one public constructor. Constructor arguments are constructed using the supplied {@code Read} instances. */ 202 | public static Read tuple(Class klass, Collection> reads) { 203 | return new TupleRead(klass, reads); 204 | } 205 | 206 | /** Variadic version of {@link #tuple(Class, Collection)} */ 207 | public static Read tuple(Class klass, Read... reads) { 208 | return tuple(klass, Arrays.asList(reads)); 209 | } 210 | 211 | /** 212 | * Generalized version of {@link #tuple(Class)} that lets you run an arbitrary function rather than specifically just a constructor 213 | *

214 | * The object you supply is intended to be an instance of a class with exactly one public instance method (probably an anonymous inner class). 215 | * A typical usecase is illustrated by the following test: 216 | *

217 | *

218 |      * m.execute(sql("insert into person (id, name) values (3, 'John')"));
219 |      *
220 |      * assertEquals("John has 3 bottles of beer", m.queryFirst(sql("select id, name from person"), Reads.ofFunction(new Object() {
221 |      *     public String f(int id, String name) { return name + " has " + id + " bottles of beer"; }
222 |      * })));
223 |      * 
224 | */ 225 | public static Read ofFunction(Object fun) { 226 | return new FunctionRead<>(Object.class, fun); 227 | } 228 | 229 | /** Version of {@link #ofFunction(Object)} that validates that the function returns a particular expected type */ 230 | public static Read ofFunction(Class klass, Object fun) { 231 | return new FunctionRead<>(klass, fun); 232 | } 233 | 234 | /** Mapping treating {@code Read} as a functor. */ 235 | public static Read map(Class klass, Class readKlass, Function f) { 236 | return map(klass, new ContextRead<>(readKlass), f); 237 | } 238 | 239 | /** Mapping treating {@code Read} as a functor. */ 240 | public static Read map(Class klass, Read read, Function f) { 241 | return new Read() { 242 | @Override 243 | public Class getElementClass() { 244 | return klass; 245 | } 246 | 247 | @Override 248 | public BoundRead bind(Context ctxt) { 249 | final BoundRead boundRead = read.bind(ctxt); 250 | return new BoundRead() { 251 | @Override 252 | public int arity() { 253 | return boundRead.arity(); 254 | } 255 | 256 | @Override 257 | public U get(@Nonnull ResultSet rs, @Nonnull IndexRef ix) throws SQLException { 258 | return f.apply(boundRead.get(rs, ix)); 259 | } 260 | }; 261 | } 262 | }; 263 | } 264 | 265 | private Reads() {} 266 | 267 | /** Constructs an enum by interpreting the value from the database as the name of a enum constant */ 268 | public static > Read enumAsString(Class klass) { 269 | return map(klass, Reads.STRING, x -> x == null ? null : Enum.valueOf(klass, x)); 270 | } 271 | 272 | /** Constructs an enum by interpreting the value from the database as the ordinal of a enum constant */ 273 | public static > Read enumAsOrdinal(Class klass) { 274 | final T[] constants = klass.getEnumConstants(); 275 | return map(klass, Reads.INTEGER, x -> x == null ? 
null : constants[x]); 276 | } 277 | 278 | static class Map implements Read.Context { 279 | private final HashMap, Read> map = new HashMap<>(); 280 | 281 | public Map() {} 282 | 283 | public Map(Map that) { 284 | map.putAll(that.map); 285 | } 286 | 287 | public void put(Class klass, Read write) { 288 | map.put(klass, write); 289 | } 290 | 291 | @SuppressWarnings("unchecked") 292 | public Read get(Class klass) { 293 | final Read result = (Read) map.get(klass); 294 | if (result == null) { 295 | throw new IllegalArgumentException("Don't know how to transfer " + klass + " objects from JDBC"); 296 | } else { 297 | return result; 298 | } 299 | } 300 | } 301 | 302 | private abstract static class AbstractUnaryRead implements Read { 303 | private final Class klass; 304 | 305 | public AbstractUnaryRead(Class klass) { 306 | this.klass = klass; 307 | } 308 | 309 | @Override 310 | public Class getElementClass() { 311 | return klass; 312 | } 313 | 314 | @Override 315 | public BoundRead bind(Context ctxt) { 316 | return new BoundRead() { 317 | @Override 318 | public int arity() { 319 | return 1; 320 | } 321 | 322 | @Override 323 | public T get(@Nonnull ResultSet rs, @Nonnull IndexRef ix) throws SQLException { 324 | return AbstractUnaryRead.this.get(rs, ix.take()); 325 | } 326 | }; 327 | } 328 | 329 | protected abstract T get(ResultSet rs, int ix) throws SQLException; 330 | } 331 | 332 | /** 333 | * Constructs a bean using reflection. 334 | *

335 | * The named bean properties are extracted from the SQL result in the order given. The value of the bean 336 | * property is constructed using the default {@code Read} associated for that type in the {@link Context}. 337 | */ 338 | public static Read bean(Class klass, String... fields) { 339 | return new BeanRead<>(klass, fields); 340 | } 341 | 342 | /** As {@link #bean(Class, String...)}, but allows you to explicitly specify the types of the fields. */ 343 | public static Read beanWithFieldClasses(Class klass, Collection fields, Collection> klasses) { 344 | return new BeanRead<>(klass, fields, klasses.stream().map(argklass -> new ContextRead<>(argklass)).collect(Collectors.toList())); 345 | } 346 | 347 | /** As {@link #bean(Class, String...)}, but allows you to customize how the property values are constructed. */ 348 | public static Read bean(Class klass, Collection fields, Collection> reads) { 349 | return new BeanRead<>(klass, fields, reads); 350 | } 351 | 352 | /** Reads the given classes one after another, aggregating the results from the row into a {@code List} */ 353 | public static Read> listWithClasses(Collection> klasses) { 354 | return list(klasses.stream().map(klass -> new ContextRead<>(klass)).collect(Collectors.toList())); 355 | } 356 | 357 | /** Variadic version of {@link #listWithClasses(Collection)} */ 358 | @SafeVarargs 359 | public static Read> listWithClasses(Class... 
klasses) { 360 | return listWithClasses(Arrays.asList(klasses)); 361 | } 362 | 363 | /** Reads the given elements one after another, aggregating the results from the row into a {@code List} */ 364 | public static Read> list(Collection> reads) { 365 | return new Read>() { 366 | @SuppressWarnings("unchecked") 367 | @Override 368 | public Class> getElementClass() { 369 | return (Class>)(Class)List.class; 370 | } 371 | 372 | @Override 373 | public BoundRead> bind(Context ctxt) { 374 | final List> bounds = reads.stream().map(read -> read.bind(ctxt)).collect(Collectors.toList()); 375 | 376 | return new BoundRead>() { 377 | @Override 378 | public int arity() { 379 | return bounds.stream().mapToInt(BoundRead::arity).sum(); 380 | } 381 | 382 | @Override 383 | public List get(@Nonnull ResultSet rs, @Nonnull IndexRef ix) throws SQLException { 384 | final List result = new ArrayList<>(); 385 | for (BoundRead bound : bounds) { 386 | result.add(bound.get(rs, ix)); 387 | } 388 | return result; 389 | } 390 | }; 391 | } 392 | }; 393 | } 394 | 395 | /** Variadic version of {@link #list(Collection)} */ 396 | @SafeVarargs 397 | public static Read> list(Read... reads) { 398 | return list(Arrays.asList(reads)); 399 | } 400 | 401 | /** As {@link #labelledMap(Collection)}, but using the {@code Read} instance associated with the class in the {@link Context}. */ 402 | public static Read> labelledMapWithClasses(Collection> klasses) { 403 | return labelledMap(klasses.stream().map(klass -> new ContextRead<>(klass)).collect(Collectors.toList())); 404 | } 405 | 406 | /** Variadic version of {@link #labelledMapWithClasses(Collection)} */ 407 | @SafeVarargs 408 | public static Read> labelledMapWithClasses(Class... klasses) { 409 | return labelledMapWithClasses(Arrays.asList(klasses)); 410 | } 411 | 412 | /** 413 | * Reads the given classes one after another, aggregating the results from the row into a {@code Map} keyed by the column name. 414 | *

415 | * If a {@code Read} instance spans more than one column, the name chosen will be that of the first column. 416 | *

417 | * If more than one column shares the same name, {@code IllegalArgumentException} will be thrown. 418 | */ 419 | public static Read> labelledMap(Collection> reads) { 420 | return new Read>() { 421 | @SuppressWarnings("unchecked") 422 | @Override 423 | public Class> getElementClass() { 424 | return (Class>)(Class)java.util.Map.class; 425 | } 426 | 427 | @Override 428 | public BoundRead> bind(Context ctxt) { 429 | final List> bounds = reads.stream().map(read -> read.bind(ctxt)).collect(Collectors.toList()); 430 | 431 | return new BoundRead>() { 432 | @Override 433 | public int arity() { 434 | return bounds.stream().mapToInt(BoundRead::arity).sum(); 435 | } 436 | 437 | @Override 438 | public java.util.Map get(@Nonnull ResultSet rs, @Nonnull IndexRef ix) throws SQLException { 439 | final java.util.Map result = new LinkedHashMap<>(); 440 | for (BoundRead bound : bounds) { 441 | // TODO: refactor so that we can do this lookup only once per query.. 442 | final String columnName = rs.getMetaData().getColumnName(ix.peek()); 443 | if (result.containsKey(columnName)) { 444 | throw new IllegalArgumentException("Column " + columnName + " occurs twice in the result"); 445 | } 446 | 447 | result.put(columnName, bound.get(rs, ix)); 448 | } 449 | return result; 450 | } 451 | }; 452 | } 453 | }; 454 | } 455 | 456 | /** Variadic version of {@link #labelledMap(Collection)} */ 457 | @SafeVarargs 458 | public static Read> labelledMap(Read... reads) { 459 | return labelledMap(Arrays.asList(reads)); 460 | } 461 | } 462 | --------------------------------------------------------------------------------