├── answers.txt ├── build.xml ├── imdb_data ├── 0.001 │ ├── Actor.dat │ ├── Casts.dat │ ├── Director.dat │ ├── Genre.dat │ ├── Movie.dat │ ├── Movie_Director.dat │ └── imdb.schema ├── 0.01 │ ├── Actor.dat │ ├── Casts.dat │ ├── Director.dat │ ├── Genre.dat │ ├── Movie.dat │ ├── Movie_Director.dat │ └── imdb.schema └── sample-0.001.tar.bz2 ├── lab2writeup.txt ├── lab3writeup.txt ├── lib ├── README ├── ant-contrib-1.0b3.jar ├── jline-0.9.94.jar ├── junit-4.5.jar └── zql.jar ├── src └── java │ └── simpledb │ ├── AbstractDbFileIterator.java │ ├── Aggregate.java │ ├── Aggregator.java │ ├── BufferPool.java │ ├── Catalog.java │ ├── CostCard.java │ ├── Database.java │ ├── DbException.java │ ├── DbFile.java │ ├── DbFileIterator.java │ ├── DbIterator.java │ ├── Debug.java │ ├── Delete.java │ ├── Field.java │ ├── Filter.java │ ├── HashEquiJoin.java │ ├── HeapFile.java │ ├── HeapFileEncoder.java │ ├── HeapFileIterator.java │ ├── HeapPage.java │ ├── HeapPageId.java │ ├── HeapPageIterator.java │ ├── Insert.java │ ├── IntField.java │ ├── IntHistogram.java │ ├── IntegerAggregator.java │ ├── Join.java │ ├── JoinOptimizer.java │ ├── JoinPredicate.java │ ├── LogFile.java │ ├── LogicalFilterNode.java │ ├── LogicalJoinNode.java │ ├── LogicalPlan.java │ ├── LogicalScanNode.java │ ├── LogicalSelectListNode.java │ ├── LogicalSubplanJoinNode.java │ ├── Operator.java │ ├── OperatorCardinality.java │ ├── OrderBy.java │ ├── Page.java │ ├── PageId.java │ ├── Parser.java │ ├── ParsingException.java │ ├── Permissions.java │ ├── PlanCache.java │ ├── Predicate.java │ ├── Project.java │ ├── Query.java │ ├── QueryPlanVisualizer.java │ ├── RecordId.java │ ├── SeqScan.java │ ├── SimpleDb.java │ ├── StringAggregator.java │ ├── StringField.java │ ├── StringHistogram.java │ ├── TableStats.java │ ├── Transaction.java │ ├── TransactionAbortedException.java │ ├── TransactionId.java │ ├── Tuple.java │ ├── TupleDesc.java │ ├── TupleIterator.java │ ├── Type.java │ └── Utility.java └── test └── simpledb ├── AggregateTest.java ├── CatalogTest.java ├── FilterTest.java ├── HeapFileReadTest.java ├── HeapFileWriteTest.java ├── HeapPageIdTest.java ├── HeapPageReadTest.java ├── HeapPageWriteTest.java ├── InsertTest.java ├── IntHistogramTest.java ├── IntegerAggregatorTest.java ├── JoinOptimizerTest.java ├── JoinPredicateTest.java ├── JoinTest.java ├── PredicateTest.java ├── RecordIdTest.java ├── StringAggregatorTest.java ├── TableStatsTest.java ├── TestUtil.java ├── TupleDescTest.java ├── TupleTest.java └── systemtest ├── AggregateTest.java ├── DeleteTest.java ├── EvictionTest.java ├── FilterBase.java ├── FilterTest.java ├── InsertTest.java ├── JoinTest.java ├── QueryTest.java ├── ScanTest.java ├── SimpleDbTestBase.java └── SystemTestUtil.java /answers.txt: -------------------------------------------------------------------------------- 1 | Collaborators: 2 | 3 | Calvin Chan: 304144970 4 | Simon Zou: 804347338 5 | 6 | TupleDesc: This was implemented using a vector of TDItems so as to have random access for the getFieldName and getFieldType functions. hashCode is implemented by hashing the toString. 7 | 8 | Tuple: This is implemented with an array of Fields so as to have random access for setting and getting fields. The iterator is implemented by wrapping the array in a Java ArrayList and returning the iterator (had to look this up). 9 | 10 | Catalog: A private Table class is written to store the name and file. Two HashMaps are used, one to map the table name to the table and the other to map the table id to the table. 
This allows for constant time access for the various getter methods (getTableId, getTupleDesc, etc.). 11 | 12 | BufferPool: The cache is implemented as a HashMap that maps PageIds to Pages. This allows the getPage function to check in constant time whether a Page is in the cache and to add it if necessary. 13 | 14 | HeapPageId: equals compares the two ids, and hashCode concatenates the two ids into a string and uses Java's built-in hashCode for strings. No complex data structures were necessary. 15 | 16 | RecordId: Similar to HeapPageId, equals compares the PageId and tuple number to determine equality, and hashCode concatenates the two ids into a string and uses Java's built-in hashCode for strings. No complex data structures were necessary. 17 | 18 | HeapPage: getNumTuples and getHeaderSize are implemented according to the spec. isSlotUsed finds the particular bit in a particular byte of the header and then checks if that bit is on. getNumEmptySlots then iterates through the header using isSlotUsed to get the count. A separate class was written for the HeapPage iterator, which calculates the number of tuples used and iterates through them. 19 | 20 | HeapFile: readPage uses Java's RandomAccessFile, calculates the offset based on the pid, and reads the page into main memory. A separate HeapFileIterator class was written for the HeapFile iterator. The iterator uses the HeapPage iterator; when it reaches the end of a page, it checks for more pages and, if they exist, starts using the next page's iterator. 21 | 22 | SeqScan: SeqScan calls the underlying DbFileIterator functions and otherwise has simple getter methods. 23 | 24 | Changes to API: No changes were made to existing public interfaces, although an additional public function, availableTuples(), was added to HeapPage so that the HeapPageIterator could access and use it. 25 | 26 | Difficulties: We spent about 19 hours on the project. We got the idea of implementing hash codes by hashing string representations from asking the professor in office hours. Understanding how to do the bit manipulation in HeapPage took some time. The HeapFile and HeapFileIterator were probably the most complex parts of the assignment, along with getting used to the Java syntax for file I/O, exceptions, and random access. It took us several hours to debug an error we were getting in systemtest/testsmall where we needed to cast getNumTuples() to a double before calling Math.ceil on it in getHeaderSize of HeapPage (sketched below).
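For reference, the header arithmetic described above looks roughly like the sketch below. This is an illustrative sketch rather than a verbatim copy of our HeapPage.java; it assumes a byte[] header field and the getNumTuples() method from the spec.

    // Number of header bytes: one bit per tuple slot, rounded up.
    // Casting to double before Math.ceil is the fix mentioned above;
    // plain integer division would silently drop the partial byte.
    private int getHeaderSize() {
        return (int) Math.ceil((double) getNumTuples() / 8);
    }

    // A slot is used if its bit is set in the header: slot i lives in
    // header byte i/8, at bit position i%8 (least-significant bit first).
    public boolean isSlotUsed(int i) {
        int headerByte = header[i / 8];
        int bitMask = 1 << (i % 8);
        return (headerByte & bitMask) != 0;
    }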
27 | 28 | -------------------------------------------------------------------------------- /imdb_data/0.001/Actor.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simzou/cs143-simpledb/9f7adc38bd22cd0f67ffb788b912aab9d0c03e27/imdb_data/0.001/Actor.dat -------------------------------------------------------------------------------- /imdb_data/0.001/Casts.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simzou/cs143-simpledb/9f7adc38bd22cd0f67ffb788b912aab9d0c03e27/imdb_data/0.001/Casts.dat -------------------------------------------------------------------------------- /imdb_data/0.001/Director.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simzou/cs143-simpledb/9f7adc38bd22cd0f67ffb788b912aab9d0c03e27/imdb_data/0.001/Director.dat -------------------------------------------------------------------------------- /imdb_data/0.001/Genre.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simzou/cs143-simpledb/9f7adc38bd22cd0f67ffb788b912aab9d0c03e27/imdb_data/0.001/Genre.dat -------------------------------------------------------------------------------- /imdb_data/0.001/Movie.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simzou/cs143-simpledb/9f7adc38bd22cd0f67ffb788b912aab9d0c03e27/imdb_data/0.001/Movie.dat -------------------------------------------------------------------------------- /imdb_data/0.001/Movie_Director.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simzou/cs143-simpledb/9f7adc38bd22cd0f67ffb788b912aab9d0c03e27/imdb_data/0.001/Movie_Director.dat -------------------------------------------------------------------------------- /imdb_data/0.001/imdb.schema: -------------------------------------------------------------------------------- 1 | Actor(id int pk, fname string, lname string, gender string) 2 | Movie(id int pk, name string, year int) 3 | Director(id int pk, fname string, lname string) 4 | Casts(pid int, mid int, role string) 5 | Movie_Director(did int, mid int) 6 | Genre(mid int, genre string) 7 | -------------------------------------------------------------------------------- /imdb_data/0.01/Actor.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simzou/cs143-simpledb/9f7adc38bd22cd0f67ffb788b912aab9d0c03e27/imdb_data/0.01/Actor.dat -------------------------------------------------------------------------------- /imdb_data/0.01/Casts.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simzou/cs143-simpledb/9f7adc38bd22cd0f67ffb788b912aab9d0c03e27/imdb_data/0.01/Casts.dat -------------------------------------------------------------------------------- /imdb_data/0.01/Director.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simzou/cs143-simpledb/9f7adc38bd22cd0f67ffb788b912aab9d0c03e27/imdb_data/0.01/Director.dat -------------------------------------------------------------------------------- /imdb_data/0.01/Genre.dat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/simzou/cs143-simpledb/9f7adc38bd22cd0f67ffb788b912aab9d0c03e27/imdb_data/0.01/Genre.dat -------------------------------------------------------------------------------- /imdb_data/0.01/Movie.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simzou/cs143-simpledb/9f7adc38bd22cd0f67ffb788b912aab9d0c03e27/imdb_data/0.01/Movie.dat -------------------------------------------------------------------------------- /imdb_data/0.01/Movie_Director.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simzou/cs143-simpledb/9f7adc38bd22cd0f67ffb788b912aab9d0c03e27/imdb_data/0.01/Movie_Director.dat -------------------------------------------------------------------------------- /imdb_data/0.01/imdb.schema: -------------------------------------------------------------------------------- 1 | Actor(id int pk, fname string, lname string, gender string) 2 | Movie(id int pk, name string, year int) 3 | Director(id int pk, fname string, lname string) 4 | Casts(pid int, mid int, role string) 5 | Movie_Director(did int, mid int) 6 | Genre(mid int, genre string) 7 | -------------------------------------------------------------------------------- /imdb_data/sample-0.001.tar.bz2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simzou/cs143-simpledb/9f7adc38bd22cd0f67ffb788b912aab9d0c03e27/imdb_data/sample-0.001.tar.bz2 -------------------------------------------------------------------------------- /lab2writeup.txt: -------------------------------------------------------------------------------- 1 | Collaborators: 2 | 3 | Calvin Chan: 304144970 4 | Simon Zou: 804347338 5 | Ethan Schreiber: The iterator in HeapFile.java is the one from the lab1 solution by Ethan/the TA. 6 | 7 | Predicate.java: This file only had some getter/setter methods and made use of Field's compare method. 8 | 9 | JoinPredicate.java: This file was also very simple and made use of Field's compare function. 10 | 11 | Filter.java: The only nontrivial function here was fetchNext, which iterated through the child iterator, applying the predicate's filter function to perform the selection. 12 | 13 | Join.java: The join is implemented as a simple nested loops join, which is slow but was the easiest for me to implement and understand. The join is done by iterating through the values of the first join field's column and using the predicate to determine if each should join based on the join field on the right. If so, a new TupleDesc is made and the two corresponding tuples are merged. 14 | 15 | IntegerAggregator.java, StringAggregator.java: The IntegerAggregator was implemented using a HashMap, mapping each group to its aggregate value. The mergeTupleIntoGroup function adds the new tuple's value into the map in constant time, which easily keeps track of aggregates like Sum and Count. For Min and Max, the values currently in the map are compared against the incoming value to see if they should be replaced. The slightly trickier one to deal with was Average, since you can't compute it until you know the total number of elements in the group. For that, a sum and a count were tabulated using two hash maps and then the average was calculated in the iterator. The iterator is then implemented by iterating through the HashMap, creating the appropriate tuples, adding them to an ArrayList, and returning the list's iterator.
The StringAggregator was implemented in the same way, but only supports count. 16 | 17 | Aggregate.java: Aggregate uses the Aggregators written above, grouping and doing the aggregate calculations for all the tuples in the child iterator. Its iterator then uses the Aggregator's iterator to get the results of a Group By query. 18 | 19 | HeapPage.java: insertTuple does a linear scan of the page's header using isSlotUsed to find a free slot and then inserts the tuple into that slot, updating the header accordingly. deleteTuple gets the recordId from the tuple to be deleted, removes it from the tuples array, and updates the header as well. 20 | 21 | HeapFile.java: insertTuple looks linearly through the file for a page with a free slot and, if it does not find one, creates a new page, inserts the tuple into that page (with HeapPage's insertTuple), and appends it to the file. deleteTuple determines the pageId from the tuple to be deleted and uses HeapPage's deleteTuple function to delete the tuple. 22 | 23 | Insert.java, Delete.java: fetchNext is implemented by iterating through the child iterator and using the BufferPool insertTuple/deleteTuple functions (described below). A counter keeps track of how many tuples were inserted or deleted, and that count is returned as a one-element tuple. 24 | 25 | BufferPool.java: insertTuple and deleteTuple make use of HeapFile.java's insertTuple and deleteTuple functions. They get a list of modified pages from those functions, mark them as dirty, and update them in the cache. 26 | 27 | Page Eviction Policy: We iterate through the cache and evict the first page we are able to flush to disk (a rough sketch appears at the end of this write-up). Since the spec did not require any special algorithm, this was the simplest thing to do; it will just evict (probably) one of the first pages returned by the cache's iterator, the cache being a HashMap. Though simple to implement and easy to understand, it is not optimized for performance at all and relies on whatever order Java's HashMap iterator traverses the BufferPool in. 28 | 29 | Changes to API: None were made. 30 | 31 | Missing elements: All the unit/system tests passed. 32 | 33 | Difficulties/Time: The assignment took about 16 hours to do. For the classes that inherited from Operator, I didn't immediately figure out what the get and set children functions were supposed to do until I read the javadocs. I'm still not clear on why it's called "child". The aggregators made me think quite a bit about how to deal with all the different possible aggregate functions in a nice way. I'm still not very familiar with bit manipulation, so updating the headers in HeapPage was challenging as well. The systemtest for Delete.java would not work (we kept getting NullPointerExceptions) until I replaced our HeapFileIterator with the TA's solution. I'm still not sure why the original didn't work, but it took some time to figure out the problem.
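To make the eviction policy above concrete, here is a rough sketch of what such an evictPage might look like. It is illustrative only, not the exact code in our BufferPool.java; the names m_cache (the HashMap from PageId to Page) and flushPage are assumptions made for the example.

    // Evict the first page in the cache that can successfully be flushed to disk.
    private void evictPage() throws DbException {
        for (PageId pid : m_cache.keySet()) {
            try {
                flushPage(pid);        // write the page back to disk
                m_cache.remove(pid);   // then drop it from the cache
                return;                // evict a single page and stop
            } catch (IOException e) {
                // this page could not be flushed; try the next one the iterator yields
            }
        }
        throw new DbException("could not evict any page");
    }

Which page actually gets evicted therefore depends entirely on the HashMap's iteration order, as noted above; no recency or frequency information is used.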
-------------------------------------------------------------------------------- /lib/README: -------------------------------------------------------------------------------- 1 | junit-4.5.jar 2 | * http://junit.sourceforge.net/ 3 | * CPL (free for all use) 4 | 5 | zql.jar 6 | * http://www.gibello.com/code/zql/ 7 | * Free for non-commercial use 8 | 9 | JLine 10 | * http://jline.sourceforge.net/ 11 | * BSD (free for all use) 12 | 13 | mina-core-2.0.4.jar 14 | mina-filter-compression-2.0.4.jar 15 | * http://mina.apache.org/ 16 | * Apache License v2.0 (free for all use) 17 | 18 | slf4j-api-1.6.1.jar 19 | slf4j-log4j12-1.6.1.jar 20 | * http://www.slf4j.org/license.html 21 | * MIT license (free for all use) 22 | 23 | jzlib-1.0.7.jar 24 | * http://www.jcraft.com/jzlib/ 25 | * BSD (free for all use) 26 | 27 | javassist-3.16.1-GA.jar 28 | * http://www.javassist.org/ 29 | * MPL v1.1, LGPL and Apache License 30 | 31 | ant-contrib-1.0b3.jar 32 | * http://ant-contrib.sourceforge.net/ 33 | * Apache Software License 34 | 35 | log4j-1.2.17.jar 36 | * logging.apache.org/log4j/1.2/ 37 | * Apache Software license 2.0 38 | 39 | -------------------------------------------------------------------------------- /lib/ant-contrib-1.0b3.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simzou/cs143-simpledb/9f7adc38bd22cd0f67ffb788b912aab9d0c03e27/lib/ant-contrib-1.0b3.jar -------------------------------------------------------------------------------- /lib/jline-0.9.94.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simzou/cs143-simpledb/9f7adc38bd22cd0f67ffb788b912aab9d0c03e27/lib/jline-0.9.94.jar -------------------------------------------------------------------------------- /lib/junit-4.5.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simzou/cs143-simpledb/9f7adc38bd22cd0f67ffb788b912aab9d0c03e27/lib/junit-4.5.jar -------------------------------------------------------------------------------- /lib/zql.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simzou/cs143-simpledb/9f7adc38bd22cd0f67ffb788b912aab9d0c03e27/lib/zql.jar -------------------------------------------------------------------------------- /src/java/simpledb/AbstractDbFileIterator.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.util.NoSuchElementException; 4 | 5 | /** Helper for implementing DbFileIterators. Handles hasNext()/next() logic. */ 6 | public abstract class AbstractDbFileIterator implements DbFileIterator { 7 | 8 | public boolean hasNext() throws DbException, TransactionAbortedException { 9 | if (next == null) next = readNext(); 10 | return next != null; 11 | } 12 | 13 | public Tuple next() throws DbException, TransactionAbortedException, 14 | NoSuchElementException { 15 | if (next == null) { 16 | next = readNext(); 17 | if (next == null) throw new NoSuchElementException(); 18 | } 19 | 20 | Tuple result = next; 21 | next = null; 22 | return result; 23 | } 24 | 25 | /** If subclasses override this, they should call super.close(). */ 26 | public void close() { 27 | // Ensures that a future call to next() will fail 28 | next = null; 29 | } 30 | 31 | /** Reads the next tuple from the underlying source. 
32 | @return the next Tuple in the iterator, null if the iteration is finished. */ 33 | protected abstract Tuple readNext() throws DbException, TransactionAbortedException; 34 | 35 | private Tuple next = null; 36 | } 37 | -------------------------------------------------------------------------------- /src/java/simpledb/Aggregator.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.io.Serializable; 4 | 5 | /** 6 | * The common interface for any class that can compute an aggregate over a 7 | * list of Tuples. 8 | */ 9 | public interface Aggregator extends Serializable { 10 | static final int NO_GROUPING = -1; 11 | 12 | /** 13 | * SUM_COUNT and SC_AVG will 14 | * only be used in lab6, you are not required 15 | * to implement them until then. 16 | * */ 17 | public enum Op implements Serializable { 18 | MIN, MAX, SUM, AVG, COUNT, 19 | /** 20 | * SUM_COUNT: compute sum and count simultaneously, will be 21 | * needed to compute distributed avg in lab6. 22 | * */ 23 | SUM_COUNT, 24 | /** 25 | * SC_AVG: compute the avg of a set of SUM_COUNT tuples, 26 | * will be used to compute distributed avg in lab6. 27 | * */ 28 | SC_AVG; 29 | 30 | /** 31 | * Interface to access operations by a string containing an integer 32 | * index for command-line convenience. 33 | * 34 | * @param s a string containing a valid integer Op index 35 | */ 36 | public static Op getOp(String s) { 37 | return getOp(Integer.parseInt(s)); 38 | } 39 | 40 | /** 41 | * Interface to access operations by integer value for command-line 42 | * convenience. 43 | * 44 | * @param i a valid integer Op index 45 | */ 46 | public static Op getOp(int i) { 47 | return values()[i]; 48 | } 49 | 50 | public String toString() 51 | { 52 | if (this==MIN) 53 | return "min"; 54 | if (this==MAX) 55 | return "max"; 56 | if (this==SUM) 57 | return "sum"; 58 | if (this==SUM_COUNT) 59 | return "sum_count"; 60 | if (this==AVG) 61 | return "avg"; 62 | if (this==COUNT) 63 | return "count"; 64 | if (this==SC_AVG) 65 | return "sc_avg"; 66 | throw new IllegalStateException("impossible to reach here"); 67 | } 68 | } 69 | 70 | /** 71 | * Merge a new tuple into the aggregate for a distinct group value; 72 | * creates a new group aggregate result if the group value has not yet 73 | * been encountered. 74 | * 75 | * @param tup the Tuple containing an aggregate field and a group-by field 76 | */ 77 | public void mergeTupleIntoGroup(Tuple tup); 78 | 79 | /** 80 | * Create a DbIterator over group aggregate results. 81 | * @see simpledb.TupleIterator for a possible helper 82 | */ 83 | public DbIterator iterator(); 84 | 85 | } 86 | -------------------------------------------------------------------------------- /src/java/simpledb/CostCard.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | import java.util.Vector; 3 | 4 | /** Class returned by {@link JoinOptimizer#computeCostAndCardOfSubplan} specifying the 5 | cost and cardinality of the optimal plan represented by plan. 
6 | */ 7 | public class CostCard { 8 | /** The cost of the optimal subplan */ 9 | public double cost; 10 | /** The cardinality of the optimal subplan */ 11 | public int card; 12 | /** The optimal subplan */ 13 | public Vector plan; 14 | } 15 | -------------------------------------------------------------------------------- /src/java/simpledb/Database.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.io.*; 4 | import java.util.concurrent.atomic.AtomicReference; 5 | 6 | /** 7 | * Database is a class that initializes several static variables used by the 8 | * database system (the catalog, the buffer pool, and the log files, in 9 | * particular.) 10 | *
11 | * Provides a set of methods that can be used to access these variables from 12 | * anywhere. 13 | * 14 | * @Threadsafe 15 | */ 16 | public class Database { 17 | private static AtomicReference _instance = new AtomicReference(new Database()); 18 | private final Catalog _catalog; 19 | private final BufferPool _bufferpool; 20 | 21 | private final static String LOGFILENAME = "log"; 22 | private final LogFile _logfile; 23 | 24 | private Database() { 25 | _catalog = new Catalog(); 26 | _bufferpool = new BufferPool(BufferPool.DEFAULT_PAGES); 27 | LogFile tmp = null; 28 | try { 29 | tmp = new LogFile(new File(LOGFILENAME)); 30 | } catch (IOException e) { 31 | e.printStackTrace(); 32 | System.exit(1); 33 | } 34 | _logfile = tmp; 35 | // startControllerThread(); 36 | } 37 | 38 | /** Return the log file of the static Database instance */ 39 | public static LogFile getLogFile() { 40 | return _instance.get()._logfile; 41 | } 42 | 43 | /** Return the buffer pool of the static Database instance */ 44 | public static BufferPool getBufferPool() { 45 | return _instance.get()._bufferpool; 46 | } 47 | 48 | /** Return the catalog of the static Database instance */ 49 | public static Catalog getCatalog() { 50 | return _instance.get()._catalog; 51 | } 52 | 53 | /** 54 | * Method used for testing -- create a new instance of the buffer pool and 55 | * return it 56 | */ 57 | public static BufferPool resetBufferPool(int pages) { 58 | java.lang.reflect.Field bufferPoolF=null; 59 | try { 60 | bufferPoolF = Database.class.getDeclaredField("_bufferpool"); 61 | bufferPoolF.setAccessible(true); 62 | bufferPoolF.set(_instance.get(), new BufferPool(pages)); 63 | } catch (NoSuchFieldException e) { 64 | e.printStackTrace(); 65 | } catch (SecurityException e) { 66 | e.printStackTrace(); 67 | } catch (IllegalArgumentException e) { 68 | e.printStackTrace(); 69 | } catch (IllegalAccessException e) { 70 | e.printStackTrace(); 71 | } 72 | // _instance._bufferpool = new BufferPool(pages); 73 | return _instance.get()._bufferpool; 74 | } 75 | 76 | // reset the database, used for unit tests only. 77 | public static void reset() { 78 | _instance.set(new Database()); 79 | } 80 | 81 | } 82 | -------------------------------------------------------------------------------- /src/java/simpledb/DbException.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.lang.Exception; 4 | 5 | /** Generic database exception class */ 6 | public class DbException extends Exception { 7 | private static final long serialVersionUID = 1L; 8 | 9 | public DbException(String s) { 10 | super(s); 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /src/java/simpledb/DbFile.java: -------------------------------------------------------------------------------- 1 | 2 | package simpledb; 3 | 4 | import java.util.*; 5 | import java.io.*; 6 | 7 | /** 8 | * The interface for database files on disk. Each table is represented by a 9 | * single DbFile. DbFiles can fetch pages and iterate through tuples. Each 10 | * file has a unique id used to store metadata about the table in the Catalog. 11 | * DbFiles are generally accessed through the buffer pool, rather than directly 12 | * by operators. 13 | */ 14 | public interface DbFile { 15 | /** 16 | * Read the specified page from disk. 17 | * 18 | * @throws IllegalArgumentException if the page does not exist in this file. 
19 | */ 20 | public Page readPage(PageId id); 21 | 22 | /** 23 | * Push the specified page to disk. 24 | * 25 | * @param p The page to write. page.getId().pageno() specifies the offset into the file where the page should be written. 26 | * @throws IOException if the write fails 27 | * 28 | */ 29 | public void writePage(Page p) throws IOException; 30 | 31 | /** 32 | * Inserts the specified tuple to the file on behalf of transaction. 33 | * This method will acquire a lock on the affected pages of the file, and 34 | * may block until the lock can be acquired. 35 | * 36 | * @param tid The transaction performing the update 37 | * @param t The tuple to add. This tuple should be updated to reflect that 38 | * it is now stored in this file. 39 | * @return An ArrayList contain the pages that were modified 40 | * @throws DbException if the tuple cannot be added 41 | * @throws IOException if the needed file can't be read/written 42 | */ 43 | public ArrayList insertTuple(TransactionId tid, Tuple t) 44 | throws DbException, IOException, TransactionAbortedException; 45 | 46 | /** 47 | * Removes the specified tuple from the file on behalf of the specified 48 | * transaction. 49 | * This method will acquire a lock on the affected pages of the file, and 50 | * may block until the lock can be acquired. 51 | * 52 | * @param tid The transaction performing the update 53 | * @param t The tuple to delete. This tuple should be updated to reflect that 54 | * it is no longer stored on any page. 55 | * @return An ArrayList contain the pages that were modified 56 | * @throws DbException if the tuple cannot be deleted or is not a member 57 | * of the file 58 | */ 59 | public ArrayList deleteTuple(TransactionId tid, Tuple t) 60 | throws DbException, IOException, TransactionAbortedException; 61 | 62 | /** 63 | * Returns an iterator over all the tuples stored in this DbFile. The 64 | * iterator must use {@link BufferPool#getPage}, rather than 65 | * {@link #readPage} to iterate through the pages. 66 | * 67 | * @return an iterator over all the tuples stored in this DbFile. 68 | */ 69 | public DbFileIterator iterator(TransactionId tid); 70 | 71 | /** 72 | * Returns a unique ID used to identify this DbFile in the Catalog. This id 73 | * can be used to look up the table via {@link Catalog#getDatabaseFile} and 74 | * {@link Catalog#getTupleDesc}. 75 | *
76 | * Implementation note: you will need to generate this tableid somewhere, 77 | * ensure that each HeapFile has a "unique id," and that you always 78 | * return the same value for a particular HeapFile. A simple implementation 79 | * is to use the hash code of the absolute path of the file underlying 80 | * the HeapFile, i.e. f.getAbsoluteFile().hashCode(). 81 | * 82 | * @return an ID uniquely identifying this HeapFile. 83 | */ 84 | public int getId(); 85 | 86 | /** 87 | * Returns the TupleDesc of the table stored in this DbFile. 88 | * @return TupleDesc of this DbFile. 89 | */ 90 | public TupleDesc getTupleDesc(); 91 | } 92 | -------------------------------------------------------------------------------- /src/java/simpledb/DbFileIterator.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | import java.util.*; 3 | 4 | /** 5 | * DbFileIterator is the iterator interface that all SimpleDB Dbfile should 6 | * implement. 7 | */ 8 | public interface DbFileIterator{ 9 | /** 10 | * Opens the iterator 11 | * @throws DbException when there are problems opening/accessing the database. 12 | */ 13 | public void open() 14 | throws DbException, TransactionAbortedException; 15 | 16 | /** @return true if there are more tuples available. */ 17 | public boolean hasNext() 18 | throws DbException, TransactionAbortedException; 19 | 20 | /** 21 | * Gets the next tuple from the operator (typically implementing by reading 22 | * from a child operator or an access method). 23 | * 24 | * @return The next tuple in the iterator. 25 | * @throws NoSuchElementException if there are no more tuples 26 | */ 27 | public Tuple next() 28 | throws DbException, TransactionAbortedException, NoSuchElementException; 29 | 30 | /** 31 | * Resets the iterator to the start. 32 | * @throws DbException When rewind is unsupported. 33 | */ 34 | public void rewind() throws DbException, TransactionAbortedException; 35 | 36 | /** 37 | * Closes the iterator. 38 | */ 39 | public void close(); 40 | } 41 | -------------------------------------------------------------------------------- /src/java/simpledb/DbIterator.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | import java.io.Serializable; 3 | import java.util.*; 4 | 5 | /** 6 | * DbIterator is the iterator interface that all SimpleDB operators should 7 | * implement. If the iterator is not open, none of the methods should work, 8 | * and should throw an IllegalStateException. In addition to any 9 | * resource allocation/deallocation, an open method should call any 10 | * child iterator open methods, and in a close method, an iterator 11 | * should call its children's close methods. 12 | */ 13 | public interface DbIterator extends Serializable{ 14 | /** 15 | * Opens the iterator. This must be called before any of the other methods. 16 | * @throws DbException when there are problems opening/accessing the database. 17 | */ 18 | public void open() 19 | throws DbException, TransactionAbortedException; 20 | 21 | /** Returns true if the iterator has more tuples. 22 | * @return true f the iterator has more tuples. 23 | * @throws IllegalStateException If the iterator has not been opened 24 | */ 25 | public boolean hasNext() throws DbException, TransactionAbortedException; 26 | 27 | /** 28 | * Returns the next tuple from the operator (typically implementing by reading 29 | * from a child operator or an access method). 30 | * 31 | * @return the next tuple in the iteration. 
32 | * @throws NoSuchElementException if there are no more tuples. 33 | * @throws IllegalStateException If the iterator has not been opened 34 | */ 35 | public Tuple next() throws DbException, TransactionAbortedException, NoSuchElementException; 36 | 37 | /** 38 | * Resets the iterator to the start. 39 | * @throws DbException when rewind is unsupported. 40 | * @throws IllegalStateException If the iterator has not been opened 41 | */ 42 | public void rewind() throws DbException, TransactionAbortedException; 43 | 44 | /** 45 | * Returns the TupleDesc associated with this DbIterator. 46 | * @return the TupleDesc associated with this DbIterator. 47 | */ 48 | public TupleDesc getTupleDesc(); 49 | 50 | /** 51 | * Closes the iterator. When the iterator is closed, calling next(), 52 | * hasNext(), or rewind() should fail by throwing IllegalStateException. 53 | */ 54 | public void close(); 55 | 56 | } 57 | -------------------------------------------------------------------------------- /src/java/simpledb/Debug.java: -------------------------------------------------------------------------------- 1 | 2 | package simpledb; 3 | 4 | /** 5 | * Debug is a utility class that wraps println statements and allows 6 | * more or less command line output to be turned on. 7 | *
8 | * Change the value of the DEBUG_LEVEL constant using a system property: 9 | * simpledb.Debug. For example, on the command line, use -Dsimpledb.Debug=x, 10 | * or simply -Dsimpledb.Debug to enable it at level 0. 11 | * The log(level, message, ...) method will print to standard output if the 12 | * level number is less than or equal to the currently set DEBUG_LEVEL. 13 | */ 14 | 15 | public class Debug { 16 | private static final int DEBUG_LEVEL; 17 | static { 18 | String debug = System.getProperty("simpledb.Debug"); 19 | if (debug == null) { 20 | // No system property = disabled 21 | DEBUG_LEVEL = -1; 22 | } else if (debug == "") { 23 | // Empty property = level 0 24 | DEBUG_LEVEL = 0; 25 | } else { 26 | DEBUG_LEVEL = Integer.parseInt(debug); 27 | } 28 | } 29 | 30 | private static final int DEFAULT_LEVEL = 0; 31 | 32 | /** Log message if the log level >= level. Uses printf. */ 33 | public static void log(int level, String message, Object... args) { 34 | if (isEnabled(level)) { 35 | System.out.printf(message, args); 36 | System.out.println(); 37 | } 38 | } 39 | 40 | /** @return true if level is being logged. */ 41 | public static boolean isEnabled(int level) { 42 | return level <= DEBUG_LEVEL; 43 | } 44 | 45 | /** @return true if the default level is being logged. */ 46 | public static boolean isEnabled() { 47 | return isEnabled(DEFAULT_LEVEL); 48 | } 49 | 50 | /** Logs message at the default log level. */ 51 | public static void log(String message, Object... args) { 52 | log(DEFAULT_LEVEL, message, args); 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /src/java/simpledb/Delete.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.io.IOException; 4 | 5 | /** 6 | * The delete operator. Delete reads tuples from its child operator and removes 7 | * them from the table they belong to. 8 | */ 9 | public class Delete extends Operator { 10 | 11 | private static final long serialVersionUID = 1L; 12 | 13 | private TransactionId m_transactionId; 14 | private DbIterator m_it; 15 | private TupleDesc m_resultTupleDesc; 16 | private boolean m_deleted; 17 | 18 | 19 | /** 20 | * Constructor specifying the transaction that this delete belongs to as 21 | * well as the child to read from. 22 | * 23 | * @param t 24 | * The transaction this delete runs in 25 | * @param child 26 | * The child operator from which to read tuples for deletion 27 | */ 28 | public Delete(TransactionId t, DbIterator child) { 29 | // some code goes here 30 | m_transactionId = t; 31 | m_it = child; 32 | m_deleted = false; 33 | 34 | String[] names = new String[] {"Deleted"}; 35 | Type[] types = new Type[] {Type.INT_TYPE}; 36 | m_resultTupleDesc = new TupleDesc(types, names); 37 | 38 | } 39 | 40 | public TupleDesc getTupleDesc() { 41 | // some code goes here 42 | return m_resultTupleDesc; 43 | } 44 | 45 | public void open() throws DbException, TransactionAbortedException { 46 | // some code goes here 47 | super.open(); 48 | m_it.open(); 49 | m_deleted = false; 50 | } 51 | 52 | public void close() { 53 | // some code goes here 54 | super.close(); 55 | m_it.close(); 56 | } 57 | 58 | public void rewind() throws DbException, TransactionAbortedException { 59 | // some code goes here 60 | m_it.rewind(); 61 | } 62 | 63 | /** 64 | * Deletes tuples as they are read from the child operator. Deletes are 65 | * processed via the buffer pool (which can be accessed via the 66 | * Database.getBufferPool() method. 
67 | * 68 | * @return A 1-field tuple containing the number of deleted records. 69 | * @see Database#getBufferPool 70 | * @see BufferPool#deleteTuple 71 | */ 72 | protected Tuple fetchNext() throws TransactionAbortedException, DbException { 73 | // some code goes here 74 | if (m_deleted) return null; 75 | int deletedCount = 0; 76 | while (m_it.hasNext()) 77 | { 78 | Tuple tup = m_it.next(); 79 | try 80 | { 81 | Database.getBufferPool().deleteTuple(m_transactionId, tup); 82 | } 83 | catch (IOException e) 84 | { 85 | throw new DbException("IO Exception on tuple deletion"); 86 | } 87 | deletedCount++; 88 | } 89 | Tuple resultTuple = new Tuple(m_resultTupleDesc); 90 | resultTuple.setField(0, new IntField(deletedCount)); 91 | m_deleted = true; 92 | return resultTuple; 93 | } 94 | 95 | @Override 96 | public DbIterator[] getChildren() { 97 | // some code goes here 98 | return new DbIterator[] {m_it}; 99 | } 100 | 101 | @Override 102 | public void setChildren(DbIterator[] children) { 103 | // some code goes here 104 | m_it = children[0]; 105 | } 106 | 107 | } 108 | -------------------------------------------------------------------------------- /src/java/simpledb/Field.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.io.*; 4 | 5 | /** 6 | * Interface for values of fields in tuples in SimpleDB. 7 | */ 8 | public interface Field extends Serializable{ 9 | /** 10 | * Write the bytes representing this field to the specified 11 | * DataOutputStream. 12 | * @see DataOutputStream 13 | * @param dos The DataOutputStream to write to. 14 | */ 15 | void serialize(DataOutputStream dos) throws IOException; 16 | 17 | /** 18 | * Compare the value of this field object to the passed in value. 19 | * @param op The operator 20 | * @param value The value to compare this Field to 21 | * @return Whether or not the comparison yields true. 22 | */ 23 | public boolean compare(Predicate.Op op, Field value); 24 | 25 | /** 26 | * Returns the type of this field (see {@link Type#INT_TYPE} or {@link Type#STRING_TYPE} 27 | * @return type of this field 28 | */ 29 | public Type getType(); 30 | 31 | /** 32 | * Hash code. 33 | * Different Field objects representing the same value should probably 34 | * return the same hashCode. 35 | */ 36 | public int hashCode(); 37 | public boolean equals(Object field); 38 | 39 | public String toString(); 40 | } 41 | -------------------------------------------------------------------------------- /src/java/simpledb/Filter.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.util.*; 4 | 5 | /** 6 | * Filter is an operator that implements a relational select. 7 | */ 8 | public class Filter extends Operator { 9 | 10 | private static final long serialVersionUID = 1L; 11 | 12 | private Predicate m_pred; 13 | private DbIterator m_it; 14 | /** 15 | * Constructor accepts a predicate to apply and a child operator to read 16 | * tuples to filter from. 
17 | * 18 | * @param p 19 | * The predicate to filter tuples with 20 | * @param child 21 | * The child operator 22 | */ 23 | public Filter(Predicate p, DbIterator child) { 24 | // some code goes here 25 | m_pred = p; 26 | m_it = child; 27 | } 28 | 29 | public Predicate getPredicate() { 30 | // some code goes here 31 | return m_pred; 32 | } 33 | 34 | public TupleDesc getTupleDesc() { 35 | // some code goes here 36 | return m_it.getTupleDesc(); 37 | } 38 | 39 | public void open() throws DbException, NoSuchElementException, 40 | TransactionAbortedException { 41 | // some code goes here 42 | super.open(); 43 | m_it.open(); 44 | } 45 | 46 | public void close() { 47 | // some code goes here 48 | super.close(); 49 | m_it.close(); 50 | } 51 | 52 | public void rewind() throws DbException, TransactionAbortedException { 53 | // some code goes here 54 | m_it.rewind(); 55 | } 56 | 57 | /** 58 | * AbstractDbIterator.readNext implementation. Iterates over tuples from the 59 | * child operator, applying the predicate to them and returning those that 60 | * pass the predicate (i.e. for which the Predicate.filter() returns true.) 61 | * 62 | * @return The next tuple that passes the filter, or null if there are no 63 | * more tuples 64 | * @see Predicate#filter 65 | */ 66 | protected Tuple fetchNext() throws NoSuchElementException, 67 | TransactionAbortedException, DbException { 68 | // some code goes here 69 | while (m_it.hasNext()) 70 | { 71 | Tuple tup = m_it.next(); 72 | if (m_pred.filter(tup)) 73 | { 74 | return tup; 75 | } 76 | } 77 | return null; 78 | } 79 | 80 | @Override 81 | public DbIterator[] getChildren() { 82 | // some code goes here 83 | return new DbIterator[] {m_it}; 84 | } 85 | 86 | @Override 87 | public void setChildren(DbIterator[] children) { 88 | // some code goes here 89 | m_it = children[0]; 90 | } 91 | 92 | } 93 | -------------------------------------------------------------------------------- /src/java/simpledb/HashEquiJoin.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.util.*; 4 | 5 | /** 6 | * The Join operator implements the relational join operation. 7 | */ 8 | public class HashEquiJoin extends Operator { 9 | 10 | private static final long serialVersionUID = 1L; 11 | private JoinPredicate pred; 12 | private DbIterator child1, child2; 13 | private TupleDesc comboTD; 14 | transient private Tuple t1 = null; 15 | transient private Tuple t2 = null; 16 | 17 | /** 18 | * Constructor. 
Accepts to children to join and the predicate to join them 19 | * on 20 | * 21 | * @param p 22 | * The predicate to use to join the children 23 | * @param child1 24 | * Iterator for the left(outer) relation to join 25 | * @param child2 26 | * Iterator for the right(inner) relation to join 27 | */ 28 | public HashEquiJoin(JoinPredicate p, DbIterator child1, DbIterator child2) { 29 | this.pred = p; 30 | this.child1 = child1; 31 | this.child2 = child2; 32 | comboTD = TupleDesc.merge(child1.getTupleDesc(), child2.getTupleDesc()); 33 | } 34 | 35 | public JoinPredicate getJoinPredicate() { 36 | return pred; 37 | } 38 | 39 | public TupleDesc getTupleDesc() { 40 | return comboTD; 41 | } 42 | 43 | public String getJoinField1Name() 44 | { 45 | return this.child1.getTupleDesc().getFieldName(this.pred.getField1()); 46 | } 47 | 48 | public String getJoinField2Name() 49 | { 50 | return this.child2.getTupleDesc().getFieldName(this.pred.getField2()); 51 | } 52 | 53 | HashMap> map = new HashMap>(); 54 | public final static int MAP_SIZE = 20000; 55 | 56 | private boolean loadMap() throws DbException, TransactionAbortedException { 57 | int cnt = 0; 58 | map.clear(); 59 | while (child1.hasNext()) { 60 | t1 = child1.next(); 61 | ArrayList list = map.get(t1.getField(pred.getField1())); 62 | if (list == null) { 63 | list = new ArrayList(); 64 | map.put(t1.getField(pred.getField1()), list); 65 | } 66 | list.add(t1); 67 | if (cnt++ == MAP_SIZE) 68 | return true; 69 | } 70 | return cnt > 0; 71 | 72 | } 73 | 74 | 75 | public void open() throws DbException, NoSuchElementException, 76 | TransactionAbortedException { 77 | child1.open(); 78 | child2.open(); 79 | loadMap(); 80 | } 81 | 82 | public void close() { 83 | super.close(); 84 | child2.close(); 85 | child1.close(); 86 | this.t1=null; 87 | this.t2=null; 88 | this.listIt=null; 89 | this.map.clear(); 90 | } 91 | 92 | public void rewind() throws DbException, TransactionAbortedException { 93 | child1.rewind(); 94 | child2.rewind(); 95 | } 96 | 97 | transient Iterator listIt = null; 98 | 99 | /** 100 | * Returns the next tuple generated by the join, or null if there are no 101 | * more tuples. Logically, this is the next tuple in r1 cross r2 that 102 | * satisfies the join predicate. There are many possible implementations; 103 | * the simplest is a nested loops join. 104 | *
105 | * Note that the tuples returned from this particular implementation of Join 106 | * are simply the concatenation of joining tuples from the left and right 107 | * relation. Therefore, there will be two copies of the join attribute in 108 | * the results. (Removing such duplicate columns can be done with an 109 | * additional projection operator if needed.) 110 | *
111 | * For example, if one tuple is {1,2,3} and the other tuple is {1,5,6}, 112 | * joined on equality of the first column, then this returns {1,2,3,1,5,6}. 113 | * 114 | * @return The next matching tuple. 115 | * @see JoinPredicate#filter 116 | */ 117 | private Tuple processList() throws TransactionAbortedException, DbException { 118 | t1 = listIt.next(); 119 | 120 | int td1n = t1.getTupleDesc().numFields(); 121 | int td2n = t2.getTupleDesc().numFields(); 122 | 123 | // set fields in combined tuple 124 | Tuple t = new Tuple(comboTD); 125 | for (int i = 0; i < td1n; i++) 126 | t.setField(i, t1.getField(i)); 127 | for (int i = 0; i < td2n; i++) 128 | t.setField(td1n + i, t2.getField(i)); 129 | return t; 130 | 131 | } 132 | 133 | 134 | protected Tuple fetchNext() throws TransactionAbortedException, DbException { 135 | if (listIt != null && listIt.hasNext()) { 136 | return processList(); 137 | } 138 | 139 | // loop around child2 140 | while (child2.hasNext()) { 141 | t2 = child2.next(); 142 | 143 | // if match, create a combined tuple and fill it with the values 144 | // from both tuples 145 | ArrayList l = map.get(t2.getField(pred.getField2())); 146 | if (l == null) 147 | continue; 148 | listIt = l.iterator(); 149 | 150 | return processList(); 151 | 152 | } 153 | 154 | // child2 is done: advance child1 155 | child2.rewind(); 156 | if (loadMap()) { 157 | return fetchNext(); 158 | } 159 | 160 | return null; 161 | } 162 | 163 | @Override 164 | public DbIterator[] getChildren() { 165 | return new DbIterator[]{this.child1, this.child2}; 166 | } 167 | 168 | @Override 169 | public void setChildren(DbIterator[] children) { 170 | this.child1 = children[0]; 171 | this.child2 = children[1]; 172 | } 173 | 174 | 175 | } 176 | -------------------------------------------------------------------------------- /src/java/simpledb/HeapFileIterator.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.util.*; 4 | 5 | public class HeapFileIterator implements DbFileIterator { 6 | public HeapFileIterator(int HeapFileId, TransactionId tid, int numPages) { 7 | this.HeapFileId = HeapFileId; 8 | this.numPages = numPages; 9 | this.tid = tid; 10 | this.opened = false; 11 | } 12 | 13 | public void open() throws DbException, TransactionAbortedException { 14 | this.curPageNum = 0; 15 | this.curPageId = new HeapPageId(this.HeapFileId, this.curPageNum); 16 | this.curPage = (HeapPage) Database.getBufferPool().getPage(this.tid, this.curPageId, Permissions.READ_WRITE); 17 | this.curIterator = this.curPage.iterator(); 18 | this.opened = true; 19 | } 20 | 21 | public boolean hasNext() throws DbException, TransactionAbortedException { 22 | if (this.opened) { 23 | if (this.curPageNum < this.numPages-1 || this.curIterator.hasNext()) { 24 | return true; 25 | } else { 26 | return false; 27 | } 28 | } else { 29 | return false; 30 | } 31 | } 32 | 33 | public Tuple next() throws DbException, TransactionAbortedException, NoSuchElementException { 34 | 35 | if (this.opened) { 36 | if (this.hasNext()) { 37 | if (this.curIterator.hasNext()) { 38 | return this.curIterator.next(); 39 | } else { 40 | this.curPageNum++; 41 | this.curPageId = new HeapPageId(this.HeapFileId, this.curPageNum); 42 | this.curPage = (HeapPage) Database.getBufferPool().getPage(this.tid, this.curPageId, Permissions.READ_WRITE); 43 | this.curIterator = this.curPage.iterator(); 44 | return this.curIterator.next(); 45 | } 46 | } else { 47 | throw new NoSuchElementException(); 48 | } 49 | } else { 50 
| throw new NoSuchElementException(); 51 | } 52 | } 53 | 54 | public void rewind() throws DbException, TransactionAbortedException { 55 | if (this.opened) { 56 | this.open(); 57 | } else { 58 | throw new DbException(""); 59 | } 60 | } 61 | 62 | public void close() { 63 | this.opened = false; 64 | } 65 | 66 | private boolean opened; 67 | private int HeapFileId; 68 | private TransactionId tid; 69 | private int curPageNum; 70 | private PageId curPageId; 71 | private HeapPage curPage; 72 | private Iterator curIterator; 73 | private int numPages; 74 | } 75 | -------------------------------------------------------------------------------- /src/java/simpledb/HeapPageId.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | /** Unique identifier for HeapPage objects. */ 4 | public class HeapPageId implements PageId { 5 | 6 | private int m_tableId; 7 | private int m_pageNum; 8 | /** 9 | * Constructor. Create a page id structure for a specific page of a 10 | * specific table. 11 | * 12 | * @param tableId The table that is being referenced 13 | * @param pgNo The page number in that table. 14 | */ 15 | public HeapPageId(int tableId, int pgNo) { 16 | m_tableId = tableId; 17 | m_pageNum = pgNo; 18 | } 19 | 20 | /** @return the table associated with this PageId */ 21 | public int getTableId() { 22 | // some code goes here 23 | return m_tableId; 24 | } 25 | 26 | /** 27 | * @return the page number in the table getTableId() associated with 28 | * this PageId 29 | */ 30 | public int pageNumber() { 31 | // some code goes here 32 | return m_pageNum; 33 | } 34 | 35 | /** 36 | * @return a hash code for this page, represented by the concatenation of 37 | * the table number and the page number (needed if a PageId is used as a 38 | * key in a hash table in the BufferPool, for example.) 39 | * @see BufferPool 40 | */ 41 | public int hashCode() { 42 | // some code goes here 43 | String toHash = "" + m_tableId + m_pageNum; 44 | return toHash.hashCode(); 45 | } 46 | 47 | /** 48 | * Compares one PageId to another. 49 | * 50 | * @param o The object to compare against (must be a PageId) 51 | * @return true if the objects are equal (e.g., page numbers and table 52 | * ids are the same) 53 | */ 54 | public boolean equals(Object o) { 55 | if (o instanceof PageId){ 56 | PageId pid = (PageId) o; 57 | return (this.getTableId() == pid.getTableId() && this.pageNumber() == pid.pageNumber()); 58 | } 59 | else return false; 60 | } 61 | 62 | /** 63 | * Return a representation of this object as an array of 64 | * integers, for writing to disk. Size of returned array must contain 65 | * number of integers that corresponds to number of args to one of the 66 | * constructors. 
67 | */ 68 | public int[] serialize() { 69 | int data[] = new int[2]; 70 | 71 | data[0] = getTableId(); 72 | data[1] = pageNumber(); 73 | 74 | return data; 75 | } 76 | 77 | } 78 | -------------------------------------------------------------------------------- /src/java/simpledb/HeapPageIterator.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.util.*; 4 | 5 | public class HeapPageIterator implements Iterator { 6 | 7 | public HeapPageIterator(HeapPage heapPage) { 8 | this.heapPage = heapPage; 9 | this.curTuple = 0; 10 | this.numTuples = heapPage.availableTuples(); 11 | } 12 | 13 | public boolean hasNext() { 14 | if (this.curTuple < this.numTuples) { 15 | return true; 16 | } else { 17 | return false; 18 | } 19 | } 20 | 21 | public Tuple next() { 22 | return heapPage.tuples[this.curTuple++]; 23 | } 24 | 25 | public void remove() throws UnsupportedOperationException { 26 | throw new UnsupportedOperationException("Cannot remove"); 27 | } 28 | 29 | private HeapPage heapPage; 30 | private int numTuples; 31 | private int curTuple; 32 | } 33 | -------------------------------------------------------------------------------- /src/java/simpledb/Insert.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.io.IOException; 4 | 5 | /** 6 | * Inserts tuples read from the child operator into the tableid specified in the 7 | * constructor 8 | */ 9 | public class Insert extends Operator { 10 | 11 | private static final long serialVersionUID = 1L; 12 | 13 | private TransactionId m_transactionId; 14 | private DbIterator m_it; 15 | private int m_tableId; 16 | private boolean m_inserted; 17 | private TupleDesc m_resultTupleDesc; 18 | 19 | /** 20 | * Constructor. 21 | * 22 | * @param t 23 | * The transaction running the insert. 24 | * @param child 25 | * The child operator from which to read tuples to be inserted. 26 | * @param tableid 27 | * The table in which to insert tuples. 28 | * @throws DbException 29 | * if TupleDesc of child differs from table into which we are to 30 | * insert. 31 | */ 32 | public Insert(TransactionId t,DbIterator child, int tableid) 33 | throws DbException { 34 | // some code goes here 35 | m_transactionId = t; 36 | m_it = child; 37 | m_tableId = tableid; 38 | m_inserted = false; 39 | 40 | String[] names = new String[] {"Inserted"}; 41 | Type[] types = new Type[] {Type.INT_TYPE}; 42 | m_resultTupleDesc = new TupleDesc(types, names); 43 | 44 | } 45 | 46 | public TupleDesc getTupleDesc() { 47 | // some code goes here 48 | return m_resultTupleDesc; 49 | } 50 | 51 | public void open() throws DbException, TransactionAbortedException { 52 | // some code goes here 53 | super.open(); 54 | m_it.open(); 55 | m_inserted = false; 56 | } 57 | 58 | public void close() { 59 | // some code goes here 60 | super.close(); 61 | m_it.close(); 62 | } 63 | 64 | public void rewind() throws DbException, TransactionAbortedException { 65 | // some code goes here 66 | m_it.rewind(); 67 | } 68 | 69 | /** 70 | * Inserts tuples read from child into the tableid specified by the 71 | * constructor. It returns a one field tuple containing the number of 72 | * inserted records. Inserts should be passed through BufferPool. An 73 | * instances of BufferPool is available via Database.getBufferPool(). Note 74 | * that insert DOES NOT need check to see if a particular tuple is a 75 | * duplicate before inserting it. 
76 | * 77 | * @return A 1-field tuple containing the number of inserted records, or 78 | * null if called more than once. 79 | * @see Database#getBufferPool 80 | * @see BufferPool#insertTuple 81 | */ 82 | protected Tuple fetchNext() throws TransactionAbortedException, DbException { 83 | // some code goes here 84 | if (m_inserted) return null; 85 | int insertedCount = 0; 86 | while (m_it.hasNext()) 87 | { 88 | Tuple tup = m_it.next(); 89 | try 90 | { 91 | Database.getBufferPool().insertTuple(m_transactionId, m_tableId, tup); 92 | } 93 | catch (IOException e) 94 | { 95 | throw new DbException("IO Exception on tuple insertion"); 96 | } 97 | insertedCount++; 98 | } 99 | Tuple resultTuple = new Tuple(m_resultTupleDesc); 100 | resultTuple.setField(0, new IntField(insertedCount)); 101 | m_inserted = true; 102 | return resultTuple; 103 | } 104 | 105 | @Override 106 | public DbIterator[] getChildren() { 107 | // some code goes here 108 | return new DbIterator[] {m_it}; 109 | } 110 | 111 | @Override 112 | public void setChildren(DbIterator[] children) { 113 | m_it = children[0]; 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /src/java/simpledb/IntField.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.io.*; 4 | 5 | /** 6 | * Instance of Field that stores a single integer. 7 | */ 8 | public class IntField implements Field { 9 | 10 | private static final long serialVersionUID = 1L; 11 | 12 | private final int value; 13 | 14 | public int getValue() { 15 | return value; 16 | } 17 | 18 | /** 19 | * Constructor. 20 | * 21 | * @param i The value of this field. 22 | */ 23 | public IntField(int i) { 24 | value = i; 25 | } 26 | 27 | public String toString() { 28 | return Integer.toString(value); 29 | } 30 | 31 | public int hashCode() { 32 | return value; 33 | } 34 | 35 | public boolean equals(Object field) { 36 | return ((IntField) field).value == value; 37 | } 38 | 39 | public void serialize(DataOutputStream dos) throws IOException { 40 | dos.writeInt(value); 41 | } 42 | 43 | /** 44 | * Compare the specified field to the value of this Field. 45 | * Return semantics are as specified by Field.compare 46 | * 47 | * @throws IllegalCastException if val is not an IntField 48 | * @see Field#compare 49 | */ 50 | public boolean compare(Predicate.Op op, Field val) { 51 | 52 | IntField iVal = (IntField) val; 53 | 54 | switch (op) { 55 | case EQUALS: 56 | return value == iVal.value; 57 | case NOT_EQUALS: 58 | return value != iVal.value; 59 | 60 | case GREATER_THAN: 61 | return value > iVal.value; 62 | 63 | case GREATER_THAN_OR_EQ: 64 | return value >= iVal.value; 65 | 66 | case LESS_THAN: 67 | return value < iVal.value; 68 | 69 | case LESS_THAN_OR_EQ: 70 | return value <= iVal.value; 71 | 72 | case LIKE: 73 | return value == iVal.value; 74 | } 75 | 76 | return false; 77 | } 78 | 79 | /** 80 | * Return the Type of this field. 81 | * @return Type.INT_TYPE 82 | */ 83 | public Type getType() { 84 | return Type.INT_TYPE; 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /src/java/simpledb/IntegerAggregator.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.util.ArrayList; 4 | import java.util.HashMap; 5 | 6 | /** 7 | * Knows how to compute some aggregate over a set of IntFields. 
8 | */ 9 | public class IntegerAggregator implements Aggregator { 10 | 11 | private static final long serialVersionUID = 1L; 12 | 13 | private int m_groupByFieldIndex; 14 | private Type m_groupByFieldType; 15 | private int m_aggregateFieldIndex; 16 | private Op m_op; 17 | private HashMap m_aggregateData; 18 | private HashMap m_count; 19 | 20 | 21 | /** 22 | * Aggregate constructor 23 | * 24 | * @param gbfield 25 | * the 0-based index of the group-by field in the tuple, or 26 | * NO_GROUPING if there is no grouping 27 | * @param gbfieldtype 28 | * the type of the group by field (e.g., Type.INT_TYPE), or null 29 | * if there is no grouping 30 | * @param afield 31 | * the 0-based index of the aggregate field in the tuple 32 | * @param what 33 | * the aggregation operator 34 | */ 35 | 36 | public IntegerAggregator(int gbfield, Type gbfieldtype, int afield, Op what) { 37 | // some code goes here 38 | m_groupByFieldIndex = gbfield; 39 | m_groupByFieldType = gbfieldtype; 40 | m_aggregateFieldIndex = afield; 41 | m_op = what; 42 | m_aggregateData = new HashMap(); 43 | m_count = new HashMap(); 44 | } 45 | 46 | private int initialData() 47 | { 48 | switch(m_op) 49 | { 50 | case MIN: return Integer.MAX_VALUE; 51 | case MAX: return Integer.MIN_VALUE; 52 | case SUM: case COUNT: case AVG: return 0; 53 | default: return 0; // shouldn't reach here 54 | } 55 | } 56 | 57 | /** 58 | * Merge a new tuple into the aggregate, grouping as indicated in the 59 | * constructor 60 | * 61 | * @param tup 62 | * the Tuple containing an aggregate field and a group-by field 63 | */ 64 | public void mergeTupleIntoGroup(Tuple tup) { 65 | // some code goes here 66 | Field tupleGroupByField = (m_groupByFieldIndex == Aggregator.NO_GROUPING) ? null : tup.getField(m_groupByFieldIndex); 67 | 68 | if (!m_aggregateData.containsKey(tupleGroupByField)) 69 | { 70 | m_aggregateData.put(tupleGroupByField, initialData()); 71 | m_count.put(tupleGroupByField, 0); 72 | } 73 | 74 | int tupleValue = ((IntField) tup.getField(m_aggregateFieldIndex)).getValue(); 75 | int currentValue = m_aggregateData.get(tupleGroupByField); 76 | int currentCount = m_count.get(tupleGroupByField); 77 | int newValue = currentValue; 78 | switch(m_op) 79 | { 80 | case MIN: 81 | newValue = (tupleValue > currentValue) ? currentValue : tupleValue; 82 | break; 83 | case MAX: 84 | newValue = (tupleValue < currentValue) ? currentValue : tupleValue; 85 | break; 86 | case SUM: case AVG: 87 | // can't calculate average until all the tuples are in 88 | // In the mean time, keep track of sum and count and 89 | // calculate the averages in the iterator 90 | m_count.put(tupleGroupByField, currentCount+1); 91 | newValue = tupleValue + currentValue; 92 | break; 93 | case COUNT: 94 | newValue = currentValue + 1; 95 | break; 96 | default: 97 | break; 98 | } 99 | m_aggregateData.put(tupleGroupByField, newValue); 100 | } 101 | 102 | private TupleDesc createGroupByTupleDesc() 103 | { 104 | String[] names; 105 | Type[] types; 106 | if (m_groupByFieldIndex == Aggregator.NO_GROUPING) 107 | { 108 | names = new String[] {"aggregateValue"}; 109 | types = new Type[] {Type.INT_TYPE}; 110 | } 111 | else 112 | { 113 | names = new String[] {"groupValue", "aggregateValue"}; 114 | types = new Type[] {m_groupByFieldType, Type.INT_TYPE}; 115 | } 116 | return new TupleDesc(types, names); 117 | } 118 | 119 | /** 120 | * Create a DbIterator over group aggregate results. 
121 | * 122 | * @return a DbIterator whose tuples are the pair (groupVal, aggregateVal) 123 | * if using group, or a single (aggregateVal) if no grouping. The 124 | * aggregateVal is determined by the type of aggregate specified in 125 | * the constructor. 126 | */ 127 | public DbIterator iterator() { 128 | // some code goes here 129 | ArrayList tuples = new ArrayList(); 130 | TupleDesc tupledesc = createGroupByTupleDesc(); 131 | Tuple addMe; 132 | for (Field group : m_aggregateData.keySet()) 133 | { 134 | int aggregateVal; 135 | if (m_op == Op.AVG) 136 | { 137 | aggregateVal = m_aggregateData.get(group) / m_count.get(group); 138 | } 139 | else 140 | { 141 | aggregateVal = m_aggregateData.get(group); 142 | } 143 | addMe = new Tuple(tupledesc); 144 | if (m_groupByFieldIndex == Aggregator.NO_GROUPING){ 145 | addMe.setField(0, new IntField(aggregateVal)); 146 | } 147 | else { 148 | addMe.setField(0, group); 149 | addMe.setField(1, new IntField(aggregateVal)); 150 | } 151 | tuples.add(addMe); 152 | } 153 | return new TupleIterator(tupledesc, tuples); 154 | } 155 | 156 | } 157 | -------------------------------------------------------------------------------- /src/java/simpledb/Join.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.util.*; 4 | 5 | /** 6 | * The Join operator implements the relational join operation. 7 | */ 8 | public class Join extends Operator { 9 | 10 | private static final long serialVersionUID = 1L; 11 | private JoinPredicate m_joinpred; 12 | private DbIterator m_child1; 13 | private DbIterator m_child2; 14 | private Tuple m_currentLeftTuple; 15 | 16 | /** 17 | * Constructor. Accepts to children to join and the predicate to join them 18 | * on 19 | * 20 | * @param p 21 | * The predicate to use to join the children 22 | * @param child1 23 | * Iterator for the left(outer) relation to join 24 | * @param child2 25 | * Iterator for the right(inner) relation to join 26 | */ 27 | public Join(JoinPredicate p, DbIterator child1, DbIterator child2) { 28 | // some code goes here 29 | m_joinpred = p; 30 | m_child1 = child1; 31 | m_child2 = child2; 32 | m_currentLeftTuple = null; 33 | } 34 | 35 | public JoinPredicate getJoinPredicate() { 36 | // some code goes here 37 | return m_joinpred; 38 | } 39 | 40 | /** 41 | * @return 42 | * the field name of join field1. Should be quantified by 43 | * alias or table name. 44 | * */ 45 | public String getJoinField1Name() { 46 | // some code goes here 47 | return m_child1.getTupleDesc().getFieldName(m_joinpred.getField1()); 48 | } 49 | 50 | /** 51 | * @return 52 | * the field name of join field2. Should be quantified by 53 | * alias or table name. 54 | * */ 55 | public String getJoinField2Name() { 56 | // some code goes here 57 | return m_child2.getTupleDesc().getFieldName(m_joinpred.getField2()); 58 | } 59 | 60 | /** 61 | * @see simpledb.TupleDesc#merge(TupleDesc, TupleDesc) for possible 62 | * implementation logic. 
63 | */ 64 | public TupleDesc getTupleDesc() { 65 | // some code goes here 66 | return TupleDesc.merge(m_child1.getTupleDesc(), m_child2.getTupleDesc()); 67 | } 68 | 69 | public void open() throws DbException, NoSuchElementException, 70 | TransactionAbortedException { 71 | // some code goes here 72 | super.open(); 73 | m_child1.open(); 74 | m_child2.open(); 75 | } 76 | 77 | public void close() { 78 | // some code goes here 79 | super.close(); 80 | m_child1.close(); 81 | m_child2.close(); 82 | } 83 | 84 | public void rewind() throws DbException, TransactionAbortedException { 85 | // some code goes here 86 | m_child1.rewind(); 87 | m_child2.rewind(); 88 | m_currentLeftTuple = null; 89 | } 90 | 91 | /** 92 | * Returns the next tuple generated by the join, or null if there are no 93 | * more tuples. Logically, this is the next tuple in r1 cross1 2 r2 that 94 | * satisfies the join predicate. There are many possible implementations; 95 | * the simplest is a nested loops join. 96 | *
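 * In outline, the nested loops strategy used by fetchNext below is (pseudocode):
 *
 *   for each tuple l in child1:            // outer relation
 *       for each tuple r in child2:        // inner relation
 *           if predicate.filter(l, r):
 *               emit joinTuples(l, r)
 *       child2.rewind()                    // restart the inner scan for the next l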

97 | * Note that the tuples returned from this particular implementation of Join 98 | * are simply the concatenation of joining tuples from the left and right 99 | * relation. Therefore, if an equality predicate is used there will be two 100 | * copies of the join attribute in the results. (Removing such duplicate 101 | * columns can be done with an additional projection operator if needed.) 102 | *

103 | * For example, if one tuple is {1,2,3} and the other tuple is {1,5,6}, 104 | * joined on equality of the first column, then this returns {1,2,3,1,5,6}. 105 | * 106 | * @return The next matching tuple. 107 | * @see JoinPredicate#filter 108 | */ 109 | protected Tuple fetchNext() throws TransactionAbortedException, DbException { 110 | // some code goes here 111 | while ((m_currentLeftTuple != null) || m_child1.hasNext()) 112 | { 113 | Tuple leftTuple; 114 | if ((m_currentLeftTuple != null)) 115 | { 116 | leftTuple = m_currentLeftTuple; 117 | } 118 | else 119 | { 120 | m_currentLeftTuple = m_child1.next(); 121 | leftTuple = m_currentLeftTuple; 122 | } 123 | // System.out.print("left tuple: " + leftTuple.toString()); 124 | // System.out.print("current left tuple: " + m_currentLeftTuple.toString()); 125 | while (m_child2.hasNext()) 126 | { 127 | Tuple rightTuple = m_child2.next(); 128 | // System.out.print("right tuple: " + rightTuple.toString()); 129 | if (m_joinpred.filter(leftTuple, rightTuple)) 130 | { 131 | return joinTuples(leftTuple, rightTuple); 132 | } 133 | } 134 | m_currentLeftTuple = null; 135 | m_child2.rewind(); 136 | } 137 | return null; 138 | } 139 | 140 | private Tuple joinTuples(Tuple leftTuple, Tuple rightTuple) 141 | { 142 | int leftTupleSize = leftTuple.getTupleDesc().numFields(); 143 | int rightTupleSize = rightTuple.getTupleDesc().numFields(); 144 | Tuple mergedTuple = new Tuple(this.getTupleDesc()); 145 | for (int i = 0; i < leftTupleSize; i++) 146 | { 147 | mergedTuple.setField(i, leftTuple.getField(i)); 148 | } 149 | for (int i = 0; i < rightTupleSize; i++) 150 | { 151 | mergedTuple.setField(leftTupleSize + i, rightTuple.getField(i)); 152 | } 153 | // System.out.println("merged tuple: " + mergedTuple.toString()); 154 | return mergedTuple; 155 | } 156 | 157 | @Override 158 | public DbIterator[] getChildren() { 159 | // some code goes here 160 | return new DbIterator[] {m_child1, m_child2}; 161 | } 162 | 163 | @Override 164 | public void setChildren(DbIterator[] children) { 165 | // some code goes here 166 | m_child1 = children[0]; 167 | m_child2 = children[1]; 168 | } 169 | 170 | } 171 | -------------------------------------------------------------------------------- /src/java/simpledb/JoinPredicate.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.io.Serializable; 4 | 5 | /** 6 | * JoinPredicate compares fields of two tuples using a predicate. JoinPredicate 7 | * is most likely used by the Join operator. 8 | */ 9 | public class JoinPredicate implements Serializable { 10 | 11 | private static final long serialVersionUID = 1L; 12 | private int m_field1; 13 | private int m_field2; 14 | private Predicate.Op m_op; 15 | 16 | /** 17 | * Constructor -- create a new predicate over two fields of two tuples. 18 | * 19 | * @param field1 20 | * The field index into the first tuple in the predicate 21 | * @param field2 22 | * The field index into the second tuple in the predicate 23 | * @param op 24 | * The operation to apply (as defined in Predicate.Op); either 25 | * Predicate.Op.GREATER_THAN, Predicate.Op.LESS_THAN, 26 | * Predicate.Op.EQUAL, Predicate.Op.GREATER_THAN_OR_EQ, or 27 | * Predicate.Op.LESS_THAN_OR_EQ 28 | * @see Predicate 29 | */ 30 | public JoinPredicate(int field1, Predicate.Op op, int field2) { 31 | m_field1 = field1; 32 | m_field2 = field2; 33 | m_op = op; 34 | // some code goes here 35 | } 36 | 37 | /** 38 | * Apply the predicate to the two specified tuples. 
The comparison can be 39 | * made through Field's compare method. 40 | * 41 | * @return true if the tuples satisfy the predicate. 42 | */ 43 | public boolean filter(Tuple t1, Tuple t2) { 44 | // some code goes here 45 | return t1.getField(m_field1).compare(m_op, t2.getField(m_field2)); 46 | } 47 | 48 | public int getField1() 49 | { 50 | // some code goes here 51 | return m_field1; 52 | } 53 | 54 | public int getField2() 55 | { 56 | // some code goes here 57 | return m_field2; 58 | } 59 | 60 | public Predicate.Op getOperator() 61 | { 62 | // some code goes here 63 | return m_op; 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/java/simpledb/LogicalFilterNode.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | /** A LogicalFilterNode represents the parameters of a filter in the WHERE clause of a query. 4 |

5 | Filter is of the form t.f p c 6 |

7 | Where t is a table, f is a field in t, p is a predicate, and c is a constant 8 | */ 9 | public class LogicalFilterNode { 10 | /** The alias of a table (or the name if no alias) over which the filter ranges */ 11 | public String tableAlias; 12 | 13 | /** The predicate in the filter */ 14 | public Predicate.Op p; 15 | 16 | /* The constant on the right side of the filter */ 17 | public String c; 18 | 19 | /** The field from t which is in the filter. The pure name, without alias or tablename*/ 20 | public String fieldPureName; 21 | 22 | public String fieldQuantifiedName; 23 | 24 | public LogicalFilterNode(String table, String field, Predicate.Op pred, String constant) { 25 | tableAlias = table; 26 | p = pred; 27 | c = constant; 28 | String[] tmps = field.split("[.]"); 29 | if (tmps.length>1) 30 | fieldPureName = tmps[tmps.length-1]; 31 | else 32 | fieldPureName=field; 33 | this.fieldQuantifiedName = tableAlias+"."+fieldPureName; 34 | } 35 | } -------------------------------------------------------------------------------- /src/java/simpledb/LogicalJoinNode.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | /** A LogicalJoinNode represens the state needed of a join of two 4 | * tables in a LogicalQueryPlan */ 5 | public class LogicalJoinNode { 6 | 7 | /** The first table to join (may be null). It's the alias of the table (if no alias, the true table name) */ 8 | public String t1Alias; 9 | 10 | /** The second table to join (may be null). It's the alias of the table, (if no alias, the true table name).*/ 11 | public String t2Alias; 12 | 13 | /** The name of the field in t1 to join with. It's the pure name of a field, rather that alias.field. */ 14 | public String f1PureName; 15 | 16 | public String f1QuantifiedName; 17 | 18 | /** The name of the field in t2 to join with. It's the pure name of a field.*/ 19 | public String f2PureName; 20 | 21 | public String f2QuantifiedName; 22 | 23 | /** The join predicate */ 24 | public Predicate.Op p; 25 | 26 | public LogicalJoinNode() { 27 | } 28 | 29 | public LogicalJoinNode(String table1, String table2, String joinField1, String joinField2, Predicate.Op pred) { 30 | t1Alias = table1; 31 | t2Alias = table2; 32 | String[] tmps = joinField1.split("[.]"); 33 | if (tmps.length>1) 34 | f1PureName = tmps[tmps.length-1]; 35 | else 36 | f1PureName=joinField1; 37 | tmps = joinField2.split("[.]"); 38 | if (tmps.length>1) 39 | f2PureName = tmps[tmps.length-1]; 40 | else 41 | f2PureName = joinField2; 42 | p = pred; 43 | this.f1QuantifiedName = t1Alias+"."+this.f1PureName; 44 | this.f2QuantifiedName = t2Alias+"."+this.f2PureName; 45 | } 46 | 47 | /** Return a new LogicalJoinNode with the inner and outer (t1.f1 48 | * and t2.f2) tables swapped. 
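 * A small illustration (made-up aliases): for a node built as
 * new LogicalJoinNode("t1", "t2", "a", "b", Predicate.Op.GREATER_THAN),
 * swapInnerOuter() returns a node with t1Alias "t2", t2Alias "t1", and the
 * operator mirrored to Predicate.Op.LESS_THAN, so the join keeps its meaning.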
*/ 49 | public LogicalJoinNode swapInnerOuter() { 50 | Predicate.Op newp; 51 | if (p == Predicate.Op.GREATER_THAN) 52 | newp = Predicate.Op.LESS_THAN; 53 | else if (p == Predicate.Op.GREATER_THAN_OR_EQ) 54 | newp = Predicate.Op.LESS_THAN_OR_EQ; 55 | else if (p == Predicate.Op.LESS_THAN) 56 | newp = Predicate.Op.GREATER_THAN; 57 | else if (p == Predicate.Op.LESS_THAN_OR_EQ) 58 | newp = Predicate.Op.GREATER_THAN_OR_EQ; 59 | else 60 | newp = p; 61 | 62 | LogicalJoinNode j2 = new LogicalJoinNode(t2Alias,t1Alias,f2PureName,f1PureName, newp); 63 | return j2; 64 | } 65 | 66 | @Override public boolean equals(Object o) { 67 | LogicalJoinNode j2 =(LogicalJoinNode)o; 68 | return (j2.t1Alias.equals(t1Alias) || j2.t1Alias.equals(t2Alias)) && (j2.t2Alias.equals(t1Alias) || j2.t2Alias.equals(t2Alias)); 69 | } 70 | 71 | @Override public String toString() { 72 | return t1Alias + ":" + t2Alias ;//+ ";" + f1 + " " + p + " " + f2; 73 | } 74 | 75 | @Override public int hashCode() { 76 | return t1Alias.hashCode() + t2Alias.hashCode() + f1PureName.hashCode() + f2PureName.hashCode(); 77 | } 78 | } 79 | 80 | 81 | 82 | 83 | -------------------------------------------------------------------------------- /src/java/simpledb/LogicalScanNode.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | /** A LogicalScanNode represents table in the FROM list in a 4 | * LogicalQueryPlan */ 5 | public class LogicalScanNode { 6 | 7 | /** The name (alias) of the table as it is used in the query */ 8 | public String alias; 9 | 10 | /** The table identifier (can be passed to {@link Catalog#getDatabaseFile}) 11 | * to retrieve a DbFile */ 12 | public int t; 13 | 14 | public LogicalScanNode(int table, String tableAlias) { 15 | this.alias = tableAlias; 16 | this.t = table; 17 | } 18 | } 19 | 20 | -------------------------------------------------------------------------------- /src/java/simpledb/LogicalSelectListNode.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | /** A LogicalSelectListNode represents a clause in the select list in 4 | * a LogicalQueryPlan 5 | */ 6 | public class LogicalSelectListNode { 7 | /** The field name being selected; the name may be (optionally) be 8 | * qualified with a table name or alias. 
9 | */ 10 | public String fname; 11 | 12 | /** The aggregation operation over the field (if any) */ 13 | public String aggOp; 14 | 15 | public LogicalSelectListNode(String aggOp, String fname) { 16 | this.aggOp = aggOp; 17 | this.fname = fname; 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /src/java/simpledb/LogicalSubplanJoinNode.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | /** A LogicalSubplanJoinNode represens the state needed of a join of a 4 | * table to a subplan in a LogicalQueryPlan -- inherits state from 5 | * {@link LogicalJoinNode}; t2 and f2 should always be null 6 | */ 7 | public class LogicalSubplanJoinNode extends LogicalJoinNode { 8 | 9 | /** The subplan (used on the inner) of the join */ 10 | DbIterator subPlan; 11 | 12 | public LogicalSubplanJoinNode(String table1, String joinField1, DbIterator sp, Predicate.Op pred) { 13 | t1Alias = table1; 14 | String[] tmps = joinField1.split("[.]"); 15 | if (tmps.length>1) 16 | f1PureName = tmps[tmps.length-1]; 17 | else 18 | f1PureName=joinField1; 19 | f1QuantifiedName=t1Alias+"."+f1PureName; 20 | subPlan = sp; 21 | p = pred; 22 | } 23 | 24 | @Override public int hashCode() { 25 | return t1Alias.hashCode() + f1PureName.hashCode() + subPlan.hashCode(); 26 | } 27 | 28 | @Override public boolean equals(Object o) { 29 | LogicalJoinNode j2 =(LogicalJoinNode)o; 30 | if (!(o instanceof LogicalSubplanJoinNode)) 31 | return false; 32 | 33 | return (j2.t1Alias.equals(t1Alias) && j2.f1PureName.equals(f1PureName) && ((LogicalSubplanJoinNode)o).subPlan.equals(subPlan)); 34 | } 35 | 36 | public LogicalSubplanJoinNode swapInnerOuter() { 37 | LogicalSubplanJoinNode j2 = new LogicalSubplanJoinNode(t1Alias,f1PureName,subPlan, p); 38 | return j2; 39 | } 40 | 41 | } 42 | -------------------------------------------------------------------------------- /src/java/simpledb/Operator.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.util.NoSuchElementException; 4 | 5 | /** 6 | * Abstract class for implementing operators. It handles close, 7 | * next and hasNext. Subclasses only need to implement 8 | * open and readNext. 9 | */ 10 | public abstract class Operator implements DbIterator { 11 | 12 | private static final long serialVersionUID = 1L; 13 | 14 | public boolean hasNext() throws DbException, TransactionAbortedException { 15 | if (!this.open) 16 | throw new IllegalStateException("Operator not yet open"); 17 | 18 | if (next == null) 19 | next = fetchNext(); 20 | return next != null; 21 | } 22 | 23 | public Tuple next() throws DbException, TransactionAbortedException, 24 | NoSuchElementException { 25 | if (next == null) { 26 | next = fetchNext(); 27 | if (next == null) 28 | throw new NoSuchElementException(); 29 | } 30 | 31 | Tuple result = next; 32 | next = null; 33 | return result; 34 | } 35 | 36 | /** 37 | * Returns the next Tuple in the iterator, or null if the iteration is 38 | * finished. Operator uses this method to implement both next 39 | * and hasNext. 40 | * 41 | * @return the next Tuple in the iterator, or null if the iteration is 42 | * finished. 43 | */ 44 | protected abstract Tuple fetchNext() throws DbException, 45 | TransactionAbortedException; 46 | 47 | /** 48 | * Closes this iterator. If overridden by a subclass, they should call 49 | * super.close() in order for Operator's internal state to be consistent. 
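 * A typical override (compare Join.close() and Insert.close() in this package)
 * looks like:
 *
 *   public void close() {
 *       super.close();    // clears Operator's cached next tuple and open flag
 *       child.close();    // then release this operator's own children
 *   }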
50 | */ 51 | public void close() { 52 | // Ensures that a future call to next() will fail 53 | next = null; 54 | this.open = false; 55 | } 56 | 57 | private Tuple next = null; 58 | private boolean open = false; 59 | private int estimatedCardinality = 0; 60 | 61 | public void open() throws DbException, TransactionAbortedException { 62 | this.open = true; 63 | } 64 | 65 | /** 66 | * @return return the children DbIterators of this operator. If there is 67 | * only one child, return an array of only one element. For join 68 | * operators, the order of the children is not important. But they 69 | * should be consistent among multiple calls. 70 | * */ 71 | public abstract DbIterator[] getChildren(); 72 | 73 | /** 74 | * Set the children(child) of this operator. If the operator has only one 75 | * child, children[0] should be used. If the operator is a join, children[0] 76 | * and children[1] should be used. 77 | * 78 | * 79 | * @param children 80 | * the DbIterators which are to be set as the children(child) of 81 | * this operator 82 | * */ 83 | public abstract void setChildren(DbIterator[] children); 84 | 85 | /** 86 | * @return return the TupleDesc of the output tuples of this operator 87 | * */ 88 | public abstract TupleDesc getTupleDesc(); 89 | 90 | /** 91 | * @return The estimated cardinality of this operator. Will only be used in 92 | * lab6 93 | * */ 94 | public int getEstimatedCardinality() { 95 | return this.estimatedCardinality; 96 | } 97 | 98 | /** 99 | * @param card 100 | * The estimated cardinality of this operator Will only be used 101 | * in lab6 102 | * */ 103 | protected void setEstimatedCardinality(int card) { 104 | this.estimatedCardinality = card; 105 | } 106 | 107 | } 108 | -------------------------------------------------------------------------------- /src/java/simpledb/OrderBy.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.util.*; 4 | 5 | /** 6 | * OrderBy is an operator that implements a relational ORDER BY. 7 | */ 8 | public class OrderBy extends Operator { 9 | 10 | private static final long serialVersionUID = 1L; 11 | private DbIterator child; 12 | private TupleDesc td; 13 | private ArrayList childTups = new ArrayList(); 14 | private int orderByField; 15 | private String orderByFieldName; 16 | private Iterator it; 17 | private boolean asc; 18 | 19 | /** 20 | * Creates a new OrderBy node over the tuples from the iterator. 21 | * 22 | * @param orderbyField 23 | * the field to which the sort is applied. 24 | * @param asc 25 | * true if the sort order is ascending. 26 | * @param child 27 | * the tuples to sort. 
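 * A usage sketch (child stands for any DbIterator; the consuming loop is
 * illustrative):
 *
 *   OrderBy orderBy = new OrderBy(1, true, child);   // ascending on field 1
 *   orderBy.open();                                  // loads and sorts all child tuples
 *   while (orderBy.hasNext())
 *       System.out.println(orderBy.next());          // tuples arrive in sorted order
 *   orderBy.close();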
28 | */ 29 | public OrderBy(int orderbyField, boolean asc, DbIterator child) { 30 | this.child = child; 31 | td = child.getTupleDesc(); 32 | this.orderByField = orderbyField; 33 | this.orderByFieldName = td.getFieldName(orderbyField); 34 | this.asc = asc; 35 | } 36 | 37 | public boolean isASC() 38 | { 39 | return this.asc; 40 | } 41 | 42 | public int getOrderByField() 43 | { 44 | return this.orderByField; 45 | } 46 | 47 | public String getOrderFieldName() 48 | { 49 | return this.orderByFieldName; 50 | } 51 | 52 | public TupleDesc getTupleDesc() { 53 | return td; 54 | } 55 | 56 | public void open() throws DbException, NoSuchElementException, 57 | TransactionAbortedException { 58 | child.open(); 59 | // load all the tuples in a collection, and sort it 60 | while (child.hasNext()) 61 | childTups.add((Tuple) child.next()); 62 | Collections.sort(childTups, new TupleComparator(orderByField, asc)); 63 | it = childTups.iterator(); 64 | super.open(); 65 | } 66 | 67 | public void close() { 68 | super.close(); 69 | it = null; 70 | } 71 | 72 | public void rewind() throws DbException, TransactionAbortedException { 73 | it = childTups.iterator(); 74 | } 75 | 76 | /** 77 | * Operator.fetchNext implementation. Returns tuples from the child operator 78 | * in order 79 | * 80 | * @return The next tuple in the ordering, or null if there are no more 81 | * tuples 82 | */ 83 | protected Tuple fetchNext() throws NoSuchElementException, 84 | TransactionAbortedException, DbException { 85 | if (it != null && it.hasNext()) { 86 | return it.next(); 87 | } else 88 | return null; 89 | } 90 | 91 | @Override 92 | public DbIterator[] getChildren() { 93 | return new DbIterator[] { this.child }; 94 | } 95 | 96 | @Override 97 | public void setChildren(DbIterator[] children) { 98 | this.child = children[0]; 99 | } 100 | 101 | } 102 | 103 | class TupleComparator implements Comparator { 104 | int field; 105 | boolean asc; 106 | 107 | public TupleComparator(int field, boolean asc) { 108 | this.field = field; 109 | this.asc = asc; 110 | } 111 | 112 | public int compare(Tuple o1, Tuple o2) { 113 | Field t1 = (o1).getField(field); 114 | Field t2 = (o2).getField(field); 115 | if (t1.compare(Predicate.Op.EQUALS, t2)) 116 | return 0; 117 | if (t1.compare(Predicate.Op.GREATER_THAN, t2)) 118 | return asc ? 1 : -1; 119 | else 120 | return asc ? -1 : 1; 121 | } 122 | 123 | } 124 | -------------------------------------------------------------------------------- /src/java/simpledb/Page.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.util.*; 4 | import java.io.*; 5 | 6 | /** 7 | * Page is the interface used to represent pages that are resident in the 8 | * BufferPool. Typically, DbFiles will read and write pages from disk. 9 | *

10 | * Pages may be "dirty", indicating that they have been modified since they 11 | * were last written out to disk. 12 | * 13 | * For recovery purposes, pages MUST have a single constructor of the form: 14 | * Page(PageId id, byte[] data) 15 | */ 16 | public interface Page { 17 | 18 | /** 19 | * Return the id of this page. The id is a unique identifier for a page 20 | * that can be used to look up the page on disk or determine if the page 21 | * is resident in the buffer pool. 22 | * 23 | * @return the id of this page 24 | */ 25 | public PageId getId(); 26 | 27 | /** 28 | * Get the id of the transaction that last dirtied this page, or null if the page is clean.. 29 | * 30 | * @return The id of the transaction that last dirtied this page, or null 31 | */ 32 | public TransactionId isDirty(); 33 | 34 | /** 35 | * Set the dirty state of this page as dirtied by a particular transaction 36 | */ 37 | public void markDirty(boolean dirty, TransactionId tid); 38 | 39 | /** 40 | * Generates a byte array representing the contents of this page. 41 | * Used to serialize this page to disk. 42 | *

43 | * The invariant here is that it should be possible to pass the byte array 44 | * generated by getPageData to the Page constructor and have it produce 45 | * an identical Page object. 46 | * 47 | * @return A byte array correspond to the bytes of this page. 48 | */ 49 | 50 | public byte[] getPageData(); 51 | 52 | /** Provide a representation of this page before any modifications were made 53 | to it. Used by recovery. 54 | */ 55 | public Page getBeforeImage(); 56 | 57 | /* 58 | * a transaction that wrote this page just committed it. 59 | * copy current content to the before image. 60 | */ 61 | public void setBeforeImage(); 62 | 63 | // public Iterator iterator(); 64 | } 65 | -------------------------------------------------------------------------------- /src/java/simpledb/PageId.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | /** PageId is an interface to a specific page of a specific table. */ 4 | public interface PageId { 5 | 6 | /** Return a representation of this page id object as a collection of 7 | integers (used for logging) 8 | 9 | This class MUST have a constructor that accepts n integer parameters, 10 | where n is the number of integers returned in the array from serialize. 11 | */ 12 | public int[] serialize(); 13 | 14 | /** @return the unique tableid hashcode with this PageId */ 15 | public int getTableId(); 16 | 17 | /** 18 | * @return a hash code for this page, represented by the concatenation of 19 | * the table number and the page number (needed if a PageId is used as a 20 | * key in a hash table in the BufferPool, for example.) 21 | * @see BufferPool 22 | */ 23 | public int hashCode(); 24 | 25 | /** 26 | * Compares one PageId to another. 27 | * 28 | * @param o The object to compare against (must be a PageId) 29 | * @return true if the objects are equal (e.g., page numbers and table 30 | * ids are the same) 31 | */ 32 | public boolean equals(Object o); 33 | 34 | public int pageNumber(); 35 | } 36 | 37 | -------------------------------------------------------------------------------- /src/java/simpledb/ParsingException.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | import java.lang.Exception; 3 | 4 | public class ParsingException extends Exception { 5 | public ParsingException(String string) { 6 | super(string); 7 | } 8 | 9 | public ParsingException(Exception e) { 10 | super(e); 11 | } 12 | 13 | /** 14 | * 15 | */ 16 | private static final long serialVersionUID = 1L; 17 | } 18 | -------------------------------------------------------------------------------- /src/java/simpledb/Permissions.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | /** 4 | * Class representing requested permissions to a relation/file. 5 | * Private constructor with two static objects READ_ONLY and READ_WRITE that 6 | * represent the two levels of permission. 
7 | */ 8 | public class Permissions { 9 | int permLevel; 10 | 11 | private Permissions(int permLevel) { 12 | this.permLevel = permLevel; 13 | } 14 | 15 | public String toString() { 16 | if (permLevel == 0) 17 | return "READ_ONLY"; 18 | if (permLevel == 1) 19 | return "READ_WRITE"; 20 | return "UNKNOWN"; 21 | } 22 | 23 | public static final Permissions READ_ONLY = new Permissions(0); 24 | public static final Permissions READ_WRITE = new Permissions(1); 25 | 26 | } 27 | -------------------------------------------------------------------------------- /src/java/simpledb/PlanCache.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | import java.util.HashMap; 3 | import java.util.Set; 4 | import java.util.Vector; 5 | 6 | /** A PlanCache is a helper class that can be used to store the best 7 | * way to order a given set of joins */ 8 | public class PlanCache { 9 | HashMap,Vector> bestOrders= new HashMap,Vector>(); 10 | HashMap,Double> bestCosts= new HashMap,Double>(); 11 | HashMap,Integer> bestCardinalities = new HashMap,Integer>(); 12 | 13 | /** Add a new cost, cardinality and ordering for a particular join set. Does not verify that the 14 | new cost is less than any previously added cost -- simply adds or replaces an existing plan for the 15 | specified join set 16 | @param s the set of joins for which a new ordering (plan) is being added 17 | @param cost the estimated cost of the specified plan 18 | @param card the estimatied cardinality of the specified plan 19 | @param order the ordering of the joins in the plan 20 | */ 21 | void addPlan(Set s, double cost, int card, Vector order) { 22 | bestOrders.put(s,order); 23 | bestCosts.put(s,cost); 24 | bestCardinalities.put(s,card); 25 | } 26 | 27 | /** Find the best join order in the cache for the specified plan 28 | @param s the set of joins to look up the best order for 29 | @return the best order for s in the cache 30 | */ 31 | Vector getOrder(Set s) { 32 | return bestOrders.get(s); 33 | } 34 | 35 | /** Find the cost of the best join order in the cache for the specified plan 36 | @param s the set of joins to look up the best cost for 37 | @return the cost of the best order for s in the cache 38 | */ 39 | double getCost(Set s) { 40 | return bestCosts.get(s); 41 | } 42 | 43 | /** Find the cardinality of the best join order in the cache for the specified plan 44 | @param s the set of joins to look up the best cardinality for 45 | @return the cardinality of the best order for s in the cache 46 | */ 47 | int getCard(Set s) { 48 | return bestCardinalities.get(s); 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/java/simpledb/Predicate.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.io.Serializable; 4 | 5 | /** 6 | * Predicate compares tuples to a specified Field value. 7 | */ 8 | public class Predicate implements Serializable { 9 | 10 | private static final long serialVersionUID = 1L; 11 | 12 | private int m_field; 13 | private Op m_op; 14 | private Field m_operand; 15 | /** Constants used for return codes in Field.compare */ 16 | public enum Op implements Serializable { 17 | EQUALS, GREATER_THAN, LESS_THAN, LESS_THAN_OR_EQ, GREATER_THAN_OR_EQ, LIKE, NOT_EQUALS; 18 | 19 | /** 20 | * Interface to access operations by integer value for command-line 21 | * convenience. 
22 | * 23 | * @param i 24 | * a valid integer Op index 25 | */ 26 | public static Op getOp(int i) { 27 | return values()[i]; 28 | } 29 | 30 | public String toString() { 31 | if (this == EQUALS) 32 | return "="; 33 | if (this == GREATER_THAN) 34 | return ">"; 35 | if (this == LESS_THAN) 36 | return "<"; 37 | if (this == LESS_THAN_OR_EQ) 38 | return "<="; 39 | if (this == GREATER_THAN_OR_EQ) 40 | return ">="; 41 | if (this == LIKE) 42 | return "LIKE"; 43 | if (this == NOT_EQUALS) 44 | return "<>"; 45 | throw new IllegalStateException("impossible to reach here"); 46 | } 47 | 48 | } 49 | 50 | /** 51 | * Constructor. 52 | * 53 | * @param field 54 | * field number of passed in tuples to compare against. 55 | * @param op 56 | * operation to use for comparison 57 | * @param operand 58 | * field value to compare passed in tuples to 59 | */ 60 | public Predicate(int field, Op op, Field operand) { 61 | m_field = field; 62 | m_op = op; 63 | m_operand = operand; 64 | // some code goes here 65 | } 66 | 67 | /** 68 | * @return the field number 69 | */ 70 | public int getField() 71 | { 72 | // some code goes here 73 | return m_field; 74 | } 75 | 76 | /** 77 | * @return the operator 78 | */ 79 | public Op getOp() 80 | { 81 | // some code goes here 82 | return m_op; 83 | } 84 | 85 | /** 86 | * @return the operand 87 | */ 88 | public Field getOperand() 89 | { 90 | // some code goes here 91 | return m_operand; 92 | } 93 | 94 | /** 95 | * Compares the field number of t specified in the constructor to the 96 | * operand field specified in the constructor using the operator specific in 97 | * the constructor. The comparison can be made through Field's compare 98 | * method. 99 | * 100 | * @param t 101 | * The tuple to compare against 102 | * @return true if the comparison is true, false otherwise. 103 | */ 104 | public boolean filter(Tuple t) { 105 | // some code goes here 106 | Field tuple_field = t.getField(getField()); 107 | return tuple_field.compare(getOp(), m_operand); 108 | } 109 | 110 | /** 111 | * Returns something useful, like "f = field_id op = op_string operand = 112 | * operand_string 113 | */ 114 | public String toString() { 115 | // some code goes here 116 | return "f = " + m_field + " op = " + m_op.toString() + " operand = " + m_operand; 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /src/java/simpledb/Project.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.util.*; 4 | 5 | /** 6 | * Project is an operator that implements a relational projection. 
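 * A usage sketch (child is an assumed DbIterator; childTd is its TupleDesc): keep
 * only fields 2 and 0 of the input, in that order:
 *
 *   ArrayList<Integer> fields = new ArrayList<Integer>(Arrays.asList(2, 0));
 *   ArrayList<Type> types = new ArrayList<Type>(
 *           Arrays.asList(childTd.getFieldType(2), childTd.getFieldType(0)));
 *   Project proj = new Project(fields, types, child);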
7 | */ 8 | public class Project extends Operator { 9 | 10 | private static final long serialVersionUID = 1L; 11 | private DbIterator child; 12 | private TupleDesc td; 13 | private ArrayList outFieldIds; 14 | 15 | /** 16 | * Constructor accepts a child operator to read tuples to apply projection 17 | * to and a list of fields in output tuple 18 | * 19 | * @param fieldList 20 | * The ids of the fields child's tupleDesc to project out 21 | * @param typesList 22 | * the types of the fields in the final projection 23 | * @param child 24 | * The child operator 25 | */ 26 | public Project(ArrayList fieldList, ArrayList typesList, 27 | DbIterator child) { 28 | this(fieldList,typesList.toArray(new Type[]{}),child); 29 | } 30 | 31 | public Project(ArrayList fieldList, Type[] types, 32 | DbIterator child) { 33 | this.child = child; 34 | outFieldIds = fieldList; 35 | String[] fieldAr = new String[fieldList.size()]; 36 | TupleDesc childtd = child.getTupleDesc(); 37 | 38 | for (int i = 0; i < fieldAr.length; i++) { 39 | fieldAr[i] = childtd.getFieldName(fieldList.get(i)); 40 | } 41 | td = new TupleDesc(types, fieldAr); 42 | } 43 | 44 | public TupleDesc getTupleDesc() { 45 | return td; 46 | } 47 | 48 | public void open() throws DbException, NoSuchElementException, 49 | TransactionAbortedException { 50 | child.open(); 51 | super.open(); 52 | } 53 | 54 | public void close() { 55 | super.close(); 56 | child.close(); 57 | } 58 | 59 | public void rewind() throws DbException, TransactionAbortedException { 60 | child.rewind(); 61 | } 62 | 63 | /** 64 | * Operator.fetchNext implementation. Iterates over tuples from the child 65 | * operator, projecting out the fields from the tuple 66 | * 67 | * @return The next tuple, or null if there are no more tuples 68 | */ 69 | protected Tuple fetchNext() throws NoSuchElementException, 70 | TransactionAbortedException, DbException { 71 | while (child.hasNext()) { 72 | Tuple t = child.next(); 73 | Tuple newTuple = new Tuple(td); 74 | newTuple.setRecordId(t.getRecordId()); 75 | for (int i = 0; i < td.numFields(); i++) { 76 | newTuple.setField(i, t.getField(outFieldIds.get(i))); 77 | } 78 | return newTuple; 79 | } 80 | return null; 81 | } 82 | 83 | @Override 84 | public DbIterator[] getChildren() { 85 | return new DbIterator[] { this.child }; 86 | } 87 | 88 | @Override 89 | public void setChildren(DbIterator[] children) { 90 | if (this.child!=children[0]) 91 | { 92 | this.child = children[0]; 93 | } 94 | } 95 | 96 | } 97 | -------------------------------------------------------------------------------- /src/java/simpledb/Query.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.io.*; 4 | import java.util.*; 5 | 6 | /** 7 | * Query is a wrapper class to manage the execution of queries. It takes a query 8 | * plan in the form of a high level DbIterator (built by initiating the 9 | * constructors of query plans) and runs it as a part of a specified 10 | * transaction. 
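 * Typical use, given an already-built physical plan root (a DbIterator) and a
 * transaction id tid (both assumed here):
 *
 *   Query q = new Query(root, tid);
 *   q.start();
 *   while (q.hasNext())
 *       System.out.println(q.next());
 *   q.close();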
11 | * 12 | * @author Sam Madden 13 | */ 14 | 15 | public class Query implements Serializable { 16 | 17 | private static final long serialVersionUID = 1L; 18 | 19 | transient private DbIterator op; 20 | transient private LogicalPlan logicalPlan; 21 | TransactionId tid; 22 | transient private boolean started = false; 23 | 24 | public TransactionId getTransactionId() { 25 | return this.tid; 26 | } 27 | 28 | public void setLogicalPlan(LogicalPlan lp) { 29 | this.logicalPlan = lp; 30 | } 31 | 32 | public LogicalPlan getLogicalPlan() { 33 | return this.logicalPlan; 34 | } 35 | 36 | public void setPhysicalPlan(DbIterator pp) { 37 | this.op = pp; 38 | } 39 | 40 | public DbIterator getPhysicalPlan() { 41 | return this.op; 42 | } 43 | 44 | public Query(TransactionId t) { 45 | tid = t; 46 | } 47 | 48 | public Query(DbIterator root, TransactionId t) { 49 | op = root; 50 | tid = t; 51 | } 52 | 53 | public void start() throws IOException, DbException, 54 | TransactionAbortedException { 55 | op.open(); 56 | 57 | started = true; 58 | } 59 | 60 | public TupleDesc getOutputTupleDesc() { 61 | return this.op.getTupleDesc(); 62 | } 63 | 64 | /** @return true if there are more tuples remaining. */ 65 | public boolean hasNext() throws DbException, TransactionAbortedException { 66 | return op.hasNext(); 67 | } 68 | 69 | /** 70 | * Returns the next tuple, or throws NoSuchElementException if the iterator 71 | * is closed. 72 | * 73 | * @return The next tuple in the iterator 74 | * @throws DbException 75 | * If there is an error in the database system 76 | * @throws NoSuchElementException 77 | * If the iterator has finished iterating 78 | * @throws TransactionAbortedException 79 | * If the transaction is aborted (e.g., due to a deadlock) 80 | */ 81 | public Tuple next() throws DbException, NoSuchElementException, 82 | TransactionAbortedException { 83 | if (!started) 84 | throw new DbException("Database not started."); 85 | 86 | return op.next(); 87 | } 88 | 89 | /** Close the iterator */ 90 | public void close() throws IOException { 91 | op.close(); 92 | started = false; 93 | } 94 | 95 | public void execute() throws IOException, DbException, TransactionAbortedException { 96 | TupleDesc td = this.getOutputTupleDesc(); 97 | 98 | String names = ""; 99 | for (int i = 0; i < td.numFields(); i++) { 100 | names += td.getFieldName(i) + "\t"; 101 | } 102 | System.out.println(names); 103 | for (int i = 0; i < names.length() + td.numFields() * 4; i++) { 104 | System.out.print("-"); 105 | } 106 | System.out.println(""); 107 | 108 | this.start(); 109 | int cnt = 0; 110 | while (this.hasNext()) { 111 | Tuple tup = this.next(); 112 | System.out.println(tup); 113 | cnt++; 114 | } 115 | System.out.println("\n " + cnt + " rows."); 116 | this.close(); 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /src/java/simpledb/RecordId.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.io.Serializable; 4 | 5 | /** 6 | * A RecordId is a reference to a specific tuple on a specific page of a 7 | * specific table. 8 | */ 9 | public class RecordId implements Serializable { 10 | 11 | private static final long serialVersionUID = 1L; 12 | 13 | /** 14 | * Creates a new RecordId referring to the specified PageId and tuple 15 | * number. 16 | * 17 | * @param pid 18 | * the pageid of the page on which the tuple resides 19 | * @param tupleno 20 | * the tuple number within the page. 
21 | */ 22 | private PageId pid; 23 | private Integer tupleno; 24 | 25 | public RecordId(PageId pid, int tupleno) { 26 | // some code goes here 27 | this.pid = pid; 28 | this.tupleno = tupleno; 29 | } 30 | 31 | /** 32 | * @return the tuple number this RecordId references. 33 | */ 34 | public int tupleno() { 35 | // some code goes here 36 | return tupleno; 37 | } 38 | 39 | /** 40 | * @return the page id this RecordId references. 41 | */ 42 | public PageId getPageId() { 43 | // some code goes here 44 | return pid; 45 | } 46 | 47 | /** 48 | * Two RecordId objects are considered equal if they represent the same 49 | * tuple. 50 | * 51 | * @return True if this and o represent the same tuple 52 | */ 53 | @Override 54 | public boolean equals(Object o) { 55 | // some code goes here 56 | if (o instanceof RecordId) { 57 | RecordId arg = (RecordId) o; 58 | if (this.pid.equals(arg.pid) && this.tupleno == arg.tupleno){ 59 | return true; 60 | } 61 | } 62 | return false; 63 | } 64 | 65 | /** 66 | * You should implement the hashCode() so that two equal RecordId instances 67 | * (with respect to equals()) have the same hashCode(). 68 | * 69 | * @return An int that is the same for equal RecordId objects. 70 | */ 71 | @Override 72 | public int hashCode() { 73 | // some code goes here 74 | String hash = this.pid.hashCode() + this.tupleno.toString(); 75 | return hash.hashCode(); 76 | } 77 | 78 | } 79 | -------------------------------------------------------------------------------- /src/java/simpledb/SeqScan.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.util.*; 4 | 5 | /** 6 | * SeqScan is an implementation of a sequential scan access method that reads 7 | * each tuple of a table in no particular order (e.g., as they are laid out on 8 | * disk). 9 | */ 10 | public class SeqScan implements DbIterator { 11 | 12 | private static final long serialVersionUID = 1L; 13 | 14 | private TransactionId m_transId; 15 | private int m_tableId; 16 | private String m_tableAlias; 17 | private DbFile m_hFile; 18 | private DbFileIterator m_dbiterator; 19 | 20 | /** 21 | * Creates a sequential scan over the specified table as a part of the 22 | * specified transaction. 23 | * 24 | * @param tid 25 | * The transaction this scan is running as a part of. 26 | * @param tableid 27 | * the table to scan. 28 | * @param tableAlias 29 | * the alias of this table (needed by the parser); the returned 30 | * tupleDesc should have fields with name tableAlias.fieldName 31 | * (note: this class is not responsible for handling a case where 32 | * tableAlias or fieldName are null. It shouldn't crash if they 33 | * are, but the resulting name can be null.fieldName, 34 | * tableAlias.null, or null.null). 35 | */ 36 | public SeqScan(TransactionId tid, int tableid, String tableAlias) { 37 | m_transId = tid; 38 | m_tableAlias = tableAlias; 39 | m_tableId = tableid; 40 | m_hFile = Database.getCatalog().getDatabaseFile(tableid); 41 | //m_dbiterator = new HeapFileIterator(m_hFile.getId(), m_transId, m_hFile.numPages()); 42 | m_dbiterator = m_hFile.iterator(m_transId); 43 | } 44 | 45 | /** 46 | * @return 47 | * return the table name of the table the operator scans. This should 48 | * be the actual name of the table in the catalog of the database 49 | * */ 50 | public String getTableName() { 51 | return Database.getCatalog().getTableName(m_tableId); 52 | } 53 | 54 | /** 55 | * @return Return the alias of the table this operator scans. 
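 * For example, for a scan constructed as new SeqScan(tid, tableid, "m"),
 * getAlias() returns "m", and getTupleDesc() (below) reports alias-prefixed field
 * names such as "m.name" rather than the bare column names ("name" is illustrative).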
56 | * */ 57 | public String getAlias() 58 | { 59 | return m_tableAlias; 60 | } 61 | 62 | /** 63 | * Reset the tableid, and tableAlias of this operator. 64 | * @param tableid 65 | * the table to scan. 66 | * @param tableAlias 67 | * the alias of this table (needed by the parser); the returned 68 | * tupleDesc should have fields with name tableAlias.fieldName 69 | * (note: this class is not responsible for handling a case where 70 | * tableAlias or fieldName are null. It shouldn't crash if they 71 | * are, but the resulting name can be null.fieldName, 72 | * tableAlias.null, or null.null). 73 | */ 74 | public void reset(int tableid, String tableAlias) { 75 | m_tableId = tableid; 76 | m_tableAlias = tableAlias; 77 | } 78 | 79 | public SeqScan(TransactionId tid, int tableid) { 80 | this(tid, tableid, Database.getCatalog().getTableName(tableid)); 81 | } 82 | 83 | public void open() throws DbException, TransactionAbortedException { 84 | m_dbiterator.open(); 85 | } 86 | 87 | /** 88 | * Returns the TupleDesc with field names from the underlying HeapFile, 89 | * prefixed with the tableAlias string from the constructor. This prefix 90 | * becomes useful when joining tables containing a field(s) with the same 91 | * name. 92 | * 93 | * @return the TupleDesc with field names from the underlying HeapFile, 94 | * prefixed with the tableAlias string from the constructor. 95 | */ 96 | public TupleDesc getTupleDesc() { 97 | // some code goes here 98 | TupleDesc origTupleDesc = Database.getCatalog().getTupleDesc(m_tableId); 99 | int tdSize = origTupleDesc.numFields(); 100 | Type [] newTypes = new Type[tdSize]; 101 | String [] newFields = new String[tdSize]; 102 | for (int i = 0; i < tdSize; i++){ 103 | newTypes[i] = origTupleDesc.getFieldType(i); 104 | newFields[i] = m_tableAlias + "." 
+ origTupleDesc.getFieldName(i); 105 | } 106 | return new TupleDesc(newTypes, newFields); 107 | } 108 | 109 | public boolean hasNext() throws TransactionAbortedException, DbException { 110 | return m_dbiterator.hasNext(); 111 | } 112 | 113 | public Tuple next() throws NoSuchElementException, 114 | TransactionAbortedException, DbException { 115 | return m_dbiterator.next(); 116 | } 117 | 118 | public void close() { 119 | m_dbiterator.close(); 120 | } 121 | 122 | public void rewind() throws DbException, NoSuchElementException, 123 | TransactionAbortedException { 124 | m_dbiterator.rewind(); 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /src/java/simpledb/SimpleDb.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | import java.io.*; 3 | 4 | public class SimpleDb { 5 | public static void main (String args[]) 6 | throws DbException, TransactionAbortedException, IOException { 7 | // convert a file 8 | if(args[0].equals("convert")) { 9 | try { 10 | if (args.length<3 || args.length>5){ 11 | System.err.println("Unexpected number of arguments to convert "); 12 | return; 13 | } 14 | File sourceTxtFile=new File(args[1]); 15 | File targetDatFile=new File(args[1].replaceAll(".txt", ".dat")); 16 | int numOfAttributes=Integer.parseInt(args[2]); 17 | Type[] ts = new Type[numOfAttributes]; 18 | char fieldSeparator=','; 19 | 20 | if (args.length == 3) 21 | for (int i=0;i c = Class.forName("simpledb.Parser"); 80 | Class s = String[].class; 81 | 82 | java.lang.reflect.Method m = c.getMethod("main", s); 83 | m.invoke(null, (java.lang.Object)newargs); 84 | } catch (ClassNotFoundException cne) { 85 | System.out.println("Class Parser not found -- perhaps you are trying to run the parser as a part of lab1?"); 86 | } 87 | catch (Exception e) { 88 | System.out.println("Error in parser."); 89 | e.printStackTrace(); 90 | } 91 | 92 | } 93 | else { 94 | System.err.println("Unknown command: " + args[0]); 95 | System.exit(1); 96 | } 97 | } 98 | 99 | } 100 | -------------------------------------------------------------------------------- /src/java/simpledb/StringAggregator.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.util.ArrayList; 4 | import java.util.HashMap; 5 | 6 | import simpledb.Aggregator.Op; 7 | 8 | /** 9 | * Knows how to compute some aggregate over a set of StringFields. 
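 * Only COUNT is supported. A usage sketch (child is an assumed DbIterator whose
 * field 0 is an INT group-by column and field 1 is the string being counted):
 *
 *   StringAggregator agg = new StringAggregator(0, Type.INT_TYPE, 1, Op.COUNT);
 *   while (child.hasNext())
 *       agg.mergeTupleIntoGroup(child.next());
 *   DbIterator counts = agg.iterator();   // one (groupValue, count) tuple per group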
10 | */ 11 | public class StringAggregator implements Aggregator { 12 | 13 | private static final long serialVersionUID = 1L; 14 | 15 | private int m_groupByFieldIndex; 16 | private Type m_groupByFieldType; 17 | private int m_aggregateFieldIndex; 18 | private Op m_op; 19 | private HashMap m_count; 20 | 21 | /** 22 | * Aggregate constructor 23 | * @param gbfield the 0-based index of the group-by field in the tuple, or NO_GROUPING if there is no grouping 24 | * @param gbfieldtype the type of the group by field (e.g., Type.INT_TYPE), or null if there is no grouping 25 | * @param afield the 0-based index of the aggregate field in the tuple 26 | * @param what aggregation operator to use -- only supports COUNT 27 | * @throws IllegalArgumentException if what != COUNT 28 | */ 29 | 30 | public StringAggregator(int gbfield, Type gbfieldtype, int afield, Op what) { 31 | // some code goes here 32 | m_groupByFieldIndex = gbfield; 33 | m_groupByFieldType = gbfieldtype; 34 | m_aggregateFieldIndex = afield; 35 | m_op = what; 36 | assert(m_op == Op.COUNT); 37 | m_count = new HashMap(); 38 | } 39 | 40 | /** 41 | * Merge a new tuple into the aggregate, grouping as indicated in the constructor 42 | * @param tup the Tuple containing an aggregate field and a group-by field 43 | */ 44 | public void mergeTupleIntoGroup(Tuple tup) { 45 | // some code goes here 46 | Field tupleGroupByField = (m_groupByFieldIndex == Aggregator.NO_GROUPING) ? null : tup.getField(m_groupByFieldIndex); 47 | 48 | if (!m_count.containsKey(tupleGroupByField)) 49 | { 50 | m_count.put(tupleGroupByField, 0); 51 | } 52 | 53 | int currentCount = m_count.get(tupleGroupByField); 54 | m_count.put(tupleGroupByField, currentCount+1); 55 | 56 | } 57 | 58 | private TupleDesc createGroupByTupleDesc() 59 | { 60 | String[] names; 61 | Type[] types; 62 | if (m_groupByFieldIndex == Aggregator.NO_GROUPING) 63 | { 64 | names = new String[] {"aggregateValue"}; 65 | types = new Type[] {Type.INT_TYPE}; 66 | } 67 | else 68 | { 69 | names = new String[] {"groupValue", "aggregateValue"}; 70 | types = new Type[] {m_groupByFieldType, Type.INT_TYPE}; 71 | } 72 | return new TupleDesc(types, names); 73 | } 74 | /** 75 | * Create a DbIterator over group aggregate results. 76 | * 77 | * @return a DbIterator whose tuples are the pair (groupVal, 78 | * aggregateVal) if using group, or a single (aggregateVal) if no 79 | * grouping. The aggregateVal is determined by the type of 80 | * aggregate specified in the constructor. 81 | */ 82 | public DbIterator iterator() { 83 | // some code goes here 84 | ArrayList tuples = new ArrayList(); 85 | TupleDesc tupledesc = createGroupByTupleDesc(); 86 | Tuple addMe; 87 | for (Field group : m_count.keySet()) 88 | { 89 | int aggregateVal = m_count.get(group); 90 | addMe = new Tuple(tupledesc); 91 | if (m_groupByFieldIndex == Aggregator.NO_GROUPING){ 92 | addMe.setField(0, new IntField(aggregateVal)); 93 | } 94 | else { 95 | addMe.setField(0, group); 96 | addMe.setField(1, new IntField(aggregateVal)); 97 | } 98 | tuples.add(addMe); 99 | } 100 | return new TupleIterator(tupledesc, tuples); 101 | } 102 | 103 | } 104 | -------------------------------------------------------------------------------- /src/java/simpledb/StringField.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.io.*; 4 | 5 | /** 6 | * Instance of Field that stores a single String of a fixed length. 
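 * Strings longer than maxSize are truncated; for example, new StringField("database", 4)
 * stores "data", which is what getValue() and toString() return.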
7 | */ 8 | public class StringField implements Field { 9 | 10 | private static final long serialVersionUID = 1L; 11 | 12 | private final String value; 13 | private final int maxSize; 14 | 15 | public String getValue() { 16 | return value; 17 | } 18 | 19 | /** 20 | * Constructor. 21 | * 22 | * @param s 23 | * The value of this field. 24 | * @param maxSize 25 | * The maximum size of this string 26 | */ 27 | public StringField(String s, int maxSize) { 28 | this.maxSize = maxSize; 29 | 30 | if (s.length() > maxSize) 31 | value = s.substring(0, maxSize); 32 | else 33 | value = s; 34 | } 35 | 36 | public String toString() { 37 | return value; 38 | } 39 | 40 | public int hashCode() { 41 | return value.hashCode(); 42 | } 43 | 44 | public boolean equals(Object field) { 45 | return ((StringField) field).value.equals(value); 46 | } 47 | 48 | /** 49 | * Write this string to dos. Always writes maxSize + 4 bytes to the passed 50 | * in dos. First four bytes are string length, next bytes are string, with 51 | * remainder padded with 0 to maxSize. 52 | * 53 | * @param dos 54 | * Where the string is written 55 | */ 56 | public void serialize(DataOutputStream dos) throws IOException { 57 | String s = value; 58 | int overflow = maxSize - s.length(); 59 | if (overflow < 0) { 60 | String news = s.substring(0, maxSize); 61 | s = news; 62 | } 63 | dos.writeInt(s.length()); 64 | dos.writeBytes(s); 65 | while (overflow-- > 0) 66 | dos.write((byte) 0); 67 | } 68 | 69 | /** 70 | * Compare the specified field to the value of this Field. Return semantics 71 | * are as specified by Field.compare 72 | * 73 | * @throws IllegalCastException 74 | * if val is not a StringField 75 | * @see Field#compare 76 | */ 77 | public boolean compare(Predicate.Op op, Field val) { 78 | 79 | StringField iVal = (StringField) val; 80 | int cmpVal = value.compareTo(iVal.value); 81 | 82 | switch (op) { 83 | case EQUALS: 84 | return cmpVal == 0; 85 | 86 | case NOT_EQUALS: 87 | return cmpVal != 0; 88 | 89 | case GREATER_THAN: 90 | return cmpVal > 0; 91 | 92 | case GREATER_THAN_OR_EQ: 93 | return cmpVal >= 0; 94 | 95 | case LESS_THAN: 96 | return cmpVal < 0; 97 | 98 | case LESS_THAN_OR_EQ: 99 | return cmpVal <= 0; 100 | 101 | case LIKE: 102 | return value.indexOf(iVal.value) >= 0; 103 | } 104 | 105 | return false; 106 | } 107 | 108 | /** 109 | * @return the Type for this Field 110 | */ 111 | public Type getType() { 112 | 113 | return Type.STRING_TYPE; 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /src/java/simpledb/StringHistogram.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | /** A class to represent a fixed-width histogram over a single String-based field. 4 | */ 5 | public class StringHistogram { 6 | IntHistogram hist; 7 | 8 | /** Create a new StringHistogram with a specified number of buckets. 9 |
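 * A usage sketch (values are illustrative):
 *
 *   StringHistogram h = new StringHistogram(100);
 *   h.addValue("alice");
 *   h.addValue("bob");
 *   double sel = h.estimateSelectivity(Predicate.Op.EQUALS, "alice");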

10 | Our implementation is written in terms of an IntHistogram by converting 11 | each String to an integer. 12 | @param buckets the number of buckets */ 13 | public StringHistogram(int buckets) { 14 | hist = new IntHistogram(buckets, minVal(), maxVal()); 15 | } 16 | 17 | /** Convert a string to an integer, with the property that 18 | if stringToInt(s1) < stringToInt(s2), then s1 < s2 19 | */ 20 | private int stringToInt(String s) { 21 | int i; 22 | int v = 0; 23 | for (i = 3; i >= 0; i--) { 24 | if (s.length() > 3-i) { 25 | int ci = (int)s.charAt(3-i); 26 | v += (ci) << (i * 8); 27 | } 28 | } 29 | 30 | // XXX: hack to avoid getting wrong results for 31 | // strings which don't fall in the range min to max 32 | if (!(s.equals("") || s.equals("zzzz"))) { 33 | if (v < minVal()) { 34 | v = minVal(); 35 | } 36 | 37 | if (v > maxVal()) { 38 | v = maxVal(); 39 | } 40 | } 41 | 42 | return v; 43 | } 44 | 45 | /** @return the maximum value indexed by the histogram */ 46 | int maxVal() { 47 | return stringToInt("zzzz"); 48 | } 49 | 50 | /** @return the minimum value indexed by the histogram */ 51 | int minVal() { 52 | return stringToInt(""); 53 | } 54 | 55 | /** Add a new value to the histogram */ 56 | public void addValue(String s) { 57 | int val = stringToInt(s); 58 | hist.addValue(val); 59 | } 60 | 61 | /** Estimate the selectivity (as a double between 0 and 1) of the specified predicate over the specified string 62 | @param op The operation being applied 63 | @param s The string to apply op to 64 | */ 65 | public double estimateSelectivity(Predicate.Op op, String s) { 66 | int val = stringToInt(s); 67 | return hist.estimateSelectivity(op, val); 68 | } 69 | 70 | /** 71 | * @return 72 | * the average selectivity of this histogram. 73 | * 74 | * This method is not required for the basic join 75 | * optimization; it may be useful for implementing 76 | * a more efficient optimizer. 77 | */ 78 | public double avgSelectivity() 79 | { 80 | return hist.avgSelectivity(); 81 | } 82 | } -------------------------------------------------------------------------------- /src/java/simpledb/Transaction.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.io.*; 4 | 5 | /** 6 | * Transaction encapsulates information about the state of 7 | * a transaction and manages transaction commit / abort. 
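A minimal sketch of the intended lifecycle of the class below; the enclosing method and the work inside the try block are hypothetical, and the caller is assumed to declare throws IOException:

    Transaction t = new Transaction();
    t.start();       // writes a BEGIN record to the log
    try {
        // ... read and write pages through Database.getBufferPool() using t.getId() ...
        t.commit();  // flushes this transaction's dirty pages, logs COMMIT, releases locks
    } catch (Exception e) {
        t.abort();   // logs ABORT (the LogFile performs the rollback), releases locks
    }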
8 | */ 9 | 10 | public class Transaction { 11 | private final TransactionId tid; 12 | volatile boolean started = false; 13 | 14 | public Transaction() { 15 | tid = new TransactionId(); 16 | } 17 | 18 | /** Start the transaction running */ 19 | public void start() { 20 | started = true; 21 | try { 22 | Database.getLogFile().logXactionBegin(tid); 23 | } catch (IOException e) { 24 | e.printStackTrace(); 25 | } 26 | } 27 | 28 | public TransactionId getId() { 29 | return tid; 30 | } 31 | 32 | /** Finish the transaction */ 33 | public void commit() throws IOException { 34 | transactionComplete(false); 35 | } 36 | 37 | /** Finish the transaction */ 38 | public void abort() throws IOException { 39 | transactionComplete(true); 40 | } 41 | 42 | /** Handle the details of transaction commit / abort */ 43 | public void transactionComplete(boolean abort) throws IOException { 44 | 45 | if (started) { 46 | //write commit / abort records 47 | if (abort) { 48 | Database.getLogFile().logAbort(tid); //does rollback too 49 | } else { 50 | //write all the dirty pages for this transaction out 51 | Database.getBufferPool().flushPages(tid); 52 | Database.getLogFile().logCommit(tid); 53 | } 54 | 55 | try { 56 | Database.getBufferPool().transactionComplete(tid, !abort); // release locks 57 | } catch (IOException e) { 58 | e.printStackTrace(); 59 | } 60 | 61 | //setting this here means we could possibly write multiple abort records -- OK? 62 | started = false; 63 | } 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/java/simpledb/TransactionAbortedException.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.lang.Exception; 4 | 5 | /** Exception that is thrown when a transaction has aborted. */ 6 | public class TransactionAbortedException extends Exception { 7 | private static final long serialVersionUID = 1L; 8 | 9 | public TransactionAbortedException() { 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /src/java/simpledb/TransactionId.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.io.Serializable; 4 | import java.util.concurrent.atomic.AtomicLong; 5 | 6 | /** 7 | * TransactionId is a class that contains the identifier of a transaction. 8 | */ 9 | public class TransactionId implements Serializable { 10 | 11 | private static final long serialVersionUID = 1L; 12 | 13 | static AtomicLong counter = new AtomicLong(0); 14 | final long myid; 15 | 16 | public TransactionId() { 17 | myid = counter.getAndIncrement(); 18 | } 19 | 20 | public long getId() { 21 | return myid; 22 | } 23 | 24 | public boolean equals(Object tid) { 25 | return ((TransactionId) tid).myid == myid; 26 | } 27 | 28 | public int hashCode() { 29 | return (int) myid; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/java/simpledb/Tuple.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.io.Serializable; 4 | import java.util.Arrays; 5 | import java.util.Iterator; 6 | 7 | /** 8 | * Tuple maintains information about the contents of a tuple. Tuples have a 9 | * specified schema specified by a TupleDesc object and contain Field objects 10 | * with the data for each field. 
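A small usage sketch of the class below; the column names and values are invented for illustration:

    // Build a tuple whose fields match its TupleDesc, then print it.
    TupleDesc td = new TupleDesc(new Type[] { Type.INT_TYPE, Type.STRING_TYPE },
                                 new String[] { "id", "name" });
    Tuple tup = new Tuple(td);
    tup.setField(0, new IntField(42));
    tup.setField(1, new StringField("alice", Type.STRING_LEN));
    System.out.print(tup);    // prints "42\talice\n", per the toString() contract below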
11 | */ 12 | public class Tuple implements Serializable { 13 | 14 | private static final long serialVersionUID = 1L; 15 | 16 | private TupleDesc m_td; 17 | private RecordId m_rid; 18 | private Field[] m_fields; 19 | 20 | /** 21 | * Create a new tuple with the specified schema (type). 22 | * 23 | * @param td 24 | * the schema of this tuple. It must be a valid TupleDesc 25 | * instance with at least one field. 26 | */ 27 | public Tuple(TupleDesc td) { 28 | assert (td != null); 29 | assert (td.numFields() > 0); 30 | m_td = td; 31 | m_fields = new Field[td.numFields()]; 32 | // some code goes here 33 | } 34 | 35 | /** 36 | * @return The TupleDesc representing the schema of this tuple. 37 | */ 38 | public TupleDesc getTupleDesc() { 39 | // some code goes here 40 | return m_td; 41 | } 42 | 43 | /** 44 | * @return The RecordId representing the location of this tuple on disk. May 45 | * be null. 46 | */ 47 | public RecordId getRecordId() { 48 | // some code goes here 49 | return m_rid; 50 | } 51 | 52 | /** 53 | * Set the RecordId information for this tuple. 54 | * 55 | * @param rid 56 | * the new RecordId for this tuple. 57 | */ 58 | public void setRecordId(RecordId rid) { 59 | // some code goes here 60 | m_rid = rid; 61 | } 62 | 63 | /** 64 | * Change the value of the ith field of this tuple. 65 | * 66 | * @param i 67 | * index of the field to change. It must be a valid index. 68 | * @param f 69 | * new value for the field. 70 | */ 71 | public void setField(int i, Field f) { 72 | // some code goes here 73 | assert (i >= 0); 74 | assert (i < m_fields.length); 75 | m_fields[i] = f; 76 | } 77 | 78 | /** 79 | * @return the value of the ith field, or null if it has not been set. 80 | * 81 | * @param i 82 | * field index to return. Must be a valid index. 83 | */ 84 | public Field getField(int i) { 85 | // some code goes here 86 | assert (i >= 0); 87 | assert (i < m_fields.length); 88 | return m_fields[i]; 89 | } 90 | 91 | /** 92 | * Returns the contents of this Tuple as a string. Note that to pass the 93 | * system tests, the format needs to be as follows: 94 | * 95 | * column1\tcolumn2\tcolumn3\t...\tcolumnN\n 96 | * 97 | * where \t is any whitespace, except newline, and \n is a newline 98 | */ 99 | public String toString() { 100 | // some code goes here 101 | String s = ""; 102 | for (int i = 0; i < m_fields.length-1; i++){ 103 | s += m_fields[i] + "\t"; 104 | } 105 | s += m_fields[m_fields.length-1] + "\n"; 106 | return s; 107 | } 108 | 109 | /** 110 | * @return 111 | * An iterator which iterates over all the fields of this tuple 112 | */ 113 | public Iterator<Field> fields() 114 | { 115 | // some code goes here 116 | return Arrays.asList(m_fields).iterator(); 117 | } 118 | 119 | /** 120 | * Reset the TupleDesc of this tuple 121 | */ 122 | public void resetTupleDesc(TupleDesc td) 123 | { 124 | m_td = td; 125 | m_fields = new Field[td.numFields()]; 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /src/java/simpledb/TupleIterator.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.util.*; 4 | 5 | /** 6 | * Implements a DbIterator by wrapping an Iterable. 
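For instance, a hedged sketch of wrapping an in-memory list; td and someTuple are hypothetical, and every tuple must share the same TupleDesc or the constructor throws IllegalArgumentException:

    java.util.ArrayList<Tuple> tuples = new java.util.ArrayList<Tuple>();
    tuples.add(someTuple);
    TupleIterator it = new TupleIterator(td, tuples);
    it.open();
    while (it.hasNext()) {
        Tuple next = it.next();
    }
    it.close();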
7 | */ 8 | public class TupleIterator implements DbIterator { 9 | /** 10 | * 11 | */ 12 | private static final long serialVersionUID = 1L; 13 | Iterator i = null; 14 | TupleDesc td = null; 15 | Iterable tuples = null; 16 | 17 | /** 18 | * Constructs an iterator from the specified Iterable, and the specified 19 | * descriptor. 20 | * 21 | * @param tuples 22 | * The set of tuples to iterate over 23 | */ 24 | public TupleIterator(TupleDesc td, Iterable tuples) { 25 | this.td = td; 26 | this.tuples = tuples; 27 | 28 | // check that all tuples are the right TupleDesc 29 | for (Tuple t : tuples) { 30 | if (!t.getTupleDesc().equals(td)) 31 | throw new IllegalArgumentException( 32 | "incompatible tuple in tuple set"); 33 | } 34 | } 35 | 36 | public void open() { 37 | i = tuples.iterator(); 38 | } 39 | 40 | public boolean hasNext() { 41 | return i.hasNext(); 42 | } 43 | 44 | public Tuple next() { 45 | return i.next(); 46 | } 47 | 48 | public void rewind() { 49 | close(); 50 | open(); 51 | } 52 | 53 | public TupleDesc getTupleDesc() { 54 | return td; 55 | } 56 | 57 | public void close() { 58 | i = null; 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /src/java/simpledb/Type.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.text.ParseException; 4 | import java.io.*; 5 | 6 | /** 7 | * Class representing a type in SimpleDB. 8 | * Types are static objects defined by this class; hence, the Type 9 | * constructor is private. 10 | */ 11 | public enum Type implements Serializable { 12 | INT_TYPE() { 13 | @Override 14 | public int getLen() { 15 | return 4; 16 | } 17 | 18 | @Override 19 | public Field parse(DataInputStream dis) throws ParseException { 20 | try { 21 | return new IntField(dis.readInt()); 22 | } catch (IOException e) { 23 | throw new ParseException("couldn't parse", 0); 24 | } 25 | } 26 | 27 | }, STRING_TYPE() { 28 | @Override 29 | public int getLen() { 30 | return STRING_LEN+4; 31 | } 32 | 33 | @Override 34 | public Field parse(DataInputStream dis) throws ParseException { 35 | try { 36 | int strLen = dis.readInt(); 37 | byte bs[] = new byte[strLen]; 38 | dis.read(bs); 39 | dis.skipBytes(STRING_LEN-strLen); 40 | return new StringField(new String(bs), STRING_LEN); 41 | } catch (IOException e) { 42 | throw new ParseException("couldn't parse", 0); 43 | } 44 | } 45 | }; 46 | 47 | public static final int STRING_LEN = 128; 48 | 49 | /** 50 | * @return the number of bytes required to store a field of this type. 51 | */ 52 | public abstract int getLen(); 53 | 54 | /** 55 | * @return a Field object of the same type as this object that has contents 56 | * read from the specified DataInputStream. 57 | * @param dis The input stream to read from 58 | * @throws ParseException if the data read from the input stream is not 59 | * of the appropriate type. 60 | */ 61 | public abstract Field parse(DataInputStream dis) throws ParseException; 62 | 63 | } 64 | -------------------------------------------------------------------------------- /src/java/simpledb/Utility.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.io.*; 4 | import java.util.ArrayList; 5 | import java.util.UUID; 6 | 7 | /** Helper methods used for testing and implementing random features. 
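A brief hedged sketch of a few of the helpers defined below (the argument values are arbitrary):

    TupleDesc td = Utility.getTupleDesc(2);     // two INT_TYPE columns
    Tuple row = Utility.getHeapTuple(7, 2);     // fields (7, 7) with RecordId(HeapPageId(1, 2), 3)
    Tuple one = Utility.getHeapTuple(5);        // a single INT field with value 5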
*/ 8 | public class Utility { 9 | /** 10 | * @return a Type array of length len populated with Type.INT_TYPE 11 | */ 12 | public static Type[] getTypes(int len) { 13 | Type[] types = new Type[len]; 14 | for (int i = 0; i < len; ++i) 15 | types[i] = Type.INT_TYPE; 16 | return types; 17 | } 18 | 19 | /** 20 | * @return a String array of length len populated with the (possibly null) strings in val, 21 | * and an appended increasing integer at the end (val1, val2, etc.). 22 | */ 23 | public static String[] getStrings(int len, String val) { 24 | String[] strings = new String[len]; 25 | for (int i = 0; i < len; ++i) 26 | strings[i] = val + i; 27 | return strings; 28 | } 29 | 30 | /** 31 | * @return a TupleDesc with n fields of type Type.INT_TYPE, each named 32 | * name + n (name1, name2, etc.). 33 | */ 34 | public static TupleDesc getTupleDesc(int n, String name) { 35 | return new TupleDesc(getTypes(n), getStrings(n, name)); 36 | } 37 | 38 | /** 39 | * @return a TupleDesc with n fields of type Type.INT_TYPE 40 | */ 41 | public static TupleDesc getTupleDesc(int n) { 42 | return new TupleDesc(getTypes(n)); 43 | } 44 | 45 | /** 46 | * @return a Tuple with a single IntField with value n and with 47 | * RecordId(HeapPageId(1,2), 3) 48 | */ 49 | public static Tuple getHeapTuple(int n) { 50 | Tuple tup = new Tuple(getTupleDesc(1)); 51 | tup.setRecordId(new RecordId(new HeapPageId(1, 2), 3)); 52 | tup.setField(0, new IntField(n)); 53 | return tup; 54 | } 55 | 56 | /** 57 | * @return a Tuple with an IntField for every element of tupdata 58 | * and RecordId(HeapPageId(1, 2), 3) 59 | */ 60 | public static Tuple getHeapTuple(int[] tupdata) { 61 | Tuple tup = new Tuple(getTupleDesc(tupdata.length)); 62 | tup.setRecordId(new RecordId(new HeapPageId(1, 2), 3)); 63 | for (int i = 0; i < tupdata.length; ++i) 64 | tup.setField(i, new IntField(tupdata[i])); 65 | return tup; 66 | } 67 | 68 | /** 69 | * @return a Tuple with a 'width' IntFields each with value n and 70 | * with RecordId(HeapPageId(1, 2), 3) 71 | */ 72 | public static Tuple getHeapTuple(int n, int width) { 73 | Tuple tup = new Tuple(getTupleDesc(width)); 74 | tup.setRecordId(new RecordId(new HeapPageId(1, 2), 3)); 75 | for (int i = 0; i < width; ++i) 76 | tup.setField(i, new IntField(n)); 77 | return tup; 78 | } 79 | 80 | /** 81 | * @return a Tuple with a 'width' IntFields with the value tupledata[i] 82 | * in each field. 83 | * do not set it's RecordId, hence do not distinguish which 84 | * sort of file it belongs to. 85 | */ 86 | public static Tuple getTuple(int[] tupledata, int width) { 87 | if(tupledata.length != width) { 88 | System.out.println("get Hash Tuple has the wrong length~"); 89 | System.exit(1); 90 | } 91 | Tuple tup = new Tuple(getTupleDesc(width)); 92 | for (int i = 0; i < width; ++i) 93 | tup.setField(i, new IntField(tupledata[i])); 94 | return tup; 95 | } 96 | 97 | /** 98 | * A utility method to create a new HeapFile with a single empty page, 99 | * assuming the path does not already exist. If the path exists, the file 100 | * will be overwritten. The new table will be added to the Catalog with 101 | * the specified number of columns as IntFields. 
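For example, a hedged sketch of the create-and-look-up flow these helpers support; the file path is hypothetical, and the call site is assumed to declare throws IOException:

    HeapFile hf = Utility.createEmptyHeapFile("/tmp/example.dat", 2);  // one empty page, 2 INT columns
    int tableId = hf.getId();                                          // the file is now in the Catalog
    DbFile same = Database.getCatalog().getDatabaseFile(tableId);      // looks up the same file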
102 | */ 103 | public static HeapFile createEmptyHeapFile(String path, int cols) 104 | throws IOException { 105 | File f = new File(path); 106 | // touch the file 107 | FileOutputStream fos = new FileOutputStream(f); 108 | fos.write(new byte[0]); 109 | fos.close(); 110 | 111 | HeapFile hf = openHeapFile(cols, f); 112 | HeapPageId pid = new HeapPageId(hf.getId(), 0); 113 | 114 | HeapPage page = null; 115 | try { 116 | page = new HeapPage(pid, HeapPage.createEmptyPageData()); 117 | } catch (IOException e) { 118 | // this should never happen for an empty page; bail; 119 | throw new RuntimeException("failed to create empty page in HeapFile"); 120 | } 121 | 122 | hf.writePage(page); 123 | return hf; 124 | } 125 | 126 | /** Opens a HeapFile and adds it to the catalog. 127 | * 128 | * @param cols number of columns in the table. 129 | * @param f location of the file storing the table. 130 | * @return the opened table. 131 | */ 132 | public static HeapFile openHeapFile(int cols, File f) { 133 | // create the HeapFile and add it to the catalog 134 | TupleDesc td = getTupleDesc(cols); 135 | HeapFile hf = new HeapFile(f, td); 136 | Database.getCatalog().addTable(hf, UUID.randomUUID().toString()); 137 | return hf; 138 | } 139 | 140 | public static HeapFile openHeapFile(int cols, String colPrefix, File f) { 141 | // create the HeapFile and add it to the catalog 142 | TupleDesc td = getTupleDesc(cols, colPrefix); 143 | HeapFile hf = new HeapFile(f, td); 144 | Database.getCatalog().addTable(hf, UUID.randomUUID().toString()); 145 | return hf; 146 | } 147 | 148 | public static String listToString(ArrayList list) { 149 | String out = ""; 150 | for (Integer i : list) { 151 | if (out.length() > 0) out += "\t"; 152 | out += i; 153 | } 154 | return out; 155 | } 156 | } 157 | 158 | -------------------------------------------------------------------------------- /test/simpledb/AggregateTest.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import static org.junit.Assert.assertEquals; 4 | import static org.junit.Assert.assertNotNull; 5 | import static org.junit.Assert.assertTrue; 6 | import junit.framework.JUnit4TestAdapter; 7 | 8 | import org.junit.Before; 9 | import org.junit.Test; 10 | 11 | import simpledb.systemtest.SimpleDbTestBase; 12 | 13 | public class AggregateTest extends SimpleDbTestBase { 14 | 15 | int width1 = 2; 16 | DbIterator scan1; 17 | DbIterator scan2; 18 | DbIterator scan3; 19 | 20 | DbIterator sum; 21 | DbIterator sumstring; 22 | 23 | DbIterator avg; 24 | DbIterator max; 25 | DbIterator min; 26 | DbIterator count; 27 | 28 | /** 29 | * Initialize each unit test 30 | */ 31 | @Before public void createTupleLists() throws Exception { 32 | this.scan1 = TestUtil.createTupleList(width1, 33 | new int[] { 1, 2, 34 | 1, 4, 35 | 1, 6, 36 | 3, 2, 37 | 3, 4, 38 | 3, 6, 39 | 5, 7 }); 40 | this.scan2 = TestUtil.createTupleList(width1, 41 | new Object[] { 1, "a", 42 | 1, "a", 43 | 1, "a", 44 | 3, "a", 45 | 3, "a", 46 | 3, "a", 47 | 5, "a" }); 48 | this.scan3 = TestUtil.createTupleList(width1, 49 | new Object[] { "a", 2, 50 | "a", 4, 51 | "a", 6, 52 | "b", 2, 53 | "b", 4, 54 | "b", 6, 55 | "c", 7 }); 56 | 57 | this.sum = TestUtil.createTupleList(width1, 58 | new int[] { 1, 12, 59 | 3, 12, 60 | 5, 7 }); 61 | this.sumstring = TestUtil.createTupleList(width1, 62 | new Object[] { "a", 12, 63 | "b", 12, 64 | "c", 7 }); 65 | 66 | this.avg = TestUtil.createTupleList(width1, 67 | new int[] { 1, 4, 68 | 3, 4, 69 | 5, 7 }); 70 | this.min = 
TestUtil.createTupleList(width1, 71 | new int[] { 1, 2, 72 | 3, 2, 73 | 5, 7 }); 74 | this.max = TestUtil.createTupleList(width1, 75 | new int[] { 1, 6, 76 | 3, 6, 77 | 5, 7 }); 78 | this.count = TestUtil.createTupleList(width1, 79 | new int[] { 1, 3, 80 | 3, 3, 81 | 5, 1 }); 82 | 83 | } 84 | 85 | /** 86 | * Unit test for Aggregate.getTupleDesc() 87 | */ 88 | @Test public void getTupleDesc() { 89 | Aggregate op = new Aggregate(scan1, 0, 0, 90 | Aggregator.Op.MIN); 91 | TupleDesc expected = Utility.getTupleDesc(2); 92 | TupleDesc actual = op.getTupleDesc(); 93 | assertEquals(expected, actual); 94 | } 95 | 96 | /** 97 | * Unit test for Aggregate.rewind() 98 | */ 99 | @Test public void rewind() throws Exception { 100 | Aggregate op = new Aggregate(scan1, 1, 0, 101 | Aggregator.Op.MIN); 102 | op.open(); 103 | while (op.hasNext()) { 104 | assertNotNull(op.next()); 105 | } 106 | assertTrue(TestUtil.checkExhausted(op)); 107 | 108 | op.rewind(); 109 | min.open(); 110 | TestUtil.matchAllTuples(min, op); 111 | } 112 | 113 | /** 114 | * Unit test for Aggregate.getNext() using a count aggregate with string types 115 | */ 116 | @Test public void countStringAggregate() throws Exception { 117 | Aggregate op = new Aggregate(scan2, 1, 0, 118 | Aggregator.Op.COUNT); 119 | op.open(); 120 | count.open(); 121 | TestUtil.matchAllTuples(count, op); 122 | } 123 | 124 | /** 125 | * Unit test for Aggregate.getNext() using a count aggregate with string types 126 | */ 127 | @Test public void sumStringGroupBy() throws Exception { 128 | Aggregate op = new Aggregate(scan3, 1, 0, 129 | Aggregator.Op.SUM); 130 | op.open(); 131 | sumstring.open(); 132 | TestUtil.matchAllTuples(sumstring, op); 133 | } 134 | 135 | /** 136 | * Unit test for Aggregate.getNext() using a sum aggregate 137 | */ 138 | @Test public void sumAggregate() throws Exception { 139 | Aggregate op = new Aggregate(scan1, 1, 0, 140 | Aggregator.Op.SUM); 141 | op.open(); 142 | sum.open(); 143 | TestUtil.matchAllTuples(sum, op); 144 | } 145 | 146 | /** 147 | * Unit test for Aggregate.getNext() using an avg aggregate 148 | */ 149 | @Test public void avgAggregate() throws Exception { 150 | Aggregate op = new Aggregate(scan1, 1, 0, 151 | Aggregator.Op.AVG); 152 | op.open(); 153 | avg.open(); 154 | TestUtil.matchAllTuples(avg, op); 155 | } 156 | 157 | /** 158 | * Unit test for Aggregate.getNext() using a max aggregate 159 | */ 160 | @Test public void maxAggregate() throws Exception { 161 | Aggregate op = new Aggregate(scan1, 1, 0, 162 | Aggregator.Op.MAX); 163 | op.open(); 164 | max.open(); 165 | TestUtil.matchAllTuples(max, op); 166 | } 167 | 168 | /** 169 | * Unit test for Aggregate.getNext() using a min aggregate 170 | */ 171 | @Test public void minAggregate() throws Exception { 172 | Aggregate op = new Aggregate(scan1, 1, 0, 173 | Aggregator.Op.MIN); 174 | op.open(); 175 | min.open(); 176 | TestUtil.matchAllTuples(min, op); 177 | } 178 | 179 | /** 180 | * JUnit suite target 181 | */ 182 | public static junit.framework.Test suite() { 183 | return new JUnit4TestAdapter(AggregateTest.class); 184 | } 185 | } 186 | 187 | -------------------------------------------------------------------------------- /test/simpledb/CatalogTest.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import static org.junit.Assert.assertEquals; 4 | 5 | import java.util.NoSuchElementException; 6 | 7 | import junit.framework.Assert; 8 | import junit.framework.JUnit4TestAdapter; 9 | 10 | import org.junit.Before; 11 | import 
org.junit.Test; 12 | 13 | import simpledb.TestUtil.SkeletonFile; 14 | import simpledb.systemtest.SimpleDbTestBase; 15 | import simpledb.systemtest.SystemTestUtil; 16 | 17 | public class CatalogTest extends SimpleDbTestBase { 18 | private static String name = "test"; 19 | private String nameThisTestRun; 20 | 21 | @Before public void addTables() throws Exception { 22 | Database.getCatalog().clear(); 23 | nameThisTestRun = SystemTestUtil.getUUID(); 24 | Database.getCatalog().addTable(new SkeletonFile(-1, Utility.getTupleDesc(2)), nameThisTestRun); 25 | Database.getCatalog().addTable(new SkeletonFile(-2, Utility.getTupleDesc(2)), name); 26 | } 27 | 28 | /** 29 | * Unit test for Catalog.getTupleDesc() 30 | */ 31 | @Test public void getTupleDesc() throws Exception { 32 | TupleDesc expected = Utility.getTupleDesc(2); 33 | TupleDesc actual = Database.getCatalog().getTupleDesc(-1); 34 | 35 | assertEquals(expected, actual); 36 | } 37 | 38 | /** 39 | * Unit test for Catalog.getTableId() 40 | */ 41 | @Test public void getTableId() { 42 | assertEquals(-2, Database.getCatalog().getTableId(name)); 43 | assertEquals(-1, Database.getCatalog().getTableId(nameThisTestRun)); 44 | 45 | try { 46 | Database.getCatalog().getTableId(null); 47 | Assert.fail("Should not find table with null name"); 48 | } catch (NoSuchElementException e) { 49 | // Expected to get here 50 | } 51 | 52 | try { 53 | Database.getCatalog().getTableId("foo"); 54 | Assert.fail("Should not find table with name foo"); 55 | } catch (NoSuchElementException e) { 56 | // Expected to get here 57 | } 58 | } 59 | 60 | /** 61 | * Unit test for Catalog.getDatabaseFile() 62 | */ 63 | 64 | @Test public void getDatabaseFile() throws Exception { 65 | DbFile f = Database.getCatalog().getDatabaseFile(-1); 66 | 67 | // NOTE(ghuo): we try not to dig too deeply into the DbFile API here; we 68 | // rely on HeapFileTest for that. perform some basic checks. 
69 | assertEquals(-1, f.getId()); 70 | } 71 | 72 | /** 73 | * JUnit suite target 74 | */ 75 | public static junit.framework.Test suite() { 76 | return new JUnit4TestAdapter(CatalogTest.class); 77 | } 78 | } 79 | 80 | -------------------------------------------------------------------------------- /test/simpledb/FilterTest.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import static org.junit.Assert.assertEquals; 4 | import static org.junit.Assert.assertNotNull; 5 | import static org.junit.Assert.assertTrue; 6 | import junit.framework.JUnit4TestAdapter; 7 | 8 | import org.junit.Before; 9 | import org.junit.Test; 10 | 11 | import simpledb.systemtest.SimpleDbTestBase; 12 | 13 | public class FilterTest extends SimpleDbTestBase { 14 | 15 | int testWidth = 3; 16 | DbIterator scan; 17 | 18 | /** 19 | * Initialize each unit test 20 | */ 21 | @Before public void setUp() { 22 | this.scan = new TestUtil.MockScan(-5, 5, testWidth); 23 | } 24 | 25 | /** 26 | * Unit test for Filter.getTupleDesc() 27 | */ 28 | @Test public void getTupleDesc() { 29 | Predicate pred = new Predicate(0, Predicate.Op.EQUALS, TestUtil.getField(0)); 30 | Filter op = new Filter(pred, scan); 31 | TupleDesc expected = Utility.getTupleDesc(testWidth); 32 | TupleDesc actual = op.getTupleDesc(); 33 | assertEquals(expected, actual); 34 | } 35 | 36 | /** 37 | * Unit test for Filter.rewind() 38 | */ 39 | @Test public void rewind() throws Exception { 40 | Predicate pred = new Predicate(0, Predicate.Op.EQUALS, TestUtil.getField(0)); 41 | Filter op = new Filter(pred, scan); 42 | op.open(); 43 | assertTrue(op.hasNext()); 44 | assertNotNull(op.next()); 45 | assertTrue(TestUtil.checkExhausted(op)); 46 | 47 | op.rewind(); 48 | Tuple expected = Utility.getHeapTuple(0, testWidth); 49 | Tuple actual = op.next(); 50 | assertTrue(TestUtil.compareTuples(expected, actual)); 51 | op.close(); 52 | } 53 | 54 | /** 55 | * Unit test for Filter.getNext() using a < predicate that filters 56 | * some tuples 57 | */ 58 | @Test public void filterSomeLessThan() throws Exception { 59 | Predicate pred; 60 | pred = new Predicate(0, Predicate.Op.LESS_THAN, TestUtil.getField(2)); 61 | Filter op = new Filter(pred, scan); 62 | TestUtil.MockScan expectedOut = new TestUtil.MockScan(-5, 2, testWidth); 63 | op.open(); 64 | TestUtil.compareDbIterators(op, expectedOut); 65 | op.close(); 66 | } 67 | 68 | /** 69 | * Unit test for Filter.getNext() using a < predicate that filters 70 | * everything 71 | */ 72 | @Test public void filterAllLessThan() throws Exception { 73 | Predicate pred; 74 | pred = new Predicate(0, Predicate.Op.LESS_THAN, TestUtil.getField(-5)); 75 | Filter op = new Filter(pred, scan); 76 | op.open(); 77 | assertTrue(TestUtil.checkExhausted(op)); 78 | op.close(); 79 | } 80 | 81 | /** 82 | * Unit test for Filter.getNext() using an = predicate 83 | */ 84 | @Test public void filterEqual() throws Exception { 85 | Predicate pred; 86 | this.scan = new TestUtil.MockScan(-5, 5, testWidth); 87 | pred = new Predicate(0, Predicate.Op.EQUALS, TestUtil.getField(-5)); 88 | Filter op = new Filter(pred, scan); 89 | op.open(); 90 | assertTrue(TestUtil.compareTuples(Utility.getHeapTuple(-5, testWidth), 91 | op.next())); 92 | op.close(); 93 | 94 | this.scan = new TestUtil.MockScan(-5, 5, testWidth); 95 | pred = new Predicate(0, Predicate.Op.EQUALS, TestUtil.getField(0)); 96 | op = new Filter(pred, scan); 97 | op.open(); 98 | assertTrue(TestUtil.compareTuples(Utility.getHeapTuple(0, testWidth), 99 | 
op.next())); 100 | op.close(); 101 | 102 | this.scan = new TestUtil.MockScan(-5, 5, testWidth); 103 | pred = new Predicate(0, Predicate.Op.EQUALS, TestUtil.getField(4)); 104 | op = new Filter(pred, scan); 105 | op.open(); 106 | assertTrue(TestUtil.compareTuples(Utility.getHeapTuple(4, testWidth), 107 | op.next())); 108 | op.close(); 109 | } 110 | 111 | /** 112 | * Unit test for Filter.getNext() using an = predicate passing no tuples 113 | */ 114 | @Test public void filterEqualNoTuples() throws Exception { 115 | Predicate pred; 116 | pred = new Predicate(0, Predicate.Op.EQUALS, TestUtil.getField(5)); 117 | Filter op = new Filter(pred, scan); 118 | op.open(); 119 | TestUtil.checkExhausted(op); 120 | op.close(); 121 | } 122 | 123 | /** 124 | * JUnit suite target 125 | */ 126 | public static junit.framework.Test suite() { 127 | return new JUnit4TestAdapter(FilterTest.class); 128 | } 129 | } 130 | 131 | -------------------------------------------------------------------------------- /test/simpledb/HeapFileReadTest.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import simpledb.systemtest.SimpleDbTestBase; 4 | import simpledb.systemtest.SystemTestUtil; 5 | 6 | import java.util.*; 7 | import org.junit.After; 8 | import org.junit.Before; 9 | import org.junit.Test; 10 | 11 | import static org.junit.Assert.*; 12 | import junit.framework.JUnit4TestAdapter; 13 | 14 | public class HeapFileReadTest extends SimpleDbTestBase { 15 | private HeapFile hf; 16 | private TransactionId tid; 17 | private TupleDesc td; 18 | 19 | /** 20 | * Set up initial resources for each unit test. 21 | */ 22 | @Before 23 | public void setUp() throws Exception { 24 | hf = SystemTestUtil.createRandomHeapFile(2, 20, null, null); 25 | td = Utility.getTupleDesc(2); 26 | tid = new TransactionId(); 27 | } 28 | 29 | @After 30 | public void tearDown() throws Exception { 31 | Database.getBufferPool().transactionComplete(tid); 32 | } 33 | 34 | /** 35 | * Unit test for HeapFile.getId() 36 | */ 37 | @Test 38 | public void getId() throws Exception { 39 | int id = hf.getId(); 40 | 41 | // NOTE(ghuo): the value could be anything. test determinism, at least. 42 | assertEquals(id, hf.getId()); 43 | assertEquals(id, hf.getId()); 44 | 45 | HeapFile other = SystemTestUtil.createRandomHeapFile(1, 1, null, null); 46 | assertTrue(id != other.getId()); 47 | } 48 | 49 | /** 50 | * Unit test for HeapFile.getTupleDesc() 51 | */ 52 | @Test 53 | public void getTupleDesc() throws Exception { 54 | assertEquals(td, hf.getTupleDesc()); 55 | } 56 | /** 57 | * Unit test for HeapFile.numPages() 58 | */ 59 | @Test 60 | public void numPages() throws Exception { 61 | assertEquals(1, hf.numPages()); 62 | // assertEquals(1, empty.numPages()); 63 | } 64 | 65 | /** 66 | * Unit test for HeapFile.readPage() 67 | */ 68 | @Test 69 | public void readPage() throws Exception { 70 | HeapPageId pid = new HeapPageId(hf.getId(), 0); 71 | HeapPage page = (HeapPage) hf.readPage(pid); 72 | 73 | // NOTE(ghuo): we try not to dig too deeply into the Page API here; we 74 | // rely on HeapPageTest for that. perform some basic checks. 
75 | assertEquals(484, page.getNumEmptySlots()); 76 | assertTrue(page.isSlotUsed(1)); 77 | assertFalse(page.isSlotUsed(20)); 78 | } 79 | 80 | @Test 81 | public void testIteratorBasic() throws Exception { 82 | HeapFile smallFile = SystemTestUtil.createRandomHeapFile(2, 3, null, 83 | null); 84 | 85 | DbFileIterator it = smallFile.iterator(tid); 86 | // Not open yet 87 | assertFalse(it.hasNext()); 88 | try { 89 | it.next(); 90 | fail("expected exception"); 91 | } catch (NoSuchElementException e) { 92 | } 93 | 94 | it.open(); 95 | int count = 0; 96 | while (it.hasNext()) { 97 | assertNotNull(it.next()); 98 | count += 1; 99 | } 100 | assertEquals(3, count); 101 | it.close(); 102 | } 103 | 104 | @Test 105 | public void testIteratorClose() throws Exception { 106 | // make more than 1 page. Previous closed iterator would start fetching 107 | // from page 1. 108 | HeapFile twoPageFile = SystemTestUtil.createRandomHeapFile(2, 520, 109 | null, null); 110 | 111 | DbFileIterator it = twoPageFile.iterator(tid); 112 | it.open(); 113 | assertTrue(it.hasNext()); 114 | it.close(); 115 | try { 116 | it.next(); 117 | fail("expected exception"); 118 | } catch (NoSuchElementException e) { 119 | } 120 | // close twice is harmless 121 | it.close(); 122 | } 123 | 124 | /** 125 | * JUnit suite target 126 | */ 127 | public static junit.framework.Test suite() { 128 | return new JUnit4TestAdapter(HeapFileReadTest.class); 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /test/simpledb/HeapFileWriteTest.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import org.junit.After; 4 | import org.junit.Before; 5 | import org.junit.Test; 6 | 7 | import static org.junit.Assert.*; 8 | import junit.framework.JUnit4TestAdapter; 9 | 10 | public class HeapFileWriteTest extends TestUtil.CreateHeapFile { 11 | private TransactionId tid; 12 | 13 | /** 14 | * Set up initial resources for each unit test. 15 | */ 16 | @Before public void setUp() throws Exception { 17 | super.setUp(); 18 | tid = new TransactionId(); 19 | } 20 | 21 | @After public void tearDown() throws Exception { 22 | Database.getBufferPool().transactionComplete(tid); 23 | } 24 | 25 | /** 26 | * Unit test for HeapFile.addTuple() 27 | */ 28 | @Test public void addTuple() throws Exception { 29 | // we should be able to add 504 tuples on an empty page. 30 | for (int i = 0; i < 504; ++i) { 31 | empty.insertTuple(tid, Utility.getHeapTuple(i, 2)); 32 | assertEquals(1, empty.numPages()); 33 | } 34 | 35 | // the next 512 additions should live on a new page 36 | for (int i = 0; i < 504; ++i) { 37 | empty.insertTuple(tid, Utility.getHeapTuple(i, 2)); 38 | assertEquals(2, empty.numPages()); 39 | } 40 | 41 | // and one more, just for fun... 
42 | empty.insertTuple(tid, Utility.getHeapTuple(0, 2)); 43 | assertEquals(3, empty.numPages()); 44 | } 45 | 46 | /** 47 | * JUnit suite target 48 | */ 49 | public static junit.framework.Test suite() { 50 | return new JUnit4TestAdapter(HeapFileWriteTest.class); 51 | } 52 | } 53 | 54 | -------------------------------------------------------------------------------- /test/simpledb/HeapPageIdTest.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import static org.junit.Assert.assertEquals; 4 | import static org.junit.Assert.assertFalse; 5 | import static org.junit.Assert.assertTrue; 6 | import junit.framework.JUnit4TestAdapter; 7 | 8 | import org.junit.Before; 9 | import org.junit.Test; 10 | 11 | import simpledb.systemtest.SimpleDbTestBase; 12 | 13 | public class HeapPageIdTest extends SimpleDbTestBase { 14 | 15 | private HeapPageId pid; 16 | 17 | @Before public void createPid() { 18 | pid = new HeapPageId(1, 1); 19 | } 20 | 21 | /** 22 | * Unit test for HeapPageId.getTableId() 23 | */ 24 | @Test public void getTableId() { 25 | assertEquals(1, pid.getTableId()); 26 | } 27 | 28 | /** 29 | * Unit test for HeapPageId.pageno() 30 | */ 31 | @Test public void pageno() { 32 | assertEquals(1, pid.pageNumber()); 33 | } 34 | 35 | /** 36 | * Unit test for HeapPageId.hashCode() 37 | */ 38 | @Test public void testHashCode() { 39 | int code1, code2; 40 | 41 | // NOTE(ghuo): the hashCode could be anything. test determinism, 42 | // at least. 43 | pid = new HeapPageId(1, 1); 44 | code1 = pid.hashCode(); 45 | assertEquals(code1, pid.hashCode()); 46 | assertEquals(code1, pid.hashCode()); 47 | 48 | pid = new HeapPageId(2, 2); 49 | code2 = pid.hashCode(); 50 | assertEquals(code2, pid.hashCode()); 51 | assertEquals(code2, pid.hashCode()); 52 | } 53 | 54 | /** 55 | * Unit test for HeapPageId.equals() 56 | */ 57 | @Test public void equals() { 58 | HeapPageId pid1 = new HeapPageId(1, 1); 59 | HeapPageId pid1Copy = new HeapPageId(1, 1); 60 | HeapPageId pid2 = new HeapPageId(2, 2); 61 | 62 | // .equals() with null should return false 63 | assertFalse(pid1.equals(null)); 64 | 65 | // .equals() with the wrong type should return false 66 | assertFalse(pid1.equals(new Object())); 67 | 68 | assertTrue(pid1.equals(pid1)); 69 | assertTrue(pid1.equals(pid1Copy)); 70 | assertTrue(pid1Copy.equals(pid1)); 71 | assertTrue(pid2.equals(pid2)); 72 | 73 | assertFalse(pid1.equals(pid2)); 74 | assertFalse(pid1Copy.equals(pid2)); 75 | assertFalse(pid2.equals(pid1)); 76 | assertFalse(pid2.equals(pid1Copy)); 77 | } 78 | 79 | /** 80 | * JUnit suite target 81 | */ 82 | public static junit.framework.Test suite() { 83 | return new JUnit4TestAdapter(HeapPageIdTest.class); 84 | } 85 | } 86 | 87 | -------------------------------------------------------------------------------- /test/simpledb/HeapPageReadTest.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import simpledb.TestUtil.SkeletonFile; 4 | import simpledb.systemtest.SimpleDbTestBase; 5 | import simpledb.systemtest.SystemTestUtil; 6 | 7 | import java.io.File; 8 | import java.io.IOException; 9 | import java.util.*; 10 | 11 | import org.junit.Before; 12 | import org.junit.Test; 13 | import static org.junit.Assert.assertEquals; 14 | import static org.junit.Assert.assertFalse; 15 | import static org.junit.Assert.assertTrue; 16 | import junit.framework.JUnit4TestAdapter; 17 | 18 | public class HeapPageReadTest extends SimpleDbTestBase { 19 | private HeapPageId 
pid; 20 | 21 | public static final int[][] EXAMPLE_VALUES = new int[][] { 22 | { 31933, 862 }, 23 | { 29402, 56883 }, 24 | { 1468, 5825 }, 25 | { 17876, 52278 }, 26 | { 6350, 36090 }, 27 | { 34784, 43771 }, 28 | { 28617, 56874 }, 29 | { 19209, 23253 }, 30 | { 56462, 24979 }, 31 | { 51440, 56685 }, 32 | { 3596, 62307 }, 33 | { 45569, 2719 }, 34 | { 22064, 43575 }, 35 | { 42812, 44947 }, 36 | { 22189, 19724 }, 37 | { 33549, 36554 }, 38 | { 9086, 53184 }, 39 | { 42878, 33394 }, 40 | { 62778, 21122 }, 41 | { 17197, 16388 } 42 | }; 43 | 44 | public static final byte[] EXAMPLE_DATA; 45 | static { 46 | // Build the input table 47 | ArrayList> table = new ArrayList>(); 48 | for (int[] tuple : EXAMPLE_VALUES) { 49 | ArrayList listTuple = new ArrayList(); 50 | for (int value : tuple) { 51 | listTuple.add(value); 52 | } 53 | table.add(listTuple); 54 | } 55 | 56 | // Convert it to a HeapFile and read in the bytes 57 | try { 58 | File temp = File.createTempFile("table", ".dat"); 59 | temp.deleteOnExit(); 60 | HeapFileEncoder.convert(table, temp, BufferPool.getPageSize(), 2); 61 | EXAMPLE_DATA = TestUtil.readFileBytes(temp.getAbsolutePath()); 62 | } catch (IOException e) { 63 | throw new RuntimeException(e); 64 | } 65 | } 66 | 67 | /** 68 | * Set up initial resources for each unit test. 69 | */ 70 | @Before public void addTable() throws Exception { 71 | this.pid = new HeapPageId(-1, -1); 72 | Database.getCatalog().addTable(new SkeletonFile(-1, Utility.getTupleDesc(2)), SystemTestUtil.getUUID()); 73 | } 74 | 75 | /** 76 | * Unit test for HeapPage.getId() 77 | */ 78 | @Test public void getId() throws Exception { 79 | HeapPage page = new HeapPage(pid, EXAMPLE_DATA); 80 | assertEquals(pid, page.getId()); 81 | } 82 | 83 | /** 84 | * Unit test for HeapPage.iterator() 85 | */ 86 | @Test public void testIterator() throws Exception { 87 | HeapPage page = new HeapPage(pid, EXAMPLE_DATA); 88 | Iterator it = page.iterator(); 89 | 90 | int row = 0; 91 | while (it.hasNext()) { 92 | Tuple tup = it.next(); 93 | IntField f0 = (IntField) tup.getField(0); 94 | IntField f1 = (IntField) tup.getField(1); 95 | 96 | assertEquals(EXAMPLE_VALUES[row][0], f0.getValue()); 97 | assertEquals(EXAMPLE_VALUES[row][1], f1.getValue()); 98 | row++; 99 | } 100 | } 101 | 102 | /** 103 | * Unit test for HeapPage.getNumEmptySlots() 104 | */ 105 | @Test public void getNumEmptySlots() throws Exception { 106 | HeapPage page = new HeapPage(pid, EXAMPLE_DATA); 107 | assertEquals(484, page.getNumEmptySlots()); 108 | } 109 | 110 | /** 111 | * Unit test for HeapPage.isSlotUsed() 112 | */ 113 | @Test public void getSlot() throws Exception { 114 | HeapPage page = new HeapPage(pid, EXAMPLE_DATA); 115 | 116 | for (int i = 0; i < 20; ++i) 117 | assertTrue(page.isSlotUsed(i)); 118 | 119 | for (int i = 20; i < 504; ++i) 120 | assertFalse(page.isSlotUsed(i)); 121 | } 122 | 123 | /** 124 | * JUnit suite target 125 | */ 126 | public static junit.framework.Test suite() { 127 | return new JUnit4TestAdapter(HeapPageReadTest.class); 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /test/simpledb/HeapPageWriteTest.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import static org.junit.Assert.assertEquals; 4 | import static org.junit.Assert.assertTrue; 5 | 6 | import java.io.IOException; 7 | import java.util.Iterator; 8 | import java.util.LinkedList; 9 | 10 | import junit.framework.JUnit4TestAdapter; 11 | 12 | import org.junit.Before; 13 | 
import org.junit.Test; 14 | 15 | import simpledb.TestUtil.SkeletonFile; 16 | import simpledb.systemtest.SimpleDbTestBase; 17 | import simpledb.systemtest.SystemTestUtil; 18 | 19 | public class HeapPageWriteTest extends SimpleDbTestBase { 20 | 21 | private HeapPageId pid; 22 | 23 | /** 24 | * Set up initial resources for each unit test. 25 | */ 26 | @Before public void addTable() throws IOException { 27 | this.pid = new HeapPageId(-1, -1); 28 | Database.getCatalog().addTable(new SkeletonFile(-1, Utility.getTupleDesc(2)), SystemTestUtil.getUUID()); 29 | } 30 | 31 | /** 32 | * Unit test for HeapPage.isDirty() 33 | */ 34 | @Test public void testDirty() throws Exception { 35 | TransactionId tid = new TransactionId(); 36 | HeapPage page = new HeapPage(pid, HeapPageReadTest.EXAMPLE_DATA); 37 | page.markDirty(true, tid); 38 | TransactionId dirtier = page.isDirty(); 39 | assertEquals(true, dirtier != null); 40 | assertEquals(true, dirtier == tid); 41 | 42 | page.markDirty(false, tid); 43 | dirtier = page.isDirty(); 44 | assertEquals(false, dirtier != null); 45 | } 46 | 47 | /** 48 | * Unit test for HeapPage.addTuple() 49 | */ 50 | @Test public void addTuple() throws Exception { 51 | HeapPage page = new HeapPage(pid, HeapPageReadTest.EXAMPLE_DATA); 52 | int free = page.getNumEmptySlots(); 53 | 54 | // NOTE(ghuo): this nested loop existence check is slow, but it 55 | // shouldn't make a difference for n = 504 slots. 56 | 57 | for (int i = 0; i < free; ++i) { 58 | Tuple addition = Utility.getHeapTuple(i, 2); 59 | page.insertTuple(addition); 60 | assertEquals(free-i-1, page.getNumEmptySlots()); 61 | 62 | // loop through the iterator to ensure that the tuple actually exists 63 | // on the page 64 | Iteratorit = page.iterator(); 65 | boolean found = false; 66 | while (it.hasNext()) { 67 | Tuple tup = it.next(); 68 | if (TestUtil.compareTuples(addition, tup)) { 69 | found = true; 70 | 71 | // verify that the RecordId is sane 72 | assertEquals(page.getId(), tup.getRecordId().getPageId()); 73 | break; 74 | } 75 | } 76 | assertTrue(found); 77 | } 78 | 79 | // now, the page should be full. 80 | try { 81 | page.insertTuple(Utility.getHeapTuple(0, 2)); 82 | throw new Exception("page should be full; expected DbException"); 83 | } catch (DbException e) { 84 | // explicitly ignored 85 | } 86 | } 87 | 88 | /** 89 | * Unit test for HeapPage.deleteTuple() with false tuples 90 | */ 91 | @Test(expected=DbException.class) 92 | public void deleteNonexistentTuple() throws Exception { 93 | HeapPage page = new HeapPage(pid, HeapPageReadTest.EXAMPLE_DATA); 94 | page.deleteTuple(Utility.getHeapTuple(2, 2)); 95 | } 96 | 97 | /** 98 | * Unit test for HeapPage.deleteTuple() 99 | */ 100 | @Test public void deleteTuple() throws Exception { 101 | HeapPage page = new HeapPage(pid, HeapPageReadTest.EXAMPLE_DATA); 102 | int free = page.getNumEmptySlots(); 103 | 104 | // first, build a list of the tuples on the page. 105 | Iterator it = page.iterator(); 106 | LinkedList tuples = new LinkedList(); 107 | while (it.hasNext()) 108 | tuples.add(it.next()); 109 | Tuple first = tuples.getFirst(); 110 | 111 | // now, delete them one-by-one from both the front and the end. 112 | int deleted = 0; 113 | while (tuples.size() > 0) { 114 | page.deleteTuple(tuples.removeFirst()); 115 | page.deleteTuple(tuples.removeLast()); 116 | deleted += 2; 117 | assertEquals(free + deleted, page.getNumEmptySlots()); 118 | } 119 | 120 | // now, the page should be empty. 
121 | try { 122 | page.deleteTuple(first); 123 | throw new Exception("page should be empty; expected DbException"); 124 | } catch (DbException e) { 125 | // explicitly ignored 126 | } 127 | } 128 | 129 | /** 130 | * JUnit suite target 131 | */ 132 | public static junit.framework.Test suite() { 133 | return new JUnit4TestAdapter(HeapPageWriteTest.class); 134 | } 135 | } 136 | 137 | -------------------------------------------------------------------------------- /test/simpledb/InsertTest.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import org.junit.Before; 4 | import org.junit.Test; 5 | 6 | import static org.junit.Assert.assertEquals; 7 | import static org.junit.Assert.assertTrue; 8 | import junit.framework.JUnit4TestAdapter; 9 | 10 | /** 11 | * We reserve more heavy-duty insertion testing for HeapFile and HeapPage. 12 | * This suite is superficial. 13 | */ 14 | public class InsertTest extends TestUtil.CreateHeapFile { 15 | 16 | private DbIterator scan1; 17 | private TransactionId tid; 18 | 19 | /** 20 | * Initialize each unit test 21 | */ 22 | @Before public void setUp() throws Exception { 23 | super.setUp(); 24 | this.scan1 = TestUtil.createTupleList(2, 25 | new int[] { 1, 2, 26 | 1, 4, 27 | 1, 6, 28 | 3, 2, 29 | 3, 4, 30 | 3, 6, 31 | 5, 7 }); 32 | tid = new TransactionId(); 33 | } 34 | 35 | /** 36 | * Unit test for Insert.getTupleDesc() 37 | */ 38 | @Test public void getTupleDesc() throws Exception { 39 | Insert op = new Insert(tid,scan1, empty.getId()); 40 | TupleDesc expected = Utility.getTupleDesc(1); 41 | TupleDesc actual = op.getTupleDesc(); 42 | assertEquals(expected, actual); 43 | } 44 | 45 | /** 46 | * Unit test for Insert.getNext(), inserting elements into an empty file 47 | */ 48 | @Test public void getNext() throws Exception { 49 | Insert op = new Insert(tid,scan1, empty.getId()); 50 | op.open(); 51 | assertTrue(TestUtil.compareTuples( 52 | Utility.getHeapTuple(7, 1), // the length of scan1 53 | op.next())); 54 | 55 | // we should fit on one page 56 | assertEquals(1, empty.numPages()); 57 | } 58 | 59 | /** 60 | * JUnit suite target 61 | */ 62 | public static junit.framework.Test suite() { 63 | return new JUnit4TestAdapter(InsertTest.class); 64 | } 65 | } 66 | 67 | -------------------------------------------------------------------------------- /test/simpledb/IntegerAggregatorTest.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import static org.junit.Assert.assertEquals; 4 | 5 | import java.util.NoSuchElementException; 6 | 7 | import junit.framework.JUnit4TestAdapter; 8 | 9 | import org.junit.Before; 10 | import org.junit.Test; 11 | 12 | import simpledb.systemtest.SimpleDbTestBase; 13 | 14 | public class IntegerAggregatorTest extends SimpleDbTestBase { 15 | 16 | int width1 = 2; 17 | DbIterator scan1; 18 | int[][] sum = null; 19 | int[][] min = null; 20 | int[][] max = null; 21 | int[][] avg = null; 22 | 23 | /** 24 | * Initialize each unit test 25 | */ 26 | @Before public void createTupleList() throws Exception { 27 | this.scan1 = TestUtil.createTupleList(width1, 28 | new int[] { 1, 2, 29 | 1, 4, 30 | 1, 6, 31 | 3, 2, 32 | 3, 4, 33 | 3, 6, 34 | 5, 7 }); 35 | 36 | // verify how the results progress after a few merges 37 | this.sum = new int[][] { 38 | { 1, 2 }, 39 | { 1, 6 }, 40 | { 1, 12 }, 41 | { 1, 12, 3, 2 } 42 | }; 43 | 44 | this.min = new int[][] { 45 | { 1, 2 }, 46 | { 1, 2 }, 47 | { 1, 2 }, 48 | { 1, 2, 3, 2 } 49 | }; 50 | 51 | 
this.max = new int[][] { 52 | { 1, 2 }, 53 | { 1, 4 }, 54 | { 1, 6 }, 55 | { 1, 6, 3, 2 } 56 | }; 57 | 58 | this.avg = new int[][] { 59 | { 1, 2 }, 60 | { 1, 3 }, 61 | { 1, 4 }, 62 | { 1, 4, 3, 2 } 63 | }; 64 | } 65 | 66 | /** 67 | * Test IntegerAggregator.mergeTupleIntoGroup() and iterator() over a sum 68 | */ 69 | @Test public void mergeSum() throws Exception { 70 | scan1.open(); 71 | IntegerAggregator agg = new IntegerAggregator(0, Type.INT_TYPE, 1, Aggregator.Op.SUM); 72 | 73 | for (int[] step : sum) { 74 | agg.mergeTupleIntoGroup(scan1.next()); 75 | DbIterator it = agg.iterator(); 76 | it.open(); 77 | TestUtil.matchAllTuples(TestUtil.createTupleList(width1, step), it); 78 | } 79 | } 80 | 81 | /** 82 | * Test IntegerAggregator.mergeTupleIntoGroup() and iterator() over a min 83 | */ 84 | @Test public void mergeMin() throws Exception { 85 | scan1.open(); 86 | IntegerAggregator agg = new IntegerAggregator(0,Type.INT_TYPE, 1, Aggregator.Op.MIN); 87 | 88 | DbIterator it; 89 | for (int[] step : min) { 90 | agg.mergeTupleIntoGroup(scan1.next()); 91 | it = agg.iterator(); 92 | it.open(); 93 | TestUtil.matchAllTuples(TestUtil.createTupleList(width1, step), it); 94 | } 95 | } 96 | 97 | /** 98 | * Test IntegerAggregator.mergeTupleIntoGroup() and iterator() over a max 99 | */ 100 | @Test public void mergeMax() throws Exception { 101 | scan1.open(); 102 | IntegerAggregator agg = new IntegerAggregator(0, Type.INT_TYPE, 1, Aggregator.Op.MAX); 103 | 104 | DbIterator it; 105 | for (int[] step : max) { 106 | agg.mergeTupleIntoGroup(scan1.next()); 107 | it = agg.iterator(); 108 | it.open(); 109 | TestUtil.matchAllTuples(TestUtil.createTupleList(width1, step), it); 110 | } 111 | } 112 | 113 | /** 114 | * Test IntegerAggregator.mergeTupleIntoGroup() and iterator() over an avg 115 | */ 116 | @Test public void mergeAvg() throws Exception { 117 | scan1.open(); 118 | IntegerAggregator agg = new IntegerAggregator(0, Type.INT_TYPE, 1, Aggregator.Op.AVG); 119 | 120 | DbIterator it; 121 | for (int[] step : avg) { 122 | agg.mergeTupleIntoGroup(scan1.next()); 123 | it = agg.iterator(); 124 | it.open(); 125 | TestUtil.matchAllTuples(TestUtil.createTupleList(width1, step), it); 126 | } 127 | } 128 | 129 | /** 130 | * Test IntegerAggregator.iterator() for DbIterator behaviour 131 | */ 132 | @Test public void testIterator() throws Exception { 133 | // first, populate the aggregator via sum over scan1 134 | scan1.open(); 135 | IntegerAggregator agg = new IntegerAggregator(0, Type.INT_TYPE, 1, Aggregator.Op.SUM); 136 | try { 137 | while (true) 138 | agg.mergeTupleIntoGroup(scan1.next()); 139 | } catch (NoSuchElementException e) { 140 | // explicitly ignored 141 | } 142 | 143 | DbIterator it = agg.iterator(); 144 | it.open(); 145 | 146 | // verify it has three elements 147 | int count = 0; 148 | try { 149 | while (true) { 150 | it.next(); 151 | count++; 152 | } 153 | } catch (NoSuchElementException e) { 154 | // explicitly ignored 155 | } 156 | assertEquals(3, count); 157 | 158 | // rewind and try again 159 | it.rewind(); 160 | count = 0; 161 | try { 162 | while (true) { 163 | it.next(); 164 | count++; 165 | } 166 | } catch (NoSuchElementException e) { 167 | // explicitly ignored 168 | } 169 | assertEquals(3, count); 170 | 171 | // close it and check that we don't get anything 172 | it.close(); 173 | try { 174 | it.next(); 175 | throw new Exception("IntegerAggregator iterator yielded tuple after close"); 176 | } catch (Exception e) { 177 | // explicitly ignored 178 | } 179 | } 180 | 181 | /** 182 | * JUnit suite target 183 
| */ 184 | public static junit.framework.Test suite() { 185 | return new JUnit4TestAdapter(IntegerAggregatorTest.class); 186 | } 187 | } 188 | 189 | -------------------------------------------------------------------------------- /test/simpledb/JoinPredicateTest.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import org.junit.Test; 4 | 5 | import simpledb.systemtest.SimpleDbTestBase; 6 | import static org.junit.Assert.assertTrue; 7 | import static org.junit.Assert.assertFalse; 8 | import junit.framework.JUnit4TestAdapter; 9 | 10 | public class JoinPredicateTest extends SimpleDbTestBase { 11 | 12 | /** 13 | * Unit test for JoinPredicate.filter() 14 | */ 15 | @Test public void filterVaryingVals() { 16 | int[] vals = new int[] { -1, 0, 1 }; 17 | 18 | for (int i : vals) { 19 | JoinPredicate p = new JoinPredicate(0, 20 | Predicate.Op.EQUALS, 0); 21 | assertFalse(p.filter(Utility.getHeapTuple(i), Utility.getHeapTuple(i - 1))); 22 | assertTrue(p.filter(Utility.getHeapTuple(i), Utility.getHeapTuple(i))); 23 | assertFalse(p.filter(Utility.getHeapTuple(i), Utility.getHeapTuple(i + 1))); 24 | } 25 | 26 | for (int i : vals) { 27 | JoinPredicate p = new JoinPredicate(0, 28 | Predicate.Op.GREATER_THAN, 0); 29 | assertTrue(p.filter(Utility.getHeapTuple(i), Utility.getHeapTuple(i - 1))); 30 | assertFalse(p.filter(Utility.getHeapTuple(i), Utility.getHeapTuple(i))); 31 | assertFalse(p.filter(Utility.getHeapTuple(i), Utility.getHeapTuple(i + 1))); 32 | } 33 | 34 | for (int i : vals) { 35 | JoinPredicate p = new JoinPredicate(0, 36 | Predicate.Op.GREATER_THAN_OR_EQ, 0); 37 | assertTrue(p.filter(Utility.getHeapTuple(i), Utility.getHeapTuple(i - 1))); 38 | assertTrue(p.filter(Utility.getHeapTuple(i), Utility.getHeapTuple(i))); 39 | assertFalse(p.filter(Utility.getHeapTuple(i), Utility.getHeapTuple(i + 1))); 40 | } 41 | 42 | for (int i : vals) { 43 | JoinPredicate p = new JoinPredicate(0, 44 | Predicate.Op.LESS_THAN, 0); 45 | assertFalse(p.filter(Utility.getHeapTuple(i), Utility.getHeapTuple(i - 1))); 46 | assertFalse(p.filter(Utility.getHeapTuple(i), Utility.getHeapTuple(i))); 47 | assertTrue(p.filter(Utility.getHeapTuple(i), Utility.getHeapTuple(i + 1))); 48 | } 49 | 50 | for (int i : vals) { 51 | JoinPredicate p = new JoinPredicate(0, 52 | Predicate.Op.LESS_THAN_OR_EQ, 0); 53 | assertFalse(p.filter(Utility.getHeapTuple(i), Utility.getHeapTuple(i - 1))); 54 | assertTrue(p.filter(Utility.getHeapTuple(i), Utility.getHeapTuple(i))); 55 | assertTrue(p.filter(Utility.getHeapTuple(i), Utility.getHeapTuple(i + 1))); 56 | } 57 | } 58 | 59 | /** 60 | * JUnit suite target 61 | */ 62 | public static junit.framework.Test suite() { 63 | return new JUnit4TestAdapter(JoinPredicateTest.class); 64 | } 65 | } 66 | 67 | -------------------------------------------------------------------------------- /test/simpledb/JoinTest.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import static org.junit.Assert.assertEquals; 4 | import static org.junit.Assert.assertNotNull; 5 | import static org.junit.Assert.assertTrue; 6 | import junit.framework.JUnit4TestAdapter; 7 | 8 | import org.junit.Before; 9 | import org.junit.Test; 10 | 11 | import simpledb.systemtest.SimpleDbTestBase; 12 | 13 | public class JoinTest extends SimpleDbTestBase { 14 | 15 | int width1 = 2; 16 | int width2 = 3; 17 | DbIterator scan1; 18 | DbIterator scan2; 19 | DbIterator eqJoin; 20 | DbIterator gtJoin; 21 | 22 | /** 23 | * 
Initialize each unit test 24 | */ 25 | @Before public void createTupleLists() throws Exception { 26 | this.scan1 = TestUtil.createTupleList(width1, 27 | new int[] { 1, 2, 28 | 3, 4, 29 | 5, 6, 30 | 7, 8 }); 31 | this.scan2 = TestUtil.createTupleList(width2, 32 | new int[] { 1, 2, 3, 33 | 2, 3, 4, 34 | 3, 4, 5, 35 | 4, 5, 6, 36 | 5, 6, 7 }); 37 | this.eqJoin = TestUtil.createTupleList(width1 + width2, 38 | new int[] { 1, 2, 1, 2, 3, 39 | 3, 4, 3, 4, 5, 40 | 5, 6, 5, 6, 7 }); 41 | this.gtJoin = TestUtil.createTupleList(width1 + width2, 42 | new int[] { 43 | 3, 4, 1, 2, 3, // 1, 2 < 3 44 | 3, 4, 2, 3, 4, 45 | 5, 6, 1, 2, 3, // 1, 2, 3, 4 < 5 46 | 5, 6, 2, 3, 4, 47 | 5, 6, 3, 4, 5, 48 | 5, 6, 4, 5, 6, 49 | 7, 8, 1, 2, 3, // 1, 2, 3, 4, 5 < 7 50 | 7, 8, 2, 3, 4, 51 | 7, 8, 3, 4, 5, 52 | 7, 8, 4, 5, 6, 53 | 7, 8, 5, 6, 7 }); 54 | } 55 | 56 | /** 57 | * Unit test for Join.getTupleDesc() 58 | */ 59 | @Test public void getTupleDesc() { 60 | JoinPredicate pred = new JoinPredicate(0, Predicate.Op.EQUALS, 0); 61 | Join op = new Join(pred, scan1, scan2); 62 | TupleDesc expected = Utility.getTupleDesc(width1 + width2); 63 | TupleDesc actual = op.getTupleDesc(); 64 | assertEquals(expected, actual); 65 | } 66 | 67 | /** 68 | * Unit test for Join.rewind() 69 | */ 70 | @Test public void rewind() throws Exception { 71 | JoinPredicate pred = new JoinPredicate(0, Predicate.Op.EQUALS, 0); 72 | Join op = new Join(pred, scan1, scan2); 73 | op.open(); 74 | while (op.hasNext()) { 75 | assertNotNull(op.next()); 76 | } 77 | assertTrue(TestUtil.checkExhausted(op)); 78 | op.rewind(); 79 | 80 | eqJoin.open(); 81 | Tuple expected = eqJoin.next(); 82 | Tuple actual = op.next(); 83 | assertTrue(TestUtil.compareTuples(expected, actual)); 84 | } 85 | 86 | /** 87 | * Unit test for Join.getNext() using a > predicate 88 | */ 89 | @Test public void gtJoin() throws Exception { 90 | JoinPredicate pred = new JoinPredicate(0, Predicate.Op.GREATER_THAN, 0); 91 | Join op = new Join(pred, scan1, scan2); 92 | op.open(); 93 | gtJoin.open(); 94 | TestUtil.matchAllTuples(gtJoin, op); 95 | } 96 | 97 | /** 98 | * Unit test for Join.getNext() using an = predicate 99 | */ 100 | @Test public void eqJoin() throws Exception { 101 | JoinPredicate pred = new JoinPredicate(0, Predicate.Op.EQUALS, 0); 102 | Join op = new Join(pred, scan1, scan2); 103 | op.open(); 104 | eqJoin.open(); 105 | TestUtil.matchAllTuples(eqJoin, op); 106 | } 107 | 108 | /** 109 | * JUnit suite target 110 | */ 111 | public static junit.framework.Test suite() { 112 | return new JUnit4TestAdapter(JoinTest.class); 113 | } 114 | } 115 | 116 | -------------------------------------------------------------------------------- /test/simpledb/PredicateTest.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import org.junit.Test; 4 | 5 | import simpledb.systemtest.SimpleDbTestBase; 6 | import static org.junit.Assert.assertTrue; 7 | import static org.junit.Assert.assertFalse; 8 | import junit.framework.JUnit4TestAdapter; 9 | 10 | public class PredicateTest extends SimpleDbTestBase{ 11 | 12 | /** 13 | * Unit test for Predicate.filter() 14 | */ 15 | @Test public void filter() { 16 | int[] vals = new int[] { -1, 0, 1 }; 17 | 18 | for (int i : vals) { 19 | Predicate p = new Predicate(0, Predicate.Op.EQUALS, TestUtil.getField(i)); 20 | assertFalse(p.filter(Utility.getHeapTuple(i - 1))); 21 | assertTrue(p.filter(Utility.getHeapTuple(i))); 22 | assertFalse(p.filter(Utility.getHeapTuple(i + 1))); 23 | } 24 | 25 | for (int i : 
vals) { 26 | Predicate p = new Predicate(0, Predicate.Op.GREATER_THAN, 27 | TestUtil.getField(i)); 28 | assertFalse(p.filter(Utility.getHeapTuple(i - 1))); 29 | assertFalse(p.filter(Utility.getHeapTuple(i))); 30 | assertTrue(p.filter(Utility.getHeapTuple(i + 1))); 31 | } 32 | 33 | for (int i : vals) { 34 | Predicate p = new Predicate(0, Predicate.Op.GREATER_THAN_OR_EQ, 35 | TestUtil.getField(i)); 36 | assertFalse(p.filter(Utility.getHeapTuple(i - 1))); 37 | assertTrue(p.filter(Utility.getHeapTuple(i))); 38 | assertTrue(p.filter(Utility.getHeapTuple(i + 1))); 39 | } 40 | 41 | for (int i : vals) { 42 | Predicate p = new Predicate(0, Predicate.Op.LESS_THAN, 43 | TestUtil.getField(i)); 44 | assertTrue(p.filter(Utility.getHeapTuple(i - 1))); 45 | assertFalse(p.filter(Utility.getHeapTuple(i))); 46 | assertFalse(p.filter(Utility.getHeapTuple(i + 1))); 47 | } 48 | 49 | for (int i : vals) { 50 | Predicate p = new Predicate(0, Predicate.Op.LESS_THAN_OR_EQ, 51 | TestUtil.getField(i)); 52 | assertTrue(p.filter(Utility.getHeapTuple(i - 1))); 53 | assertTrue(p.filter(Utility.getHeapTuple(i))); 54 | assertFalse(p.filter(Utility.getHeapTuple(i + 1))); 55 | } 56 | } 57 | 58 | /** 59 | * JUnit suite target 60 | */ 61 | public static junit.framework.Test suite() { 62 | return new JUnit4TestAdapter(PredicateTest.class); 63 | } 64 | } 65 | 66 | -------------------------------------------------------------------------------- /test/simpledb/RecordIdTest.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import static org.junit.Assert.assertEquals; 4 | import static org.junit.Assert.assertFalse; 5 | import junit.framework.JUnit4TestAdapter; 6 | 7 | import org.junit.Before; 8 | import org.junit.Test; 9 | 10 | import simpledb.systemtest.SimpleDbTestBase; 11 | 12 | public class RecordIdTest extends SimpleDbTestBase { 13 | 14 | private static RecordId hrid; 15 | private static RecordId hrid2; 16 | private static RecordId hrid3; 17 | private static RecordId hrid4; 18 | 19 | @Before public void createPids() { 20 | HeapPageId hpid = new HeapPageId(-1, 2); 21 | HeapPageId hpid2 = new HeapPageId(-1, 2); 22 | HeapPageId hpid3 = new HeapPageId(-2, 2); 23 | hrid = new RecordId(hpid, 3); 24 | hrid2 = new RecordId(hpid2, 3); 25 | hrid3 = new RecordId(hpid, 4); 26 | hrid4 = new RecordId(hpid3, 3); 27 | 28 | } 29 | 30 | /** 31 | * Unit test for RecordId.getPageId() 32 | */ 33 | @Test public void getPageId() { 34 | HeapPageId hpid = new HeapPageId(-1, 2); 35 | assertEquals(hpid, hrid.getPageId()); 36 | 37 | } 38 | 39 | /** 40 | * Unit test for RecordId.tupleno() 41 | */ 42 | @Test public void tupleno() { 43 | assertEquals(3, hrid.tupleno()); 44 | } 45 | 46 | /** 47 | * Unit test for RecordId.equals() 48 | */ 49 | @Test public void equals() { 50 | assertEquals(hrid, hrid2); 51 | assertEquals(hrid2, hrid); 52 | assertFalse(hrid.equals(hrid3)); 53 | assertFalse(hrid3.equals(hrid)); 54 | assertFalse(hrid2.equals(hrid4)); 55 | assertFalse(hrid4.equals(hrid2)); 56 | } 57 | 58 | /** 59 | * Unit test for RecordId.hashCode() 60 | */ 61 | @Test public void hCode() { 62 | assertEquals(hrid.hashCode(), hrid2.hashCode()); 63 | } 64 | 65 | /** 66 | * JUnit suite target 67 | */ 68 | public static junit.framework.Test suite() { 69 | return new JUnit4TestAdapter(RecordIdTest.class); 70 | } 71 | } 72 | 73 | -------------------------------------------------------------------------------- /test/simpledb/StringAggregatorTest.java: 
-------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import java.util.*; 4 | 5 | import org.junit.Before; 6 | import org.junit.Test; 7 | 8 | import simpledb.systemtest.SimpleDbTestBase; 9 | import static org.junit.Assert.assertEquals; 10 | import junit.framework.JUnit4TestAdapter; 11 | 12 | public class StringAggregatorTest extends SimpleDbTestBase { 13 | 14 | int width1 = 2; 15 | DbIterator scan1; 16 | int[][] count = null; 17 | 18 | /** 19 | * Initialize each unit test 20 | */ 21 | @Before public void createTupleList() throws Exception { 22 | this.scan1 = TestUtil.createTupleList(width1, 23 | new Object[] { 1, "a", 24 | 1, "b", 25 | 1, "c", 26 | 3, "d", 27 | 3, "e", 28 | 3, "f", 29 | 5, "g" }); 30 | 31 | // verify how the results progress after a few merges 32 | this.count = new int[][] { 33 | { 1, 1 }, 34 | { 1, 2 }, 35 | { 1, 3 }, 36 | { 1, 3, 3, 1 } 37 | }; 38 | 39 | } 40 | 41 | /** 42 | * Test String.mergeTupleIntoGroup() and iterator() over a COUNT 43 | */ 44 | @Test public void mergeCount() throws Exception { 45 | scan1.open(); 46 | StringAggregator agg = new StringAggregator(0, Type.INT_TYPE, 1, Aggregator.Op.COUNT); 47 | 48 | for (int[] step : count) { 49 | agg.mergeTupleIntoGroup(scan1.next()); 50 | DbIterator it = agg.iterator(); 51 | it.open(); 52 | TestUtil.matchAllTuples(TestUtil.createTupleList(width1, step), it); 53 | } 54 | } 55 | 56 | /** 57 | * Test StringAggregator.iterator() for DbIterator behaviour 58 | */ 59 | @Test public void testIterator() throws Exception { 60 | // first, populate the aggregator via sum over scan1 61 | scan1.open(); 62 | StringAggregator agg = new StringAggregator(0, Type.INT_TYPE, 1, Aggregator.Op.COUNT); 63 | try { 64 | while (true) 65 | agg.mergeTupleIntoGroup(scan1.next()); 66 | } catch (NoSuchElementException e) { 67 | // explicitly ignored 68 | } 69 | 70 | DbIterator it = agg.iterator(); 71 | it.open(); 72 | 73 | // verify it has three elements 74 | int count = 0; 75 | try { 76 | while (true) { 77 | it.next(); 78 | count++; 79 | } 80 | } catch (NoSuchElementException e) { 81 | // explicitly ignored 82 | } 83 | assertEquals(3, count); 84 | 85 | // rewind and try again 86 | it.rewind(); 87 | count = 0; 88 | try { 89 | while (true) { 90 | it.next(); 91 | count++; 92 | } 93 | } catch (NoSuchElementException e) { 94 | // explicitly ignored 95 | } 96 | assertEquals(3, count); 97 | 98 | // close it and check that we don't get anything 99 | it.close(); 100 | try { 101 | it.next(); 102 | throw new Exception("StringAggreator iterator yielded tuple after close"); 103 | } catch (Exception e) { 104 | // explicitly ignored 105 | } 106 | } 107 | 108 | /** 109 | * JUnit suite target 110 | */ 111 | public static junit.framework.Test suite() { 112 | return new JUnit4TestAdapter(StringAggregatorTest.class); 113 | } 114 | } 115 | 116 | -------------------------------------------------------------------------------- /test/simpledb/TupleTest.java: -------------------------------------------------------------------------------- 1 | package simpledb; 2 | 3 | import static org.junit.Assert.assertEquals; 4 | import junit.framework.JUnit4TestAdapter; 5 | 6 | import org.junit.Test; 7 | 8 | import simpledb.systemtest.SimpleDbTestBase; 9 | 10 | public class TupleTest extends SimpleDbTestBase { 11 | 12 | /** 13 | * Unit test for Tuple.getField() and Tuple.setField() 14 | */ 15 | @Test public void modifyFields() { 16 | TupleDesc td = Utility.getTupleDesc(2); 17 | 18 | Tuple tup = new Tuple(td); 19 | 
tup.setField(0, new IntField(-1)); 20 | tup.setField(1, new IntField(0)); 21 | 22 | assertEquals(new IntField(-1), tup.getField(0)); 23 | assertEquals(new IntField(0), tup.getField(1)); 24 | 25 | tup.setField(0, new IntField(1)); 26 | tup.setField(1, new IntField(37)); 27 | 28 | assertEquals(new IntField(1), tup.getField(0)); 29 | assertEquals(new IntField(37), tup.getField(1)); 30 | } 31 | 32 | /** 33 | * Unit test for Tuple.getTupleDesc() 34 | */ 35 | @Test public void getTupleDesc() { 36 | TupleDesc td = Utility.getTupleDesc(5); 37 | Tuple tup = new Tuple(td); 38 | assertEquals(td, tup.getTupleDesc()); 39 | } 40 | 41 | /** 42 | * Unit test for Tuple.getRecordId() and Tuple.setRecordId() 43 | */ 44 | @Test public void modifyRecordId() { 45 | Tuple tup1 = new Tuple(Utility.getTupleDesc(1)); 46 | HeapPageId pid1 = new HeapPageId(0,0); 47 | RecordId rid1 = new RecordId(pid1, 0); 48 | tup1.setRecordId(rid1); 49 | 50 | try { 51 | assertEquals(rid1, tup1.getRecordId()); 52 | } catch (java.lang.UnsupportedOperationException e) { 53 | //rethrow the exception with an explanation 54 | throw new UnsupportedOperationException("modifyRecordId() test failed due to " + 55 | "RecordId.equals() not being implemented. This is not required for Lab 1, " + 56 | "but should pass when you do implement the RecordId class."); 57 | } 58 | } 59 | 60 | /** 61 | * JUnit suite target 62 | */ 63 | public static junit.framework.Test suite() { 64 | return new JUnit4TestAdapter(TupleTest.class); 65 | } 66 | } 67 | 68 | -------------------------------------------------------------------------------- /test/simpledb/systemtest/AggregateTest.java: -------------------------------------------------------------------------------- 1 | package simpledb.systemtest; 2 | 3 | import java.io.IOException; 4 | import java.util.ArrayList; 5 | import java.util.HashMap; 6 | import java.util.Map; 7 | 8 | import simpledb.*; 9 | 10 | import org.junit.Test; 11 | 12 | public class AggregateTest extends SimpleDbTestBase { 13 | public void validateAggregate(DbFile table, Aggregator.Op operation, int aggregateColumn, int groupColumn, ArrayList<ArrayList<Integer>> expectedResult) 14 | throws DbException, TransactionAbortedException, IOException { 15 | TransactionId tid = new TransactionId(); 16 | SeqScan ss = new SeqScan(tid, table.getId(), ""); 17 | Aggregate ag = new Aggregate(ss, aggregateColumn, groupColumn, operation); 18 | 19 | SystemTestUtil.matchTuples(ag, expectedResult); 20 | Database.getBufferPool().transactionComplete(tid); 21 | } 22 | 23 | private int computeAggregate(ArrayList<Integer> values, Aggregator.Op operation) { 24 | if (operation == Aggregator.Op.COUNT) return values.size(); 25 | 26 | int value = 0; 27 | if (operation == Aggregator.Op.MIN) value = Integer.MAX_VALUE; 28 | else if (operation == Aggregator.Op.MAX) value = Integer.MIN_VALUE; 29 | 30 | for (int v : values) { 31 | switch (operation) { 32 | case MAX: 33 | if (v > value) value = v; 34 | break; 35 | case MIN: 36 | if (v < value) value = v; 37 | break; 38 | case AVG: 39 | case SUM: 40 | value += v; 41 | break; 42 | default: 43 | throw new IllegalArgumentException("Unsupported operation " + operation); 44 | } 45 | } 46 | 47 | if (operation == Aggregator.Op.AVG) value /= values.size(); 48 | return value; 49 | } 50 | 51 | private ArrayList<ArrayList<Integer>> aggregate(ArrayList<ArrayList<Integer>> tuples, Aggregator.Op operation, int aggregateColumn, int groupColumn) { 52 | // Group the values 53 | HashMap<Integer, ArrayList<Integer>> values = new HashMap<Integer, ArrayList<Integer>>(); 54 | for (ArrayList<Integer> t : tuples) { 55 | Integer key = null; 56 | if (groupColumn != Aggregator.NO_GROUPING) key = t.get(groupColumn); 57 | Integer value = t.get(aggregateColumn); 58 | 59 | if (!values.containsKey(key)) values.put(key, new ArrayList<Integer>()); 60 | values.get(key).add(value); 61 | } 62 | 63 | ArrayList<ArrayList<Integer>> results = new ArrayList<ArrayList<Integer>>(); 64 | for (Map.Entry<Integer, ArrayList<Integer>> e : values.entrySet()) { 65 | ArrayList<Integer> result = new ArrayList<Integer>(); 66 | if (groupColumn != Aggregator.NO_GROUPING) result.add(e.getKey()); 67 | result.add(computeAggregate(e.getValue(), operation)); 68 | results.add(result); 69 | } 70 | return results; 71 | } 72 | 73 | private final static int ROWS = 1024; 74 | private final static int MAX_VALUE = 64; 75 | private final static int COLUMNS = 3; 76 | private void doAggregate(Aggregator.Op operation, int groupColumn) 77 | throws IOException, DbException, TransactionAbortedException { 78 | // Create the table 79 | ArrayList<ArrayList<Integer>> createdTuples = new ArrayList<ArrayList<Integer>>(); 80 | HeapFile table = SystemTestUtil.createRandomHeapFile( 81 | COLUMNS, ROWS, MAX_VALUE, null, createdTuples); 82 | 83 | // Compute the expected answer 84 | ArrayList<ArrayList<Integer>> expected = 85 | aggregate(createdTuples, operation, 1, groupColumn); 86 | 87 | // validate that we get the answer 88 | validateAggregate(table, operation, 1, groupColumn, expected); 89 | } 90 | 91 | @Test public void testSum() throws IOException, DbException, TransactionAbortedException { 92 | doAggregate(Aggregator.Op.SUM, 0); 93 | } 94 | 95 | @Test public void testMin() throws IOException, DbException, TransactionAbortedException { 96 | doAggregate(Aggregator.Op.MIN, 0); 97 | } 98 | 99 | @Test public void testMax() throws IOException, DbException, TransactionAbortedException { 100 | doAggregate(Aggregator.Op.MAX, 0); 101 | } 102 | 103 | @Test public void testCount() throws IOException, DbException, TransactionAbortedException { 104 | doAggregate(Aggregator.Op.COUNT, 0); 105 | } 106 | 107 | @Test public void testAverage() throws IOException, DbException, TransactionAbortedException { 108 | doAggregate(Aggregator.Op.AVG, 0); 109 | } 110 | 111 | @Test public void testAverageNoGroup() 112 | throws IOException, DbException, TransactionAbortedException { 113 | doAggregate(Aggregator.Op.AVG, Aggregator.NO_GROUPING); 114 | } 115 | 116 | /** Make test compatible with older version of ant.
*/ 117 | public static junit.framework.Test suite() { 118 | return new junit.framework.JUnit4TestAdapter(AggregateTest.class); 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /test/simpledb/systemtest/DeleteTest.java: -------------------------------------------------------------------------------- 1 | package simpledb.systemtest; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import java.io.IOException; 6 | import java.util.ArrayList; 7 | import simpledb.*; 8 | 9 | public class DeleteTest extends FilterBase { 10 | ArrayList> expectedTuples = null; 11 | 12 | @Override 13 | protected int applyPredicate(HeapFile table, TransactionId tid, Predicate predicate) 14 | throws DbException, TransactionAbortedException, IOException { 15 | SeqScan ss = new SeqScan(tid, table.getId(), ""); 16 | Filter filter = new Filter(predicate, ss); 17 | Delete deleteOperator = new Delete(tid, filter); 18 | // Query q = new Query(deleteOperator, tid); 19 | 20 | // q.start(); 21 | deleteOperator.open(); 22 | boolean hasResult = false; 23 | int result = -1; 24 | while (deleteOperator.hasNext()) { 25 | Tuple t = deleteOperator.next(); 26 | assertFalse(hasResult); 27 | hasResult = true; 28 | assertEquals(SystemTestUtil.SINGLE_INT_DESCRIPTOR, t.getTupleDesc()); 29 | result = ((IntField) t.getField(0)).getValue(); 30 | } 31 | assertTrue(hasResult); 32 | 33 | deleteOperator.close(); 34 | 35 | // As part of the same transaction, scan the table 36 | if (result == 0) { 37 | // Deleted zero tuples: all tuples still in table 38 | expectedTuples = createdTuples; 39 | } else { 40 | assert result == createdTuples.size(); 41 | expectedTuples = new ArrayList>(); 42 | } 43 | SystemTestUtil.matchTuples(table, tid, expectedTuples); 44 | return result; 45 | } 46 | 47 | @Override 48 | protected void validateAfter(HeapFile table) 49 | throws DbException, TransactionAbortedException, IOException { 50 | // As part of a different transaction, scan the table 51 | SystemTestUtil.matchTuples(table, expectedTuples); 52 | } 53 | 54 | /** Make test compatible with older version of ant. */ 55 | public static junit.framework.Test suite() { 56 | return new junit.framework.JUnit4TestAdapter(DeleteTest.class); 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /test/simpledb/systemtest/EvictionTest.java: -------------------------------------------------------------------------------- 1 | package simpledb.systemtest; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import java.io.IOException; 6 | import java.util.Arrays; 7 | 8 | import org.junit.Test; 9 | 10 | import junit.framework.Assert; 11 | import simpledb.*; 12 | 13 | /** 14 | * Creates a heap file with 1024*500 tuples with two integer fields each. Clears the buffer pool, 15 | * and performs a sequential scan through all of the pages. If the growth in JVM usage 16 | * is greater than 2 MB due to the scan, the test fails. Otherwise, the page eviction policy seems 17 | * to have worked. 
18 | */ 19 | public class EvictionTest extends SimpleDbTestBase { 20 | private static final long MEMORY_LIMIT_IN_MB = 5; 21 | private static final int BUFFER_PAGES = 16; 22 | 23 | @Test public void testHeapFileScanWithManyPages() throws IOException, DbException, TransactionAbortedException { 24 | System.out.println("EvictionTest creating large table"); 25 | HeapFile f = SystemTestUtil.createRandomHeapFile(2, 1024*500, null, null); 26 | System.out.println("EvictionTest scanning large table"); 27 | Database.resetBufferPool(BUFFER_PAGES); 28 | long beginMem = SystemTestUtil.getMemoryFootprint(); 29 | TransactionId tid = new TransactionId(); 30 | SeqScan scan = new SeqScan(tid, f.getId(), ""); 31 | scan.open(); 32 | while (scan.hasNext()) { 33 | scan.next(); 34 | } 35 | System.out.println("EvictionTest scan complete, testing memory usage of scan"); 36 | long endMem = SystemTestUtil.getMemoryFootprint(); 37 | long memDiff = (endMem - beginMem) / (1<<20); 38 | if (memDiff > MEMORY_LIMIT_IN_MB) { 39 | Assert.fail("Did not evict enough pages. Scan took " + memDiff + " MB of RAM, when limit was " + MEMORY_LIMIT_IN_MB); 40 | } 41 | } 42 | 43 | public static void insertRow(HeapFile f, Transaction t) throws DbException, 44 | TransactionAbortedException { 45 | // Create a row to insert 46 | TupleDesc twoIntColumns = Utility.getTupleDesc(2); 47 | Tuple value = new Tuple(twoIntColumns); 48 | value.setField(0, new IntField(-42)); 49 | value.setField(1, new IntField(-43)); 50 | TupleIterator insertRow = new TupleIterator(Utility.getTupleDesc(2), Arrays.asList(new Tuple[]{value})); 51 | 52 | // Insert the row 53 | Insert insert = new Insert(t.getId(), insertRow, f.getId()); 54 | insert.open(); 55 | Tuple result = insert.next(); 56 | assertEquals(SystemTestUtil.SINGLE_INT_DESCRIPTOR, result.getTupleDesc()); 57 | assertEquals(1, ((IntField)result.getField(0)).getValue()); 58 | assertFalse(insert.hasNext()); 59 | insert.close(); 60 | } 61 | 62 | public static boolean findMagicTuple(HeapFile f, Transaction t) 63 | throws DbException, TransactionAbortedException { 64 | SeqScan ss = new SeqScan(t.getId(), f.getId(), ""); 65 | boolean found = false; 66 | ss.open(); 67 | while (ss.hasNext()) { 68 | Tuple v = ss.next(); 69 | int v0 = ((IntField)v.getField(0)).getValue(); 70 | int v1 = ((IntField)v.getField(1)).getValue(); 71 | if (v0 == -42 && v1 == -43) { 72 | assertFalse(found); 73 | found = true; 74 | } 75 | } 76 | ss.close(); 77 | return found; 78 | } 79 | 80 | /** Make test compatible with older version of ant. */ 81 | public static junit.framework.Test suite() { 82 | return new junit.framework.JUnit4TestAdapter(EvictionTest.class); 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /test/simpledb/systemtest/FilterBase.java: -------------------------------------------------------------------------------- 1 | package simpledb.systemtest; 2 | 3 | import java.io.IOException; 4 | import java.util.ArrayList; 5 | import java.util.HashMap; 6 | import java.util.Map; 7 | 8 | import static org.junit.Assert.*; 9 | import org.junit.Test; 10 | 11 | import simpledb.*; 12 | 13 | public abstract class FilterBase extends SimpleDbTestBase { 14 | private static final int COLUMNS = 3; 15 | private static final int ROWS = 1097; 16 | 17 | /** Should apply the predicate to table. This will be executed in transaction tid. 
*/ 18 | protected abstract int applyPredicate(HeapFile table, TransactionId tid, Predicate predicate) 19 | throws DbException, TransactionAbortedException, IOException; 20 | 21 | /** Optional hook for validating database state after applyPredicate. */ 22 | protected void validateAfter(HeapFile table) 23 | throws DbException, TransactionAbortedException, IOException {} 24 | 25 | protected ArrayList> createdTuples; 26 | 27 | private int runTransactionForPredicate(HeapFile table, Predicate predicate) 28 | throws IOException, DbException, TransactionAbortedException { 29 | TransactionId tid = new TransactionId(); 30 | int result = applyPredicate(table, tid, predicate); 31 | Database.getBufferPool().transactionComplete(tid); 32 | return result; 33 | } 34 | 35 | private void validatePredicate(int column, int columnValue, int trueValue, int falseValue, 36 | Predicate.Op operation) throws IOException, DbException, TransactionAbortedException { 37 | // Test the true value 38 | HeapFile f = createTable(column, columnValue); 39 | Predicate predicate = new Predicate(column, operation, new IntField(trueValue)); 40 | assertEquals(ROWS, runTransactionForPredicate(f, predicate)); 41 | f = Utility.openHeapFile(COLUMNS, f.getFile()); 42 | validateAfter(f); 43 | 44 | // Test the false value 45 | f = createTable(column, columnValue); 46 | predicate = new Predicate(column, operation, new IntField(falseValue)); 47 | assertEquals(0, runTransactionForPredicate(f, predicate)); 48 | f = Utility.openHeapFile(COLUMNS, f.getFile()); 49 | validateAfter(f); 50 | } 51 | 52 | private HeapFile createTable(int column, int columnValue) 53 | throws IOException, DbException, TransactionAbortedException { 54 | Map columnSpecification = new HashMap(); 55 | columnSpecification.put(column, columnValue); 56 | createdTuples = new ArrayList>(); 57 | return SystemTestUtil.createRandomHeapFile( 58 | COLUMNS, ROWS, columnSpecification, createdTuples); 59 | } 60 | 61 | @Test public void testEquals() throws 62 | DbException, TransactionAbortedException, IOException { 63 | validatePredicate(0, 1, 1, 2, Predicate.Op.EQUALS); 64 | } 65 | 66 | @Test public void testLessThan() throws 67 | DbException, TransactionAbortedException, IOException { 68 | validatePredicate(1, 1, 2, 1, Predicate.Op.LESS_THAN); 69 | } 70 | 71 | @Test public void testLessThanOrEq() throws 72 | DbException, TransactionAbortedException, IOException { 73 | validatePredicate(2, 42, 42, 41, Predicate.Op.LESS_THAN_OR_EQ); 74 | } 75 | 76 | @Test public void testGreaterThan() throws 77 | DbException, TransactionAbortedException, IOException { 78 | validatePredicate(2, 42, 41, 42, Predicate.Op.GREATER_THAN); 79 | } 80 | 81 | @Test public void testGreaterThanOrEq() throws 82 | DbException, TransactionAbortedException, IOException { 83 | validatePredicate(2, 42, 42, 43, Predicate.Op.GREATER_THAN_OR_EQ); 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /test/simpledb/systemtest/FilterTest.java: -------------------------------------------------------------------------------- 1 | package simpledb.systemtest; 2 | 3 | import java.io.IOException; 4 | import static org.junit.Assert.*; 5 | import simpledb.*; 6 | 7 | public class FilterTest extends FilterBase { 8 | @Override 9 | protected int applyPredicate(HeapFile table, TransactionId tid, Predicate predicate) 10 | throws DbException, TransactionAbortedException, IOException { 11 | SeqScan ss = new SeqScan(tid, table.getId(), ""); 12 | Filter filter = new Filter(predicate, ss); 13 | 
filter.open(); 14 | 15 | int resultCount = 0; 16 | while (filter.hasNext()) { 17 | assertNotNull(filter.next()); 18 | resultCount += 1; 19 | } 20 | 21 | filter.close(); 22 | return resultCount; 23 | } 24 | 25 | /** Make test compatible with older version of ant. */ 26 | public static junit.framework.Test suite() { 27 | return new junit.framework.JUnit4TestAdapter(FilterTest.class); 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /test/simpledb/systemtest/InsertTest.java: -------------------------------------------------------------------------------- 1 | package simpledb.systemtest; 2 | 3 | import java.io.IOException; 4 | import java.util.ArrayList; 5 | import simpledb.*; 6 | 7 | import static org.junit.Assert.*; 8 | import org.junit.Test; 9 | 10 | public class InsertTest extends SimpleDbTestBase { 11 | private void validateInsert(int columns, int sourceRows, int destinationRows) 12 | throws DbException, IOException, TransactionAbortedException { 13 | // Create the two tables 14 | ArrayList> sourceTuples = new ArrayList>(); 15 | HeapFile source = SystemTestUtil.createRandomHeapFile( 16 | columns, sourceRows, null, sourceTuples); 17 | assert sourceTuples.size() == sourceRows; 18 | ArrayList> destinationTuples = new ArrayList>(); 19 | HeapFile destination = SystemTestUtil.createRandomHeapFile( 20 | columns, destinationRows, null, destinationTuples); 21 | assert destinationTuples.size() == destinationRows; 22 | 23 | // Insert source into destination 24 | TransactionId tid = new TransactionId(); 25 | SeqScan ss = new SeqScan(tid, source.getId(), ""); 26 | Insert insOp = new Insert(tid, ss, destination.getId()); 27 | 28 | // Query q = new Query(insOp, tid); 29 | insOp.open(); 30 | boolean hasResult = false; 31 | while (insOp.hasNext()) { 32 | Tuple tup = insOp.next(); 33 | assertFalse(hasResult); 34 | hasResult = true; 35 | assertEquals(SystemTestUtil.SINGLE_INT_DESCRIPTOR, tup.getTupleDesc()); 36 | assertEquals(sourceRows, ((IntField) tup.getField(0)).getValue()); 37 | } 38 | assertTrue(hasResult); 39 | insOp.close(); 40 | 41 | // As part of the same transaction, scan the table 42 | sourceTuples.addAll(destinationTuples); 43 | SystemTestUtil.matchTuples(destination, tid, sourceTuples); 44 | 45 | // As part of a different transaction, scan the table 46 | Database.getBufferPool().transactionComplete(tid); 47 | Database.getBufferPool().flushAllPages(); 48 | SystemTestUtil.matchTuples(destination, sourceTuples); 49 | } 50 | 51 | @Test public void testEmptyToEmpty() 52 | throws IOException, DbException, TransactionAbortedException { 53 | validateInsert(3, 0, 0); 54 | } 55 | 56 | @Test public void testEmptyToOne() 57 | throws IOException, DbException, TransactionAbortedException { 58 | validateInsert(8, 0, 1); 59 | } 60 | 61 | @Test public void testOneToEmpty() 62 | throws IOException, DbException, TransactionAbortedException { 63 | validateInsert(3, 1, 0); 64 | } 65 | 66 | @Test public void testOneToOne() 67 | throws IOException, DbException, TransactionAbortedException { 68 | validateInsert(1, 1, 1); 69 | } 70 | 71 | /** Make test compatible with older version of ant. 
*/ 72 | public static junit.framework.Test suite() { 73 | return new junit.framework.JUnit4TestAdapter(InsertTest.class); 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /test/simpledb/systemtest/JoinTest.java: -------------------------------------------------------------------------------- 1 | package simpledb.systemtest; 2 | 3 | import java.io.IOException; 4 | import java.util.ArrayList; 5 | import java.util.HashMap; 6 | 7 | import org.junit.Test; 8 | 9 | import simpledb.*; 10 | 11 | public class JoinTest extends SimpleDbTestBase { 12 | private static final int COLUMNS = 2; 13 | public void validateJoin(int table1ColumnValue, int table1Rows, int table2ColumnValue, 14 | int table2Rows) 15 | throws IOException, DbException, TransactionAbortedException { 16 | // Create the two tables 17 | HashMap<Integer, Integer> columnSpecification = new HashMap<Integer, Integer>(); 18 | columnSpecification.put(0, table1ColumnValue); 19 | ArrayList<ArrayList<Integer>> t1Tuples = new ArrayList<ArrayList<Integer>>(); 20 | HeapFile table1 = SystemTestUtil.createRandomHeapFile( 21 | COLUMNS, table1Rows, columnSpecification, t1Tuples); 22 | assert t1Tuples.size() == table1Rows; 23 | 24 | columnSpecification.put(0, table2ColumnValue); 25 | ArrayList<ArrayList<Integer>> t2Tuples = new ArrayList<ArrayList<Integer>>(); 26 | HeapFile table2 = SystemTestUtil.createRandomHeapFile( 27 | COLUMNS, table2Rows, columnSpecification, t2Tuples); 28 | assert t2Tuples.size() == table2Rows; 29 | 30 | // Generate the expected results 31 | ArrayList<ArrayList<Integer>> expectedResults = new ArrayList<ArrayList<Integer>>(); 32 | for (ArrayList<Integer> t1 : t1Tuples) { 33 | for (ArrayList<Integer> t2 : t2Tuples) { 34 | // If the columns match, join the tuples 35 | if (t1.get(0).equals(t2.get(0))) { 36 | ArrayList<Integer> out = new ArrayList<Integer>(t1); 37 | out.addAll(t2); 38 | expectedResults.add(out); 39 | } 40 | } 41 | } 42 | 43 | // Begin the join 44 | TransactionId tid = new TransactionId(); 45 | SeqScan ss1 = new SeqScan(tid, table1.getId(), ""); 46 | SeqScan ss2 = new SeqScan(tid, table2.getId(), ""); 47 | JoinPredicate p = new JoinPredicate(0, Predicate.Op.EQUALS, 0); 48 | Join joinOp = new Join(p, ss1, ss2); 49 | 50 | // test the join results 51 | SystemTestUtil.matchTuples(joinOp, expectedResults); 52 | 53 | joinOp.close(); 54 | Database.getBufferPool().transactionComplete(tid); 55 | } 56 | 57 | @Test public void testSingleMatch() 58 | throws IOException, DbException, TransactionAbortedException { 59 | validateJoin(1, 1, 1, 1); 60 | } 61 | 62 | @Test public void testNoMatch() 63 | throws IOException, DbException, TransactionAbortedException { 64 | validateJoin(1, 2, 2, 10); 65 | } 66 | 67 | @Test public void testMultipleMatch() 68 | throws IOException, DbException, TransactionAbortedException { 69 | validateJoin(1, 3, 1, 3); 70 | } 71 | 72 | /** Make test compatible with older version of ant.
*/ 73 | public static junit.framework.Test suite() { 74 | return new junit.framework.JUnit4TestAdapter(JoinTest.class); 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /test/simpledb/systemtest/QueryTest.java: -------------------------------------------------------------------------------- 1 | package simpledb.systemtest; 2 | 3 | import java.io.File; 4 | import java.io.IOException; 5 | import java.util.ArrayList; 6 | import org.junit.Test; 7 | 8 | import simpledb.BufferPool; 9 | import simpledb.Database; 10 | import simpledb.DbException; 11 | import simpledb.HeapFile; 12 | import simpledb.HeapFileEncoder; 13 | import simpledb.Parser; 14 | import simpledb.TableStats; 15 | import simpledb.Transaction; 16 | import simpledb.TransactionAbortedException; 17 | import simpledb.Utility; 18 | 19 | public class QueryTest { 20 | 21 | /** 22 | * Given a matrix of tuples from SystemTestUtil.createRandomHeapFile, create an identical HeapFile table 23 | * @param tuples Tuples to create a HeapFile from 24 | * @param columns Each entry in tuples[] must have "columns == tuples.get(i).size()" 25 | * @param colPrefix String to prefix to the column names (the columns are named after their column number by default) 26 | * @return a new HeapFile containing the specified tuples 27 | * @throws IOException if a temporary file can't be created to hand to HeapFile to open and read its data 28 | */ 29 | public static HeapFile createDuplicateHeapFile(ArrayList> tuples, int columns, String colPrefix) throws IOException { 30 | File temp = File.createTempFile("table", ".dat"); 31 | temp.deleteOnExit(); 32 | HeapFileEncoder.convert(tuples, temp, BufferPool.PAGE_SIZE, columns); 33 | return Utility.openHeapFile(columns, colPrefix, temp); 34 | } 35 | 36 | @Test(timeout=20000) public void queryTest() throws IOException, DbException, TransactionAbortedException { 37 | final int IO_COST = 101; 38 | 39 | 40 | // Create all of the tables, and add them to the catalog 41 | ArrayList> empTuples = new ArrayList>(); 42 | HeapFile emp = SystemTestUtil.createRandomHeapFile(6, 1000, null, empTuples, "c"); 43 | Database.getCatalog().addTable(emp, "emp"); 44 | 45 | ArrayList> deptTuples = new ArrayList>(); 46 | HeapFile dept = SystemTestUtil.createRandomHeapFile(3, 100, null, deptTuples, "c"); 47 | Database.getCatalog().addTable(dept, "dept"); 48 | 49 | ArrayList> hobbyTuples = new ArrayList>(); 50 | HeapFile hobby = SystemTestUtil.createRandomHeapFile(6, 100, null, hobbyTuples, "c"); 51 | Database.getCatalog().addTable(hobby, "hobby"); 52 | 53 | ArrayList> hobbiesTuples = new ArrayList>(); 54 | HeapFile hobbies = SystemTestUtil.createRandomHeapFile(2, 2000, null, hobbiesTuples, "c"); 55 | Database.getCatalog().addTable(hobbies, "hobbies"); 56 | 57 | // Get TableStats objects for each of the tables that we just generated. 
58 | TableStats.setTableStats("emp", new TableStats(Database.getCatalog().getTableId("emp"), IO_COST)); 59 | TableStats.setTableStats("dept", new TableStats(Database.getCatalog().getTableId("dept"), IO_COST)); 60 | TableStats.setTableStats("hobby", new TableStats(Database.getCatalog().getTableId("hobby"), IO_COST)); 61 | TableStats.setTableStats("hobbies", new TableStats(Database.getCatalog().getTableId("hobbies"), IO_COST)); 62 | 63 | Transaction t = new Transaction(); 64 | t.start(); 65 | Parser p = new Parser(); 66 | p.setTransaction(t); 67 | 68 | p.processNextStatement("SELECT * FROM emp,dept,hobbies,hobby WHERE emp.c1 = dept.c0 AND hobbies.c0 = emp.c2 AND hobbies.c1 = hobby.c0 AND emp.c3 < 1000;"); 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /test/simpledb/systemtest/ScanTest.java: -------------------------------------------------------------------------------- 1 | package simpledb.systemtest; 2 | 3 | import simpledb.systemtest.SystemTestUtil; 4 | 5 | import static org.junit.Assert.*; 6 | 7 | import java.io.File; 8 | import java.io.IOException; 9 | import java.util.ArrayList; 10 | import java.util.NoSuchElementException; 11 | import java.util.Random; 12 | 13 | import org.junit.Test; 14 | 15 | import simpledb.*; 16 | 17 | /** 18 | * Dumps the contents of a table. 19 | * args[1] is the number of columns. E.g., if it's 5, then ScanTest will end 20 | * up dumping the contents of f4.0.txt. 21 | */ 22 | public class ScanTest extends SimpleDbTestBase { 23 | private final static Random r = new Random(); 24 | 25 | /** Tests the scan operator for a table with the specified dimensions. */ 26 | private void validateScan(int[] columnSizes, int[] rowSizes) 27 | throws IOException, DbException, TransactionAbortedException { 28 | for (int columns : columnSizes) { 29 | for (int rows : rowSizes) { 30 | ArrayList> tuples = new ArrayList>(); 31 | HeapFile f = SystemTestUtil.createRandomHeapFile(columns, rows, null, tuples); 32 | SystemTestUtil.matchTuples(f, tuples); 33 | Database.resetBufferPool(BufferPool.DEFAULT_PAGES); 34 | } 35 | } 36 | } 37 | 38 | /** Scan 1-4 columns. */ 39 | @Test public void testSmall() throws IOException, DbException, TransactionAbortedException { 40 | int[] columnSizes = new int[]{1, 2, 3, 4}; 41 | int[] rowSizes = 42 | new int[]{0, 1, 2, 511, 512, 513, 1023, 1024, 1025, 4096 + r.nextInt(4096)}; 43 | validateScan(columnSizes, rowSizes); 44 | } 45 | 46 | /** Test that rewinding a SeqScan iterator works. */ 47 | @Test public void testRewind() throws IOException, DbException, TransactionAbortedException { 48 | ArrayList> tuples = new ArrayList>(); 49 | HeapFile f = SystemTestUtil.createRandomHeapFile(2, 1000, null, tuples); 50 | 51 | TransactionId tid = new TransactionId(); 52 | SeqScan scan = new SeqScan(tid, f.getId(), "table"); 53 | scan.open(); 54 | for (int i = 0; i < 100; ++i) { 55 | assertTrue(scan.hasNext()); 56 | Tuple t = scan.next(); 57 | assertEquals(tuples.get(i), SystemTestUtil.tupleToList(t)); 58 | } 59 | 60 | scan.rewind(); 61 | for (int i = 0; i < 100; ++i) { 62 | assertTrue(scan.hasNext()); 63 | Tuple t = scan.next(); 64 | assertEquals(tuples.get(i), SystemTestUtil.tupleToList(t)); 65 | } 66 | scan.close(); 67 | Database.getBufferPool().transactionComplete(tid); 68 | } 69 | 70 | /** Verifies that the buffer pool is actually caching data. 
71 | * @throws TransactionAbortedException 72 | * @throws DbException */ 73 | @Test public void testCache() throws IOException, DbException, TransactionAbortedException { 74 | /** Counts the number of readPage operations. */ 75 | class InstrumentedHeapFile extends HeapFile { 76 | public InstrumentedHeapFile(File f, TupleDesc td) { 77 | super(f, td); 78 | } 79 | 80 | @Override 81 | public Page readPage(PageId pid) throws NoSuchElementException { 82 | readCount += 1; 83 | return super.readPage(pid); 84 | } 85 | 86 | public int readCount = 0; 87 | } 88 | 89 | // Create the table 90 | final int PAGES = 30; 91 | ArrayList> tuples = new ArrayList>(); 92 | File f = SystemTestUtil.createRandomHeapFileUnopened(1, 992*PAGES, 1000, null, tuples); 93 | TupleDesc td = Utility.getTupleDesc(1); 94 | InstrumentedHeapFile table = new InstrumentedHeapFile(f, td); 95 | Database.getCatalog().addTable(table, SystemTestUtil.getUUID()); 96 | 97 | // Scan the table once 98 | SystemTestUtil.matchTuples(table, tuples); 99 | assertEquals(PAGES, table.readCount); 100 | table.readCount = 0; 101 | 102 | // Scan the table again: all pages should be cached 103 | SystemTestUtil.matchTuples(table, tuples); 104 | assertEquals(0, table.readCount); 105 | } 106 | 107 | /** Make test compatible with older version of ant. */ 108 | public static junit.framework.Test suite() { 109 | return new junit.framework.JUnit4TestAdapter(ScanTest.class); 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /test/simpledb/systemtest/SimpleDbTestBase.java: -------------------------------------------------------------------------------- 1 | package simpledb.systemtest; 2 | 3 | import org.junit.Before; 4 | 5 | import simpledb.Database; 6 | 7 | /** 8 | * Base class for all SimpleDb test classes. 9 | * @author nizam 10 | * 11 | */ 12 | public class SimpleDbTestBase { 13 | /** 14 | * Reset the database before each test is run. 15 | */ 16 | @Before public void setUp() throws Exception { 17 | Database.reset(); 18 | } 19 | 20 | } 21 | --------------------------------------------------------------------------------