diff --git a/build.xml b/build.xml
index 1c56836a22361efec21c177ee6244dc6fa203013..87256f4d20b1a7ef3948813e6a4c5f18f9d17ce2 100644
--- a/build.xml
+++ b/build.xml
@@ -22,7 +22,8 @@
     <property name="mile-file-7c" value="GenerateTest"/>
     <property name="mile-file-7d" value="DeleteTest"/>
     <property name="mile-file-7e" value="GroupTest"/>
-
+    <property name="mile-file-8a" value="MetaTests"/>
+    <property name="mile-file-8b" value="LoadedMetaTests"/>
      <!-- https://junit.org/junit5/docs/current/user-guide/#running-tests-console-launcher -->
     <macrodef name="run-milestone">
         <element name="arguments"/>
@@ -67,8 +68,8 @@
     </target>
 
     <target name="compile" depends="init">
-        <javac srcdir="${src.dir}" destdir="${build.dir}" classpath="${jar.dir}/${ant.project.name}.jar"/>
-		<javac srcdir="${test.dir}" destdir="${build-tests.dir}" classpathref = "test.classpath"/>
+        <javac srcdir="${src.dir}" destdir="${build.dir}" classpath="${jar.dir}/${ant.project.name}.jar" encoding="UTF-8"/>
+		<javac srcdir="${test.dir}" destdir="${build-tests.dir}" classpathref = "test.classpath" encoding="UTF-8"/>
     </target>
 
      <!-- https://junit.org/junit5/docs/current/user-guide/#running-tests-console-launcher -->
@@ -291,8 +292,53 @@
             </arguments>
         </run-milestone>
     </target>
-    <target name="Meilenstein7" depends="Meilenstein7e">
+	<target name="Meilenstein7" depends="Meilenstein7e"/>
+    <target name="Meilenstein8a" depends="compile">
+        <run-milestone>
+            <arguments>
+                <arg value="--select-class=${mile-file-1}"/>
+                <arg value="--select-class=${mile-file-2}"/>
+                <arg value="--select-class=${mile-file-3}"/>
+                <arg value="--select-class=${mile-file-4}"/>
+                <arg value="--select-class=${mile-file-5}"/>
+                <arg value="--select-class=${mile-file-6}"/>
+                <arg value="--select-class=${mile-file-7aa}"/>
+                <arg value="--select-class=${mile-file-7ab}"/>
+                <arg value="--select-class=${mile-file-7ac}"/>
+                <arg value="--select-class=${mile-file-7ad}"/>
+                <arg value="--select-class=${mile-file-7ae}"/>
+                <arg value="--select-class=${mile-file-7a}"/>
+                <arg value="--select-class=${mile-file-7c}"/>
+                <arg value="--select-class=${mile-file-7d}"/>
+                <arg value="--select-class=${mile-file-7e}"/>
+                <arg value="--select-class=${mile-file-8a}"/>
+            </arguments>
+        </run-milestone>
+    </target>
+    <target name="Meilenstein8b" depends="compile">
+        <run-milestone>
+            <arguments>
+                <arg value="--select-class=${mile-file-1}"/>
+                <arg value="--select-class=${mile-file-2}"/>
+                <arg value="--select-class=${mile-file-3}"/>
+                <arg value="--select-class=${mile-file-4}"/>
+                <arg value="--select-class=${mile-file-5}"/>
+                <arg value="--select-class=${mile-file-6}"/>
+                <arg value="--select-class=${mile-file-7aa}"/>
+                <arg value="--select-class=${mile-file-7ab}"/>
+                <arg value="--select-class=${mile-file-7ac}"/>
+                <arg value="--select-class=${mile-file-7ad}"/>
+                <arg value="--select-class=${mile-file-7ae}"/>
+                <arg value="--select-class=${mile-file-7a}"/>
+                <arg value="--select-class=${mile-file-7c}"/>
+                <arg value="--select-class=${mile-file-7d}"/>
+                <arg value="--select-class=${mile-file-7e}"/>
+                <arg value="--select-class=${mile-file-8a}"/>
+                <arg value="--select-class=${mile-file-8b}"/>
+            </arguments>
+        </run-milestone>
     </target>
+	<target name="Meilenstein8" depends="Meilenstein8b"/>
 
     <target name="Meilenstein9" depends="compile">
         <run-milestone>
@@ -316,5 +362,9 @@
         </jar>
     </target>
 
+    <target name="example" depends="compile">
+		<java classname="idb.example.SeqRecordFileExample" fork="true" dir="${build.dir}"/>
+    </target>
+
     <target name="main" depends="clean,run"/>
 </project>
diff --git a/src/Main.java b/src/Main.java
index df648c60cca967b01c7b4517be6ae02d7361f0cf..652e72c9c47ac42c08176783f3f07ce906b296f0 100644
--- a/src/Main.java
+++ b/src/Main.java
@@ -10,7 +10,7 @@ import java.util.List;
 
 import idb.buffer.Buffer;
 import idb.buffer.BufferNotEmptyException;
-import idb.datatypes.IntegerKey;
+import idb.datatypes.DBInteger;
 import idb.datatypes.TID;
 
 
@@ -58,8 +58,8 @@ outer: while(true) {
 		/*Buffer buf = new LRUBuffer(24, 20);
 
 		//Buffer buf = new SimpleBuffer(24);
-		HashIdx<IntegerKey, TID> index =
-			new HashImpl<IntegerKey, TID>(buf, 4711, 4712, new IntegerTIDFactory(), 4);
+		HashIdx<DBInteger, TID> index =
+			new HashImpl<DBInteger, TID>(buf, 4711, 4712, new IntegerTIDFactory(), 4);
 
 		// Remove these lines if you want to keep the data stored on the disk.
 		// For this testcase it's easier to see whats going on if the index is not modified by previous runs.
diff --git a/src/idb/buffer/DBBuffer.java b/src/idb/buffer/DBBuffer.java
index 56be80661d547c457654b508503e166e1d08d4cd..4bafdc50805002e4bf15f5270d9c6bb97f6259e5 100644
--- a/src/idb/buffer/DBBuffer.java
+++ b/src/idb/buffer/DBBuffer.java
@@ -21,7 +21,7 @@ public interface DBBuffer {
 	 * @throws IOException Indicates a problem with the underlying file system
 	 * @throws BufferFullException Indicates that the buffer capacity is exceeded
 	 */
-	public ByteBuffer fix(BlockFile file, int pageno) throws IOException, BufferFullException;
+	public ByteBuffer fix(BlockFile blockfile, int pageno) throws IOException, BufferFullException;
 	/**
 	 * Notify the buffer, that you are done with this page. This must not result in
 	 * instant writing the page back.
@@ -48,9 +48,11 @@ public interface DBBuffer {
 	 */
 	public void flush() throws IOException;
 	/**
-	 * flush buffer and close all segments
+	 * Flushes and closes the buffer.
+	 * Using the buffer after calling close() is undefined.
+	 * Does not close any BlockFiles.
+	 * @throws BufferNotEmptyException if there are fixed pages remaining
 	 * @throws IOException signals an error in the underlying file system
-	 * @throws BufferNotEmptyException thrown, when fixed pages remain
 	 * in the buffer on closing
 	 */
 	public void close() throws BufferNotEmptyException, IOException;
diff --git a/src/idb/buffer/SimpleDBBuffer.java b/src/idb/buffer/SimpleDBBuffer.java
index 927f4c16d7c45c46932cc9ca41e0fbad3183ce5c..1060a4f17daf54b353025a3b046535578ce0bd91 100644
--- a/src/idb/buffer/SimpleDBBuffer.java
+++ b/src/idb/buffer/SimpleDBBuffer.java
@@ -22,22 +22,22 @@ public class SimpleDBBuffer implements DBBuffer
 	}
 
 	@Override
-	public ByteBuffer fix(BlockFile file, int pageno) throws IOException, BufferFullException{
-		PageDescriptor pd = new PageDescriptor(file.filename(), pageno);
+	public ByteBuffer fix(BlockFile blockfile, int pageno) throws IOException, BufferFullException{
+		PageDescriptor pd = new PageDescriptor(blockfile.filename(), pageno);
 		Page page = pages.get(pd);
 		if (page == null) {
 			// Not referenced yet
 			page = new Page(pagesize);
 			pages.put(pd, page);
-			page.load(file, pageno);
+			page.load(blockfile, pageno);
 		}
 		page.fix();
 		return page.getData();
 	}
 
 	@Override
-	public void setDirty(BlockFile file, int pageno) {
-		PageDescriptor pd = new PageDescriptor(file.filename(), pageno);
+	public void setDirty(BlockFile blockfile, int pageno) {
+		PageDescriptor pd = new PageDescriptor(blockfile.filename(), pageno);
 		Page page = pages.get(pd);
 		page.setDirty();
 	}
diff --git a/src/idb/construct/Util.java b/src/idb/construct/Util.java
index 63c3a9586d4199d1b3a0af4c2d8454a3b7a1bdfd..bcc146508aa7e8e38c57a6d30a2a547d3034def8 100644
--- a/src/idb/construct/Util.java
+++ b/src/idb/construct/Util.java
@@ -20,7 +20,7 @@ import idb.datatypes.Key;
 import idb.datatypes.TID;
 import idb.datatypes.Bool;
 import idb.datatypes.DBString;
-import idb.datatypes.IntegerKey;
+import idb.datatypes.DBInteger;
 
 import idb.meta.Metadata;
 import idb.meta.FileCache;
@@ -52,11 +52,11 @@ public class Util{
 		return null;
 	}
 
-	public static <K extends idb.datatypes.Key, V extends DataObject> KeyRecordFile<V, K> rebuildHash(DBBuffer buf, BlockFile regular, BlockFile overflow) {
+	public static <V extends DataObject, K extends Key> KeyRecordFile<V, K> rebuildHash(DBBuffer buf, BlockFile regular, BlockFile overflow) {
 		return null;
 	}
 
-	public static <K extends idb.datatypes.Key, V extends DataObject> KeyRecordFile<V, K> generateHash(DBBuffer buf, BlockFile regular, BlockFile overflow, double threshhold, int initCapacity) {
+	public static <V extends DataObject, K extends Key> KeyRecordFile<V, K> generateHash(DBBuffer buf, BlockFile regular, BlockFile overflow, double threshhold, int initCapacity) {
 		return null;
 	}
 
@@ -151,12 +151,12 @@ public class Util{
 		return new Triplet<>(Util.namedCombinedRecordFrom(Integer.MIN_VALUE, n), n, (x, y) -> y.getInt(s) > x.getInt(n) ? Util.namedCombinedRecordFrom(y.getInt(s), n) : x);
 	}
 
-	public static void deleteIntIndex(Module m, String indexName, DirectRecordFile<TID, NamedCombinedRecord> tidFile, KeyRecordFile<TID, IntegerKey> indexFile, DBBuffer buf) {
+	public static void deleteIntIndex(Module m, String indexName, DirectRecordFile<TID, NamedCombinedRecord> tidFile, KeyRecordFile<TID, DBInteger> indexFile, DBBuffer buf) {
 		NamedCombinedRecord ncr = m.pull();
 		if (ncr == null) return;
 		m.reset();
 		switch(ncr.getType(indexName)) {
-			case INT: delete(m, indexName, tidFile, indexFile, x -> new IntegerKey(x.getInt(indexName)), buf); break;
+			case INT: delete(m, indexName, tidFile, indexFile, x -> new DBInteger(x.getInt(indexName)), buf); break;
 			default: throw new IllegalStateException("Type-Mismatch");
 		}
 	}
diff --git a/src/idb/datatypes/IntegerKey.java b/src/idb/datatypes/DBInteger.java
similarity index 75%
rename from src/idb/datatypes/IntegerKey.java
rename to src/idb/datatypes/DBInteger.java
index 2a3fff1325c2097a1f474a88e14a1ec901bd3595..94de04a19edecf0d1eee273750533fab5619b2ab 100644
--- a/src/idb/datatypes/IntegerKey.java
+++ b/src/idb/datatypes/DBInteger.java
@@ -2,10 +2,10 @@ package idb.datatypes;
 
 import java.nio.ByteBuffer;
 
-public class IntegerKey implements Key {
+public class DBInteger implements Key {
 	private int value;
 
-	public IntegerKey(int val){
+	public DBInteger(int val){
 		this.value = val;
 	}
 
@@ -26,8 +26,8 @@ public class IntegerKey implements Key {
 	}
 
 	@Override
-	public IntegerKey copy() {
-		return new IntegerKey(value);
+	public DBInteger copy() {
+		return new DBInteger(value);
 	}
 
 	public int hashCode(){
@@ -35,7 +35,7 @@ public class IntegerKey implements Key {
 	}
 
 	public boolean equals(Key o){
-		return value == ((IntegerKey)o).getValue();
+		return value == ((DBInteger)o).getValue();
 	}
 
 	public int getValue() {
diff --git a/src/idb/datatypes/DataObject.java b/src/idb/datatypes/DataObject.java
index 03575458215ac4e7bc34f25a6a51c30a1ad59ae0..375f811761865b9bc6fff10df506433c567a3dd3 100644
--- a/src/idb/datatypes/DataObject.java
+++ b/src/idb/datatypes/DataObject.java
@@ -80,6 +80,7 @@ public interface DataObject{
 	public default void readPart(List<Triplet<ByteBuffer, Integer, Integer>> parts/*{ByteBuffer, offsetBuf, size}[]*/){
 		int newSize = 0;
 		for (Triplet<ByteBuffer, Integer, Integer> t : parts) {
+			assert t.third() > 0: "Parts need to be larger than 0 and this part is "+t.third();
 			newSize += t.third();
 		}
 		ByteBuffer buffer = ByteBuffer.allocate(newSize);
diff --git a/src/idb/datatypes/IntegerData.java b/src/idb/datatypes/IntegerData.java
deleted file mode 100644
index c61701a749f5c4dd6614376ae9f023c3430f2bb3..0000000000000000000000000000000000000000
--- a/src/idb/datatypes/IntegerData.java
+++ /dev/null
@@ -1,49 +0,0 @@
-package idb.datatypes;
-
-import java.nio.ByteBuffer;
-
-public class IntegerData implements Key {
-	private int value;
-
-	public IntegerData(int val){
-		this.value = val;
-	}
-
-
-	@Override
-	public void read(int index, ByteBuffer bb) {
-		value = bb.getInt(index);
-	}
-
-	@Override
-	public void write(int index, ByteBuffer bb) {
-		bb.putInt(index, value);
-	}
-
-	@Override
-	public int size() {
-		return Integer.SIZE/Byte.SIZE;
-	}
-
-	@Override
-	public Key copy() {
-		return new IntegerData(value);
-	}
-
-	public int hashCode(){
-		return value;
-	}
-
-	public boolean equals(Key o){
-		//System.out.println(((IntegerData)o).getValue()+" "+value);
-		return value == ((IntegerData)o).getValue();
-	}
-
-	public int getValue() {
-		return value;
-	}
-	public String toString(){
-		return ""+value;
-	}
-
-}
diff --git a/src/idb/example/SeqRecordFileExample.java b/src/idb/example/SeqRecordFileExample.java
new file mode 100644
index 0000000000000000000000000000000000000000..379b92630a366e18f36501947d68cc1264fa7fa8
--- /dev/null
+++ b/src/idb/example/SeqRecordFileExample.java
@@ -0,0 +1,286 @@
+/**
+ * @file This file contains examples useful for SeqRecordFile and other RecordFiles.
+ *       It shows interaction between DBBuffer and idb.record,
+ *       as well as our interaction between DBBuffer and DataObject
+ * @author Tobias Heineken <tobias.heineken@fau.de>
+ */
+
+package idb.example;
+
+import idb.buffer.DBBuffer;
+import idb.block.BlockFile;
+
+import java.nio.ByteBuffer;
+import java.io.IOException;
+import java.io.File;
+
+import java.util.List;
+import java.util.ArrayList;
+
+import idb.datatypes.*;
+
+public class SeqRecordFileExample{
+	public void run(DBBuffer buffer, BlockFile blockFile) throws IOException, idb.buffer.BufferFullException {
+		// We start by checking if blockFile is already open
+		if (blockFile.filename() == null) {
+			// If the blockFile is not already opened, we cannot continue
+			// Normally, this check is not necessary as it's the callers responsibility to open
+			// the blockfile before passing it to any class in idb.record (see exercise sheets)
+			System.err.println("Running on an unopened blockfile is not legal");
+			return;
+		}
+
+		// Now we want to check if this is the first time to open this file (maybe interesting for TIDFile)
+		// The caller has to make sure to use one blockfile for one File in idb.record only (using it as a TIDFile and a SeqentialFile will break)
+		// As the caller is also not allowed to do something with the blockfile himself, we can check the first call to run by checking the size of blockfile:
+		boolean first_run = (blockFile.size() == 0);
+
+		if (first_run) {
+			// We can do some initialization here.
+			// For this example, we don't do anything interesting. We just want to make sure that the first_run boolean will be false next time
+			// To do so, we use BlockFile::append, which is the only modifying function on BlockFile that we're allowed to call
+			System.out.println("This is the first run");
+			blockFile.append(1);
+
+			// BlockFile::append leaves us with a block of unknown contents. To modify this content, we call the following functions.
+			// DBBuffer::fix gets us a ByteBuffer. We treat the ByteBuffer as a "area of bare memory", a de facto replacement for a C char*
+			ByteBuffer bb = buffer.fix(blockFile, 0);
+			// Nesting calls to DBBuffer::fix are fine, we just have to make sure to call DBBuffer::unfix accordingly
+			ByteBuffer b2 = buffer.fix(blockFile, 0);
+			// ByteBuffers are objects like everything else in java. So we can pass it by reference to write, where we write some data to the ByteBuffer:
+			// Do not follow this function call immediately, instead try to understand the run method first and visit write and read afterwards.
+			write(bb);
+			// as we fixed that page twice, we need to call DBBuffer::unfix twice as well, ...
+			buffer.unfix(blockFile, 0);
+			// but before we do that we have to inform the DBBuffer that we modified the ByteBuffer.
+			// The DBBuffer would otherwise skip writing the data to disc because "no modification has taken place".
+			// To do so, we call DBBuffer::setDirty, informing the DBBuffer that we might have modified the ByteBuffer and forcing DBBuffer to write the contents back to disc.
+			buffer.setDirty(blockFile, 0);
+			buffer.unfix(blockFile, 0);
+
+		}
+		else {
+			// This can be used to recover metadata on startup. We don't do that in this example.
+			System.out.println("We finished rebuilding");
+		}
+
+		// Now we are able to use the same procedure we already used for writing for reading data
+		ByteBuffer bb = buffer.fix(blockFile, 0);
+		read(bb);
+		buffer.unfix(blockFile, 0);
+		// We don't need DBBuffer::setDirty here and it would be a mistake to use it, as it slows down the execution for no reason at all.
+	}
+	private void write(ByteBuffer bb) {
+		// Remember: bb is passed in as a reference. Any modification we do to it will be seen outside,
+		// and due to setDirty also persistent on the disc.
+		//
+		// So let's start and modify our ByteBuffer.
+
+		// First, we need some data. That's where idb.datatypes.DataObject comes into play:
+		// Every class derived from DataObject can be written (and read) the same way. Let's start by writing an Integer (42) at the beginning of the ByteBuffer.
+		// A DBInteger is exactly what we want for that task. It implements Key, and Key extends DataObject, so we can serialize it.
+		DataObject fourtyTwo = new DBInteger(42);
+
+		// fourtyTwo offers a method to write itself into a given byteBuffer: write(int, ByteBuffer). We need to specify a position somewhere in the ByteBuffer.
+		// Let's start with 0, at the beginning of the buffer.
+
+		fourtyTwo.write(0, bb);
+
+		// Now bb contains 42 at the beginning of the buffer. But we want to write more data.
+		// We cannot use positon 0 again, because that would override fourtyTwo.
+		// Remember, a ByteBuffer acts like a C char* and you can easily override (or partially override) data by accident.
+		// We need to know where fourtyTwo ended. Luckily, there's a method for that:
+		int nextOffset = fourtyTwo.size();
+
+		// Let's do something else: We might be interested in writing something to the end of a block (TIDFile). So how would we do that?
+		// First we create our DataObject:
+		DataObject endData = new DBInteger(-1);
+		// we can detect the size of any ByteBuffer by calling ByteBuffer::capacity
+		int size = bb.capacity();
+		// another option would be to ask the DBBuffer, he knows the pagesize as well, but we don't have access to the DBBuffer right now.
+		// The rest is relatively simple, we just need to use these sizes to calculate the correct position:
+		endData.write(size - endData.size(), bb);
+
+		// Now we want to do something more complex: Integers are boring, as their size is always a constant. Let's do something with variable size: a String.
+		DataObject varSize = new DBString("Hallo IDB");
+		// While I could be calculating the size of this String by hand, let's assume that I'm not able in the general case.
+		// Luckily, I don't have to care as the same procedure we used for fourtyTwo still works beautifully:
+		varSize.write(nextOffset, bb);
+		nextOffset += varSize.size();
+		// Note: When implementing a new DataObject of variable size (Exercise 6 or 7), make sure to be able to read the size when asked to "read".
+		// We don't know the size of these varSize DataObject a priori all the time, so it has to be part of the persistent data pattern.
+		// There are two main ways of doing so: A) prefix the data with its size or B) use a special character to define the end of the data.
+		//
+		// For fun, we add another Integer right behind the varSize-String:
+		DataObject forFun = new DBInteger(15);
+		forFun.write(nextOffset, bb);
+
+		// There are two special case that we did not cover (yet).
+		// Let's start with Fragmentation. While a database should not fragment records in one block, we do this here for educational purposes.
+		// Normally, fragmentation would only occur with different ByteBuffers - i.e. different Blocks.
+
+		// First: Setup: The initial data is about 20 bytes in size. We want some safety distance, so we'll start at byte 50.
+		// Sadly, someone told us that we may not use bytes 60 - 90, so we have to work around that. We do want to start at byte 50 though.
+		// Note that this is an unrealistic scenario for databases. It's made up to demonstrate fragmentation. We do not have invalid bytes in our blockFiles!
+
+		// Testdata: fragmented. Again, assume we cannot calculate its size when writing the code
+		// Comment the other line in to see the difference.
+		DataObject fragmented = new DBString("This is a very long string. Noone knows if it will be fine or not");
+		// DataObject fragmented = new DBString("sh");
+
+		// First: We need some help here. If fragmented is small enough, we don't want to fragment it.
+		// To solve that, we prefix the data with its size. When reading, we still know that bytes 60 - 90 are invalid and can calculate if the record is fragmented
+		DataObject fSize = new DBInteger(fragmented.size());
+		fSize.write(50, bb);
+		nextOffset = 50 + fSize.size();
+
+		// Now we can check if the remaining space is ok
+		if (fragmented.size() <= 60 - nextOffset){
+			System.out.println("No fragmentation needed");
+			fragmented.write(nextOffset, bb);
+		}
+		else {
+			System.out.println("We need fragmentation");
+			// In order to fragment a dataObject, we need to use the writePart method. It'll write only a part of the dataObject to the ByteBuffer:
+			fragmented.writePart(nextOffset, bb, 0, 60 - nextOffset);
+			// This call has two additional parameters for fragmented: (60 - nextOffset), which is the number of bytes that should be written into bb,
+			// and 0, which tells fragmented to start at the beginning ("This is") of its data.
+
+			// After we started, we want to continue after byte 90, inserting all remaining bytes.
+			// As the example is hand-crafted, we can be sure not to override endData, which would have to be checked for normally.
+			fragmented.writePart(90, bb, 60-nextOffset, fragmented.size() - (60 - nextOffset));
+		}
+
+		// To prove that bytes 60 - 90 are unused, we write another Integer to byte 61:
+		DataObject prove = new DBInteger(1337);
+		prove.write(61, bb);
+
+		// The other special case is where you don't know the structure of the data you're working with.
+		// This happens especially during debugging, where you want to see why your data doesn't seem to be there anymore,
+		// and in TIDFile, where moving unknown data is a necessary thing.
+		//
+		// To do that, ByteBuffer offers two methods that we use too: put(int, byte) and get(int).
+		// Note that we don't use the relative variants put(byte) or get() and you're on your own if you want to use them in your database.
+		//
+		// Let's assume we want to know the content of position 62:
+		byte content = bb.get(62);
+		System.out.println("It is: " + content);
+
+		// Writing it back to position 75 works similar:
+		bb.put(75, content);
+
+		// While these functions are capable of doing almost anything, most of the time it's easier to use a DataObject.
+	}
+
+	private void read(ByteBuffer bb) {
+		// We want to read all the data we wrote down during write:
+		// Please make sure to read the comments in write before continuing here
+		//
+		// First let's start with all these numbers at fixed positions:
+		// prove, fourtyTwo and endData
+
+		DBInteger prove = new DBInteger(1);
+		DBInteger fourtyTwo = new DBInteger(2);
+		DBInteger endData = new DBInteger(1);
+		// Two things to note here: A) we use the concrete class (DBInteger) instead of DataObject. This is just to display or understand the data.
+		// Reading itself does not need the concrete class, however you should not try to read with a wrong class (for example try to read a DBString at offset 61,
+		// where we wrote a DBInteger.) Most of the time it's the caller's responsibility to make sure the types are correct, but keep this note in mind as you might run into it
+		// if you have a bug in your program.
+		// Also, the initial parameters (1, 2, and 1) are not important at all for this program, they will be overridden as soon as "read" is called.
+		
+
+		// Reading is exactly the other way round compared to write. Instead of using a DataObject to write data to the ByteBuffer,
+		// read modifies the DataObject's own storage to contain the information in ByteBuffer.
+		prove.read(61, bb);
+		fourtyTwo.read(0, bb);
+		// endData is a bit tricky. But luckily integers always have a fixed size and so we're able to calculate its starting position a priori.
+		// Note that you cannot (easily) write a variable record at the end of a block because you won't be able to detect where it started when reading.
+		endData.read(bb.capacity() - endData.size(), bb);
+
+		System.out.println("We found prove: " + prove + ", fourtyTwo: " + fourtyTwo + ", and endData: " + endData);
+		// While it is very convenient to use these DBIntegers in a String, we can also get their values directly by calling DBInteger::getValue:
+
+		if (fourtyTwo.getValue() != 42) {
+			System.err.println("We've got a bug");
+		}
+
+		// there are three objects still buried in the bb, and we want to read them: varSize, forFun and fragmented.
+		// Technically also fSize but we'll use fSize to read fragmented.
+		//
+		// varSize is the easiest, as we know its exact position: fourtyTwo.size();
+		DBString varSize = new DBString("");
+		// Again, initial data is not important
+		varSize.read(fourtyTwo.size(), bb);
+
+		// DBString now detected its size automatically, and read all necessary information:
+		System.out.println("VarSize is: " + varSize);
+
+		// On variable records, their size is always dependent on their current contents, therefore it can change on DataObject::read.
+		// So now, after reading, we can get the position for forFun. Doing this in the wrong order will result in a wrong readout for forFun.
+		DBInteger forFun = new DBInteger(0);
+		forFun.read(fourtyTwo.size() + varSize.size(), bb);
+		System.out.println("forFun is: "+ forFun);
+
+		// Now let's end this example with reading the fragmented (or maybe not fragmented) record.
+		// We start by extracting the additional size information we wrote at the front in write:
+		DBString fragmented = new DBString("");
+		DBInteger sizeInfo = new DBInteger(0);
+		sizeInfo.read(50, bb);
+		if (sizeInfo.getValue() <= 10 - sizeInfo.size()) {
+			// fragmented is not fragmented :D
+			// This is the simple case, just like with varSize:
+			fragmented.read(50 + sizeInfo.size(), bb);
+			System.out.println("Unfragmented: " +fragmented);
+		}
+		else {
+			// Now we need to collect all parts of fragmented in a list.
+			// We have to insert the parts in correct order in a list so it can be reassembled.
+			List<Triplet<ByteBuffer, Integer, Integer>> parts = new ArrayList<>();
+
+			// A Triplet is a generic Pair, just with three entries. In this case, as documented in DataObect::readPart,
+			// they represent 1. the Buffer to read this segment from, 2. the offset in the buffer where to start reading and 3. the size that should be read
+
+			// We know that the first segment starts at 50+sizeInfo.size() and ends at 60, resulting in a size of 10 - sizeInfo.size():
+			parts.add(new Triplet<>(bb, 50+sizeInfo.size(), 10 - sizeInfo.size()));
+
+			// And the second part contains all remaining bits, starting at 90.
+			// The size is tricky: We cannot ask fragmented how large it's going to be,
+			// but that's why we prefixed the string with its length and we can use that now.
+			parts.add(new Triplet<>(bb, 90, sizeInfo.getValue() - (10 - sizeInfo.size())));
+
+			// After collecting all parts (make sure that the ByteBuffer is still valid and not already unfixed!)
+			// we simply call readPart and wait for the magic to happen:
+
+			fragmented.readPart(parts);
+
+			System.out.println("Fragmented: " + fragmented);
+		}
+	}
+	public static void main(String[] args) {
+		try{
+			// Setup: create a new file in data: (Interesting for your own MetaImpl)
+			// As the example is run in build/main, we need to go two directories up. Not necessary for MetaImpl
+			String path = "../../data/";
+			File dir = new File(path);
+			File testFile = File.createTempFile("foobar", ".exmpl", dir);
+			// We don't want this file to stick around. Do not do if you want to keep the file :)
+			testFile.deleteOnExit();
+			BlockFile bf = idb.construct.Util.generateBlockFile(4096);
+			DBBuffer buf = idb.construct.Util.generateSimpleBuffer(4096);
+
+			// open the blockfile:
+			bf.open(testFile.getCanonicalPath(), "rw");
+
+			// run once to see initialization:
+			SeqRecordFileExample srfe = new SeqRecordFileExample();
+			srfe.run(buf, bf);
+
+			//run again to see that writing only happens once:
+			srfe.run(buf, bf);
+		}
+		catch (IOException | idb.buffer.BufferFullException ex) {
+			ex.printStackTrace();
+			return;
+		}
+	}
+}
diff --git a/src/idb/meta/FileCache.java b/src/idb/meta/FileCache.java
index 59c415563cb21c06d996e399bca01288e69e20df..4efffff726a39e9b5741e1b72e8dcbcdf114ad45 100644
--- a/src/idb/meta/FileCache.java
+++ b/src/idb/meta/FileCache.java
@@ -15,7 +15,7 @@ import idb.datatypes.TID;
 import idb.datatypes.Triplet;
 import idb.datatypes.Key;
 import idb.datatypes.DataObject;
-import idb.datatypes.IntegerKey;
+import idb.datatypes.DBInteger;
 import idb.datatypes.Bool;
 import idb.datatypes.DBString;
 
@@ -40,7 +40,7 @@ import java.nio.ByteBuffer;
 public class FileCache implements AutoCloseable {
 	private Map<String, SoftReference<DirectRecordFile<TID, NamedCombinedRecord>>> tidMap = new HashMap<>();
 	private Map<String, SoftReference<KeyRecordFile>> keyMap = new HashMap<>();
-	private Map<String, SoftReference<KeyRecordFile<TID, IntegerKey>>> intMap = new HashMap<>();
+	private Map<String, SoftReference<KeyRecordFile<TID, DBInteger>>> intMap = new HashMap<>();
 	private Map<String, SoftReference<KeyRecordFile<TID, Bool>>> boolMap = new HashMap<>();
 	private Map<String, SoftReference<KeyRecordFile<TID, DBString>>> stringMap = new HashMap<>();
 	private List<Triplet<WeakReference<BFProxy>,BlockFile,Void>> cleanupBFFiles = new ArrayList<>();
@@ -73,7 +73,7 @@ public class FileCache implements AutoCloseable {
 	 * This variant is used for Indices that use a int as key. There are variants for Bool and DBString aswell.
 	 * Note that depending on your implementation of ClockBuffer it might not be possible to use the buffer with different blockSizes.
 	 */
-	public KeyRecordFile<TID, IntegerKey> getIntIndex(String path, String overflowPath, int blockSize, int blockSizeOverflow) {
+	public KeyRecordFile<TID, DBInteger> getIntIndex(String path, String overflowPath, int blockSize, int blockSizeOverflow) {
 		return getIndex(path, overflowPath, blockSize, intMap, cleanupHashFiles, blockSizeOverflow);
 	}
 
@@ -82,7 +82,7 @@ public class FileCache implements AutoCloseable {
 	 * Otherwise these are used to build a new HsahFile and insert this into the Cache.
 	 * It is garanteed that the returned file will be identical to a previously returned file for this path if there is still at least
 	 * one strong reference to the earlier returned KRF.
-	 * This variant is used for Indices that use a bool as key. There are variants for IntegerKey and DBString aswell.
+	 * This variant is used for Indices that use a bool as key. There are variants for DBInteger and DBString as well.
 	 * Note that depending on your implementation of ClockBuffer it might not be possible to use the buffer with different blockSizes.
 	 */
 	public KeyRecordFile<TID, Bool> getBoolIndex(String path, String overflowPath, int blockSize, int blockSizeOverflow) {
@@ -94,7 +94,7 @@ public class FileCache implements AutoCloseable {
 	 * Otherwise these are used to build a new HsahFile and insert this into the Cache.
 	 * It is garanteed that the returned file will be identical to a previously returned file for this path if there is still at least
 	 * one strong reference to the earlier returned KRF.
-	 * This variant is used for Indices that use a DBString as key. There are variants for Bool and IntegerKey aswell.
+	 * This variant is used for Indices that use a DBString as key. There are variants for Bool and DBInteger as well.
 	 * Note that depending on your implementation of ClockBuffer it might not be possible to use the buffer with different blockSizes.
 	 */
 	public KeyRecordFile<TID, DBString> getStringIndex(String path, String overflowPath, int blockSize, int blockSizeOverflow) {
diff --git a/src/idb/meta/Metadata.java b/src/idb/meta/Metadata.java
index 140f22c2246653b3b1fd3a27f9717d62ebd915a1..1a06c36b2cdcc790224881f96bd3c8fd5e633147 100644
--- a/src/idb/meta/Metadata.java
+++ b/src/idb/meta/Metadata.java
@@ -7,7 +7,7 @@ package idb.meta;
 import idb.datatypes.NamedCombinedRecord;
 import idb.datatypes.DBString;
 import idb.datatypes.Bool;
-import idb.datatypes.IntegerKey;
+import idb.datatypes.DBInteger;
 import idb.datatypes.TID;
 import idb.datatypes.DataObject;
 
@@ -111,7 +111,7 @@ public interface Metadata{
 	 * @throws IllegalStateException if getType(relName, colname) != INT
 	 *
 	 */
-	public KeyRecordFile<TID, IntegerKey> getIntIndex(String relName, String colName);
+	public KeyRecordFile<TID, DBInteger> getIntIndex(String relName, String colName);
 
 	/**
 	 * Returns a KeyRecordFile containing an index for the attribute colName of relName.
diff --git a/src/idb/record/KeyRecordFile.java b/src/idb/record/KeyRecordFile.java
index c4c6a3eb1248cb3ad37e27095c7b4464790d0d26..b6d2b9953e9be4217bace7fbadd14a4c44d2324d 100644
--- a/src/idb/record/KeyRecordFile.java
+++ b/src/idb/record/KeyRecordFile.java
@@ -24,7 +24,7 @@ public interface KeyRecordFile<D extends DataObject, K extends Key>
 	 * Add a new key-value pair to the index file.
 	 * It is guarantied that the following list of instructions does not result in any observable change to this KeyRecordFile:
 	 * int sz = size(key, object);
-	 * insert(object, key);
+	 * insert(key, object);
 	 * delete(key, sz, object);
 	 *
 	 * Colloquial speeking it is guaranteed that the inserted object will be the one with the highest index for this key,
diff --git a/tests/AbstractBufferTest.java b/tests/AbstractBufferTest.java
index ab987459b88c1550a4d034ec7fea01f2932b68d6..6d8e59466433781b4d3d3567a7a7c52bdc05e9e1 100644
--- a/tests/AbstractBufferTest.java
+++ b/tests/AbstractBufferTest.java
@@ -153,7 +153,7 @@ public abstract class AbstractBufferTest{
 		bf.append(2);
 		buf.fix(bf, 1);
 		assertEquals(4096, buf.fix(bf, 1).capacity());
-		assertEquals(bf.getReads(), 1);
+		assertEquals(1, bf.getReads());
 	}
 
 	@Test
@@ -316,15 +316,19 @@ public abstract class AbstractBufferTest{
 
 	@Test
 	public void doubleFixTwoBlockfile() throws Exception {
+		CountingBlockFile bf0 = bfGenerator.apply(4096);
+		bf0.open(testFile.getCanonicalPath(), "rw");
+		bf0.append(2);
+		bf0.close();
+
 		DBBuffer buf = bufferGenerator.apply(4096);
 		CountingBlockFile bf1 = bfGenerator.apply(4096);
 		bf1.open(testFile.getCanonicalPath(), "rw");
 		CountingBlockFile bf2 = bfGenerator.apply(4096);
 		bf2.open(testFile.getCanonicalPath(), "rw");
-		bf2.append(2);
 		buf.fix(bf1, 1);
 		buf.fix(bf2, 1);
-		assertEquals(bf1.getReads()+bf2.getReads(), 1);
+		assertEquals(1, bf1.getReads()+bf2.getReads());
 	}
 
 	@Test
diff --git a/tests/HashTest.java b/tests/HashTest.java
index 35d64e558c45ae850fb14f79a0b31e0a7ea4bce4..a42a74c3e1f6f45f70e430912309528fa7c85789 100644
--- a/tests/HashTest.java
+++ b/tests/HashTest.java
@@ -4,7 +4,7 @@ import idb.buffer.BufferFullException;
 import idb.buffer.BufferNotEmptyException;
 import idb.datatypes.DataObject;
 import idb.datatypes.Key;
-import idb.datatypes.IntegerKey;
+import idb.datatypes.DBInteger;
 import idb.datatypes.DBString;
 import idb.record.KeyRecordFile;
 
@@ -54,7 +54,7 @@ public class HashTest {
 		bfO = new GarbageBlockFile(cbfO = new CountingBlockFile(bfgenerator.apply(pageSize)), pageSize);
 		bfO.open(testFileO.getCanonicalPath(), "rw");
 		buf = new CountingDBBuffer(bufferGenerator.apply(pageSize));
-		return Util.<K, D>generateHash(buf, bf, bfO, thresh, initCapacity);
+		return Util.<D,K>generateHash(buf, bf, bfO, thresh, initCapacity);
 	}
 
 	@AfterEach
@@ -86,24 +86,24 @@ public class HashTest {
 
 	@Test
 	public void readWriteNoOverflow() throws Exception {
-		KeyRecordFile<VariableRecord, IntegerKey> krf = generate(4096, 0.5, 7);
+		KeyRecordFile<VariableRecord, DBInteger> krf = generate(4096, 0.5, 7);
 		cbfO.reset();
 		for (int i = 0; i < 10; ++i) {
-			krf.insert(new IntegerKey(i), new VariableRecord(10 * i + 10)); // this is less than 200 bytes, so it should
+			krf.insert(new DBInteger(i), new VariableRecord(10 * i + 10)); // this is less than 200 bytes, so it should
 																			// not trigger an overflow or split
 		}
 		buf.flush();
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 		for (int i = 0; i < 10; ++i) {
-			assertEquals(1, krf.size(new IntegerKey(i), new VariableRecord(0)));
+			assertEquals(1, krf.size(new DBInteger(i), new VariableRecord(0)));
 		}
 		for (int i = 0; i < 10; ++i) {
 			VariableRecord vr = new VariableRecord(1);
-			krf.read(new IntegerKey(i), vr, 0);
+			krf.read(new DBInteger(i), vr, 0);
 			final int j = i;
 			assertThrows(ArrayIndexOutOfBoundsException.class,
-					() -> krf.read(new IntegerKey(j), new VariableRecord(1), 1));
+					() -> krf.read(new DBInteger(j), new VariableRecord(1), 1));
 			assertEquals((i + 1) * 10 * Integer.SIZE / Byte.SIZE, vr.size(), "In run" + i);
 		}
 		buf.flush();
@@ -113,9 +113,9 @@ public class HashTest {
 
 	@Test
 	public void readWriteForeign() throws Exception {
-		KeyRecordFile<VariableRecord, IntegerKey> krfI = generate(4096, 0.5, 7);
+		KeyRecordFile<VariableRecord, DBInteger> krfI = generate(4096, 0.5, 7);
 		for (int i = 0; i < 10; ++i) {
-			krfI.insert(new IntegerKey(i), new VariableRecord(10 * i + 10)); // this is less than 200 bytes, so it
+			krfI.insert(new DBInteger(i), new VariableRecord(10 * i + 10)); // this is less than 200 bytes, so it
 																				// should not trigger an overflow or
 																				// split
 		}
@@ -123,28 +123,28 @@ public class HashTest {
 		buf.flush();
 		cbfO.reset();
 		cbf.reset();
-		KeyRecordFile<VariableRecord, IntegerKey> krf = Util.<IntegerKey, VariableRecord>rebuildHash(buf, bf, bfO);
+		KeyRecordFile<VariableRecord, DBInteger> krf = Util.<VariableRecord, DBInteger>rebuildHash(buf, bf, bfO);
 		assertEquals(0, cbf.getReads());
 		assertEquals(0, cbf.getWrites());
 		assertEquals(1, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 		cbfO.reset();
 		for (int i = 0; i < 10; ++i) {
-			assertEquals(1, krf.size(new IntegerKey(i), new VariableRecord(0)));
+			assertEquals(1, krf.size(new DBInteger(i), new VariableRecord(0)));
 		}
 		for (int i = 0; i < 10; ++i) {
 			VariableRecord vr = new VariableRecord(1);
-			krf.read(new IntegerKey(i), vr, 0);
+			krf.read(new DBInteger(i), vr, 0);
 			final int j = i;
 			assertThrows(ArrayIndexOutOfBoundsException.class,
-					() -> krf.read(new IntegerKey(j), new VariableRecord(1), 1));
+					() -> krf.read(new DBInteger(j), new VariableRecord(1), 1));
 			assertEquals((i + 1) * 10 * Integer.SIZE / Byte.SIZE, vr.size());
 		}
 		buf.flush();
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 		for (int i = 0; i < 5; ++i) {
-			krfI.insert(new IntegerKey(10 + i), new VariableRecord(10 * i + 10)); // this is less than additional 100
+			krfI.insert(new DBInteger(10 + i), new VariableRecord(10 * i + 10)); // this is less than additional 100
 																					// bytes, so it should not trigger
 																					// an overflow or split
 		}
@@ -153,10 +153,10 @@ public class HashTest {
 		assertEquals(0, cbfO.getWrites());
 		for (int i = 0; i < 15; ++i) {
 			VariableRecord vr = new VariableRecord(1);
-			krf.read(new IntegerKey(i), vr, 0);
+			krf.read(new DBInteger(i), vr, 0);
 			final int j = i;
 			assertThrows(ArrayIndexOutOfBoundsException.class,
-					() -> krf.read(new IntegerKey(j), new VariableRecord(1), 1));
+					() -> krf.read(new DBInteger(j), new VariableRecord(1), 1));
 			int k = i;
 			if (i >= 10)
 				k = i - 10;
@@ -184,50 +184,50 @@ public class HashTest {
 	@RepeatedTest(10)
 	public void garbage(RepetitionInfo repetitionInfo) throws Exception {
 		ArrayList<VariableRecord> data = randomVR(1000, 100); // 100 ints is less than 4000 (bytes)
-		KeyRecordFile<VariableRecord, IntegerKey> krf = generate(4096, 0.5, 7);
+		KeyRecordFile<VariableRecord, DBInteger> krf = generate(4096, 0.5, 7);
 		bf.setGarbage(randomBB(4096, repetitionInfo.getCurrentRepetition()));
 		bfO.setGarbage(randomBB(4096, 50 * repetitionInfo.getCurrentRepetition()));
 		for (int i = 0; i < data.size(); ++i) {
-			krf.insert(new IntegerKey(i), data.get(i));
+			krf.insert(new DBInteger(i), data.get(i));
 		}
 		assertEquals(buf.getFix(), buf.getUnfix());
 		for (int i = 0; i < data.size(); ++i) {
 			VariableRecord ret = new VariableRecord(0);
-			assertEquals(1, krf.size(new IntegerKey(i), ret));
-			krf.read(new IntegerKey(i), ret, 0);
+			assertEquals(1, krf.size(new DBInteger(i), ret));
+			krf.read(new DBInteger(i), ret, 0);
 			assertEquals(data.get(i).size(), ret.size());
 		}
 		data = randomVR(2000, 100); // 100 ints is less than 4000 (bytes)
 		for (int i = 1000; i < data.size(); ++i) {
-			krf.insert(new IntegerKey(i - 1000), data.get(i));
+			krf.insert(new DBInteger(i - 1000), data.get(i));
 		}
 		for (int i = 0; i < 1000; ++i) {
 			VariableRecord ret = new VariableRecord(0);
-			assertEquals(2, krf.size(new IntegerKey(i), ret), "Int run: " + i);
-			krf.read(new IntegerKey(i), ret, 0);
+			assertEquals(2, krf.size(new DBInteger(i), ret), "Int run: " + i);
+			krf.read(new DBInteger(i), ret, 0);
 			assertEquals(data.get(i).size(), ret.size());
-			krf.read(new IntegerKey(i), ret, 1);
+			krf.read(new DBInteger(i), ret, 1);
 			assertEquals(data.get(i + 1000).size(), ret.size());
-			krf.delete(new IntegerKey(i), 1, ret); // This is allowed as the order must be preserved when removing the
+			krf.delete(new DBInteger(i), 1, ret); // This is allowed as the order must be preserved when removing the
 													// last element, as stated in @insert.
-			assertEquals(1, krf.size(new IntegerKey(i), ret), "Int run: " + i);
-			krf.read(new IntegerKey(i), ret, 0);
+			assertEquals(1, krf.size(new DBInteger(i), ret), "Int run: " + i);
+			krf.read(new DBInteger(i), ret, 0);
 			assertEquals(data.get(i).size(), ret.size());
 			final int j = i;
 			assertThrows(ArrayIndexOutOfBoundsException.class,
-					() -> krf.read(new IntegerKey(j), new VariableRecord(1), 1));
+					() -> krf.read(new DBInteger(j), new VariableRecord(1), 1));
 		}
 	}
 
 	@Test
 	public void readWriteMixedOverflow() throws Exception {
-		KeyRecordFile<VariableRecord, IntegerKey> krf = generate(4096, 0.5, 7);
+		KeyRecordFile<VariableRecord, DBInteger> krf = generate(4096, 0.5, 7);
 		cbfO.reset();
-		krf.insert(new IntegerKey(0), new VariableRecord(100));
+		krf.insert(new DBInteger(0), new VariableRecord(100));
 		buf.flush();
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
-		krf.insert(new IntegerKey(7), new VariableRecord(1000)); // this is too large the for bucket, so it has to go in
+		krf.insert(new DBInteger(7), new VariableRecord(1000)); // this is too large for the bucket, so it has to go in
 																	// overflow
 		buf.flush();
 		assertThat(cbfO.getReads(), either(equalTo(1)).or(equalTo(2))); // We need to read the meta-block at the
@@ -237,57 +237,57 @@ public class HashTest {
 		assertEquals(2, cbfO.getWrites()); // As appending is mapped to writes in GarbageBlockFile, we want 2 (one for
 											// appending and one for inserting the data)
 		cbfO.reset();
-		krf.insert(new IntegerKey(14), new VariableRecord(10)); // while this does fit into bucket1, one has to make
+		krf.insert(new DBInteger(14), new VariableRecord(10)); // while this does fit into bucket1, one has to make
 																// sure that no 14 is stored in the overflow.
 		buf.flush();
 		// Therefore we don't check Writes, but only reads
 		assertNotEquals(0, cbfO.getReads());
 		cbf.reset();
 		// All should be in the same bucket, bucket 0
-		assertEquals(1, assertBlockRead(cbf, 0, () -> krf.size(new IntegerKey(0), new VariableRecord(0))));
+		assertEquals(1, assertBlockRead(cbf, 0, () -> krf.size(new DBInteger(0), new VariableRecord(0))));
 		// Rerun without assertBlockRead to make sure use-after-free could be detectable
-		assertEquals(1, krf.size(new IntegerKey(0), new VariableRecord(0)));
+		assertEquals(1, krf.size(new DBInteger(0), new VariableRecord(0)));
 
 		VariableRecord vr = new VariableRecord(1);
-		krf.read(new IntegerKey(0), vr, 0);
+		krf.read(new DBInteger(0), vr, 0);
 		assertEquals(100 * Integer.SIZE / Byte.SIZE, vr.size());
 		cbf.reset();
-		assertBlockRead(cbf, 0, () -> {krf.read(new IntegerKey(0), vr, 0); return null;});
+		assertBlockRead(cbf, 0, () -> {krf.read(new DBInteger(0), vr, 0); return null;});
 		assertEquals(100 * Integer.SIZE / Byte.SIZE, vr.size());
 
 		// Repeat for 7 and 14
-		assertEquals(1, assertBlockRead(cbf, 0, () -> krf.size(new IntegerKey(7), new VariableRecord(0))));
-		assertEquals(1, krf.size(new IntegerKey(7), new VariableRecord(0)));
+		assertEquals(1, assertBlockRead(cbf, 0, () -> krf.size(new DBInteger(7), new VariableRecord(0))));
+		assertEquals(1, krf.size(new DBInteger(7), new VariableRecord(0)));
 
-		krf.read(new IntegerKey(7), vr, 0);
+		krf.read(new DBInteger(7), vr, 0);
 		assertEquals(1000 * Integer.SIZE / Byte.SIZE, vr.size());
 		cbf.reset();
-		assertBlockRead(cbf, 0, () -> {krf.read(new IntegerKey(7), vr, 0); return null;});
+		assertBlockRead(cbf, 0, () -> {krf.read(new DBInteger(7), vr, 0); return null;});
 		assertEquals(1000 * Integer.SIZE / Byte.SIZE, vr.size());
 
 		// Repeat for 14
-		assertEquals(1, krf.size(new IntegerKey(14), new VariableRecord(0)));
+		assertEquals(1, krf.size(new DBInteger(14), new VariableRecord(0)));
 		cbf.reset();
-		assertEquals(1, assertBlockRead(cbf, 0, () -> krf.size(new IntegerKey(14), new VariableRecord(0))));
+		assertEquals(1, assertBlockRead(cbf, 0, () -> krf.size(new DBInteger(14), new VariableRecord(0))));
 
-		krf.read(new IntegerKey(14), vr, 0);
+		krf.read(new DBInteger(14), vr, 0);
 		assertEquals(10 * Integer.SIZE / Byte.SIZE, vr.size());
 		cbf.reset();
-		assertBlockRead(cbf, 0, () -> {krf.read(new IntegerKey(14), vr, 0); return null;});
+		assertBlockRead(cbf, 0, () -> {krf.read(new DBInteger(14), vr, 0); return null;});
 		assertEquals(10 * Integer.SIZE / Byte.SIZE, vr.size());
 	}
 
 	@Test
 	public void doubleSplit() throws Exception {
-		KeyRecordFile<VariableRecord, IntegerKey> krf = generate(4096, 0.2, 7); // setting the threshhold to 0.2 should
+		KeyRecordFile<VariableRecord, DBInteger> krf = generate(4096, 0.2, 7); // setting the threshold to 0.2 should
 																				// trigger a split after 1024*7/5 = 1433
 																				// Ints, after that at 1024 * 8/5 = 1638
 		cbfO.reset();
-		krf.insert(new IntegerKey(0), new VariableRecord(1000));
+		krf.insert(new DBInteger(0), new VariableRecord(1000));
 		buf.flush();
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
-		krf.insert(new IntegerKey(7), new VariableRecord(400)); // this is too large the for bucket, so it has to go in
+		krf.insert(new DBInteger(7), new VariableRecord(400)); // this is too large for the bucket, so it has to go in
 																// overflow
 		buf.flush();
 		assertThat(cbfO.getReads(), either(equalTo(1)).or(equalTo(2))); // We need to read the meta-block at the
@@ -296,11 +296,11 @@ public class HashTest {
 		// TODO: this is if free list is optional
 		assertEquals(2, cbfO.getWrites()); // As appending is mapped to writes in GarbageBlockFile, we want 2 (one for
 											// appending and one for inserting the data)
-		krf.insert(new IntegerKey(21), new VariableRecord(100));
+		krf.insert(new DBInteger(21), new VariableRecord(100));
 		buf.flush();
 		cbfO.reset();
 		cbf.reset();
-		krf.insert(new IntegerKey(8), new VariableRecord(100));
+		krf.insert(new DBInteger(8), new VariableRecord(100));
 		buf.flush();
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
@@ -308,138 +308,138 @@ public class HashTest {
 		cbf.reset();
 
 		// 0 should be in the bucket 0
-		assertEquals(1, krf.size(new IntegerKey(0), new VariableRecord(0)));
-		assertEquals(1, assertBlockRead(cbf, 0, () -> krf.size(new IntegerKey(0), new VariableRecord(0))));
+		assertEquals(1, krf.size(new DBInteger(0), new VariableRecord(0)));
+		assertEquals(1, assertBlockRead(cbf, 0, () -> krf.size(new DBInteger(0), new VariableRecord(0))));
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
 		VariableRecord vr = new VariableRecord(1);
-		krf.read(new IntegerKey(0), vr, 0);
+		krf.read(new DBInteger(0), vr, 0);
 		assertEquals(1000 * Integer.SIZE / Byte.SIZE, vr.size());
-		assertBlockRead(cbf, 0, () -> {krf.read(new IntegerKey(0), vr, 0); return null;});
+		assertBlockRead(cbf, 0, () -> {krf.read(new DBInteger(0), vr, 0); return null;});
 		assertEquals(1000 * Integer.SIZE / Byte.SIZE, vr.size());
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
 		// Repeat for 7 and 21
-		assertEquals(1, krf.size(new IntegerKey(7), new VariableRecord(0)));
-		assertEquals(1, assertBlockRead(cbf, 7, () -> krf.size(new IntegerKey(7), new VariableRecord(0))));
+		assertEquals(1, krf.size(new DBInteger(7), new VariableRecord(0)));
+		assertEquals(1, assertBlockRead(cbf, 7, () -> krf.size(new DBInteger(7), new VariableRecord(0))));
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
-		assertBlockRead(cbf, 7, () -> {krf.read(new IntegerKey(7), vr, 0); return null;});
+		assertBlockRead(cbf, 7, () -> {krf.read(new DBInteger(7), vr, 0); return null;});
 		assertEquals(400 * Integer.SIZE / Byte.SIZE, vr.size());
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
-		krf.read(new IntegerKey(7), vr, 0);
+		krf.read(new DBInteger(7), vr, 0);
 		assertEquals(400 * Integer.SIZE / Byte.SIZE, vr.size());
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
 		// Repeat for 21
-		assertEquals(1, krf.size(new IntegerKey(21), new VariableRecord(0)));
-		assertEquals(1, assertBlockRead(cbf, 7, () -> krf.size(new IntegerKey(21), new VariableRecord(0))));
+		assertEquals(1, krf.size(new DBInteger(21), new VariableRecord(0)));
+		assertEquals(1, assertBlockRead(cbf, 7, () -> krf.size(new DBInteger(21), new VariableRecord(0))));
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
-		krf.read(new IntegerKey(21), vr, 0);
+		krf.read(new DBInteger(21), vr, 0);
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 		assertEquals(100 * Integer.SIZE / Byte.SIZE, vr.size());
-		assertBlockRead(cbf, 7, () -> {krf.read(new IntegerKey(21), vr, 0); return null;});
+		assertBlockRead(cbf, 7, () -> {krf.read(new DBInteger(21), vr, 0); return null;});
 		assertEquals(100 * Integer.SIZE / Byte.SIZE, vr.size());
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
 		// Repeat for 8
-		assertEquals(1, krf.size(new IntegerKey(8), new VariableRecord(0)));
-		assertEquals(1, assertBlockRead(cbf, 1, () -> krf.size(new IntegerKey(8), new VariableRecord(0))));
+		assertEquals(1, krf.size(new DBInteger(8), new VariableRecord(0)));
+		assertEquals(1, assertBlockRead(cbf, 1, () -> krf.size(new DBInteger(8), new VariableRecord(0))));
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
-		krf.read(new IntegerKey(8), vr, 0);
+		krf.read(new DBInteger(8), vr, 0);
 		assertEquals(100 * Integer.SIZE / Byte.SIZE, vr.size());
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
-		assertBlockRead(cbf, 1, () -> {krf.read(new IntegerKey(8), vr, 0); return null;});
+		assertBlockRead(cbf, 1, () -> {krf.read(new DBInteger(8), vr, 0); return null;});
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 		assertEquals(100 * Integer.SIZE / Byte.SIZE, vr.size());
 
-		krf.insert(new IntegerKey(9), new VariableRecord(100));
+		krf.insert(new DBInteger(9), new VariableRecord(100));
 		buf.flush();
 		cbfO.reset();
 		cbf.reset();
 
 		// 0 should be in the bucket 0
-		assertEquals(1, krf.size(new IntegerKey(0), new VariableRecord(0)));
-		assertEquals(1, assertBlockRead(cbf, 0, () -> krf.size(new IntegerKey(0), new VariableRecord(0))));
+		assertEquals(1, krf.size(new DBInteger(0), new VariableRecord(0)));
+		assertEquals(1, assertBlockRead(cbf, 0, () -> krf.size(new DBInteger(0), new VariableRecord(0))));
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
-		krf.read(new IntegerKey(0), vr, 0);
+		krf.read(new DBInteger(0), vr, 0);
 		assertEquals(1000 * Integer.SIZE / Byte.SIZE, vr.size());
-		assertBlockRead(cbf, 0, () -> {krf.read(new IntegerKey(0), vr, 0); return null;});
+		assertBlockRead(cbf, 0, () -> {krf.read(new DBInteger(0), vr, 0); return null;});
 		assertEquals(1000 * Integer.SIZE / Byte.SIZE, vr.size());
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
 		// Repeat for 7 and 21
-		assertEquals(1, krf.size(new IntegerKey(7), new VariableRecord(0)));
-		assertEquals(1, assertBlockRead(cbf, 7, () -> krf.size(new IntegerKey(7), new VariableRecord(0))));
+		assertEquals(1, krf.size(new DBInteger(7), new VariableRecord(0)));
+		assertEquals(1, assertBlockRead(cbf, 7, () -> krf.size(new DBInteger(7), new VariableRecord(0))));
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
-		assertBlockRead(cbf, 7, () -> {krf.read(new IntegerKey(7), vr, 0); return null;});
+		assertBlockRead(cbf, 7, () -> {krf.read(new DBInteger(7), vr, 0); return null;});
 		assertEquals(400 * Integer.SIZE / Byte.SIZE, vr.size());
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
-		krf.read(new IntegerKey(7), vr, 0);
+		krf.read(new DBInteger(7), vr, 0);
 		assertEquals(400 * Integer.SIZE / Byte.SIZE, vr.size());
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
 		// Repeat for 21
-		assertEquals(1, krf.size(new IntegerKey(21), new VariableRecord(0)));
-		assertEquals(1, assertBlockRead(cbf, 7, () -> krf.size(new IntegerKey(21), new VariableRecord(0))));
+		assertEquals(1, krf.size(new DBInteger(21), new VariableRecord(0)));
+		assertEquals(1, assertBlockRead(cbf, 7, () -> krf.size(new DBInteger(21), new VariableRecord(0))));
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
-		krf.read(new IntegerKey(21), vr, 0);
+		krf.read(new DBInteger(21), vr, 0);
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 		assertEquals(100 * Integer.SIZE / Byte.SIZE, vr.size());
-		assertBlockRead(cbf, 7, () -> {krf.read(new IntegerKey(21), vr, 0); return null;});
+		assertBlockRead(cbf, 7, () -> {krf.read(new DBInteger(21), vr, 0); return null;});
 		assertEquals(100 * Integer.SIZE / Byte.SIZE, vr.size());
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
 		// Repeat for 8
-		assertEquals(1, krf.size(new IntegerKey(8), new VariableRecord(0)));
-		assertEquals(1, assertBlockRead(cbf, 8, () -> krf.size(new IntegerKey(8), new VariableRecord(0))));
+		assertEquals(1, krf.size(new DBInteger(8), new VariableRecord(0)));
+		assertEquals(1, assertBlockRead(cbf, 8, () -> krf.size(new DBInteger(8), new VariableRecord(0))));
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
-		krf.read(new IntegerKey(8), vr, 0);
+		krf.read(new DBInteger(8), vr, 0);
 		assertEquals(100 * Integer.SIZE / Byte.SIZE, vr.size());
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
-		assertBlockRead(cbf, 8, () -> {krf.read(new IntegerKey(8), vr, 0); return null;});
+		assertBlockRead(cbf, 8, () -> {krf.read(new DBInteger(8), vr, 0); return null;});
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 		assertEquals(100 * Integer.SIZE / Byte.SIZE, vr.size());
 
 		// Repeat for 9
-		assertEquals(1, krf.size(new IntegerKey(9), new VariableRecord(0)));
-		assertEquals(1, assertBlockRead(cbf, 2, () -> krf.size(new IntegerKey(9), new VariableRecord(0))));
+		assertEquals(1, krf.size(new DBInteger(9), new VariableRecord(0)));
+		assertEquals(1, assertBlockRead(cbf, 2, () -> krf.size(new DBInteger(9), new VariableRecord(0))));
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
-		krf.read(new IntegerKey(9), vr, 0);
+		krf.read(new DBInteger(9), vr, 0);
 		assertEquals(100 * Integer.SIZE / Byte.SIZE, vr.size());
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
-		assertBlockRead(cbf, 2, () -> {krf.read(new IntegerKey(9), vr, 0); return null;});
+		assertBlockRead(cbf, 2, () -> {krf.read(new DBInteger(9), vr, 0); return null;});
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 		assertEquals(100 * Integer.SIZE / Byte.SIZE, vr.size());
@@ -447,15 +447,15 @@ public class HashTest {
 
 	@Test
 	public void readWriteMixedSplit() throws Exception {
-		KeyRecordFile<VariableRecord, IntegerKey> krf = generate(4096, 0.2, 7); // setting the threshhold to 0.2 should
+		KeyRecordFile<VariableRecord, DBInteger> krf = generate(4096, 0.2, 7); // setting the threshold to 0.2 should
 																				// trigger a split after 1024*7/5 = 1433
 																				// Ints
 		cbfO.reset();
-		krf.insert(new IntegerKey(0), new VariableRecord(1000));
+		krf.insert(new DBInteger(0), new VariableRecord(1000));
 		buf.flush();
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
-		krf.insert(new IntegerKey(7), new VariableRecord(400)); // this is too large the for bucket, so it has to go in
+		krf.insert(new DBInteger(7), new VariableRecord(400)); // this is too large for the bucket, so it has to go in
 																// overflow
 		buf.flush();
 		assertThat(cbfO.getReads(), either(equalTo(1)).or(equalTo(2))); // We need to read the meta-block at the
@@ -464,47 +464,47 @@ public class HashTest {
 		// TODO: this is if free list is optional
 		assertEquals(2, cbfO.getWrites()); // As appending is mapped to writes in GarbageBlockFile, we want 2 (one for
 											// appending and one for inserting the data)
-		krf.insert(new IntegerKey(21), new VariableRecord(100));
+		krf.insert(new DBInteger(21), new VariableRecord(100));
 		buf.flush();
 		cbfO.reset();
 		cbf.reset();
 
 		VariableRecord vr = new VariableRecord(1);
 		// 0 should be in bucket 0
-		assertEquals(1, krf.size(new IntegerKey(0), new VariableRecord(0)));
-		assertEquals(1, assertBlockRead(cbf, 0, () -> krf.size(new IntegerKey(0), new VariableRecord(0))));
+		assertEquals(1, krf.size(new DBInteger(0), new VariableRecord(0)));
+		assertEquals(1, assertBlockRead(cbf, 0, () -> krf.size(new DBInteger(0), new VariableRecord(0))));
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
-		krf.read(new IntegerKey(0), vr, 0);
+		krf.read(new DBInteger(0), vr, 0);
 		assertEquals(1000 * Integer.SIZE / Byte.SIZE, vr.size());
-		assertBlockRead(cbf, 0, () -> {krf.read(new IntegerKey(0), vr, 0); return null;});
+		assertBlockRead(cbf, 0, () -> {krf.read(new DBInteger(0), vr, 0); return null;});
 		assertEquals(1000 * Integer.SIZE / Byte.SIZE, vr.size());
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
 		// Repeat for 7
-		assertEquals(1, krf.size(new IntegerKey(7), new VariableRecord(0)));
-		assertEquals(1, assertBlockRead(cbf, 7, () -> krf.size(new IntegerKey(7), new VariableRecord(0))));
+		assertEquals(1, krf.size(new DBInteger(7), new VariableRecord(0)));
+		assertEquals(1, assertBlockRead(cbf, 7, () -> krf.size(new DBInteger(7), new VariableRecord(0))));
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
-		krf.read(new IntegerKey(7), vr, 0);
+		krf.read(new DBInteger(7), vr, 0);
 		assertEquals(400 * Integer.SIZE / Byte.SIZE, vr.size());
-		assertBlockRead(cbf, 7, () -> {krf.read(new IntegerKey(7), vr, 0); return null;});
+		assertBlockRead(cbf, 7, () -> {krf.read(new DBInteger(7), vr, 0); return null;});
 		assertEquals(400 * Integer.SIZE / Byte.SIZE, vr.size());
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
 		// Repeat for 21
-		assertEquals(1, krf.size(new IntegerKey(21), new VariableRecord(0)));
-		assertEquals(1, assertBlockRead(cbf, 7, () -> krf.size(new IntegerKey(21), new VariableRecord(0))));
+		assertEquals(1, krf.size(new DBInteger(21), new VariableRecord(0)));
+		assertEquals(1, assertBlockRead(cbf, 7, () -> krf.size(new DBInteger(21), new VariableRecord(0))));
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
-		krf.read(new IntegerKey(21), vr, 0);
+		krf.read(new DBInteger(21), vr, 0);
 		assertEquals(100 * Integer.SIZE / Byte.SIZE, vr.size());
-		assertBlockRead(cbf, 7, () -> {krf.read(new IntegerKey(21), vr, 0); return null;});
+		assertBlockRead(cbf, 7, () -> {krf.read(new DBInteger(21), vr, 0); return null;});
 		assertEquals(100 * Integer.SIZE / Byte.SIZE, vr.size());
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
@@ -512,23 +512,23 @@ public class HashTest {
 
 	@Test
 	public void reclaimSpace() throws Exception {
-		KeyRecordFile<VariableRecord, IntegerKey> krf = generate(4096, 0.2, 7); // setting the threshhold to 0.2 should
+		KeyRecordFile<VariableRecord, DBInteger> krf = generate(4096, 0.2, 7); // setting the threshold to 0.2 should
 																				// trigger a split after 1024*7/5 = 1433
 																				// Ints
 		cbfO.reset();
-		krf.insert(new IntegerKey(0), new VariableRecord(1000));
+		krf.insert(new DBInteger(0), new VariableRecord(1000));
 		buf.flush();
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
-		krf.insert(new IntegerKey(2), new VariableRecord(400));
+		krf.insert(new DBInteger(2), new VariableRecord(400));
 		buf.flush();
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
-		krf.delete(new IntegerKey(0), 0, new VariableRecord(0)); // now there is a lot free space again
+		krf.delete(new DBInteger(0), 0, new VariableRecord(0)); // now there is a lot free space again
 		buf.flush();
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
-		krf.insert(new IntegerKey(21), new VariableRecord(100));
+		krf.insert(new DBInteger(21), new VariableRecord(100));
 		buf.flush();
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
@@ -536,38 +536,38 @@ public class HashTest {
 		cbf.reset();
 		VariableRecord vr = new VariableRecord(0);
 		// Read 2
-		assertEquals(1, krf.size(new IntegerKey(2), new VariableRecord(0)));
+		assertEquals(1, krf.size(new DBInteger(2), new VariableRecord(0)));
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
-		assertEquals(1, assertBlockRead(cbf, 2, () -> krf.size(new IntegerKey(2), new VariableRecord(0))));
+		assertEquals(1, assertBlockRead(cbf, 2, () -> krf.size(new DBInteger(2), new VariableRecord(0))));
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
-		krf.read(new IntegerKey(2), vr, 0);
+		krf.read(new DBInteger(2), vr, 0);
 		assertEquals(0, cbf.getWrites());
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 		assertEquals(400 * Integer.SIZE / Byte.SIZE, vr.size());
-		assertBlockRead(cbf, 2, () -> {krf.read(new IntegerKey(2), vr, 0); return null;});
+		assertBlockRead(cbf, 2, () -> {krf.read(new DBInteger(2), vr, 0); return null;});
 		assertEquals(0, cbf.getWrites());
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 		assertEquals(400 * Integer.SIZE / Byte.SIZE, vr.size());
 
 		// Read 21
-		assertEquals(1, krf.size(new IntegerKey(21), new VariableRecord(0)));
+		assertEquals(1, krf.size(new DBInteger(21), new VariableRecord(0)));
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
-		assertEquals(1, assertBlockRead(cbf, 0, () -> krf.size(new IntegerKey(21), new VariableRecord(0))));
+		assertEquals(1, assertBlockRead(cbf, 0, () -> krf.size(new DBInteger(21), new VariableRecord(0))));
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 
-		krf.read(new IntegerKey(21), vr, 0);
+		krf.read(new DBInteger(21), vr, 0);
 		assertEquals(0, cbf.getWrites());
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
 		assertEquals(100 * Integer.SIZE / Byte.SIZE, vr.size());
-		assertBlockRead(cbf, 0, () -> {krf.read(new IntegerKey(21), vr, 0); return null;});
+		assertBlockRead(cbf, 0, () -> {krf.read(new DBInteger(21), vr, 0); return null;});
 		assertEquals(0, cbf.getWrites());
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
@@ -576,13 +576,13 @@ public class HashTest {
 
 	@Test
 	public void readWriteSameOverflow() throws Exception {
-		KeyRecordFile<VariableRecord, IntegerKey> krf = generate(4096, 0.5, 7);
+		KeyRecordFile<VariableRecord, DBInteger> krf = generate(4096, 0.5, 7);
 		cbfO.reset();
-		krf.insert(new IntegerKey(0), new VariableRecord(100));
+		krf.insert(new DBInteger(0), new VariableRecord(100));
 		buf.flush();
 		assertEquals(0, cbfO.getReads());
 		assertEquals(0, cbfO.getWrites());
-		krf.insert(new IntegerKey(0), new VariableRecord(1000)); // this is too large the for bucket, so it has to go in
+		krf.insert(new DBInteger(0), new VariableRecord(1000)); // this is too large for the bucket, so it has to go in
 																	// overflow
 		buf.flush();
 		assertThat(cbfO.getReads(), either(equalTo(1)).or(equalTo(2))); // We need to read the meta-block at the
@@ -592,20 +592,20 @@ public class HashTest {
 		assertEquals(2, cbfO.getWrites()); // As appending is mapped to writes in GarbageBlockFile, we want 2 (one for
 											// appending and one for inserting the data)
 		cbfO.reset();
-		krf.insert(new IntegerKey(0), new VariableRecord(10)); // while this does fit into bucket1, one has to make sure
+		krf.insert(new DBInteger(0), new VariableRecord(10)); // while this does fit into bucket1, one has to make sure
 																// that the index is correct
-		assertEquals(3, krf.size(new IntegerKey(0), new VariableRecord(0)));
+		assertEquals(3, krf.size(new DBInteger(0), new VariableRecord(0)));
 		VariableRecord vr = new VariableRecord(1);
-		krf.read(new IntegerKey(0), vr, 0);
+		krf.read(new DBInteger(0), vr, 0);
 		assertEquals(100 * Integer.SIZE / Byte.SIZE, vr.size());
-		krf.read(new IntegerKey(0), vr, 1);
+		krf.read(new DBInteger(0), vr, 1);
 		assertEquals(1000 * Integer.SIZE / Byte.SIZE, vr.size());
-		krf.read(new IntegerKey(0), vr, 2);
+		krf.read(new DBInteger(0), vr, 2);
 		assertEquals(10 * Integer.SIZE / Byte.SIZE, vr.size());
-		krf.delete(new IntegerKey(0), 2, vr);
-		krf.read(new IntegerKey(0), vr, 0);
+		krf.delete(new DBInteger(0), 2, vr);
+		krf.read(new DBInteger(0), vr, 0);
 		assertEquals(100 * Integer.SIZE / Byte.SIZE, vr.size());
-		krf.read(new IntegerKey(0), vr, 1);
+		krf.read(new DBInteger(0), vr, 1);
 		assertEquals(1000 * Integer.SIZE / Byte.SIZE, vr.size());
 	}
 
@@ -634,7 +634,7 @@ public class HashTest {
 
 	@Test
 	public void loadTest() throws Exception {
-		KeyRecordFile<VariableRecord, IntegerKey> krf = generate(4096, 0.5, 7);
+		KeyRecordFile<VariableRecord, DBInteger> krf = generate(4096, 0.5, 7);
 		HashMap<Integer, ArrayList<Integer>> inMemory = new HashMap<>(); // key->(index->Value)
 		Random r = new Random(13763);
 		for (int i = 0; i < 100; ++i) {
@@ -648,10 +648,10 @@ public class HashTest {
 				delete(krf, inMemory, r);
 			}
 			if (r.nextInt(3) == 1) {
-				modifiy(krf, inMemory, r);
+				modify(krf, inMemory, r);
 			}
 			if (r.nextInt(4) == 1) {
-				modifiyKey(krf, inMemory, r);
+				modifyKey(krf, inMemory, r);
 			}
 			if (r.nextInt(40) == 1) {
 				deleteAll(krf, inMemory, r);
@@ -659,17 +659,17 @@ public class HashTest {
 		}
 	}
 
-	private void refresh(KeyRecordFile<VariableRecord, IntegerKey> krf, HashMap<Integer, ArrayList<Integer>> inMemory)
+	private void refresh(KeyRecordFile<VariableRecord, DBInteger> krf, HashMap<Integer, ArrayList<Integer>> inMemory)
 			throws IOException, BufferFullException {
 		HashMap<Integer, ArrayList<Integer>> out = new HashMap<>();
 		for (Integer k : inMemory.keySet()) {
-			int sz = krf.size(new IntegerKey(k), new VariableRecord(1));
+			int sz = krf.size(new DBInteger(k), new VariableRecord(1));
 			ArrayList<Integer> curList = inMemory.get(k);
 			assertEquals(curList.size(), sz, "For Key: " + k);
 			ArrayList<Integer> actual = new ArrayList<>(sz);
 			for (int i = 0; i < sz; ++i) {
 				VariableRecord vr = new VariableRecord(1);
-				krf.read(new IntegerKey(k), vr, i);
+				krf.read(new DBInteger(k), vr, i);
 				actual.add(vr.size());
 			}
 			assertTrue(actual.stream().allMatch(v -> curList.contains(v)),
@@ -680,34 +680,34 @@ public class HashTest {
 		inMemory.putAll(out);
 	}
 
-	private void insert(KeyRecordFile<VariableRecord, IntegerKey> krf, HashMap<Integer, ArrayList<Integer>> inMemory,
+	private void insert(KeyRecordFile<VariableRecord, DBInteger> krf, HashMap<Integer, ArrayList<Integer>> inMemory,
 			Random r) throws IOException, BufferFullException {
 		int key = r.nextInt(500);
 		int amm = r.nextInt(500) + 1;
 		VariableRecord vr = new VariableRecord(amm);
 		inMemory.computeIfAbsent(key, ArrayList::new).add(vr.size());
-		krf.insert(new IntegerKey(key), vr);
+		krf.insert(new DBInteger(key), vr);
 		check(krf, inMemory);
 	}
 
-	private void check(KeyRecordFile<VariableRecord, IntegerKey> krf, HashMap<Integer, ArrayList<Integer>> inMemory)
+	private void check(KeyRecordFile<VariableRecord, DBInteger> krf, HashMap<Integer, ArrayList<Integer>> inMemory)
 			throws IOException, BufferFullException {
 		for (Integer k : inMemory.keySet()) {
-			int sz = krf.size(new IntegerKey(k), new VariableRecord(1));
+			int sz = krf.size(new DBInteger(k), new VariableRecord(1));
 			ArrayList<Integer> curList = inMemory.get(k);
 			assertEquals(curList.size(), sz);
 			for (int i = 0; i < sz; ++i) {
 				VariableRecord vr = new VariableRecord(1);
-				krf.read(new IntegerKey(k), vr, i);
+				krf.read(new DBInteger(k), vr, i);
 				assertEquals(curList.get(i), vr.size());
 			}
 			final int j = k;
 			assertThrows(ArrayIndexOutOfBoundsException.class,
-					() -> krf.read(new IntegerKey(j), new VariableRecord(1), sz));
+					() -> krf.read(new DBInteger(j), new VariableRecord(1), sz));
 		}
 	}
 
-	private void delete(KeyRecordFile<VariableRecord, IntegerKey> krf, HashMap<Integer, ArrayList<Integer>> inMemory,
+	private void delete(KeyRecordFile<VariableRecord, DBInteger> krf, HashMap<Integer, ArrayList<Integer>> inMemory,
 			Random r) throws IOException, BufferFullException {
 		int key = choice(inMemory.keySet(), r);
 		ArrayList<Integer> cur = inMemory.get(key);
@@ -716,17 +716,17 @@ public class HashTest {
 		if (r.nextInt(5) < 2) {
 			// remove last element
 			cur.remove(cur.size() - 1);
-			krf.delete(new IntegerKey(key), cur.size(), new VariableRecord(1));
+			krf.delete(new DBInteger(key), cur.size(), new VariableRecord(1));
 			check(krf, inMemory);
 			return;
 		}
 		int idx = r.nextInt(cur.size());
 		cur.remove(idx);
-		krf.delete(new IntegerKey(key), idx, new VariableRecord(1));
+		krf.delete(new DBInteger(key), idx, new VariableRecord(1));
 		refresh(krf, inMemory);
 	}
 
-	private void modifiy(KeyRecordFile<VariableRecord, IntegerKey> krf, HashMap<Integer, ArrayList<Integer>> inMemory,
+	private void modify(KeyRecordFile<VariableRecord, DBInteger> krf, HashMap<Integer, ArrayList<Integer>> inMemory,
 			Random r) throws IOException, BufferFullException {
 		int key = choice(inMemory.keySet(), r);
 		ArrayList<Integer> cur = inMemory.get(key);
@@ -738,11 +738,11 @@ public class HashTest {
 		int amm = r.nextInt(500) + 1;
 		VariableRecord vr = new VariableRecord(amm);
 		cur.set(idx, vr.size());
-		krf.modify(new IntegerKey(key), idx, vr);
+		krf.modify(new DBInteger(key), idx, vr);
 		refresh(krf, inMemory);
 	}
 
-	private void modifiyKey(KeyRecordFile<VariableRecord, IntegerKey> krf,
+	private void modifyKey(KeyRecordFile<VariableRecord, DBInteger> krf,
 			HashMap<Integer, ArrayList<Integer>> inMemory, Random r) throws IOException, BufferFullException {
 		int key = choice(inMemory.keySet(), r);
 		ArrayList<Integer> cur = inMemory.get(key);
@@ -752,16 +752,16 @@ public class HashTest {
 		ArrayList<Integer> cur2 = inMemory.get(key2);
 		cur2.addAll(cur);
 		cur.clear();
-		krf.modifyKey(new IntegerKey(key), new IntegerKey(key2), new VariableRecord(1));
+		krf.modifyKey(new DBInteger(key), new DBInteger(key2), new VariableRecord(1));
 		refresh(krf, inMemory);
 	}
 
-	private void deleteAll(KeyRecordFile<VariableRecord, IntegerKey> krf, HashMap<Integer, ArrayList<Integer>> inMemory,
+	private void deleteAll(KeyRecordFile<VariableRecord, DBInteger> krf, HashMap<Integer, ArrayList<Integer>> inMemory,
 			Random r) throws IOException, BufferFullException {
 		int key = choice(inMemory.keySet(), r);
 		ArrayList<Integer> cur = inMemory.get(key);
 		cur.clear();
-		krf.delete(new IntegerKey(key), new VariableRecord(1));
+		krf.delete(new DBInteger(key), new VariableRecord(1));
 		refresh(krf, inMemory);
 	}
 
diff --git a/tests/TIDTest.java b/tests/TIDTest.java
index f24936c2a107fdc5b76bcc4e5a2b32fd87443648..00906c4a58543dcdfba2b6724702d83c48e658d5 100644
--- a/tests/TIDTest.java
+++ b/tests/TIDTest.java
@@ -4,7 +4,7 @@ import idb.record.DeletedRecordException;
 import idb.record.View;
 import idb.datatypes.TID;
 import idb.datatypes.DataObject;
-import idb.datatypes.IntegerData;
+import idb.datatypes.DBInteger;
 import idb.datatypes.DBString;
 import idb.block.BlockFile;
 import idb.buffer.DBBuffer;
@@ -33,9 +33,9 @@ class CountingDBBuffer implements DBBuffer {
 		instance = other;
 	}
 
-	public ByteBuffer fix(BlockFile file, int pageno) throws IOException, BufferFullException {
+	public ByteBuffer fix(BlockFile blockfile, int pageno) throws IOException, BufferFullException {
 		fix++;
-		return instance.fix(file, pageno);
+		return instance.fix(blockfile, pageno);
 	}
 	public void unfix(BlockFile blockfile, int pageno) throws IOException {
 		unfix++;
@@ -128,7 +128,8 @@ class UnguardedVariableRecord implements DataObject {
 	public void read(int index, ByteBuffer bb) {
 		size = bb.getInt(index);
 		for (int i=1; i < size; ++i){
-			assertEquals(size, bb.getInt(index + i*(Integer.SIZE / Byte.SIZE)), "At Pos "+ i);
+			final int finalI = i;
+			assertEquals(size, bb.getInt(index + i*(Integer.SIZE / Byte.SIZE)), () ->"At Pos "+ finalI);
 		}
 	}
 
@@ -148,6 +149,11 @@ class UnguardedVariableRecord implements DataObject {
 	public DataObject copy() {
 		return new UnguardedVariableRecord(size);
 	}
+
+	@Override
+	public String toString() {
+		return "{size: " + size+"}";
+	}
 }
 
 class ForeignVSR extends VariableShortRecord {
@@ -298,11 +304,11 @@ public class TIDTest {
 		return ret;
 	}
 
-	private ArrayList<IntegerData> random(int size) {
-		ArrayList<IntegerData> ret = new ArrayList<>();
+	private ArrayList<DBInteger> random(int size) {
+		ArrayList<DBInteger> ret = new ArrayList<>();
 		Random r = new Random(42);
 		for(int i=0; i < size; ++i){
-			ret.add(new IntegerData(r.nextInt(Integer.MAX_VALUE)));
+			ret.add(new DBInteger(r.nextInt(Integer.MAX_VALUE)));
 		}
 		return ret;
 	}
@@ -336,19 +342,19 @@ public class TIDTest {
 	@Test
 	public void freshTID0() throws Exception {
 		// The first TID shall be TID(0, 0) on an empty file
-		DirectRecordFile<idb.datatypes.TID, idb.datatypes.IntegerData> drf = generate(4096, new IntegerData(0));
-		idb.datatypes.TID first = drf.insert(new IntegerData(0));
+		DirectRecordFile<idb.datatypes.TID, idb.datatypes.DBInteger> drf = generate(4096, new DBInteger(0));
+		idb.datatypes.TID first = drf.insert(new DBInteger(0));
 		assertEquals(0,first.getIndex());
 		assertEquals(0,first.getBlock());
 	}
 
 	@Test
 	public void loadTest() throws Exception {
-		ArrayList<IntegerData> data = random(10_000);
-		DirectRecordFile<TID, IntegerData> drf = generate(4096, new IntegerData(0));
+		ArrayList<DBInteger> data = random(10_000);
+		DirectRecordFile<TID, DBInteger> drf = generate(4096, new DBInteger(0));
 		ArrayList<TID> tids = fill(drf, data);
 		for (int i = 0 ; i < tids.size(); ++i){
-			IntegerData ret = new IntegerData(0);
+			DBInteger ret = new DBInteger(0);
 			drf.read(tids.get(i), ret);
 			assertEquals(data.get(i).getValue(), ret.getValue());
 		}
@@ -378,27 +384,27 @@ public class TIDTest {
 
 	@Test
 	public void emptyView() throws Exception {
-		DirectRecordFile<idb.datatypes.TID, idb.datatypes.IntegerData> drf = generate(4096, new IntegerData(0));
-		checkView(List.<IntegerData>of(), drf.view(), (x, y) -> x.getValue() == y.getValue(), new IntegerData(0));
+		DirectRecordFile<idb.datatypes.TID, idb.datatypes.DBInteger> drf = generate(4096, new DBInteger(0));
+		checkView(List.<DBInteger>of(), drf.view(), (x, y) -> x.getValue() == y.getValue(), new DBInteger(0));
 	}
 
 	@Test
 	public void unmovedTIDs() throws Exception {
 		// unmoved TIDs have to be found in one fix, 0 unfix and 0 dirties
-		ArrayList<IntegerData> data = random(100);
-		DirectRecordFile<TID, IntegerData> drf = generate(4096, new IntegerData(0));
+		ArrayList<DBInteger> data = random(100);
+		DirectRecordFile<TID, DBInteger> drf = generate(4096, new DBInteger(0));
 		// if no modify is called, they have to be at one place (as long as they're not splitted)
-		// fragmented may only happen if new IntegerData(0).size() + 5 (max IndexSize) > 4096, which is not true
+		// fragmented may only happen if new DBInteger(0).size() + 5 (max IndexSize) > 4096, which is not true
 		ArrayList<TID> tids = fill(drf, data);
 		buf.reset();
 		for (int i = 0; i < tids.size(); ++i){
-			drf.read(tids.get(i), new IntegerData(0));
+			drf.read(tids.get(i), new DBInteger(0));
 			assertEquals(1, buf.getFix());
 			assertEquals(1, buf.getUnfix());
 			assertEquals(0, buf.getDirty());
 			buf.reset();
 		}
-		checkView(data, drf.view(), (a, b) -> a.getValue() == b.getValue(), new IntegerData(0));
+		checkView(data, drf.view(), (a, b) -> a.getValue() == b.getValue(), new DBInteger(0));
 	}
 
 	private void needsDirty(){
@@ -485,60 +491,60 @@ public class TIDTest {
 
 	@Test
 	public void afterDeleteRead() throws Exception {
-		DirectRecordFile<idb.datatypes.TID, idb.datatypes.IntegerData> drf = generate(4096, new IntegerData(0));
-		idb.datatypes.TID first = drf.insert(new IntegerData(0));
-		idb.datatypes.TID snd = drf.insert(new IntegerData(200));
+		DirectRecordFile<idb.datatypes.TID, idb.datatypes.DBInteger> drf = generate(4096, new DBInteger(0));
+		idb.datatypes.TID first = drf.insert(new DBInteger(0));
+		idb.datatypes.TID snd = drf.insert(new DBInteger(200));
 		drf.delete(first);
-		IntegerData id = new IntegerData(0);
+		DBInteger id = new DBInteger(0);
 		drf.read(snd, id);
 		assertEquals(200, id.getValue());
 	}
 
 	@Test
 	public void deletedRead() throws Exception {
-		DirectRecordFile<idb.datatypes.TID, idb.datatypes.IntegerData> drf = generate(4096, new IntegerData(0));
-		idb.datatypes.TID first = drf.insert(new IntegerData(0));
-		idb.datatypes.TID snd = drf.insert(new IntegerData(200));
+		DirectRecordFile<idb.datatypes.TID, idb.datatypes.DBInteger> drf = generate(4096, new DBInteger(0));
+		idb.datatypes.TID first = drf.insert(new DBInteger(0));
+		idb.datatypes.TID snd = drf.insert(new DBInteger(200));
 		drf.delete(first);
-		IntegerData id = new IntegerData(0);
+		DBInteger id = new DBInteger(0);
 		assertThrows(idb.record.DeletedRecordException.class, () ->drf.read(first, id));
 	}
 
 	@Test
 	public void deletedMod() throws Exception {
-		DirectRecordFile<idb.datatypes.TID, idb.datatypes.IntegerData> drf = generate(4096, new IntegerData(0));
-		idb.datatypes.TID first = drf.insert(new IntegerData(0));
-		idb.datatypes.TID snd = drf.insert(new IntegerData(200));
+		DirectRecordFile<idb.datatypes.TID, idb.datatypes.DBInteger> drf = generate(4096, new DBInteger(0));
+		idb.datatypes.TID first = drf.insert(new DBInteger(0));
+		idb.datatypes.TID snd = drf.insert(new DBInteger(200));
 		drf.delete(first);
-		IntegerData id = new IntegerData(5);
+		DBInteger id = new DBInteger(5);
 		assertThrows(idb.record.DeletedRecordException.class, () ->drf.modify(first, id));
 	}
 
 	@Test
 	public void deletedReadLater() throws Exception {
-		DirectRecordFile<idb.datatypes.TID, idb.datatypes.IntegerData> drf = generate(4096, new IntegerData(0));
-		idb.datatypes.TID first = drf.insert(new IntegerData(0));
-		idb.datatypes.TID snd = drf.insert(new IntegerData(200));
+		DirectRecordFile<idb.datatypes.TID, idb.datatypes.DBInteger> drf = generate(4096, new DBInteger(0));
+		idb.datatypes.TID first = drf.insert(new DBInteger(0));
+		idb.datatypes.TID snd = drf.insert(new DBInteger(200));
 		drf.delete(first);
-		ArrayList<IntegerData> data = random(100);
+		ArrayList<DBInteger> data = random(100);
 		ArrayList<TID> tids = fill(drf, data);
-		IntegerData id = new IntegerData(0);
+		DBInteger id = new DBInteger(0);
 		assertThrows(idb.record.DeletedRecordException.class, () ->drf.read(first, id));
 	}
 
 	@Test
 	public void loadTestDeleted() throws Exception {
 		Random r = new Random(11);
-		ArrayList<IntegerData> data = random(10_000);
-		DirectRecordFile<TID, IntegerData> drf = generate(4096, new IntegerData(0));
+		ArrayList<DBInteger> data = random(10_000);
+		DirectRecordFile<TID, DBInteger> drf = generate(4096, new DBInteger(0));
 		ArrayList<TID> tids = fill(drf, data);
-		ArrayList<IntegerData> rData = new ArrayList<>();
+		ArrayList<DBInteger> rData = new ArrayList<>();
 		for (int i = 0 ; i < tids.size(); ++i){
 			if (r.nextBoolean()){
-				IntegerData ret = new IntegerData(0);
+				DBInteger ret = new DBInteger(0);
 				drf.read(tids.get(i), ret);
 				assertEquals(data.get(i).getValue(), ret.getValue());
-				rData.add(new IntegerData(data.get(i).getValue()));
+				rData.add(new DBInteger(data.get(i).getValue()));
 			} else {
 				drf.delete(tids.get(i));
 			}
@@ -546,15 +552,15 @@ public class TIDTest {
 		r.setSeed(11);
 		for (int i = 0 ; i < tids.size(); ++i){
 			if (r.nextBoolean()){
-				IntegerData ret = new IntegerData(0);
+				DBInteger ret = new DBInteger(0);
 				drf.read(tids.get(i), ret);
 				assertEquals(data.get(i).getValue(), ret.getValue());
 			} else {
 				TID tid = tids.get(i);
-				assertThrows(idb.record.DeletedRecordException.class, () -> drf.read(tid, new IntegerData(0)));
+				assertThrows(idb.record.DeletedRecordException.class, () -> drf.read(tid, new DBInteger(0)));
 			}
 		}
-		checkView(rData, drf.view(), (a, b) -> a.getValue() == b.getValue(), new IntegerData(0));
+		checkView(rData, drf.view(), (a, b) -> a.getValue() == b.getValue(), new DBInteger(0));
 	}
 
 	@Test
@@ -1071,9 +1077,9 @@ public class TIDTest {
 	@Test
 	public void extremeString() throws Exception {
 		DirectRecordFile<idb.datatypes.TID, DBString> drf = generate(4096, new DBString(""));
-		ArrayList<IntegerData> data = random(7_201);
+		ArrayList<DBInteger> data = random(7_201);
 		StringBuilder sb = new StringBuilder();
-		for(IntegerData i : data) {
+		for(DBInteger i : data) {
 			sb.append(i.getValue());
 		}
 		String res = sb.toString();
@@ -1162,4 +1168,44 @@ public class TIDTest {
 
 		checkView(List.of(new VariableRecord(75), new VariableRecord(190), new VariableRecord(400)), drf.view(), (a, b) -> a.size() == b.size(), new VariableRecord(0));
 	}
+
+	// @author Daniel Schuell, Tobias Heineken
+	@RepeatedTest(20)
+	public void randomModifyOversized(RepetitionInfo repInfo) throws Exception {
+		Random rng = new Random(repInfo.getCurrentRepetition() + 4091);
+		int recordCount = rng.nextInt(3) + 3;
+
+		// start with size=4 where all records initially fit in first block so all tids will be on one block
+		// and then increase so tids are spread across blocks until size=8000
+		int initialRecordSize = Integer.BYTES + (repInfo.getCurrentRepetition() - 1) * 8000 / repInfo.getTotalRepetitions();
+
+		DirectRecordFile<idb.datatypes.TID, UnguardedVariableRecord> drf = generate(4096, new UnguardedVariableRecord(3));
+		TID[] tids = new TID[recordCount];
+		UnguardedVariableRecord[] records = new UnguardedVariableRecord[recordCount];
+		for (int i=0; i < recordCount; ++i) {
+			records[i] = new UnguardedVariableRecord(initialRecordSize / Integer.BYTES);
+			tids[i] = drf.insert(records[i]);
+			needsDirty();
+			assertEquals(buf.getFix(), buf.getUnfix());
+			buf.reset();
+		}
+		final int iter_count = 300;
+		for (int i=0; i < iter_count; ++i) {
+			//TODO: reopen at r.nextInt(20) == 0
+			// check contents
+			checkView(List.of(records), drf.view(), (a, b) -> a.size() == b.size(), new UnguardedVariableRecord(0));
+			assertEquals(0, buf.getDirty());
+			assertEquals(buf.getFix(), buf.getUnfix());
+			buf.reset();
+			// change size of random record
+			int record = rng.nextInt(recordCount);
+			int newSize = Integer.BYTES + rng.nextInt(8000);
+			records[record] = new UnguardedVariableRecord(newSize / Integer.BYTES);
+			drf.modify(tids[record], records[record]);
+			needsDirty();
+			assertEquals(buf.getFix(), buf.getUnfix());
+			buf.reset();
+		}
+		checkView(List.of(records), drf.view(), (a, b) -> a.size() == b.size(), new UnguardedVariableRecord(0));
+	}
 }